From ebfb7f707a7ccd066213176d9385cd46830cb680 Mon Sep 17 00:00:00 2001 From: aliX Date: Fri, 5 Mar 2021 09:23:04 +1300 Subject: [PATCH] latest upstream merge (#179) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Use path instead of ident (#7809) * Add proper `commit_all` to `TestExternalities` (#7808) * Add proper `commit_all` to `TestExternalities` This pr adds a proper `commit_all` function to `TestExternalities` to commit all changes from the overlay to the internal backend. Besides that it fixes some bugs with handling empty dbs when calculating a delta storage root. It also changes the way data is added to the in memory backend. * Update primitives/state-machine/src/testing.rs Co-authored-by: cheme * Don't allow self proxies (#7803) * Allow council to slash treasury tip (#7753) * wk2051 | D4 |Allow council to slash treasury tip | p1 * Update frame/tips/src/lib.rs Co-authored-by: Xiliang Chen * wk2051 | D5 |Allow council to slash treasury tip | p2 * wk2051 | D5 |Allow council to slash treasury tip | p3 * wk2051 | D5 |Allow council to slash treasury tip | p4 * wk2051 | D5 |Allow council to slash treasury tip | p5 * random change * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_tips --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/tips/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fix typo * Update frame/tips/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/tips/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/tips/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/tips/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/tips/src/tests.rs 
Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * wk2052 | D1 | Allow council to slash treasury tip | p6 Co-authored-by: Xiliang Chen Co-authored-by: Shawn Tabrizi Co-authored-by: Parity Benchmarking Bot Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Review feedback * Review feedback * Update docs * More docs * Make it private * Use `None` * Use apply transaction * Update primitives/state-machine/src/testing.rs Co-authored-by: cheme Co-authored-by: Shawn Tabrizi Co-authored-by: RK Co-authored-by: Xiliang Chen Co-authored-by: Parity Benchmarking Bot Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Guillaume Thiolliere * Define ss58 prefix inside the runtime (#7810) * Add SS58Prefix type to the frame_system config trait * Remove unused chain_id runtime interface * remove some unecessary bound (#7813) * Fix ss58check test when executed with other tests (#7815) There was a bug that could make other ss58 tests fail when being executed with this one in parallel. This test changes the default ss58 version and if other tests are run at the time the default version is changed, they would fail. To fix this problem, we now run the actual test as a new process. * Cleanup some warnings (#7816) * client: cleanup redundant semicolon warnings * grandpa: remove usage of deprecated compare_and_swap * Happy new year (#7814) * Happy new year Updates the copyright years and fixes wrong license headers. * Fix the template * Split HEADER into HEADER-APACHE & HEADER-GPL * contracts: Allow runtime authors to define a chain extension (#7548) * Make host functions return TrapReason This avoids the need to manually store any trap reasons to the `Runtime` from the host function. 
This adds the following benefits: * It properly composes with the upcoming chain extensions * Missing to set a trap value is now a compile error * Add chain extension The chain extension is a way for the contract author to add new host functions for contracts to call. * Add tests for chain extensions * Fix regression in set_rent.wat fixture Not all offsets were properly updated when changing the fixtures for the new salt on instantiate. * Pre-charge a weight amount based off the specified length * Improve fn write docs * Renamed state to phantom * Fix typo * *: Update to libp2p v0.33.0 (#7759) * *: Update to libp2p v0.33.0 * client/network: Consistently track request arrival time With https://github.com/libp2p/rust-libp2p/pull/1886/ one is guaranteed to receive either a `ResponseSent` or a `InboundFailure` event for each received inbound request via `RequestResponseEvent::Message`. Given this guarantee there is no need to track arrival times in a best-effort manner and thus there is no need to use a LRU cache for arrival times. * client/offchain: Adjust to PeerId API changes * contracts: Lazy storage removal (#7740) * Do not evict a contract from within a call stack We don't want to trigger contract eviction automatically when a contract is called. This is because those changes can be reverted due to how storage transactions are used at the moment. More Information: https://github.com/paritytech/substrate/issues/6439#issuecomment-648754324 It can be re-introduced once the linked issue is resolved. In the meantime `claim_surcharge` must be called to evict a contract. * Lazily delete storage in on_initialize instead of when removing the contract * Add missing documentation of new error * Make Module::claim_surcharge public It being the only dispatchable that is private is an oversight. 
* review: Add final newline * review: Simplify assert statement * Add test that checks that partial remove of a contract works * Promote warning to error * Added missing docs for seal_terminate * Lazy deletion should only take AVERAGE_ON_INITIALIZE_RATIO of the block * Added informational about the lazy deletion throughput * Avoid lazy deletion in case the block is already full * Prevent queue decoding in case of an already full block * Add test that checks that on_initialize honors block limits * fix template (#7823) * rename HEADER files so that they are consistent with LICENSE filenames (#7825) * contracts: Prevent contracts from allocating a too large buffer (#7818) * Prevent contracts from allocating a too large buffer * Fix possible integer overflow * Improve error message on where clause on pallet error (#7821) * improve error message on where clause on pallet error * Revert "improve error message on where clause on pallet error" This reverts commit 5a3cc38976813fccef3357833553ce30f5b988ea. * Revert "Revert "improve error message on where clause on pallet error"" This reverts commit e3b3fca6bc4fa89816f80dbcb82dc4536a9b2549. 
* Feat sp keystore (#7826) * delete not used VRFTranscriptValue * specification variable naming * minor fix (#7828) * Participation Lottery Pallet (#7221) * Basic design * start adding tests * finish tests * clean up crates * use call index for match * finish benchmarks * add to runtime * fix * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_lottery --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/lottery/src/weights.rs --template=./.maintain/frame-weight-template.hbs * more efficient storage * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_lottery --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/lottery/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update lib.rs * Update bin/node/runtime/src/lib.rs * trait -> config * add repeating lottery * new benchmarks * fix build * move trait for warning * feedback from @xlc * add stop_repeat * fix * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_lottery --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/lottery/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Support static calls * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_lottery --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/lottery/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fix test * add loop to mitigate modulo bias * Update weights for worst case scenario loop * Initialize pot with ED * cargo run --release 
--features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_lottery --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/lottery/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Parity Benchmarking Bot * client/network: Use request response for block requests (#7478) * client/network: Add scaffolding for finality req to use req resp #sc * client/network/src/finality_requests: Remove * client/network/src/behaviour: Pass request id down to sync * client/network: Use request response for block requests * client/network: Move handler logic into *_*_handler.rs * client/network: Track ongoing finality requests in protocol.rs * client/network: Remove commented out finalization initialization * client/network: Add docs for request handlers * client/network/finality_request_handler: Log errors * client/network/block_request_handler: Log errors * client/network: Format * client/network: Handle block request failure * protocols/network: Fix tests * client/network/src/behaviour: Handle request sending errors * client/network: Move response handling into custom method * client/network/protocol: Handle block response errors * client/network/protocol: Remove tracking of obsolete requests * client/network/protocol: Remove block request start time tracking This will be handled generically via request-responses. 
* client/network/protocol: Refactor on_*_request_started * client/network: Pass protocol config instead of protocol name * client/network: Pass protocol config in tests * client/network/config: Document request response configs * client/network/src/_request_handler: Document protocol config gen * client/network/src/protocol: Document Peer request values * client/network: Rework request response to always use oneshot * client/network: Unified metric reporting for all request protocols * client/network: Move protobuf parsing into protocol.rs * client/network/src/protocol: Return pending events after poll * client/network: Improve error handling and documentation * client/network/behaviour: Remove outdated error types * Update client/network/src/block_request_handler.rs Co-authored-by: Ashley * Update client/network/src/finality_request_handler.rs Co-authored-by: Ashley * client/network/protocol: Reduce reputation on timeout * client/network/protocol: Refine reputation changes * client/network/block_request_handler: Set and explain queue length * client/service: Deny block requests when light client * client/service: Fix role matching * client: Enforce line width * client/network/request_responses: Fix unit tests * client/network: Expose time to build response via metrics * client/network/request_responses: Fix early connection closed error * client/network/protocol: Fix line length * client/network/protocol: Disconnect on most request failures * client/network/protocol: Disconnect peer when oneshot is canceled * client/network/protocol: Disconnect peer even when connection closed * client/network/protocol: Remove debugging log line * client/network/request_response: Use Clone::clone for error * client/network/request_response: Remove outdated comment With libp2p v0.33.0 libp2p-request-response properly sends inbound failures on connections being closed. 
Co-authored-by: Addie Wagenknecht Co-authored-by: Ashley * fix : remove `_{ }` syntax from benchmark macro (#7822) * commented use of common * hack to pass tests * another hack * remove all commented code * fix the easy tests * temp hack * follow through comma hack until better solution * patch macro * missed one * update benchmarks * update docs * fix docs * removed too much * fix changes Co-authored-by: Shawn Tabrizi * Improve spans of pallet macro (#7830) * fix spans * convert name to snake case * Fix master build (#7837) * Fix master build * Use correct copyright year * babe: expose next epoch data (#7829) * babe: expose next epoch data * babe: add runtime api for next_epoch * babe: avoid reading next authorities from storage unnecessarily * babe: add notes about epoch duration constraints * babe: guard against overflow * babe: add test for fetching current and next epoch data * contracts: Add configurable per-storage item cost (#7819) * Rework rent parameters * No need for empty_pair_count any longer * Parameterize runtime * upgrade a few dependencies (#7831) * upgrade a few dependencies * make it compile at the expense of duplicate deps * fix web-wasm and a warning * introduce activate-wasm-bindgen-features crate * Revert "introduce activate-wasm-bindgen-features crate" This reverts commit 5a6e41e683f8a4844c0a735dcd08caabb2313f11. * add getrandom feature to sc-consensus-aura * CI: remove squash and fix buildah push (#7841) * Fix incorrect use of syn::exports (#7838) * Fix incorrect use of syn::exports Instead of using `syn::exports` we should import the trait from the quote crate directly. * Use own macro for test cases to fix compilation with latest syn * Fix test * Subkey should not import the entire world. (#7845) There is no reason for subkey to import the default Substrate node to support a feature that would only be usable for the Substrate node. Subkey itself should be more the default key management binary for Substrate related chains. 
If certain chains require some special functionality, they can easily stick together their own "my-chain-key". * Rework priority groups, take 2 (#7700) * Rework priority groups * Broken tests fix * Fix warning causing CI to fail * [Hack] Try restore backwards-compatibility * Fix peerset bug * Doc fixes and clean up * Error on state mismatch * Try debug CI * CI debugging * [CI debug] Can I please see this line * Revert "[CI debug] Can I please see this line" This reverts commit 4b7cf7c1511f579cd818b21d46bd11642dfac5cb. * Revert "CI debugging" This reverts commit 9011f1f564b860386dc7dd6ffa9fc34ea7107623. * Fix error! which isn't actually an error * Fix Ok() returned when actually Err() * Tweaks and fixes * Fix build * Peerset bugfix * [Debug] Try outbound GrandPa slots * Another bugfix * Revert "[Debug] Try outbound GrandPa slots" This reverts commit d175b9208c088faad77d9f0ce36ff6f48bd92dd3. * [Debug] Try outbound GrandPa slots * Apply suggestions from code review Co-authored-by: Max Inden * Use consts for hardcoded peersets * Revert "Try debug CI" This reverts commit 62c4ad5e79c03d561c714a008022ecac463a597e. 
* Renames * Line widths * Add doc Co-authored-by: Max Inden * Better Handle Dead Accounts in Balances (#7843) * Don't mutate storage when account is dead and should stay dead * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * more concrete storage noop Co-authored-by: Parity Benchmarking Bot * bump fs-swap (#7834) * UniArts reserve SS58 address id 38 (#7651) * UniArts reserve SS58 address id 45 * Update ss58-registry.json Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Xiang Li Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> * Update to futures 0.3.9 (#7854) * Store dispatch info of calls locally in weight calculation (#7849) * utility * sudo * more * recovery * better formatting * client/network: Re-enable light_client_handler.rs unit tests (#7853) * Fix max log level (#7851) With the switch to tracing we did not set the `max_log_level` anymore. This resulted in a performance degradation as logging did not exit early and thus, `trace` logs were at least resolved every time. This pr fixes it by ensuring that we set the correct max log level. 
* Fix missing network for uniarts (#7859) * contracts: Collect rent for the first block during deployment (#7847) * Pay first rent during instantiation * Fix and add new tests * Do not increment trie id counter on failure * Merge 2.0.1 backport branch into mainline master (#7842) * Backport paritytech/substrate#7381 * Bring back genesis storage build in aura/timestamp To not change spec version, see https://github.com/paritytech/substrate/pull/7686#discussion_r540032743 * Backport paritytech/substrate#7238 * Backport paritytech/substrate#7395 * Bump impl_version * Fix UI tests and bump trybuild dep See https://github.com/rust-lang/rust/pull/73996 Backports: https://github.com/paritytech/substrate/pull/7764 https://github.com/paritytech/substrate/pull/7656 * Partially backport paritytech/substrate#7838 * Release frame-support with a dep compilation fix * Bump patch level for remaining crates This is done because at the time of writing cargo-unleash does not fully support partial workspace publishing and mixes both local and crates.io versions of the packages, leading to errors in the release check workflow. * Backport paritytech/substrate#7854 ...to fix compilation error when using futures-* v0.3.9. * Adding Changelog entry for patch release Co-authored-by: Bastian Köcher Co-authored-by: Benjamin Kampmann * Bump cargo-unleash to latest alpha release (#7867) * Bump sha2 from 0.8.2 to 0.9.2 (#7643) * Bump sha2 from 0.8.2 to 0.9.2 Bumps [sha2](https://github.com/RustCrypto/hashes) from 0.8.2 to 0.9.2. 
- [Release notes](https://github.com/RustCrypto/hashes/releases) - [Commits](https://github.com/RustCrypto/hashes/compare/sha2-v0.8.2...streebog-v0.9.2) Signed-off-by: dependabot[bot] * Fix compilation error Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Bastian Köcher * bumpd minor version (#7873) * Add Prometheus alerts if unbounded channels are too large (#7866) * Add Prometheus alerts if unbounded channels are too large * Tweaks * Bump retain_mut from 0.1.1 to 0.1.2 (#7869) Bumps [retain_mut](https://github.com/upsuper/retain_mut) from 0.1.1 to 0.1.2. - [Release notes](https://github.com/upsuper/retain_mut/releases) - [Commits](https://github.com/upsuper/retain_mut/compare/v0.1.1...v0.1.2) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> * babe: initialize next authorities on genesis (#7872) * babe: initialize next authorities on genesis * babe: add test for genesis authorities * Update serde and parity-multiaddr, to fix master CI (#7877) * Add ss58 version prefix for CORD (from Dhiway) (#7862) * Add ss58 version prefix for CORD * Add ss58 version prefix for CORD * network-gossip: add metric for number of local messages (#7871) * network-gossip: add metric for number of local messages * grandpa: fix GossipEngine missing metrics registry parameter * network-gossip: increase known messages cache size * network-gossip: fix tests * grandpa: remove unnecessary clone Co-authored-by: Max Inden * network-gossip: count registered and expired messages separately * network-gossip: add comment on known messages cache size * network-gossip: extend comment with cache size in memory Co-authored-by: Max Inden * Clean-up pass in network/src/protocol.rs (#7889) * Remove statistics system * Remove ContextData struct * Remove next_request_id * Some TryFrom nit-picking * Use constants for peer sets * contracts: Don't read the previous value when overwriting a 
storage item (#7879) * Add `len` function that can return the length of a storage item efficiently * Make use of the new len function in contracts * Fix benchmarks * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Remove unused imports Co-authored-by: Parity Benchmarking Bot * Fix clear prefix check to avoid erasing child trie roots. (#7848) * Fix clear prefix check to avoid erasing child trie roots. * Renaming and extend existing test with check. * last nitpicks. * CI: test prometheus alerts moved to check; deploy depends on tests; chore (#7887) * pallet_authority_discovery: introduce current_authorities and next_authorities methods (#7892) * split authorities discovery keys for the current and next session * Revert "split authorities discovery keys for the current and next session" This reverts commit 0a40b8b4c14e85d95357a27f6db30199cbe0aa4d. 
* pallet_authority_discovery: introduce a next_authorities method * address feedback * amend the doccomments * make helper error types generics (#7878) * make helper error types generics * avoid From dep in runner helper logic * slip of the pen, bump futures to 0.3.9 * more generics * generic var spaces Co-authored-by: Andronik Ordian * network-gossip: add metric for number of local messages (#7871) * network-gossip: add metric for number of local messages * grandpa: fix GossipEngine missing metrics registry parameter * network-gossip: increase known messages cache size * network-gossip: fix tests * grandpa: remove unnecessary clone Co-authored-by: Max Inden * network-gossip: count registered and expired messages separately * network-gossip: add comment on known messages cache size * network-gossip: extend comment with cache size in memory Co-authored-by: Max Inden * Clean-up pass in network/src/protocol.rs (#7889) * Remove statistics system * Remove ContextData struct * Remove next_request_id * Some TryFrom nit-picking * Use constants for peer sets * contracts: Don't read the previous value when overwriting a storage item (#7879) * Add `len` function that can return the length of a storage item efficiently * Make use of the new len function in contracts * Fix benchmarks * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Remove unused imports Co-authored-by: Parity Benchmarking Bot * Fix clear prefix check to avoid erasing child trie roots. (#7848) * Fix clear prefix check to avoid erasing child trie roots. * Renaming and extend existing test with check. * last nitpicks. 
* use follow paths to std standard components * line width Co-authored-by: Bernhard Schuster Co-authored-by: Andronik Ordian Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Max Inden Co-authored-by: Pierre Krieger Co-authored-by: Alexander Theißen Co-authored-by: Parity Benchmarking Bot Co-authored-by: cheme * Add payment_queryFeeDetails RPC (#7692) * Return FeeDetails in compute_fee_raw() * Add payment_queryDetails rpc * Simplify serde attribute a bit * Fix line width check * Use saturating_add() * Move transaction payment rpc types to types.rs * Add file header * Fix test * Update Cargo.lock * Nit * Apply the review suggestions * . * . * Fix serde * Fix rust doc * . * Update frame/transaction-payment/src/types.rs Co-authored-by: Guillaume Thiolliere * Use NumberOrHex in fee details RPC * Address review feedback * Nits * Update some docs * Address review * Update frame/transaction-payment/src/types.rs Co-authored-by: Guillaume Thiolliere * Happy 2021 * Nit * Address code review * Remove needless bound Co-authored-by: Guillaume Thiolliere * Use checked math when calculating storage size (#7885) * contracts: Cap the surcharge reward by the amount of rent that was paid by a contract (#7870) * Add rent_payed field to the contract info * Don't pay out more as reward as was spent in rent * Make successful evictions free * Add tests to check that surcharge reward is capped by rent paid * review: Fixed docs * Update the Grafana dashboards (#7886) * Log target before prefix for more consistent logging (#7897) * Log target before prefix for more consistent logging As requested, this moves the target before the prefix to have consistent logging between logs with and without a prefix. 
* Add a space * contracts: Fix failing benchmark test (#7900) * CI: trigger simnet master and wait for status (#7899) * CI: trigger simnet master and wait for status * chore: remove leftovers from chaosnet; remove flaming-fir deployment * Storage chains part 1 (#7868) * CLI options and DB upgrade * Transaction storage * Block pruning * Block pruning test * Style * Naming * Apply suggestions from code review Co-authored-by: Bastian Köcher * Apply suggestions from code review Co-authored-by: Bastian Köcher * Style Co-authored-by: Bastian Köcher * tests: fix UI test so we can update CI image (#7901) * tests: fix UI test so we can update CI image * CI: remove diener installation from the script as it's installed in CI image * tests: another fix * tests: fix another fix * tests: NLoEOF * tests: another broken stderr * *: Update to libp2p v0.34.0 (#7888) * *: Update to libp2p v0.34.0 * client/network: Update bytes, unsigned-varint and asynchronous-codec * client: Update to prost v0.7 * Fix bad debug_assert (#7904) * fix template (#7905) * improve benchmarking error output (#7863) * add concat Vec function and use it for better error logging in add_benchmark! macro * refactor benchmark error reporting to use format! and RuntimeString * Fix not restoring non-poisoned state (#7906) * Add Test for Variable Components in Benchmarking (#7902) * Adds a test for variable components * Clean up traces of common parameters which are removed now * fix cargo fmt (#7907) * Add ss58 version prefix for Patract/Jupiter (from PatractHubs) (#7785) * Introduces account existence providers reference counting (#7363) * Initial draft * Latest changes * Final bits. 
* Fixes * Fixes * Test fixes * Fix tests * Fix babe tests * Fix * Fix * Fix * Fix * Fix * fix warnings in assets * Fix UI tests * fix line width * Fix * Update frame/system/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/system/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Fix * fix unused warnings * Fix * Update frame/system/src/lib.rs Co-authored-by: Guillaume Thiolliere * Update frame/system/src/lib.rs Co-authored-by: Guillaume Thiolliere * Fix * fix slash and comprehensive slash test * fix reserved slash and comprehensive tests * check slash on non-existent account * Revert "Fix UI tests" This reverts commit e818dc7c0556baefe39b9cf3e34ff8546e96c590. * Fix * Fix utility tests * keep dispatch error backwards compatible * Fix * Fix * fix ui test * Companion checker shouldn't be so anal. * Fix * Fix * Fix * Apply suggestions from code review Co-authored-by: Alexander Popiak * Update frame/balances/src/lib.rs Co-authored-by: Alexander Popiak * return correct slash info when failing gracefully * fix missing import * Update frame/system/src/lib.rs Co-authored-by: Guillaume Thiolliere * Fix * Update frame/balances/src/tests_local.rs Co-authored-by: Guillaume Thiolliere * Fixes Co-authored-by: Shawn Tabrizi Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Popiak * fix clippy error (#7912) * Allow configuring Yamux window size (#7916) * Decouple Staking and Election - Part1: Support traits (#7908) * Base features and traits. * Fix the build * Remove unused boxing * Self review cleanup * Fix build * Feat support procedural (#7913) * fix clippy replace clone with copy * fix clippy warning pattern * fix clippy warning replace into_iter with iter * replace match with if let * replace =0 with is_empty * replace or with or_else * replace vec! with Vec::new * Expose BountyUpdatePeriod. 
(#7921) * Add ss58 version prefix for Litentry (#7918) * Address review comments of #7916 (#7917) * Update details for the Polymesh network (#7919) Co-authored-by: Adam Dossa * Update ss58 registry for Robonomics (#7923) * babe: log block and slot number on verification (#7920) * babe: log block and slot number on verification * babe: debug log formatting Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Add explicit limits to notifications sizes and adjust yamux buffer size (#7925) * Add explicit limits to notifications sizes and adjust yamux buffer size * Docfix * Tests * Document these 10 bytes * Freeze Assets and Asset Metadata (#7346) * Features needed for reserve-backed stablecoins * Builds & tests. * Double map for an efficient destroy. * Update frame/assets/src/lib.rs Co-authored-by: Nikolay Volf * ED/zombie-count/refs Feature: ED/minimum balance enforcement Feature: enforce zombie count Feature: allow system-alive accounts to exist, but add reference * Update frame/assets/src/lib.rs Co-authored-by: Nikolay Volf * Update frame/assets/Cargo.toml Co-authored-by: Niklas Adolfsson * Docs * Some tests * More tests * Allow for max_zombies to be adjusted * Test for set_max_zombies * Tests and a couple of fixes * First few benchmarks * Benchmarks. 
* Fix error message in test * Fixes * Fixes * Fixes * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_assets * Update frame/assets/src/lib.rs Co-authored-by: Guillaume Thiolliere * Fixes * Fixes * Fixes * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_assets * Fixes * Update default weight * Add proper verification to benchmarks * minor improvements to tests * Add `freeze_asset` and `thaw_asset` * Add metadata * fix build * Update benchmarks * fix line width * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_assets * update default weights * destroy cleans up metadata * more comprehensive lifecycle test * update docs * Update frame/assets/src/benchmarking.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Fix * New weights system * fix compile * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_assets --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/assets/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fix compile * fix up * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_assets --extrinsic=* --execution=wasm 
--wasm-execution=compiled --heap-pages=4096 --output=./frame/assets/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fixes to pallet compile * fix node build * remote diff artifacts * less diff * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_assets --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/assets/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update frame/assets/src/lib.rs * Update frame/assets/src/lib.rs * usize to u32 * missed some usize * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_assets --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/assets/src/weights.rs --template=./.maintain/frame-weight-template.hbs Co-authored-by: Gav Wood Co-authored-by: Nikolay Volf Co-authored-by: Niklas Adolfsson Co-authored-by: Parity Benchmarking Bot Co-authored-by: Guillaume Thiolliere Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Increase UnboundedChannelVeryLarge threshold from 5k to 15k (#7931) * Sync: Propagate block announcement data (#7903) * Sync: Propagate block announcement data This pr adds a feature to the sync protocol to propagate the data that we received alongside a block announcement. This is done by adding a cache that caches the last X block announcement data where X is set to the number of `in_peers` (giving every peer the chance to send us a different block). This will be required by parachains to ensure that even peers who are not connected to a collator receive the data alongside the block announcement to properly validate it and request the block. 
* Review comment * Bring back the code and add new variant to ensure we don't insert block announce data when something wasn't checked * Also use out_peers * Disable Nagle algorithm (#7932) * Disable Nagle algorithm * Oops, didn't compile * Migrate frame-system to pallet attribute macro (#7898) * PRINT_PALLET_UPGRADE=1 cargo check -p frame-system * Copy attributes, imports, mods and type defs * Copy Config trait * Annotate constants * Tabify * Migrate hooks * Upgrade template rename interface to hooks * Migrate pallet call * Migrate Event * Migrate Error * Migrate Origin * Remove optional validate_unsigned * Remove remaining TODO_MAYBE_WHERE_CLAUSE * Overwrite original lib.rs with migrated lib2.rs. * Add required Event IsType constraint * Add disable supertrait check * Fix leftover Trait trait * Add missing pallet prefix for weight attributes * Add missing Error type parameter * Add missing Hooks type parameter * Private call visibility, restore original helper types and helpers etc * Fix hooks type parameter * Rename RawEvent to Event * Add missing storage type annotations * Remove unused imports * Add GenesisConfig helpers for compat * Fix unused import warnings * Update frame/support/procedural/src/storage/print_pallet_upgrade.rs Co-authored-by: Guillaume Thiolliere * Fix test errors and warnings * Fix remaining errors and warnings * Apply review suggestion: fix formatting Co-authored-by: Guillaume Thiolliere * Apply review suggestion: annotate BlockLength as constant Co-authored-by: Guillaume Thiolliere * Apply review suggestion: add triling comma Co-authored-by: Guillaume Thiolliere * Apply review suggestion: add triling comma Co-authored-by: Guillaume Thiolliere * Apply review suggestion: add trailing comma Co-authored-by: Guillaume Thiolliere * Apply review suggestion: fix storage type indentation * Apply review suggestion: remove redundant Origin type alias * Add missing codec derives for BlockLength * Restore module docs * Module -> Pallet renamel * Revert 
"Update frame/support/procedural/src/storage/print_pallet_upgrade.rs" This reverts commit d2a2d5b6 * Apply review suggestion: merge crate imports Co-authored-by: Alexander Theißen * Revert "Upgrade template rename interface to hooks" This reverts commit 306f0239 * Single line import * Refactor generated genesis build * Import sp_io::storage * Revert previous, fully qualify sp_io::storage * Fix ui tests * Fix errors after merge, missing changes * Set UpgradedToDualRefCount to true in genesis build * Annotated Runtime version with constant, exposing it via metadata * Add metadata attribute Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Theißen * Telemetry per node (#7463) * Allow validators to block and kick their nominator set. (#7930) * Allow validators to block and kick their nominator set. * migration * Test * Better migration * Fixes * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update frame/staking/src/lib.rs Co-authored-by: Shawn Tabrizi Co-authored-by: Parity Benchmarking Bot Co-authored-by: Shawn Tabrizi * Fix elections-phragmen and proxy issue (#7040) * Fix elections-phragmen and proxy issue * remove TODO * Update bond to be per-vote * Update frame/elections-phragmen/src/lib.rs * Fix benchmakrs * Fix weight as well. * Add license * Make weight interpreted wasm! 🤦🏻‍♂️ * Remove a bunch of TODOs * Add migration * Better storage version. * Functionify. * Fix deposit scheme. * remove legacy bond. * Master.into() * better logging. * Fix benchmarking test * Fix confused deposit collection. * Add fine * Better name for storage item * Fix name again. 
* remove unused * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Guillaume Thiolliere * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Guillaume Thiolliere * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_elections_phragmen * new weight fns * Fix build * Fix line width * fix benchmakrs * fix warning * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_elections_phragmen * Tune the stake again * cargo run --release --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml -- benchmark --chain dev --steps 50 --repeat 20 --extrinsic * --execution=wasm --wasm-execution=compiled --output ./bin/node/runtime/src/weights --header ./HEADER --pallet pallet_elections_phragmen * All tests work again. * A large number of fixes. * more fixes. * Fix node build * Some fixes to benchmarks * Fix some warnings. 
* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_elections_phragmen --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/elections-phragmen/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_elections_phragmen --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/elections-phragmen/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Guillaume Thiolliere * a batch of review comments. * Fix a test. * Fix some more tests. * do migration with pallet version??? * Final touches. * Remove unused storage. * another rounds of changes and fixes. * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/elections-phragmen/src/lib.rs Co-authored-by: Shawn Tabrizi * Review grumbles. * Fix a bit more. * Fix build * Experimental: independent migration. * WIP: isolated migration logics * clean up. * make migration struct private and move migration to own file * add doc * fix StorageInstance new syntax * Update frame/elections-phragmen/src/migrations_3_0_0.rs Co-authored-by: Shawn Tabrizi * another round of self-review. * bit better formatting * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_elections_phragmen --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/elections-phragmen/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Fix tests. 
* Round of self-review * Clean migrations * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_elections_phragmen --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/elections-phragmen/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Revert unwanted change to construct-runtime Co-authored-by: Gavin Wood Co-authored-by: Guillaume Thiolliere Co-authored-by: Shawn Tabrizi Co-authored-by: Parity Benchmarking Bot * .maintain: Replace sentry-node with local-docker-test-network (#7943) Sentry nodes are deprecated. Thus there is no need for `.maintain/sentry-node` to spin up a sentry node test environment. Instead this commit rewrites the setup to contain two full-connected validators and one light client. With the steps below one can now spin up a local test network with two validators, one light-client, Prometheus and Grafana. - cargo build --release - sudo docker-compose -f .maintain/local-docker-test-network/docker-compose.yml up * Attempt to remove the `where` bounds in arithmetic. (#7933) * Attempt to remove the where bounds. * Fix further and further. * Format better. * Update primitives/npos-elections/src/lib.rs * fix build * remove unused * Minor contributor docs update (#7948) - Fixing Link - Clarify that no-force-push applies to Pull Requests, too. * Make offchain indexing work (#7940) * Make offchain indexing work This fixes some bugs with offchain indexing to make it actually working ;) * Fix tests * Fix browser build * Update client/db/src/offchain.rs Co-authored-by: cheme * Remove seperation between prefix and key Co-authored-by: cheme * Grandpa warp sync request-response protocol (#7711) * Made a start * So the proof between authority set is phragmen one, this is crazy big, or is there some signing of the result : that is the storage key, damn? * ok getting from header digest seems doable. 
* for testing * get set id from storage directly (should use runtime to handler change). * move test to init * correct auth key * fix iteration * Correct proof content * actually update block number. * actually check last justif against its header * justification relation to new authorities through header hash check is needed here. This assumes the hash from header is calculated. * Few changes * Connected up cheme's branch * Clean up * Move things around a bit so that adding the grandpa warp sync request response protocol happens in the node code * Nits * Changes to comments * Cheme changes * Remove todos and test compile. * Rename _authority_ related proof function to _warp_sync_ . * Update client/grandpa-warp-sync/src/lib.rs quick fix * Put the warp sync request response protocol behind a feature flag because we dont' need it on a light client. * Update client/grandpa-warp-sync/src/lib.rs Quick fix * Update Cargo.lock * Adding test, comment on limitation related to 'delay', this could be implemented but with a cost. * Set between a delay override last fragment. * Check for pending authority set change at start. * adjust index * custom cache is not a good idea. * Use a simple cache instead. 
* restore broken indentation * Address crate rename * Merge conflict badly resolved, sorry Co-authored-by: cheme Co-authored-by: Pierre Krieger * Cleaner GRANDPA RPC API for proving finality (#7339) * grandpa: persist block number for last block of authority set * grandpa: fix authority_set_changes field in tests * grandpa: fix date on copyright notice * grandpa-rpc: implement cleaner api for prove finality rpc * grandpa-rpc: replace the old prove_finality with the new one * grandpa: undo accidental whitespace change * grandpa-rpc: start work on redo of the finality_proof RPC API * grandpa: manual impl of Decode for AuthoritySet * grandpa: add comment about appending changes for forced changes * grandpa: flip order in set changes, tidy up some comments * grandpa: update some of the doc comments * grandpa: store authority set changes when applying forced changes * grandpa: simplify finality_proof.rs * grandpa: move checks and extend tests in finality_proof * grandpa: address first set of review comments * grandpa: check that set changes have well-defined start * grandpa: rework prove_finality and assocated tests * grandpa: make AuthoritySetChanges tuple struct * grandpa: add assertions for tracking auth set changes * grandpa: remove StorageAndProofProvider trait * grandpa: return more informative results for unexpected input to RPC * grandpa: tiny tweak to error msg * grandpa: fix tests * grandpa: add error specific to finality_proof * grandpa: fix review comments * grandpa: proper migration to new AuthoritySet * grandpa: fix long lines * grandpa: fix unused warning after merge Co-authored-by: André Silva * Allow transaction for offchain indexing (#7290) * Moving offchain change set to state machine overlay change set, preparing use of change set internally. * Make change set generic over key and value, and use it for offchain indexing. * test ui change * remaining delta * generating with standard method * Remove 'drain_committed' function, and documentation. 
* Default constructor for enabling offchain indexing. * Remove offchain change specific iterators. * remove pub accessor * keep previous hierarchy, just expose iterator instead. * Update primitives/state-machine/src/overlayed_changes/mod.rs Co-authored-by: Tomasz Drwięga * fix line break * missing renamings * fix import * fix new state-machine tests. * Don't expose InnerValue type. * Add test similar to set_storage. * Remove conditional offchain storage (hard to instantiate correctly). * fix * offchain as children cannot fail if top doesn't Co-authored-by: Addie Wagenknecht Co-authored-by: Tomasz Drwięga * Enable sync mode for paritydb (#7961) * Very minor typo in the docs (#7967) Found this very minor typo when browsing the docs. * Remove hidden item NonExhaustive in syn crate (#7969) * Detect conflicting module names in `construct_runtime!` (#7968) * pallet minor doc improvment (#7922) * doc improvment * additional fixes * another fix * better code suggestion * Apply suggestions from code review Co-authored-by: David * Apply suggestions from code review Co-authored-by: Alexander Popiak * Apply suggestions from code review Co-authored-by: Alexander Popiak * apply suggestion * apply suggestion * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * apply suggestion * better guideline on reexport * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * apopiak suggestion * clearer check suggestion * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak * Update frame/support/src/lib.rs Co-authored-by: Alexander Popiak Co-authored-by: David Co-authored-by: Alexander Popiak * Make pallets use 
construct_runtime (#7950) Co-authored-by: Bastian Köcher Co-authored-by: David * client/network/req-resp: Prevent request id collision (#7957) * client/network/req-resp: Add unit test for request id collision * client/network/req-resp: Prevent request id collision `RequestId` is a monotonically increasing integer, starting at `1`. A `RequestId` is unique for a single `RequestResponse` behaviour, but not across multiple `RequestResponse` behaviours. Thus when handling `RequestId` in the context of multiple `RequestResponse` behaviours, one needs to couple the protocol name with the `RequestId` to get a unique request identifier. This commit ensures that pending requests (`pending_requests`) and pending responses (`pending_response_arrival_time`) are tracked both by their protocol name and `RequestId`. * client/network/req-resp: Remove unused import * client/network/req-resp: Introduce ProtocolRequestId struct * client/network/req-resp: Update test doc comment Treat `RequestId` as an opaque type. 
* client/network/req-resp: Improve expect proof * use construct_runtime for more pallets (#7974) * Migrate some more pallets to construct_runtime (#7975) * WIP converting balances tests to construct_runtime * Converting balances tests_local to construct_runtime * Fix up system and balances Events * Use static Call instance in tests * Migrate indices to construct_runtime * Migrate babe test to construct_runtime * Update frame/indices/src/mock.rs Co-authored-by: Guillaume Thiolliere * Update frame/babe/src/mock.rs Co-authored-by: Guillaume Thiolliere * Update frame/babe/src/mock.rs Co-authored-by: Bastian Köcher * Remove redundant import Co-authored-by: Guillaume Thiolliere Co-authored-by: Bastian Köcher * Pallet proc macro doc improvements (#7955) * Fix weight syntax in comments * Mention to add `IsType` bound * Link to subsee * Fix link * Update frame/support/procedural/src/pallet/parse/call.rs Co-authored-by: David * Apply review suggestion from @dvdplm, make StorageInstance doc link * fix ui test Co-authored-by: David Co-authored-by: thiolliere * Refuse to start substrate without providing an explicit chain (#7977) * client/network: Report reputation changes via response (#7958) * client/network: Report reputation changes via response When handling a request by a remote peer in a request response handler, one might want to in- or de-crease the reputation of the peer. E.g. one might want to decrease the reputation slightly for each request, given that it forces the local node to do work, or one might want to issue a larger reputation change due to a malformed request by the remote peer. Instead of having to pass a peerset handle to each request response handler, this commit suggests to allow handlers to issue reputation changes via the provided `pending_response` `oneshot` channel.
A reputation change issued by a request response handler via the `pending_response` channel is received by the `RequestResponsesBehaviour` which passes the reputation change up as an event to eventually be send to a peerset via a peerset handle. * client/network/req-resp: Use Vec::new instead of None::> * client/network: Rename Response to OutgoingResponse Given that a request-response request is not called `Request` but `InomingRequest`, rename a request-response response to `OutgoingResponse`. * client/finality-grandpa-warp: Send empty rep change via response * Migrate pallet-template to pallet attribute macro (#7981) * Converting pallet-template to Framev2 macro's * Add newline * Convert all indents to tabs * Update bin/node-template/pallets/template/src/lib.rs * Update bin/node-template/pallets/template/src/lib.rs Co-authored-by: Guillaume Thiolliere * Fix Network trait implementation not doing what it's supposed to do (#7985) * Make pallet use construct_runtime in tests instead of impl_* (#7986) * Rewrite the async code in `BasicQueue` (#7988) * Rewrite the async code in `BasicQueue` This is some smaller change to rewrite the async code in `BasicQueue`. I require this for some other pr I'm working on ;) * Update primitives/consensus/common/src/import_queue/basic_queue.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update primitives/consensus/common/src/import_queue/basic_queue.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update primitives/consensus/common/src/import_queue/basic_queue.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Hmm :D Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * grandpa: remove runtime checks in prove_finality (#7953) Remove checks that involve cross checking authorities in the runtime against what we have stored in the AuthoritySetChanges. * Ensure transactional with ? 
works in frame v2 (#7982) * Increase maximum size of transaction notifications (#7993) * Let mock in pallet-template use construct_runtime (#7991) * Fix state cache for cumulus (#7990) * Fix state cache for cumulus * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Merkle Mountain Range pallet improvements (#7891) * Add stateless verification helper function. * Split MMR primitives. * Add RuntimeAPI * RuntimeAPI with OpaqueLeaves * Bump spec_version. * Move primitives back to frame. * Fix OpaqueLeaf encoding. * Add block number to frame_system implementation of LeafDataProvider. * Relax leaf codec requirements and fix OpaqueLeaf * Add root to debug line. * Apply suggestions from code review Co-authored-by: Hernando Castano * Typo. Co-authored-by: Hernando Castano * Introduce a `Slot` type (#7997) * Introduce a `Slot` type Instead of having some type definition that only was used in half of the code or directly using `u64`, this adds a new unit type wrapper `Slot`. This makes it especially easy for the outside api to know what type is expected/returned. * Change epoch duration * rename all instances of slot number to slot * Make the constructor private Co-authored-by: André Silva * Clarify and expand ProvideInherent docs (#7941) * Clarify and expand docs. * clarify that a pallet can verify an inherent without providing one. * Clarify what calls `is_inherent_required`.
* caution and link to issue * typo * Apply suggestions from code review Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Introduce sc_peerset::DropReason (#7996) * Introduce sc_peerset::DropReason * Fix peerset tests * Doc fixes for sc-telemetry & API struct rename (#7934) * Doc fixes for sc-telemetry * Fix flag to disable log reloading * Forgot to reverse the conditions * Apply suggestion * Rename pattern to directives * Rename GlobalLoggerBuilder to LoggerBuilder * Return instead of expect * Use transparent outside the enum * Update client/tracing/src/logging/directives.rs Co-authored-by: Bastian Köcher Co-authored-by: Bastian Köcher * Fix telemetry span not entering properly & enter span in sc-cli (#7951) * WIP * WIP * Test * bug fix * WIP * Revert "WIP" This reverts commit 4e51e9adfdf0dc7cf37b562b60a0e83ca1d0b00d. * doc * Improve comment on why all spans are preserved * Added missing suggestion from previous PR * Use BoxFuture * Move TelemetrySpan creation to sc-cli, need to test... * Test code * Adapt user code * Revert "Test code" This reverts commit 333806b2fe1626efaa2691f9f44d0b4dd979bc36. * Update client/service/src/task_manager/mod.rs Co-authored-by: David * Better & simpler solution Co-authored-by: David * Update parity-scale-codec to 2.0 (#7994) * update cargo.toml * use 2.0 in mmr * Test is_inherent_required (#8002) * () * master.into() * Update frame/support/src/inherent.rs Co-authored-by: Bastian Köcher * address comment Co-authored-by: Bastian Köcher * Sync: Fix issue of not freeing a block announcement slot (#8006) * Sync: Fix issue of not freeing a block announcement slot There was a bug that when the block announcement validation returned an error, the slot reserved for this validation wasn't freed. This could lead to a situation where we rejected any block announcement from such a peer for which the block announcement returned an error multiple times.
* Better logging * Fuck I'm dumb * :facepalm: * CI: return flaming fir deployment (#8007) * CI: return flaming-fir deployment jobs * CI: no need in manual jobs; 'updated image' * chore: fix typos (#8013) * Fix tracing spans are not being forwarded to spawned task (#8009) * Fix tracing spans are not being forwarded to spawned task There is a bug that tracing spans are not forwarded to spawned task. The problem was that only the telemetry span was forwarded. The solution to this is to use the tracing provided `in_current_span` to capture the current active span and pass the telemetry span explicitly. We will now always enter the span when the future is polled. This is essentially the same strategy as tracing is doing with its `Instrumented`, but now extended for our use case with having multiple spans active. * More tests * make AllModules public (#8017) * make AllModules public * add doc comments for AllModules * client/network: Use request response for light client requests (#7895) * client/network: Re-enable light_client_handler.rs unit tests * client/network: Add scaffolding for light client using req-resp * client/network: Make it compile * client/network: Rename OutEvent SendRequest * client/network: Restructure light client request client and handler * client/network: Rename light client request client to sender * client/network: Remove light client prepare_request * client/network/src/light: Rework configuration * client/network: Formatting * client/network/light: Remove RequestId * client/network/light: Make request functions methods * client/network/light: Refactor request wrapping * client/network/light: Fix warnings * client/network/light: Serialize request in method * client/network/light: Make returning response a method * client/network/light: Depend on request response to timeout requests * client/network: Fix test compilation * client/network/light: Re-enable connection test * client/network/light: Re-enable timeout test * client/network/light:
Re-enable incorrect_response test * client/network/light: Re-enable wrong_response_type test * client/network/light: Re-enable retry_count_failures test * client/network/light: Re-enable issue_request tests * client/network/light: Re-enable send_receive tests * client/network/light: Deduplicate test logic * client/network/light: Remove unused imports * client/network/light: Handle request failure * client/network/light: Move generate_protocol_config * client/network: Fix test compilation * client/network: Rename light client request client to sender * client/network: Handle too-many-requests error * client/network: Update outdated comments * client/network/light: Choose any peer if none has best block defined * .maintain: Replace sentry-node with local-docker-test-network Sentry nodes are deprecated. Thus there is no need for `.maintain/sentry-node` to spin up a sentry node test environment. Instead this commit rewrites the setup to contain two full-connected validators and one light client. With the steps below one can now spin up a local test network with two validators, one light-client, Prometheus and Grafana. - cargo build --release - sudo docker-compose -f .maintain/local-docker-test-network/docker-compose.yml up * client/network/light: Handle oneshot cancellation * client/network/light: Do not reduce retry count on missing peer * client/network/request-response: Assert in debug request id to be unique * client/network/light: Choose same limit as block request protocol * client/network: Report reputation changes via response Allow request response protocol handlers to issue reputation changes, by sending them back along with the response payload. 
* client/network: Remove resolved TODOs * relax translate closure to FnMut (#8019) * Remove outdated Grafana information (#8012) * contracts: Emit event on contract termination (#8014) * contracts: Remove redundant bool argument from the eviction event * contracts: Improve event documentation * contracts: Emit event on contract termination * Fix tracing tests (#8022) * Fix tracing tests The tests were not working properly. 1. Some test was setting a global subscriber, this could lead to race conditions with other tests. 2. A logging test called `process::exit` which is completely wrong. * Update client/tracing/src/lib.rs Co-authored-by: David * Review comments Co-authored-by: David * contracts: Improve documentation (#8018) * contracts: Document seal_input * contracts: Improve `ReturnCode` docs. * contracts: Improve seal_restore_to docs * review: Improved wording * Disable Kademlia random walk when --reserved-nodes is passed (#7999) * Disable Kademlia random walk when --reserved-nodes is passed * Update client/network/src/discovery.rs Co-authored-by: Roman Borschel Co-authored-by: Roman Borschel * Convert AURA to new pallet macro (#8020) * Rename system_networkState to system_unstable_networkState (#8001) * Decouple the session validators from im-online (#7127) * Decouple the session validators from im-online * .
* Add SessionInterface trait in im-online Add ValidatorId in im-online Trait Make im-online compile Make substrate binary compile * Fix merging issue * Make all compile * Fix tests * Avoid using frame dep in primitives via pallet-session-common * Merge ValidatorSet into SessionInterface trait Wrap a few too long lines Add some docs * Move pallet-session-common into pallet-session * Move SessionInterface to sp-session and impl it in session pallet Ref https://github.com/paritytech/substrate/pull/7127#discussion_r494892472 * Split out historical::FullValidatorIdentification trait * Fix line width * Fix staking mock * Fix session doc test * Simplify >::ValidatorId as ValidatorId * Nits * Clean up. * Make it compile by commenting out report_offence_im_online bench * Tests * Nits * Move OneSessionHandler to sp-session * Fix tests * Add some docs * . * Fix typo * Rename to ValidatorSet::session_index() * Add some more docs * . * Remove extra empty line * Fix line width check . * Apply suggestions from code review * Cleanup Cargo.toml * Aura has migrated to Pallet now Co-authored-by: Tomasz Drwięga * better formatting for doc comments (#8030) * Add a send_request function to NetworkService (#8008) * Add a `send_request` to `NetworkService`. This function delivers responses via a provided sender and also allows for sending requests to currently not connected peers. * Document caveats of send_request better. * Fix compilation in certain cases. * Update docs + introduce IfDisconnected enum for more readable function calls. * Doc fix. * Rename send_request to detached_request. * Whitespace fix - arrrgh * Update client/network/src/service.rs spaces/tabs Co-authored-by: Pierre Krieger * Update client/network/src/request_responses.rs Documentation fix Co-authored-by: Roman Borschel * Update client/network/src/service.rs Typo. Co-authored-by: Roman Borschel * Update client/network/src/service.rs Better docs.
Co-authored-by: Roman Borschel * Update client/network/src/service.rs Typo. Co-authored-by: Roman Borschel * Update client/network/src/service.rs Doc improvements. Co-authored-by: Roman Borschel * Remove error in logs on dialing a peer. This is now valid behaviour. * Rename detached_request to start_request. As suggested by @romanb. * Fix merged master. * Fix too long lines. Co-authored-by: Pierre Krieger Co-authored-by: Roman Borschel * Storage chains: serve transactions over IPFS/bitswap (#7963) * IPFS server for transactions * Style * Indent * Log message * CLI option * Apply suggestions from code review Co-authored-by: Pierre Krieger * Style * Style * Minor fixes Co-authored-by: Pierre Krieger * Improve log line (#8032) Co-authored-by: parity-processbot <> * Export `IfDisconnected` in public module. (#8034) * frame-system: Index type 'MaybeSerializeDeserialize' bound. (#8035) * contracts: Make ChainExtension trait generic over the runtime (#8003) * AURA: Switch to `CurrentSlot` instead of `LastTimestamp` (#8023) * Convert AURA to new pallet macro * AURA: Switch to `CurrentSlot` instead of `LastTimestamp` This switches AURA to use `CurrentSlot` instead of `LastTimestamp`. * Add missing file * Update frame/aura/src/migrations.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Remove the runtime side provide inherent code * Use correct weight * Add TODO * Remove the Inherent from AURA * :facepalm: * Remove unused stuff Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * contracts: Charge rent for code storage (#7935) * contracts: Implement refcounting for wasm code * contracts: Charge rent for code storage * contracts: Fix dispatchables erroneously refunding base costs * Fixed typos in comments. 
Co-authored-by: Andrew Jones * Remove awkward empty line * Fix more typos in docs * Fix typos in docs Co-authored-by: Andrew Jones * Split up complicated expression Co-authored-by: Andrew Jones * review: Remove unused return value * Fix typos Co-authored-by: Andrew Jones * review: Fix refcount being reset to one on re-instrumentation * Document evictable_code parameter * Make Executable::execute consume and store itself * Added comments about stale values * Disregard struct size in occupied_storage() Co-authored-by: Andrew Jones * Migrate assets pallet to new macros (#7984) * Prep: move things around to suggested order * Compiles, tests pass * cleanup * cleanup 2 * Fix dead doc-links * Add back documentation for storage items * Switch benchmarks to use `Event` rather than `RawEvent`. * Update frame/assets/src/lib.rs Co-authored-by: Guillaume Thiolliere * review feedback * Obey line length checks Co-authored-by: Guillaume Thiolliere * babe, grandpa: cleanup stale equivocation reports (#8041) * grandpa: check equivocation report staleness on `validate_unsigned` * babe: check equivocation report staleness on `validate_unsigned` * node: bump spec_version * babe, grandpa: remove duplicate call destructuring * move some pallet test to use construct_runtime (#8049) * migrate some more pallets * revert example-offchain-worker as not straightforward * fix mmr * Migrate more pallet tests to construct_runtime (#8051) * Migrate bounties tests to use construct_runtime * Migrate contracts tests to use construct_runtime * Migrate democracy tests to use construct_runtime * review: rename TreasuryEvent -> TreasuryError * Update dependencies ahead of next release (#8015) Updates dependencies: parity-db 0.2.2 paste prometheus 0.11 cfg-if 1.0 strum 0.20 env_logger 0.8 pin-project prost nix platforms quickcheck 1.0 * transaction-pool: drop unpropagable txs if local node can't author blocks (#8048) * transaction-pool: drop unpropagable txs if local node can't author blocks * fix test
compilation * transaction-pool: remove unnecessary static bound on CanAuthor Co-authored-by: Tomasz Drwięga * rpc-api: add translation for PoolError::Unactionable * transaction-pool: add test for rejecting unactionable transactions * basic-authorship: fix doc test * transaction-pool: fix benchmark compilation * transaction-pool: rename CanAuthor to IsValidator * transaction-pool: nit in error message Co-authored-by: Tomasz Drwięga * Fix some problems with `prove_warp_sync` (#8037) * Fix some problems with prove_warp_sync * Update client/finality-grandpa/src/finality_proof.rs Co-authored-by: cheme Co-authored-by: cheme * CheckSpecVersion reference fix (#8056) * CheckSpecVersion reference fix * Update frame/example/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Guillaume Thiolliere Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * contracts: Remove ConfigCache (#8047) * contracts: Remove ConfigCache * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Fixup test Co-authored-by: Parity Benchmarking Bot * Extend SS58 network identifiers (#8039) * Initial draft * Fixes * Fixes * Fixes * Fixes * Fixes * Improve readability, add format filter. * Link * Fixes * Update primitives/core/src/crypto.rs Co-authored-by: Bastian Köcher * Suggestions from review Co-authored-by: Bastian Köcher * Use construct_runtime in tests (#8059) * impl some more * add serde * remove unused * fix staking fuzz * fix system bench Co-authored-by: Shawn Tabrizi * Bump wasmtime from 0.19.0 to 0.22.0 (#7865) * Bump wasmtime from 0.19.0 to 0.22.0 Bumps [wasmtime](https://github.com/bytecodealliance/wasmtime) from 0.19.0 to 0.22.0. 
- [Release notes](https://github.com/bytecodealliance/wasmtime/releases) - [Changelog](https://github.com/bytecodealliance/wasmtime/blob/main/docs/WASI-some-possible-changes.md) - [Commits](https://github.com/bytecodealliance/wasmtime/compare/v0.19.0...v0.22.0) Signed-off-by: dependabot[bot] * Account for ImportType::name() being an Optional * Account for parameters being a impl Iterator now Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Alexander Theißen Co-authored-by: Bastian Köcher * Remove backwards-compatibility networking hack (#8068) * Remove backwards-compatibility networking hack * Fix compilation * Try fix * Switch to latest `impl-trait-for-tuples` (#8082) Switches to the latest version everywhere now, as I fixed the problems in the crate ;) * Add Crust Network SS58 Address (#8064) * Add Crust Address Format * Add Crust Address Format * Delete extra ss58 json info * Add Ares SS58 address type (#8061) * Replace last usages of `<() as PalletInfo>` in substrate (#8080) * replace last occurences * Update frame/support/src/traits.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/support/test/src/lib.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix dispatch test * move PanicPalletInfo to tests module Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Remove PalletInfo impl for () (#8090) * WasmExecutor takes a cache directory (#8057) That is useful for executors like wasmtime which produces compiled code and can actually benefit from caching under some circumstances * Add Aventus ss58 address (#8050) * Update crypto.rs * Update ss58-registry.json * quote fields * Update ss58-registry.json * Update crypto.rs * Update ss58-registry.json * Update ss58-registry.json * Update ss58-registry.json * Update crypto.rs * Update ss58-registry.json * sc-network: switch on default features for libp2p on non-wasm-builds 
(#8088) * pallet macro: easier syntax for `#[pallet::pallet]` with `struct Pallet(_)` (#8091) * Pallet attribute macro migrate guidelines minor fixes (#8094) * Fix pallet attribute macro guidelines. * Typo fixes. * Switch to use `diener patch` for companion build (#8073) This switches to the new `diener patch` command to patch all Substrate crates in Polkadot. This should remove the requirement to manually merge Substrate master to make the companion build, as we now would use the already with master merged code from this build job local checkout. * Migrate pallet-balances to pallet attribute macro (#7936) * Initial migration of balances pallet * Fix some errors * Remove unused imports * Formatting and removing some todos * Delete Subtrait * Add genesis builder impls for tests * Fix GenesisConfig impl * Make set_balance visible to tests, rename RawEvent to Event * Fix tests with Event rename etc. * More test RawEvent renames * Even more RawEvent renames * Rename module to pallet in comments * Add PalletInfo impl to avoid storage collision, fixes tests * Apply review suggestion: remove trailing a Co-authored-by: David * BalancesEvent alias * Remove BalancesEvent alias * Review suggestion: remove redundant comment * Apply review suggestion: make vis super * Fix doc links * Add RawEvent alias * Add missing Instance parameter to deprecated RawEvent alias * Fix RawEvent deprecation warnings Co-authored-by: David * babe, grandpa: set longevity for equivocation report transactions (#8076) * babe: set longevity for equivocation report transactions * grandpa: set longevity for equivocation report transaction * babe, grandpa: fix tests * node: add ReportLongevity to babe and grandpa modules * node: bump spec_version * Migrate pallet-timestamp to pallet attribute macro. (#8078) * Migrate pallet-timestamp to pallet attribute macro. * Migrate inherent. * Unify private visibility. * Update benchmarking. * Update storage usages. 
* Use log level error to report telemetry (#8097) This fix the issue when running the node with -lwarn, the telemetry cannot be initialized properly. * Releasing 3.0 (#8098) * bumping version for next release * add changelog * add guide * [CI] Move check_labels to github actions (#8099) * move lib.sh to common dir * make check-labels a github action workflow * Update sc-finality-grandp-warp-sync to 0.9.0 and remove 'publish = false' (#8109) * Remove all code related to sentry nodes (#8079) * Remove all code related to sentry nodes * More fixing * Add code blocks to doc diagrams (#8118) * Simplify runtime api error handling (#8114) * Ahh * Work work work * Fix all the compilation errors * Fix test * More fixes... * CI: temp. allow cargo deny to fail (#8122) * Update Grafana dashboards (#8127) * grandpa: make the VotingRule API async (#8101) * grandpa: make the VotingRule api async * grandpa: add docs to VotingRuleResult * grandpa: formatting * grandpa: use async blocks Co-authored-by: Bastian Köcher * grandpa: expose VotingRuleResult * grandpa: revert some broken changes to async syntax * grandpa: use finality-grandpa v0.14.0 * grandpa: bump impl_version Co-authored-by: Bastian Köcher * Create a macro which automates creation of benchmark test suites. (#8104) * Create a macro which automates creation of benchmark test suites. * bump impl_version * allow unused on test_bench_by_name * use proper doctest ignore attribute * Explicitly hand the Module to the test suite Much better practice than depending on it showing up implicitly in the namespace. * explicitly import what we need into `mod tests` * bench_module is `ident` not `tt` Co-authored-by: Guillaume Thiolliere * allow end users to specify arguments for new_test_ext This turned out to be surprisingly easy. On reflection, it turns out that of course the compiler can't eagerly evaluate the function call, but needs to paste it in everywhere desired. 
* enable explicitly specifying the path to the benchmarks invocation also enable optional trailing commas * Revert "bump impl_version" This reverts commit 0209e4de33fd43873f8cfc6875815d0fd6151e63. * list failing benchmark tests and the errors which caused the failure * harden benchmark tests against internal panics * suppress warning about ignored profiles unfortunately, setting the profile here doesn't do anything; we'd need to set it in every leaf package anyway. However, as this was just making the default explicit anyway, I think it's safe enough to remove entirely. * impl_benchmark_test_suite for assets * impl_benchmark_test_suite for balances * impl_benchmark_test_suite for bounties * impl_benchmark_test_suite for Collective * impl_benchmark_test_suite for Contracts * impl_benchmark_test_suite for Democracy * don't impl_benchmark_test_suite for Elections-Phragmen * impl_benchmark_test_suite for Identity Note that Identity tests currently fail. They failed in an identical way before this change, so as far as I'm concerned, the status quo is good enough for now. * impl_benchmark_test_suite for ImOnline * impl_benchmark_test_suite for indices For this crate also, the test suite fails identically with and without this change, so we can say that this change is not the cause of the tests' failure to compile. * impl_benchmark_test_suite for lottery * impl_benchmark_test_suite for merkle-mountain-range * impl_benchmark_test_suite for Multisig These tests fail identically with and without the change, so the change seems unlikely to be the origin of the failures. * impl_benchmark_test_suite for offences * impl_benchmark_test_suite for Proxy Fails identically with and without this change. * impl_benchmark_test_suite for scheduler * impl_benchmark_test_suite for session It turns out to be important to be able to exclude items marked `#[extra]` sometimes. Who knew? 
* impl_benchmark_test_suite for staking * impl_benchmark_test_suite for system * impl_benchmark_test_suite for timestamp * impl_benchmark_test_suite for tips * impl_benchmark_test_suite for treasury * impl_benchmark_test_suite for utility Note that benchmark tests fail identically before and after this change. * impl_benchmark_test_suite for vesting * fix wrong module name in impl_benchmark_test_suite in Offences * address line length nits * enable optional keyword argument: exec_name Took a _lot_ of macro-wrangling to get the functionality that I want, but now you have the option to pass in ```rust impl_benchmark_test_suite!( Elections, crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), crate::tests::Test, exec_name = build_and_execute, ); ``` and have it expand out properly. A selected fragment of the expansion: ```rust fn test_benchmarks() { crate::tests::ExtBuilder::default() .desired_members(13) .desired_runners_up(7) .build_and_execute(|| { ``` * get rid of dead code Co-authored-by: Guillaume Thiolliere * Move dust collection hook to outside of account mutate (#8087) * Move dust collection hook to outside of account mutate * Fix dust cleanup in nested mutates. 
* Fixes * Fixes * Apply suggestions from code review Co-authored-by: Guillaume Thiolliere * dust removal reentrancy test case integration (#8133) * dust removal reentrancy test case integration * Update frame/balances/src/tests_reentrancy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/balances/src/tests_reentrancy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/balances/src/tests_reentrancy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/balances/src/tests_reentrancy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * Update frame/balances/src/tests_reentrancy.rs Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * dust removal reentrancy test case integration | removed dependency on tests.rs * dust removal reentrancy test case integration | format correction * dust removal reentrancy test case integration | format correction Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Guillaume Thiolliere Co-authored-by: RK Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> * fix deprecated usage of panic (#8134) * Remove inherent in pallet-babe (#8124) * Fix telemetry span not entering properly attempt 3 (#8043) * Fix tracing tests (#8022) * Fix tracing tests The tests were not working properly. 1. Some test was setting a global subscriber, this could lead to race conditions with other tests. 2. A logging test called `process::exit` which is completely wrong. * Update client/tracing/src/lib.rs Co-authored-by: David * Review comments Co-authored-by: David * Fix tracing spans are not being forwarded to spawned task (#8009) * Fix tracing spans are not being forwarded to spawned task There is a bug that tracing spans are not forwarded to spawned task. The problem was that only the telemetry span was forwarded. 
The solution to this is to use the tracing provided `in_current_span` to capture the current active span and pass the telemetry span explictely. We will now always enter the span when the future is polled. This is essentially the same strategy as tracing is doing with its `Instrumented`, but now extended for our use case with having multiple spans active. * More tests * Proper test for telemetry and prefix span * WIP * Fix test (need to create & enter the span at the same time) * WIP * Remove telemtry_span from sc_service config * CLEANUP * Update comment * Incorrect indent * More meaningful name * Dedent * Naming XD * Attempt to make a more complete test * lint * Missing licenses * Remove user data * CLEANUP * Apply suggestions from code review Co-authored-by: Bastian Köcher * CLEANUP * Apply suggestion * Update bin/node/cli/tests/telemetry.rs Co-authored-by: David * Wrapping lines Co-authored-by: Bastian Köcher Co-authored-by: David * Unbreak browser test CI (#8149) * Benchmark macro: Allow multiple bounds in where (#8116) * Make it clear in CLI that paritydb is experimental (#8152) * Make it clear in CLI that paritydb is experimental Sadly this is a breaking change for the CLI. * Update client/cli/src/params/database_params.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Extract transactions handling from protocol.rs (#8110) * Extract transactions handling from protocol.rs * Oops, boolean * Do this better * Update client/network/src/transactions.rs Co-authored-by: Nikolay Volf * [WIP] Fix handshake * Finish handshake change * Bugfix Co-authored-by: Nikolay Volf * Remove `OnSlot` associated type (#8156) Currently we always use a boxed future everywhere anyway. This also enables us to use a boxed `SlotWorker` (which is required for Cumulus). 
* Fix warning in rustdoc job (#8159) * Fix warning in rustdoc job * More fixes * Remove `build-rust-doc` job Remove this job until upstream is fixed: https://github.com/rust-lang/rust/issues/82284 * CI: temp. remove of the publishing job, no use of it w/o build Co-authored-by: Denis P * CI: test and update ci image, codeowners (#8142) * CI: prep for the new ff deployment * CI: variable for CI image * git: add CI team and remove Max from CODEOWNERS * CI: diener should be updated in CI image, not here. * CI: diener should be updated in CI image, not here. * CI: run cargo deny on changes to manifests and lock; run build jobs on schedules [skip ci] * CI: remove flaming-fir deployment, it will be handled from s3 updates [skip ci] * CI: trigger simnet with a certain substrate version * CI: remove cargo-audit in favor of cargo-deny; prepare for being triggered * CI: prepare to be triggered * CI: chore * Migration testing runtime API/Bot (#8038) * A clean new attempt * Checkpoint to move remote. * A lot of dependency wiring to make it feature gated. * bad macro, bad macro. * Undo the DB mess. * Update frame/support/src/traits.rs Co-authored-by: Alexander Popiak * Apply suggestions from code review Co-authored-by: Alexander Popiak * unbreak the build * Update frame/try-runtime/src/lib.rs Co-authored-by: Bastian Köcher * Update utils/frame/try-runtime/cli/Cargo.toml Co-authored-by: Shawn Tabrizi * Update frame/try-runtime/Cargo.toml Co-authored-by: Shawn Tabrizi * Address most review grumbles. * Fix build * Add some comments * Remove allowing one pallet at a time. * More grumbles. * relocate remote-ext * Fix build Co-authored-by: Alexander Popiak Co-authored-by: Bastian Köcher Co-authored-by: Shawn Tabrizi * Ensure we spawn the block import worker as an essential task (#8155) * Ensure we spawn the block import worker as an essential task This pr ensures that we spawn the block import worker as an essential task. 
This is quite important as we need to bring down the node when the block import is done. Besides that it adds some debug output to the block import worker. * Don't be stupid :D * Update to libp2p-0.35.1 (#8141) * contracts: Consider contract size in weights (#8086) * contracts: Consider contract size in weights * Bump spec version * Whitespace fix Co-authored-by: Guillaume Thiolliere * Correct pre-charged code weight even in the error case * Use the instrumented code size in weight calculation * Charge the cost of re-instrumentation from the gas meter * Fix benchmark * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_contracts --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/contracts/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Better documentation of return types Co-authored-by: Guillaume Thiolliere Co-authored-by: Parity Benchmarking Bot * Allow `transfer_keep_alive` to transfer all free balance (#8125) * Migrate examples to use pallet macro (#8138) * Make keystore return `None` when a key doesn't exist (#8163) * Make keystore return `None` when a key doesn't exist * Fixes * More fixes * Update comment * Update primitives/keystore/src/lib.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Update client/keystore/src/local.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Address comments * Update client/keystore/src/local.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * contracts: Convert to framev2 macros (#8157) * contracts: Convert to framev2 * Reduce the API surface of the crate * Remove unused import * Merge import block * Use pallet::metadata to reduce metadata diff * Remove the explicit "Null" from AccountCounter * Fix: stash account 
reaped when ledger.active == ED (#8170) * do not reap account when active == ed * add tests + refactor * Bump thread_local (#8174) * Return number of keys removed when calling `storage_kill` on child trie (#8166) * Initial piping of returning amount of keys killed * One more test for `None` limit * forgot to update * fix return value * use version 3 * Update to return `KillOutcome` * Update name to KillChildStorageResult * Make Regex in ss58codec and secret phrase crypto static (#8117) (#8177) * Add a Prometheus alert on no incoming connection (#7517) * Bump lru dependency (#8182) * Decouple Staking and Election - Part 2 Unsigned Phase (#7909) * Base features and traits. * pallet and unsigned phase * Undo bad formattings. * some formatting cleanup. * Small self-cleanup. * Make it all build * self-review * Some doc tests. * Some changes from other PR * Fix session test * Update Cargo.lock * Update frame/election-provider-multi-phase/src/lib.rs Co-authored-by: Guillaume Thiolliere * Some review comments * Rename + make encode/decode * Do an assert as well, just in case. * Fix build * Update frame/election-provider-multi-phase/src/unsigned.rs Co-authored-by: Guillaume Thiolliere * Las comment * fix staking fuzzer. * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Add one last layer of feasibility check as well. * Last fixes to benchmarks * Some more docs. 
* cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_election_provider_multi_phase --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/election-provider-multi-phase/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Some nits * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Fix doc * Mkae ci green Co-authored-by: Shawn Tabrizi Co-authored-by: Guillaume Thiolliere Co-authored-by: Parity Benchmarking Bot * Remove suicide from frame_system weights (#8184) * remove suicide from frame_system weights * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=frame_system --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/system/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Update frame/system/src/weights.rs Co-authored-by: Parity Benchmarking Bot Co-authored-by: Shawn Tabrizi * Reserve ss58 prefix 48 for Neatcoin (#8165) * Reserved ss58 prefixes for Neatcoin * Switch to use 63 * Switch to use 48 * Make `on_slot` return the block with the post header (#8188) * Make `on_slot` return the block with the post header Before this pr 
`on_slot` returned the pre block. However this is wrong, because adding some post digest changes the hash of the header. Thus, we need to make sure to return the correct block that uses the post header. * Update primitives/consensus/common/src/block_import.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * [multisig, insubstantial] WeightTooLow -> MaxWeightTooLow (#8112) * Update lib.rs (#8192) Turns out the polkadot bot assumes that this guys is exported from the root of the pallet. * Add some debug_asserts for #8171 (#8181) * Display nicer inspect results. (#8198) * Move proof generation to the type system level (#8185) * Start * Finish!!!! * Update client/basic-authorship/src/basic_authorship.rs Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Review comments Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * grandpa: rewrite warp sync proof generation (#8148) * grandpa: use AuthoritySetChanges to generate warp sync proof * node: init grandpa warp sync protocol * grandpa: iterator for AuthoritySetChanges * grandpa: rewrite warp sync proof generation * grandpa: remove old code for warp sync generation * grandpa: fix indentation * grandpa: fix off by one * grandpa: use binary search to find start idx when generating warp sync proof * grandpa: add method to verify warp sync proofs * grandpa: remove unnecessary code to skip authority set changes * grandpa: add test for warp sync proof generation and verification * grandpa: add missing docs * grandpa: remove trailing comma * Add ss58 prefix for HydraDX (#8058) * Add ss58 prefix for HydraDX * fix formatting * allow to write pre and post runtime upgrade in pallet macro (#8194) * Add an is_finished boolean to the grandpa warp sync response (#8203) * Fix networking debug_asserts (#8200) * Fix networking debug_asserts * Fix comment * pallet macro broke `benchmarks_instance`, 
fix by introducing `benchmarks_instance_pallet` (#8190) Co-authored-by: Peter Goodspeed-Niklaus * Frame Benchmarking v3.1.0 released (#8206) * Releasing frame-benchmarking 3.1 * bump in the entire dependency tree * contracts: Release as v3.0.0 and add reserved field to `ContractInfoOf` (#8175) * contracts: Update README * contracts: Add CHANGELOG.md * contracts: Bump version to v3.0.0 and allow publish * Typos Co-authored-by: Andrew Jones * Improve wording in the changelog * contracts: Add reserved field to ContractInfoOf for future proofing * also bump frame-benchmarking * update lockfile Co-authored-by: Andrew Jones Co-authored-by: Benjamin Kampmann * chore: fix typos for contract (#8178) * Fix transactions not being propagated to authorities (#8212) * Better identifier and logging for runtime upgrades (#8123) * A clean new attempt * Checkpoint to move remote. * A lot of dependency wiring to make it feature gated. * bad macro, bad macro. * Undo the DB mess. * Update frame/support/src/traits.rs Co-authored-by: Alexander Popiak * Apply suggestions from code review Co-authored-by: Alexander Popiak * unbreak the build * Better logging and ids for migrations * Fix doc. * Test * Update frame/try-runtime/src/lib.rs Co-authored-by: Bastian Köcher * Update utils/frame/try-runtime/cli/Cargo.toml Co-authored-by: Shawn Tabrizi * Update frame/try-runtime/Cargo.toml Co-authored-by: Shawn Tabrizi * Address most review grumbles. * Fix build * Add some comments * Remove allowing one pallet at a time. * Rework the PR * nit * Slightly better error handling. * Remove files * Update utils/frame/remote-externalities/src/lib.rs Co-authored-by: Bastian Köcher * Update frame/support/src/dispatch.rs * Update frame/support/src/dispatch.rs * Fix test * Make extension trait. 
* Bring back try-runtime/std * remove bincode * Remove warning * Change test features Co-authored-by: Alexander Popiak Co-authored-by: Bastian Köcher Co-authored-by: Shawn Tabrizi * Fix ignored error in benchmark tests (#8214) * fix ignored error in benchmark tests * use normal format for str * explicit match Co-authored-by: Shawn Tabrizi * Gilts Pallet (#8139) * Initial draft * Enlarge function drafted. * Thaw draft * Retract_bid draft * Final bits of draft impl. * Test mockup * Tests * Docs * Add benchmark scaffold * Integrate weights * All benchmarks done * Missing file * Remove stale comments * Fixes * Fixes * Allow for priority queuing. * Another test and a fix * Fixes * Fixes * cargo run --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_gilt --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/gilt/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Grumble * Update frame/gilt/src/tests.rs Co-authored-by: Shawn Tabrizi * Update frame/gilt/src/tests.rs Co-authored-by: Shawn Tabrizi * Grumble * Update frame/gilt/src/tests.rs Co-authored-by: Shawn Tabrizi * Update frame/gilt/src/lib.rs Co-authored-by: Shawn Tabrizi * Update frame/gilt/src/lib.rs Co-authored-by: Shawn Tabrizi * Fix unreserve ordering * Grumble * Fixes Co-authored-by: Parity Benchmarking Bot Co-authored-by: Shawn Tabrizi * emit event on remark (#8120) Co-authored-by: Parity Benchmarking Bot Co-authored-by: Shawn Tabrizi * grandpa: maintain invariants when evaluating aggregated voting rules (#8186) * grandpa: maintain invariants when evaluating aggregated voting rules * grandpa: update comment on VotingRules::restrict_vote * grandpa: simplify comment * Fix merge build issue * Init `RuntimeLogger` automatically for each runtime api call (#8128) * Init `RuntimeLogger` automatically for each runtime api call This pr change the runtime api in such a way to 
always and automatically enable the `RuntimeLogger`. This enables the user to use `log` or `tracing` from inside the runtime to create log messages. As logging introduces some extra code and especially increases the size of the wasm blob. It is advised to disable all logging completely with `sp-api/disable-logging` when doing the wasm builds for the on-chain wasm runtime. Besides these changes, the pr also brings most of the logging found in frame to the same format "runtime::*". * Update frame/im-online/src/lib.rs Co-authored-by: Guillaume Thiolliere * Update test-utils/runtime/Cargo.toml * Fix test * Don't use tracing in the runtime, as we don't support it :D * Fixes Co-authored-by: Guillaume Thiolliere * babe: make plan_config_change callable (#8233) * Fix state mismatch in case of bad handshake (#8230) * Update Rust toolchain * Delete some of the files which are specific to Polkadot's maintenance * Fix attestation build and enable it * Fix Generic Asset * Fix the previous merge * Remove github actions and workflows until later when we adjust them for Plug Co-authored-by: Shawn Tabrizi Co-authored-by: Bastian Köcher Co-authored-by: cheme Co-authored-by: RK Co-authored-by: Xiliang Chen Co-authored-by: Parity Benchmarking Bot Co-authored-by: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Co-authored-by: Guillaume Thiolliere Co-authored-by: Alexander Theißen Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: Max Inden Co-authored-by: honeywest <50997103+honeywest@users.noreply.github.com> Co-authored-by: kaichao Co-authored-by: Addie Wagenknecht Co-authored-by: Ashley Co-authored-by: Stanly Johnson Co-authored-by: Andronik Ordian Co-authored-by: Denis Pisarev Co-authored-by: Pierre Krieger Co-authored-by: tuminfei Co-authored-by: Xiang Li Co-authored-by: joe petrowski <25483142+joepetrowski@users.noreply.github.com> Co-authored-by: Igor Matuszewski Co-authored-by: Bastian Köcher Co-authored-by: Benjamin Kampmann 
Co-authored-by: Benjamin Kampmann Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Satish Mohan <54302767+smohan-dw@users.noreply.github.com> Co-authored-by: Bernhard Schuster Co-authored-by: Bernhard Schuster Co-authored-by: Liu-Cheng Xu Co-authored-by: Arkadiy Paronyan Co-authored-by: ropottnik Co-authored-by: Aten Co-authored-by: Gavin Wood Co-authored-by: Alexander Popiak Co-authored-by: Krzysztof Jelski Co-authored-by: Black3HDF <29630164+Satoshi-Kusumoto@users.noreply.github.com> Co-authored-by: Adam Dossa Co-authored-by: Adam Dossa Co-authored-by: Sergei Lonshakov Co-authored-by: Nikolay Volf Co-authored-by: Niklas Adolfsson Co-authored-by: Andrew Jones Co-authored-by: Cecile Tonglet Co-authored-by: Jon Häggblad Co-authored-by: André Silva Co-authored-by: Tomasz Drwięga Co-authored-by: Gerben van de Wiel Co-authored-by: Wei Tang Co-authored-by: David Co-authored-by: Hernando Castano Co-authored-by: Joshy Orndorff Co-authored-by: Amar Singh Co-authored-by: yjh <465402634@qq.com> Co-authored-by: Roman Borschel Co-authored-by: Tomasz Drwięga Co-authored-by: Robert Klotzner Co-authored-by: Shaopeng Wang Co-authored-by: Alejandro Martinez Andres Co-authored-by: Bastian Köcher Co-authored-by: Kun Co-authored-by: frank <450595468@qq.com> Co-authored-by: Sergei Shulepov Co-authored-by: nahuseyoum <39748285+nahuseyoum@users.noreply.github.com> Co-authored-by: Martin Pugh Co-authored-by: Peter Goodspeed-Niklaus Co-authored-by: Albrecht <14820950+weichweich@users.noreply.github.com> Co-authored-by: Robin Syihab Co-authored-by: joshua-mir Co-authored-by: Jakub Pánik --- .circleci/config.yml | 8 +- .github/allowed-actions.js | 7 - .github/dependabot.yml | 7 - .../workflows/burnin-label-notification.yml | 17 - .github/workflows/md-link-check.yml | 19 - .github/workflows/mlc_config.json | 7 - .github/workflows/release-bot.yml | 18 - .gitignore | 1 + .gitlab-ci.yml | 700 -------- .maintain/{gitlab => common}/lib.sh | 12 +- 
.maintain/docker/subkey.Dockerfile | 31 - .maintain/docker/substrate.Dockerfile | 45 - .maintain/flamingfir-deploy.sh | 35 - .maintain/{gitlab => github}/check_labels.sh | 13 +- .maintain/gitlab/check_line_width.sh | 55 - .../gitlab/check_polkadot_companion_build.sh | 98 -- .../gitlab/check_polkadot_companion_status.sh | 103 -- .maintain/gitlab/check_runtime.sh | 124 -- .maintain/gitlab/check_signed.sh | 16 - .maintain/gitlab/generate_changelog.sh | 85 - .maintain/gitlab/publish_draft_release.sh | 54 - .maintain/gitlab/skip_if_draft.sh | 14 - .maintain/kubernetes/Chart.yaml | 12 - .maintain/kubernetes/README.md | 47 - .../templates/poddisruptionbudget.yaml | 10 - .maintain/kubernetes/templates/secrets.yaml | 11 - .maintain/kubernetes/templates/service.yaml | 54 - .../kubernetes/templates/serviceaccount.yaml | 10 - .../kubernetes/templates/statefulset.yaml | 139 -- .maintain/kubernetes/values.yaml | 59 - .../alerting-rules/alerting-rules.yaml | 7 + .../substrate-networking.json | 100 +- .../substrate-service-tasks.json | 43 +- .rustfmt.toml | 4 + Cargo.lock | 351 +++- Cargo.toml | 26 +- Process.json | 29 - bin/node-template/node/Cargo.toml | 2 +- bin/node-template/node/src/service.rs | 15 +- bin/node-template/pallets/template/Cargo.toml | 1 + bin/node-template/runtime/Cargo.toml | 2 +- bin/node/bench/src/construct.rs | 3 +- bin/node/browser-testing/Cargo.toml | 8 +- bin/node/cli/Cargo.toml | 15 +- bin/node/cli/src/chain_spec.rs | 1 + bin/node/cli/src/cli.rs | 5 + bin/node/cli/src/command.rs | 15 + bin/node/cli/src/service.rs | 33 +- bin/node/cli/tests/telemetry.rs | 102 ++ bin/node/cli/tests/websocket_server.rs | 281 ++++ bin/node/executor/Cargo.toml | 5 +- bin/node/executor/tests/basic.rs | 32 +- bin/node/executor/tests/common.rs | 18 +- bin/node/executor/tests/fees.rs | 24 +- bin/node/inspect/src/lib.rs | 2 +- bin/node/rpc/Cargo.toml | 4 +- bin/node/rpc/src/lib.rs | 3 +- bin/node/runtime/Cargo.toml | 61 +- bin/node/runtime/src/constants.rs | 2 +- 
bin/node/runtime/src/lib.rs | 279 ++-- bin/node/testing/Cargo.toml | 2 +- bin/node/testing/src/genesis.rs | 1 + client/api/src/call_executor.rs | 2 +- client/api/src/in_mem.rs | 6 +- client/authority-discovery/Cargo.toml | 2 +- client/authority-discovery/src/lib.rs | 4 +- client/authority-discovery/src/worker.rs | 12 +- .../authority-discovery/src/worker/tests.rs | 8 +- client/basic-authorship/README.md | 3 +- .../basic-authorship/src/basic_authorship.rs | 92 +- client/basic-authorship/src/lib.rs | 3 +- client/block-builder/Cargo.toml | 1 - client/block-builder/src/lib.rs | 59 +- client/chain-spec/derive/Cargo.toml | 2 +- client/cli/Cargo.toml | 2 +- client/cli/src/arg_enums.rs | 35 +- client/cli/src/commands/run_cmd.rs | 42 +- client/cli/src/config.rs | 6 +- client/cli/src/params/database_params.rs | 3 +- client/cli/src/params/pruning_params.rs | 4 +- client/consensus/aura/src/lib.rs | 89 +- client/consensus/babe/src/authorship.rs | 4 +- client/consensus/babe/src/lib.rs | 28 +- client/consensus/babe/src/tests.rs | 18 +- .../manual-seal/src/consensus/babe.rs | 6 +- client/consensus/manual-seal/src/lib.rs | 2 +- .../consensus/manual-seal/src/seal_block.rs | 7 +- client/consensus/pow/src/lib.rs | 21 +- client/consensus/pow/src/worker.rs | 22 +- client/consensus/slots/src/lib.rs | 47 +- client/executor/common/src/error.rs | 12 +- client/executor/src/native_executor.rs | 4 +- client/executor/src/wasm_runtime.rs | 6 +- client/finality-grandpa-warp-sync/Cargo.toml | 30 +- client/finality-grandpa-warp-sync/src/lib.rs | 64 +- .../finality-grandpa-warp-sync/src/proof.rs | 305 ++++ client/finality-grandpa/Cargo.toml | 6 +- client/finality-grandpa/rpc/Cargo.toml | 2 +- client/finality-grandpa/rpc/src/lib.rs | 7 +- client/finality-grandpa/src/authorities.rs | 56 +- .../src/communication/gossip.rs | 7 +- client/finality-grandpa/src/environment.rs | 223 +-- client/finality-grandpa/src/finality_proof.rs | 486 +----- client/finality-grandpa/src/import.rs | 17 +- 
client/finality-grandpa/src/justification.rs | 39 +- client/finality-grandpa/src/lib.rs | 16 +- client/finality-grandpa/src/observer.rs | 5 - client/finality-grandpa/src/tests.rs | 28 +- client/finality-grandpa/src/voting_rule.rs | 212 ++- client/keystore/src/lib.rs | 4 - client/keystore/src/local.rs | 130 +- client/light/src/call_executor.rs | 2 +- client/network-gossip/Cargo.toml | 4 +- client/network/Cargo.toml | 8 +- client/network/src/behaviour.rs | 55 +- client/network/src/block_request_handler.rs | 2 +- client/network/src/config.rs | 22 +- client/network/src/gossip/tests.rs | 1 + client/network/src/lib.rs | 1 + client/network/src/protocol.rs | 376 +---- client/network/src/protocol/event.rs | 8 +- .../src/protocol/generic_proto/behaviour.rs | 31 +- client/network/src/protocol/message.rs | 1 - client/network/src/service.rs | 81 +- client/network/src/service/tests.rs | 1 + client/network/src/transactions.rs | 488 ++++++ client/network/test/Cargo.toml | 2 +- client/network/test/src/lib.rs | 2 + client/offchain/src/lib.rs | 4 +- client/peerset/Cargo.toml | 2 +- client/rpc-api/src/system/helpers.rs | 2 - client/rpc/src/author/mod.rs | 9 +- client/rpc/src/state/mod.rs | 5 +- client/rpc/src/state/state_full.rs | 26 +- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 29 +- client/service/src/client/call_executor.rs | 12 +- client/service/src/client/client.rs | 40 +- client/service/src/config.rs | 4 - client/service/src/error.rs | 6 +- client/service/src/lib.rs | 1 - client/service/src/metrics.rs | 1 - client/service/src/task_manager/mod.rs | 59 +- client/service/src/task_manager/tests.rs | 128 +- client/service/test/src/client/light.rs | 2 +- client/service/test/src/client/mod.rs | 4 +- client/service/test/src/lib.rs | 3 +- client/state-db/src/lib.rs | 8 +- client/telemetry/Cargo.toml | 2 +- client/tracing/proc-macro/Cargo.toml | 2 +- client/transaction-pool/src/api.rs | 5 +- client/transaction-pool/src/lib.rs | 2 - docs/CODEOWNERS | 12 +- 
docs/Upgrading-2.0-to-3.0.md | 18 +- frame/assets/Cargo.toml | 3 +- frame/assets/README.md | 2 +- frame/assets/src/benchmarking.rs | 120 +- frame/atomic-swap/Cargo.toml | 1 + frame/aura/Cargo.toml | 1 + frame/authority-discovery/Cargo.toml | 1 + frame/authorship/Cargo.toml | 1 + frame/babe/Cargo.toml | 9 +- frame/babe/src/default_weights.rs | 4 + frame/babe/src/equivocation.rs | 22 +- frame/babe/src/lib.rs | 79 +- frame/babe/src/mock.rs | 11 +- frame/babe/src/tests.rs | 44 +- frame/balances/Cargo.toml | 5 +- frame/balances/src/benchmarking.rs | 93 +- frame/balances/src/lib.rs | 179 +- frame/balances/src/tests.rs | 15 +- frame/balances/src/tests_local.rs | 2 +- frame/balances/src/tests_reentrancy.rs | 310 ++++ frame/balances/src/weights.rs | 2 +- frame/benchmarking/Cargo.toml | 4 +- frame/benchmarking/src/lib.rs | 383 ++++- frame/benchmarking/src/tests.rs | 3 +- frame/benchmarking/src/utils.rs | 2 +- frame/bounties/Cargo.toml | 3 +- frame/bounties/src/benchmarking.rs | 30 +- frame/collective/Cargo.toml | 5 +- frame/collective/src/benchmarking.rs | 89 +- frame/collective/src/lib.rs | 23 +- frame/contracts/CHANGELOG.md | 78 + frame/contracts/Cargo.toml | 14 +- frame/contracts/README.md | 24 +- frame/contracts/common/Cargo.toml | 3 +- frame/contracts/proc-macro/Cargo.toml | 3 +- frame/contracts/rpc/Cargo.toml | 7 +- frame/contracts/rpc/runtime-api/Cargo.toml | 5 +- frame/contracts/src/benchmarking/code.rs | 24 +- frame/contracts/src/benchmarking/mod.rs | 222 +-- frame/contracts/src/chain_extension.rs | 6 +- frame/contracts/src/exec.rs | 218 ++- frame/contracts/src/gas.rs | 159 +- frame/contracts/src/lib.rs | 1068 ++++++------ frame/contracts/src/rent.rs | 43 +- frame/contracts/src/schedule.rs | 39 +- frame/contracts/src/storage.rs | 26 +- frame/contracts/src/tests.rs | 126 +- frame/contracts/src/wasm/code_cache.rs | 73 +- frame/contracts/src/wasm/env_def/macros.rs | 12 +- frame/contracts/src/wasm/env_def/mod.rs | 6 +- frame/contracts/src/wasm/mod.rs | 50 +- 
frame/contracts/src/wasm/runtime.rs | 114 +- frame/contracts/src/weights.rs | 1262 +++++++------- frame/democracy/Cargo.toml | 3 +- frame/democracy/src/benchmarking.rs | 49 +- .../election-provider-multi-phase/Cargo.toml | 70 + .../src/benchmarking.rs | 282 ++++ .../src/helpers.rs | 159 ++ .../election-provider-multi-phase/src/lib.rs | 1457 +++++++++++++++++ .../election-provider-multi-phase/src/mock.rs | 381 +++++ .../src/unsigned.rs | 873 ++++++++++ .../src/weights.rs | 150 ++ frame/elections-phragmen/Cargo.toml | 5 +- frame/elections-phragmen/src/benchmarking.rs | 89 +- frame/elections-phragmen/src/lib.rs | 11 +- .../src/migrations_3_0_0.rs | 29 +- frame/elections/Cargo.toml | 1 + frame/example-offchain-worker/Cargo.toml | 3 + frame/example-offchain-worker/README.md | 2 +- frame/example-offchain-worker/src/lib.rs | 368 +++-- frame/example-parallel/Cargo.toml | 1 + frame/example-parallel/src/lib.rs | 120 +- frame/example-parallel/src/tests.rs | 3 +- frame/example/Cargo.toml | 2 +- frame/example/src/lib.rs | 344 ++-- frame/executive/Cargo.toml | 3 + frame/executive/src/lib.rs | 78 +- frame/gilt/Cargo.toml | 46 + frame/gilt/README.md | 2 + frame/gilt/src/benchmarking.rs | 136 ++ frame/gilt/src/lib.rs | 582 +++++++ frame/gilt/src/mock.rs | 138 ++ frame/gilt/src/tests.rs | 499 ++++++ frame/gilt/src/weights.rs | 164 ++ frame/grandpa/Cargo.toml | 10 +- frame/grandpa/src/equivocation.rs | 20 +- frame/grandpa/src/mock.rs | 10 +- frame/identity/Cargo.toml | 3 +- frame/identity/src/benchmarking.rs | 35 +- frame/im-online/Cargo.toml | 5 +- frame/im-online/src/benchmarking.rs | 23 +- frame/im-online/src/lib.rs | 14 +- frame/indices/Cargo.toml | 3 +- frame/indices/src/benchmarking.rs | 23 +- frame/lottery/Cargo.toml | 3 +- frame/lottery/src/benchmarking.rs | 25 +- frame/membership/Cargo.toml | 1 + frame/merkle-mountain-range/Cargo.toml | 3 +- .../primitives/Cargo.toml | 2 + .../primitives/src/lib.rs | 16 +- .../merkle-mountain-range/src/benchmarking.rs | 21 +- 
frame/multisig/Cargo.toml | 3 +- frame/multisig/src/benchmarking.rs | 29 +- frame/multisig/src/lib.rs | 4 +- frame/multisig/src/tests.rs | 2 +- frame/nicks/Cargo.toml | 1 + frame/node-authorization/Cargo.toml | 3 + frame/node-authorization/src/lib.rs | 16 +- frame/offences/Cargo.toml | 3 + frame/offences/benchmarking/Cargo.toml | 4 +- frame/offences/benchmarking/src/lib.rs | 23 +- frame/offences/benchmarking/src/mock.rs | 9 + frame/offences/src/lib.rs | 11 +- frame/proxy/Cargo.toml | 3 +- frame/proxy/src/benchmarking.rs | 29 +- frame/randomness-collective-flip/Cargo.toml | 1 + frame/recovery/Cargo.toml | 1 + frame/scheduler/Cargo.toml | 7 +- frame/scheduler/src/benchmarking.rs | 24 +- frame/scheduler/src/lib.rs | 13 +- frame/scored-pool/Cargo.toml | 1 + frame/session/Cargo.toml | 1 + frame/session/benchmarking/Cargo.toml | 6 +- frame/session/benchmarking/src/lib.rs | 22 +- frame/session/benchmarking/src/mock.rs | 12 +- frame/session/src/lib.rs | 18 +- frame/session/src/tests.rs | 2 +- frame/society/Cargo.toml | 1 + frame/staking/Cargo.toml | 14 +- frame/staking/fuzzer/Cargo.toml | 1 + frame/staking/fuzzer/src/mock.rs | 14 +- frame/staking/fuzzer/src/submit_solution.rs | 2 +- frame/staking/reward-curve/Cargo.toml | 2 +- frame/staking/reward-curve/src/lib.rs | 8 +- frame/staking/src/benchmarking.rs | 48 +- frame/staking/src/lib.rs | 354 +++- frame/staking/src/mock.rs | 27 +- frame/staking/src/offchain_election.rs | 37 +- frame/staking/src/testing_utils.rs | 2 +- frame/staking/src/tests.rs | 134 +- frame/staking/src/weights.rs | 232 +-- frame/sudo/Cargo.toml | 1 + frame/support/Cargo.toml | 8 +- frame/support/procedural/Cargo.toml | 2 +- frame/support/procedural/src/lib.rs | 2 +- .../procedural/src/pallet/expand/hooks.rs | 18 + frame/support/procedural/tools/Cargo.toml | 2 +- .../procedural/tools/derive/Cargo.toml | 2 +- frame/support/src/debug.rs | 247 --- frame/support/src/dispatch.rs | 130 +- frame/support/src/hash.rs | 4 +- frame/support/src/lib.rs | 39 +- 
frame/support/src/storage/child.rs | 23 +- .../src/storage/generator/double_map.rs | 6 +- frame/support/src/storage/generator/map.rs | 4 +- frame/support/src/storage/mod.rs | 15 +- frame/support/src/storage/unhashed.rs | 2 +- frame/support/src/traits.rs | 128 +- frame/system/Cargo.toml | 3 + frame/system/benchmarking/Cargo.toml | 2 +- frame/system/benchmarking/src/lib.rs | 32 +- frame/system/src/lib.rs | 37 +- frame/system/src/limits.rs | 8 + frame/system/src/offchain.rs | 6 +- frame/system/src/weights.rs | 65 +- frame/timestamp/Cargo.toml | 7 +- frame/timestamp/README.md | 2 +- frame/timestamp/src/benchmarking.rs | 21 +- frame/timestamp/src/lib.rs | 7 +- frame/tips/Cargo.toml | 3 +- frame/tips/src/benchmarking.rs | 25 +- frame/transaction-payment/Cargo.toml | 1 + .../rpc/runtime-api/Cargo.toml | 2 + .../rpc/runtime-api/src/lib.rs | 60 + frame/transaction-payment/src/types.rs | 53 +- frame/treasury/Cargo.toml | 3 +- frame/treasury/src/benchmarking.rs | 25 +- frame/try-runtime/Cargo.toml | 31 + frame/try-runtime/src/lib.rs | 37 + frame/utility/Cargo.toml | 3 +- frame/utility/src/benchmarking.rs | 22 +- frame/vesting/Cargo.toml | 3 +- frame/vesting/src/benchmarking.rs | 25 +- primitives/api/Cargo.toml | 13 + primitives/api/proc-macro/Cargo.toml | 2 +- .../api/proc-macro/src/decl_runtime_apis.rs | 54 +- .../api/proc-macro/src/impl_runtime_apis.rs | 31 +- .../proc-macro/src/mock_impl_runtime_apis.rs | 80 +- primitives/api/proc-macro/src/utils.rs | 3 +- primitives/api/src/lib.rs | 119 +- primitives/api/test/Cargo.toml | 2 + primitives/api/test/tests/decl_and_impl.rs | 39 +- primitives/api/test/tests/runtime_calls.rs | 38 +- .../ui/mock_only_error_associated_type.rs | 19 - .../ui/mock_only_error_associated_type.stderr | 5 - .../test/tests/ui/mock_only_one_error_type.rs | 29 - .../tests/ui/mock_only_one_error_type.stderr | 29 - primitives/arithmetic/src/per_things.rs | 5 + primitives/blockchain/Cargo.toml | 2 +- primitives/blockchain/src/error.rs | 14 +- 
primitives/consensus/babe/src/inherents.rs | 1 + primitives/consensus/common/Cargo.toml | 2 +- .../consensus/common/src/block_import.rs | 23 +- .../common/src/import_queue/basic_queue.rs | 29 +- primitives/consensus/common/src/lib.rs | 108 +- primitives/core/src/crypto.rs | 43 +- primitives/core/src/hashing.rs | 2 +- primitives/core/src/hexdisplay.rs | 6 + primitives/core/src/testing.rs | 10 + primitives/core/src/traits.rs | 29 +- primitives/debug-derive/Cargo.toml | 2 +- primitives/externalities/src/lib.rs | 7 +- primitives/finality-grandpa/Cargo.toml | 2 +- primitives/finality-grandpa/src/lib.rs | 2 +- primitives/inherents/src/lib.rs | 2 +- primitives/io/src/lib.rs | 59 +- primitives/keystore/src/lib.rs | 69 +- primitives/keystore/src/testing.rs | 48 +- .../runtime-interface/proc-macro/Cargo.toml | 2 +- primitives/runtime/Cargo.toml | 7 +- primitives/runtime/src/lib.rs | 1 + primitives/runtime/src/runtime_logger.rs | 108 ++ primitives/session/src/lib.rs | 2 +- primitives/state-machine/src/basic.rs | 29 +- primitives/state-machine/src/ext.rs | 61 +- primitives/state-machine/src/lib.rs | 28 +- primitives/state-machine/src/read_only.rs | 2 +- primitives/state-machine/src/testing.rs | 18 +- primitives/storage/src/lib.rs | 13 +- primitives/tasks/src/async_externalities.rs | 2 +- prml/attestation/Cargo.toml | 8 +- prml/attestation/src/benchmarking.rs | 29 +- prml/attestation/src/lib.rs | 50 +- prml/attestation/src/mock.rs | 40 +- prml/generic-asset/Cargo.toml | 2 +- prml/generic-asset/rpc/Cargo.toml | 2 +- prml/generic-asset/rpc/runtime-api/Cargo.toml | 2 +- prml/generic-asset/src/benchmarking.rs | 62 +- prml/generic-asset/src/imbalances.rs | 4 +- prml/generic-asset/src/impls.rs | 512 +++--- prml/generic-asset/src/lib.rs | 30 +- prml/generic-asset/src/mock.rs | 131 +- prml/generic-asset/src/tests.rs | 1238 +++++++------- prml/generic-asset/src/types.rs | 4 +- prml/generic-asset/src/weights.rs | 2 +- prml/support/src/lib.rs | 7 +- prml/validator-manager/src/lib.rs | 2 
+- ss58-registry.json | 18 + test-utils/derive/Cargo.toml | 2 +- test-utils/runtime/Cargo.toml | 6 +- test-utils/runtime/build.rs | 10 +- .../runtime/client/src/block_builder_ext.rs | 2 +- test-utils/runtime/client/src/lib.rs | 10 + test-utils/runtime/src/lib.rs | 22 +- test-utils/src/lib.rs | 2 +- utils/browser/src/lib.rs | 4 +- utils/frame/benchmarking-cli/Cargo.toml | 2 +- utils/frame/remote-externalities/Cargo.toml | 34 + utils/frame/remote-externalities/src/lib.rs | 455 +++++ utils/frame/try-runtime/cli/Cargo.toml | 32 + utils/frame/try-runtime/cli/src/lib.rs | 178 ++ utils/wasm-builder/src/builder.rs | 25 +- utils/wasm-builder/src/wasm_project.rs | 37 +- 416 files changed, 15489 insertions(+), 9159 deletions(-) delete mode 100644 .github/allowed-actions.js delete mode 100644 .github/dependabot.yml delete mode 100644 .github/workflows/burnin-label-notification.yml delete mode 100644 .github/workflows/md-link-check.yml delete mode 100644 .github/workflows/mlc_config.json delete mode 100644 .github/workflows/release-bot.yml delete mode 100644 .gitlab-ci.yml rename .maintain/{gitlab => common}/lib.sh (89%) delete mode 100644 .maintain/docker/subkey.Dockerfile delete mode 100644 .maintain/docker/substrate.Dockerfile delete mode 100755 .maintain/flamingfir-deploy.sh rename .maintain/{gitlab => github}/check_labels.sh (76%) delete mode 100755 .maintain/gitlab/check_line_width.sh delete mode 100755 .maintain/gitlab/check_polkadot_companion_build.sh delete mode 100755 .maintain/gitlab/check_polkadot_companion_status.sh delete mode 100755 .maintain/gitlab/check_runtime.sh delete mode 100755 .maintain/gitlab/check_signed.sh delete mode 100755 .maintain/gitlab/generate_changelog.sh delete mode 100755 .maintain/gitlab/publish_draft_release.sh delete mode 100755 .maintain/gitlab/skip_if_draft.sh delete mode 100644 .maintain/kubernetes/Chart.yaml delete mode 100644 .maintain/kubernetes/README.md delete mode 100644 .maintain/kubernetes/templates/poddisruptionbudget.yaml 
delete mode 100644 .maintain/kubernetes/templates/secrets.yaml delete mode 100644 .maintain/kubernetes/templates/service.yaml delete mode 100644 .maintain/kubernetes/templates/serviceaccount.yaml delete mode 100644 .maintain/kubernetes/templates/statefulset.yaml delete mode 100644 .maintain/kubernetes/values.yaml create mode 100644 .rustfmt.toml delete mode 100644 Process.json create mode 100644 bin/node/cli/tests/telemetry.rs create mode 100644 bin/node/cli/tests/websocket_server.rs create mode 100644 client/finality-grandpa-warp-sync/src/proof.rs create mode 100644 client/network/src/transactions.rs create mode 100644 frame/balances/src/tests_reentrancy.rs create mode 100644 frame/contracts/CHANGELOG.md create mode 100644 frame/election-provider-multi-phase/Cargo.toml create mode 100644 frame/election-provider-multi-phase/src/benchmarking.rs create mode 100644 frame/election-provider-multi-phase/src/helpers.rs create mode 100644 frame/election-provider-multi-phase/src/lib.rs create mode 100644 frame/election-provider-multi-phase/src/mock.rs create mode 100644 frame/election-provider-multi-phase/src/unsigned.rs create mode 100644 frame/election-provider-multi-phase/src/weights.rs create mode 100644 frame/gilt/Cargo.toml create mode 100644 frame/gilt/README.md create mode 100644 frame/gilt/src/benchmarking.rs create mode 100644 frame/gilt/src/lib.rs create mode 100644 frame/gilt/src/mock.rs create mode 100644 frame/gilt/src/tests.rs create mode 100644 frame/gilt/src/weights.rs delete mode 100644 frame/support/src/debug.rs create mode 100644 frame/try-runtime/Cargo.toml create mode 100644 frame/try-runtime/src/lib.rs delete mode 100644 primitives/api/test/tests/ui/mock_only_error_associated_type.rs delete mode 100644 primitives/api/test/tests/ui/mock_only_error_associated_type.stderr delete mode 100644 primitives/api/test/tests/ui/mock_only_one_error_type.rs delete mode 100644 primitives/api/test/tests/ui/mock_only_one_error_type.stderr create mode 100644 
primitives/runtime/src/runtime_logger.rs create mode 100644 utils/frame/remote-externalities/Cargo.toml create mode 100644 utils/frame/remote-externalities/src/lib.rs create mode 100644 utils/frame/try-runtime/cli/Cargo.toml create mode 100644 utils/frame/try-runtime/cli/src/lib.rs diff --git a/.circleci/config.yml b/.circleci/config.yml index 74dd8a631b..de970a552b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -13,8 +13,8 @@ commands: source ~/.cargo/env rustup install $RUST_VERSION rustup default $RUST_VERSION - rustup install nightly-2020-11-16 - rustup target add wasm32-unknown-unknown --toolchain=nightly-2020-11-16 + rustup install nightly-2021-02-28 + rustup target add wasm32-unknown-unknown --toolchain=nightly-2021-02-28 rustup target add x86_64-unknown-linux-musl --toolchain=$RUST_VERSION export RUSTC_WRAPPER="" # sccache is uninstalled at this point so it must be unset here for `wasm-gc` install command -v wasm-gc || cargo install --git https://github.com/alexcrichton/wasm-gc --force @@ -76,7 +76,7 @@ jobs: resource_class: large environment: BASH_ENV: ~/.cargo/env - RUST_VERSION: 1.49.0 + RUST_VERSION: 1.50.0 RUSTC_WRAPPER: sccache SCCACHE_CACHE_SIZE: 10G steps: @@ -92,7 +92,7 @@ jobs: resource_class: large environment: BASH_ENV: ~/.cargo/env - RUST_VERSION: 1.49.0 + RUST_VERSION: 1.50.0 RUSTC_WRAPPER: sccache SCCACHE_CACHE_SIZE: 10G steps: diff --git a/.github/allowed-actions.js b/.github/allowed-actions.js deleted file mode 100644 index 4fb8947580..0000000000 --- a/.github/allowed-actions.js +++ /dev/null @@ -1,7 +0,0 @@ -// This is a whitelist of GitHub Actions that are approved for use in this project. -// If a new or existing workflow file is updated to use an action or action version -// not listed here, CI will fail. 
- -module.exports = [ - 'gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236', // gaurav-nelson/github-action-markdown-link-check@v1.0.8 -] diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index d782bb80f7..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,7 +0,0 @@ -version: 2 -updates: - - package-ecosystem: "cargo" - directory: "/" - labels: ["A2-insubstantial", "B0-silent", "C1-low"] - schedule: - interval: "daily" diff --git a/.github/workflows/burnin-label-notification.yml b/.github/workflows/burnin-label-notification.yml deleted file mode 100644 index 22f15c0ec3..0000000000 --- a/.github/workflows/burnin-label-notification.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Notify devops when burn-in label applied -on: - pull_request: - types: [labeled] - -jobs: - notify-devops: - runs-on: ubuntu-latest - steps: - - name: Notify devops - if: github.event.label.name == 'A1-needsburnin' - uses: s3krit/matrix-message-action@v0.0.3 - with: - room_id: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ROOM_ID }} - access_token: ${{ secrets.POLKADOT_DEVOPS_MATRIX_ACCESS_TOKEN }} - message: "@room Burn-in request received for [${{ github.event.pull_request.title }}](${{ github.event.pull_request.html_url }})" - server: "matrix.parity.io" diff --git a/.github/workflows/md-link-check.yml b/.github/workflows/md-link-check.yml deleted file mode 100644 index 868569911d..0000000000 --- a/.github/workflows/md-link-check.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Check Links - -on: - pull_request: - branches: - - master - push: - branches: - - master - -jobs: - markdown-link-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - - uses: gaurav-nelson/github-action-markdown-link-check@7481451f70251762f149d69596e3e276ebf2b236 - with: - use-quiet-mode: 'yes' - config-file: '.github/workflows/mlc_config.json' diff --git a/.github/workflows/mlc_config.json b/.github/workflows/mlc_config.json deleted file 
mode 100644 index e7e620b39e..0000000000 --- a/.github/workflows/mlc_config.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "ignorePatterns": [ - { - "pattern": "^https://crates.io", - } - ] -} diff --git a/.github/workflows/release-bot.yml b/.github/workflows/release-bot.yml deleted file mode 100644 index ed0a8e5435..0000000000 --- a/.github/workflows/release-bot.yml +++ /dev/null @@ -1,18 +0,0 @@ -name: Pushes release updates to a pre-defined Matrix room -on: - release: - types: - - edited - - prereleased - - published -jobs: - ping_matrix: - runs-on: ubuntu-latest - steps: - - name: send message - uses: s3krit/matrix-message-action@v0.0.3 - with: - room_id: ${{ secrets.MATRIX_ROOM_ID }} - access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }} - message: "**${{github.event.repository.full_name}}:** A release has been ${{github.event.action}}
Release version [${{github.event.release.tag_name}}](${{github.event.release.html_url}})

***Description:***
${{github.event.release.body}}
" - server: "matrix.parity.io" diff --git a/.gitignore b/.gitignore index c8f1ea9567..ce302c74e1 100644 --- a/.gitignore +++ b/.gitignore @@ -23,3 +23,4 @@ rls*.log **/hfuzz_workspace/ .cargo/ .cargo-remote.toml +*.bin diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml deleted file mode 100644 index de1655c39d..0000000000 --- a/.gitlab-ci.yml +++ /dev/null @@ -1,700 +0,0 @@ -# .gitlab-ci.yml -# -# substrate -# -# pipelines can be triggered manually in the web - -# SAMPLE JOB TEMPLATE - This is not a complete example but is enough to build a -# simple CI job. For full documentation, visit https://docs.gitlab.com/ee/ci/yaml/ -# -# my-example-job: -# stage: test # One of the stages listed below this job (required) -# image: paritytech/tools:latest # Any docker image (required) -# allow_failure: true # Allow the pipeline to continue if this job fails (default: false) -# needs: -# - job: test-linux # Any jobs that are required to run before this job (optional) -# variables: -# MY_ENVIRONMENT_VARIABLE: "some useful value" # Environment variables passed to the job (optional) -# script: -# - echo "List of shell commands to run in your job" -# - echo "You can also just specify a script here, like so:" -# - ./.maintain/gitlab/my_amazing_script.sh - -stages: - - check - - test - - build - - publish - - deploy - - flaming-fir - -workflow: - rules: - - if: $CI_COMMIT_TAG - - if: $CI_COMMIT_BRANCH - -variables: &default-vars - GIT_STRATEGY: fetch - GIT_DEPTH: 100 - CARGO_INCREMENTAL: 0 - DOCKER_OS: "debian:stretch" - ARCH: "x86_64" - # FIXME set to release - CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.11" - CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder" - -default: - cache: {} - -.collect-artifacts: &collect-artifacts - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}" - when: on_success - expire_in: 7 days - paths: - - artifacts/ - -.kubernetes-build: &kubernetes-build - tags: - - 
kubernetes-parity-build - interruptible: true - -.docker-env: &docker-env - image: paritytech/ci-linux:production - before_script: - - rustup show - - cargo --version - - sccache -s - retry: - max: 2 - when: - - runner_system_failure - - unknown_failure - - api_failure - interruptible: true - tags: - - linux-docker - -.test-refs: &test-refs - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -.build-refs: &build-refs - rules: - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - -#### stage: .pre - -skip-if-draft: - image: paritytech/tools:latest - <<: *kubernetes-build - stage: .pre - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - echo "Commit message is ${CI_COMMIT_MESSAGE}" - - echo "Ref is ${CI_COMMIT_REF_NAME}" - - echo "pipeline source is ${CI_PIPELINE_SOURCE}" - - ./.maintain/gitlab/skip_if_draft.sh - -#### stage: check - -check-runtime: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - variables: - <<: *default-vars - GITLAB_API: "https://gitlab.parity.io/api/v4" - GITHUB_API_PROJECT: "parity%2Finfrastructure%2Fgithub-api" - script: - - ./.maintain/gitlab/check_runtime.sh - allow_failure: true - -check-signed-tag: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - script: - - ./.maintain/gitlab/check_signed.sh - -check-line-width: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_line_width.sh - allow_failure: true - -test-dependency-rules: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-build - script: - - .maintain/ensure-deps.sh - -test-prometheus-alerting-rules: - stage: check - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_BRANCH - changes: - - .gitlab-ci.yml - - .maintain/monitoring/**/* - script: - - promtool check rules .maintain/monitoring/alerting-rules/alerting-rules.yaml - - cat .maintain/monitoring/alerting-rules/alerting-rules.yaml | promtool test rules .maintain/monitoring/alerting-rules/alerting-rule-tests.yaml - -#### stage: test - -cargo-audit: - stage: test - <<: *docker-env - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - script: - - cargo audit - allow_failure: true - -cargo-deny: - stage: test - <<: *docker-env - rules: - - if: $CI_COMMIT_MESSAGE =~ /skip-checks/ - when: never - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_PIPELINE_SOURCE == "schedule" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - script: - - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml - after_script: - - echo "___The complete log is in the artifacts___" - - cargo deny check -c .maintain/deny.toml 2> deny.log - artifacts: - name: $CI_COMMIT_SHORT_SHA - expire_in: 3 days - when: always - paths: - - deny.log - -cargo-check-benches: - stage: test - <<: *docker-env - <<: *test-refs - script: - - SKIP_WASM_BUILD=1 time cargo +nightly check --benches --all - - cargo run --release -p node-bench -- ::node::import::native::sr25519::transfer_keep_alive::paritydb::small - - cargo run --release -p node-bench -- ::trie::read::small - - sccache -s - -cargo-check-subkey: - stage: test - <<: *docker-env - <<: *test-refs - script: - - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo check --release - - sccache -s - -test-deterministic-wasm: - stage: test - <<: *docker-env - <<: *test-refs - variables: - <<: *default-vars - WASM_BUILD_NO_COLOR: 1 - script: - # build runtime - - cargo build --verbose --release -p node-runtime - # make checksum - - sha256sum target/release/wbuild/node-runtime/target/wasm32-unknown-unknown/release/node_runtime.wasm > checksum.sha256 - # clean up – FIXME: can we reuse some of the artifacts? - - cargo clean - # build again - - cargo build --verbose --release -p node-runtime - # confirm checksum - - sha256sum -c checksum.sha256 - - sccache -s - -test-linux-stable: &test-linux - stage: test - <<: *docker-env - <<: *test-refs - variables: - <<: *default-vars - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. 
- RUSTFLAGS: "-Cdebug-assertions=y -Dwarnings" - RUST_BACKTRACE: 1 - WASM_BUILD_NO_COLOR: 1 - script: - # this job runs all tests in former runtime-benchmarks, frame-staking and wasmtime tests - - time cargo test --workspace --locked --release --verbose --features runtime-benchmarks --manifest-path bin/node/cli/Cargo.toml - - SUBSTRATE_TEST_TIMEOUT=1 time cargo test -p substrate-test-utils --release --verbose --locked -- --ignored timeout - - sccache -s - -unleash-check: - stage: test - <<: *docker-env - rules: - - if: $CI_COMMIT_MESSAGE =~ /skip-checks/ - when: never - # .test-refs - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - script: - - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - - cargo unleash check ${CARGO_UNLEASH_PKG_DEF} - -test-frame-examples-compile-to-wasm: - # into one job - stage: test - <<: *docker-env - <<: *test-refs - variables: - <<: *default-vars - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. 
- RUSTFLAGS: -Cdebug-assertions=y - RUST_BACKTRACE: 1 - script: - - cd frame/example-offchain-worker/ - - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - - cd ../example - - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features - - sccache -s - -test-linux-stable-int: - <<: *test-linux - script: - - echo "___Logs will be partly shown at the end in case of failure.___" - - echo "___Full log will be saved to the job artifacts only in case of failure.___" - - WASM_BUILD_NO_COLOR=1 - RUST_LOG=sync=trace,consensus=trace,client=trace,state-db=trace,db=trace,forks=trace,state_db=trace,storage_cache=trace - time cargo test -p node-cli --release --verbose --locked -- --ignored - &> ${CI_COMMIT_SHORT_SHA}_int_failure.log - - sccache -s - after_script: - - awk '/FAILED|^error\[/,0' ${CI_COMMIT_SHORT_SHA}_int_failure.log - artifacts: - name: $CI_COMMIT_SHORT_SHA - when: on_failure - expire_in: 3 days - paths: - - ${CI_COMMIT_SHORT_SHA}_int_failure.log - -check-web-wasm: - stage: test - <<: *docker-env - <<: *test-refs - script: - # WASM support is in progress. As more and more crates support WASM, we - # should add entries here. See https://github.com/paritytech/substrate/issues/2416 - # Note: we don't need to test crates imported in `bin/node/cli` - - time cargo build --manifest-path=client/consensus/aura/Cargo.toml --target=wasm32-unknown-unknown --features getrandom - # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. 
- - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features browser --target=wasm32-unknown-unknown -Z features=itarget - # with-tracing must be explicitly activated, we run a test to ensure this works as expected in both cases - - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features - - time cargo +nightly test --manifest-path primitives/tracing/Cargo.toml --no-default-features --features=with-tracing - - sccache -s - -test-full-crypto-feature: - stage: test - <<: *docker-env - <<: *test-refs - variables: - <<: *default-vars - # Enable debug assertions since we are running optimized builds for testing - # but still want to have debug assertions. - RUSTFLAGS: -Cdebug-assertions=y - RUST_BACKTRACE: 1 - script: - - cd primitives/core/ - - time cargo +nightly build --verbose --no-default-features --features full_crypto - - cd ../application-crypto - - time cargo +nightly build --verbose --no-default-features --features full_crypto - - sccache -s - -cargo-check-macos: - stage: test - # shell runner on mac ignores the image set in *docker-env - <<: *docker-env - <<: *test-refs - script: - - SKIP_WASM_BUILD=1 time cargo check --release - - sccache -s - tags: - - osx - -#### stage: build - -check-polkadot-companion-status: - stage: build - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_polkadot_companion_status.sh - -check-polkadot-companion-build: - stage: build - <<: *docker-env - <<: *test-refs - needs: - - job: test-linux-stable-int - artifacts: false - script: - - ./.maintain/gitlab/check_polkadot_companion_build.sh - after_script: - - cd polkadot && git rev-parse --abbrev-ref HEAD - allow_failure: true - -test-browser-node: - stage: build - <<: *docker-env - <<: *test-refs - needs: - - job: check-web-wasm - artifacts: false - variables: - <<: *default-vars - CHROMEDRIVER_ARGS: 
"--log-level=INFO --whitelisted-ips=127.0.0.1" - CARGO_TARGET_WASM32_UNKNOWN_UNKNOWN_RUNNER: "wasm-bindgen-test-runner" - WASM_BINDGEN_TEST_TIMEOUT: 120 - script: - - cargo +nightly test --target wasm32-unknown-unknown -p node-browser-testing -Z features=itarget - -build-linux-substrate: &build-binary - stage: build - <<: *collect-artifacts - <<: *docker-env - rules: - # .build-refs with manual on PRs - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: manual - allow_failure: true - needs: - - job: test-linux-stable - artifacts: false - before_script: - - mkdir -p ./artifacts/substrate/ - script: - - WASM_BUILD_NO_COLOR=1 time cargo build --release --verbose - - mv ./target/release/substrate ./artifacts/substrate/. - - echo -n "Substrate version = " - - if [ "${CI_COMMIT_TAG}" ]; then - echo "${CI_COMMIT_TAG}" | tee ./artifacts/substrate/VERSION; - else - ./artifacts/substrate/substrate --version | - sed -n -E 's/^substrate ([0-9.]+.*-[0-9a-f]{7,13})-.*$/\1/p' | - tee ./artifacts/substrate/VERSION; - fi - - sha256sum ./artifacts/substrate/substrate | tee ./artifacts/substrate/substrate.sha256 - - printf '\n# building node-template\n\n' - - ./.maintain/node-template-release.sh ./artifacts/substrate/substrate-node-template.tar.gz - - cp -r .maintain/docker/substrate.Dockerfile ./artifacts/substrate/ - - sccache -s - -build-linux-subkey: &build-subkey - stage: build - <<: *collect-artifacts - <<: *docker-env - rules: - # .build-refs with manual on PRs - - if: $CI_PIPELINE_SOURCE == "web" - - if: $CI_COMMIT_REF_NAME == "master" - - if: $CI_COMMIT_REF_NAME == "tags" - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. 
v1.0, v2.1rc1 - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - when: manual - allow_failure: true - needs: - - job: cargo-check-subkey - artifacts: false - before_script: - - mkdir -p ./artifacts/subkey - script: - - cd ./bin/utils/subkey - - SKIP_WASM_BUILD=1 time cargo build --release --verbose - - cd - - - mv ./target/release/subkey ./artifacts/subkey/. - - echo -n "Subkey version = " - - ./artifacts/subkey/subkey --version | - sed -n -E 's/^subkey ([0-9.]+.*)/\1/p' | - tee ./artifacts/subkey/VERSION; - - sha256sum ./artifacts/subkey/subkey | tee ./artifacts/subkey/subkey.sha256 - - cp -r .maintain/docker/subkey.Dockerfile ./artifacts/subkey/ - - sccache -s - -build-macos-subkey: - <<: *build-subkey - tags: - - osx - -build-rust-doc: - stage: build - <<: *docker-env - <<: *test-refs - needs: - - job: test-linux-stable - artifacts: false - variables: - <<: *default-vars - RUSTFLAGS: -Dwarnings - artifacts: - name: "${CI_JOB_NAME}_${CI_COMMIT_REF_NAME}-doc" - when: on_success - expire_in: 7 days - paths: - - ./crate-docs/ - script: - - rm -f ./crate-docs/index.html # use it as an indicator if the job succeeds - - SKIP_WASM_BUILD=1 RUSTDOCFLAGS="--html-in-header $(pwd)/.maintain/rustdoc-header.html" - time cargo +nightly doc --no-deps --workspace --all-features --verbose - - mv ./target/doc ./crate-docs - - echo "" > ./crate-docs/index.html - - sccache -s - -#### stage: publish - -.build-push-docker-image: &build-push-docker-image - <<: *build-refs - <<: *kubernetes-build - image: quay.io/buildah/stable - variables: &docker-build-vars - <<: *default-vars - GIT_STRATEGY: none - DOCKERFILE: $PRODUCT.Dockerfile - IMAGE_NAME: docker.io/parity/$PRODUCT - before_script: - - cd ./artifacts/$PRODUCT/ - - VERSION="$(cat ./VERSION)" - - echo "${PRODUCT} version = ${VERSION}" - - test -z "${VERSION}" && exit 1 - script: - - test "$Docker_Hub_User_Parity" -a "$Docker_Hub_Pass_Parity" || - ( echo "no docker credentials provided"; exit 1 ) - - buildah bud - --format=docker - 
--build-arg VCS_REF="${CI_COMMIT_SHA}" - --build-arg BUILD_DATE="$(date -u '+%Y-%m-%dT%H:%M:%SZ')" - --tag "$IMAGE_NAME:$VERSION" - --tag "$IMAGE_NAME:latest" - --file "$DOCKERFILE" . - - echo "$Docker_Hub_Pass_Parity" | - buildah login --username "$Docker_Hub_User_Parity" --password-stdin docker.io - - buildah info - - buildah push --format=v2s2 "$IMAGE_NAME:$VERSION" - - buildah push --format=v2s2 "$IMAGE_NAME:latest" - - buildah logout "$IMAGE_NAME" - -publish-docker-substrate: - stage: publish - <<: *build-push-docker-image - # collect VERSION artifact here to pass it on to kubernetes - <<: *collect-artifacts - needs: - - job: build-linux-substrate - artifacts: true - variables: - <<: *docker-build-vars - PRODUCT: substrate - after_script: - # only VERSION information is needed for the deployment - - find ./artifacts/ -depth -not -name VERSION -type f -delete - -publish-docker-subkey: - stage: publish - <<: *build-push-docker-image - needs: - - job: build-linux-subkey - artifacts: true - variables: - <<: *docker-build-vars - PRODUCT: subkey - -publish-s3-release: - stage: publish - <<: *build-refs - <<: *kubernetes-build - needs: - - job: build-linux-substrate - artifacts: true - - job: build-linux-subkey - artifacts: true - image: paritytech/awscli:latest - variables: - GIT_STRATEGY: none - BUCKET: "releases.parity.io" - PREFIX: "substrate/${ARCH}-${DOCKER_OS}" - script: - - aws s3 sync ./artifacts/ s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/substrate/VERSION)/ - - echo "update objects in latest path" - - aws s3 sync s3://${BUCKET}/${PREFIX}/$(cat ./artifacts/substrate/VERSION)/ s3://${BUCKET}/${PREFIX}/latest/ - after_script: - - aws s3 ls s3://${BUCKET}/${PREFIX}/latest/ - --recursive --human-readable --summarize - -publish-s3-doc: - stage: publish - image: paritytech/awscli:latest - allow_failure: true - needs: - - job: build-rust-doc - artifacts: true - - job: build-linux-substrate - artifacts: false - <<: *build-refs - <<: *kubernetes-build - variables: - 
GIT_STRATEGY: none - BUCKET: "releases.parity.io" - PREFIX: "substrate-rustdoc" - script: - - test -r ./crate-docs/index.html || ( - echo "./crate-docs/index.html not present, build:rust:doc:release job not complete"; - exit 1 - ) - - aws s3 sync --delete --size-only --only-show-errors - ./crate-docs/ s3://${BUCKET}/${PREFIX}/ - after_script: - - aws s3 ls s3://${BUCKET}/${PREFIX}/ - --human-readable --summarize - -publish-draft-release: - stage: publish - image: paritytech/tools:latest - rules: - - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - script: - - ./.maintain/gitlab/publish_draft_release.sh - allow_failure: true - -publish-to-crates-io: - stage: publish - <<: *docker-env - rules: - - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 - script: - - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - - cargo unleash em-dragons --no-check --owner github:paritytech:core-devs ${CARGO_UNLEASH_PKG_DEF} - allow_failure: true - -#### stage: deploy - -deploy-prometheus-alerting-rules: - stage: deploy - needs: - - job: test-prometheus-alerting-rules - artifacts: false - interruptible: true - retry: 1 - tags: - - kubernetes-parity-build - image: paritytech/kubetools:latest - environment: - name: parity-mgmt-polkadot-alerting - variables: - NAMESPACE: monitoring - PROMETHEUSRULE: prometheus-k8s-rules-polkadot-alerting - RULES: .maintain/monitoring/alerting-rules/alerting-rules.yaml - script: - - echo "deploying prometheus alerting rules" - - kubectl -n ${NAMESPACE} patch prometheusrule ${PROMETHEUSRULE} - --type=merge --patch "$(sed 's/^/ /;1s/^/spec:\n/' ${RULES})" - rules: - - if: $CI_COMMIT_REF_NAME == "master" - changes: - - .gitlab-ci.yml - - .maintain/monitoring/**/* - -trigger-simnet: - stage: deploy - rules: - # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - - if: 
$CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" - needs: - - job: publish-docker-substrate - artifacts: false - trigger: - project: parity/simnet - branch: master - strategy: depend - -.validator-deploy: &validator-deploy - stage: deploy - rules: - # this job runs only on nightly pipeline with the mentioned variable, against `master` branch - - if: $CI_COMMIT_REF_NAME == "master" && $CI_PIPELINE_SOURCE == "schedule" && $PIPELINE == "nightly" - needs: - # script will fail if there is no artifacts/substrate/VERSION - - job: publish-docker-substrate - artifacts: true - image: parity/azure-ansible:v2 - allow_failure: true - interruptible: true - tags: - - linux-docker - -validator 1 4: - <<: *validator-deploy - script: - - ./.maintain/flamingfir-deploy.sh flamingfir-validator1 - -validator 2 4: - <<: *validator-deploy - script: - - ./.maintain/flamingfir-deploy.sh flamingfir-validator2 - -validator 3 4: - <<: *validator-deploy - script: - - ./.maintain/flamingfir-deploy.sh flamingfir-validator3 - -validator 4 4: - <<: *validator-deploy - script: - - ./.maintain/flamingfir-deploy.sh flamingfir-validator4 - -#### stage: .post - -check-labels: - stage: .post - image: paritytech/tools:latest - <<: *kubernetes-build - rules: - - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - script: - - ./.maintain/gitlab/check_labels.sh diff --git a/.maintain/gitlab/lib.sh b/.maintain/common/lib.sh similarity index 89% rename from .maintain/gitlab/lib.sh rename to .maintain/common/lib.sh index 33477b52f5..1d4be0ecc7 100755 --- a/.maintain/gitlab/lib.sh +++ b/.maintain/common/lib.sh @@ -66,11 +66,17 @@ has_label(){ repo="$1" pr_id="$2" label="$3" + + # These will exist if the function is called in Gitlab. + # If the function's called in Github, we should have GITHUB_ACCESS_TOKEN set + # already. 
if [ -n "$GITHUB_RELEASE_TOKEN" ]; then - out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/pulls/$pr_id") - else - out=$(curl -H "Authorization: token $GITHUB_PR_TOKEN" -s "$api_base/$repo/pulls/$pr_id") + GITHUB_TOKEN="$GITHUB_RELEASE_TOKEN" + elif [ -n "$GITHUB_PR_TOKEN" ]; then + GITHUB_TOKEN="$GITHUB_PR_TOKEN" fi + + out=$(curl -H "Authorization: token $GITHUB_TOKEN" -s "$api_base/$repo/pulls/$pr_id") [ -n "$(echo "$out" | tr -d '\r\n' | jq ".labels | .[] | select(.name==\"$label\")")" ] } diff --git a/.maintain/docker/subkey.Dockerfile b/.maintain/docker/subkey.Dockerfile deleted file mode 100644 index 9184cad5b4..0000000000 --- a/.maintain/docker/subkey.Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -FROM debian:stretch-slim - -# metadata -ARG VCS_REF -ARG BUILD_DATE - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="parity/subkey" \ - io.parity.image.description="Subkey: key generating utility for Substrate." 
\ - io.parity.image.source="https://github.com/paritytech/substrate/blob/${VCS_REF}/.maintain/docker/subkey.Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://github.com/paritytech/substrate/tree/${VCS_REF}/subkey" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# add user -RUN useradd -m -u 1000 -U -s /bin/sh -d /subkey subkey - -# add subkey binary to docker image -COPY ./subkey /usr/local/bin - -USER subkey - -# check if executable works in this container -RUN /usr/local/bin/subkey --version - -ENTRYPOINT ["/usr/local/bin/subkey"] - diff --git a/.maintain/docker/substrate.Dockerfile b/.maintain/docker/substrate.Dockerfile deleted file mode 100644 index 7cd4576a9e..0000000000 --- a/.maintain/docker/substrate.Dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -FROM debian:stretch-slim - -# metadata -ARG VCS_REF -ARG BUILD_DATE - -LABEL io.parity.image.authors="devops-team@parity.io" \ - io.parity.image.vendor="Parity Technologies" \ - io.parity.image.title="parity/substrate" \ - io.parity.image.description="Substrate: The platform for blockchain innovators." 
\ - io.parity.image.source="https://github.com/paritytech/substrate/blob/${VCS_REF}/.maintain/docker/Dockerfile" \ - io.parity.image.revision="${VCS_REF}" \ - io.parity.image.created="${BUILD_DATE}" \ - io.parity.image.documentation="https://wiki.parity.io/Parity-Substrate" - -# show backtraces -ENV RUST_BACKTRACE 1 - -# install tools and dependencies -RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get upgrade -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y \ - libssl1.1 \ - ca-certificates \ - curl && \ -# apt cleanup - apt-get autoremove -y && \ - apt-get clean && \ - find /var/lib/apt/lists/ -type f -not -name lock -delete; \ -# add user - useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate - -# add substrate binary to docker image -COPY ./substrate /usr/local/bin - -USER substrate - -# check if executable works in this container -RUN /usr/local/bin/substrate --version - -EXPOSE 30333 9933 9944 -VOLUME ["/substrate"] - -ENTRYPOINT ["/usr/local/bin/substrate"] - diff --git a/.maintain/flamingfir-deploy.sh b/.maintain/flamingfir-deploy.sh deleted file mode 100755 index 8f0fb3a2bc..0000000000 --- a/.maintain/flamingfir-deploy.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -RETRY_COUNT=10 -RETRY_ATTEMPT=0 -SLEEP_TIME=15 -TARGET_HOST="$1" -COMMIT=$(cat artifacts/substrate/VERSION) -DOWNLOAD_URL="https://releases.parity.io/substrate/x86_64-debian:stretch/${COMMIT}/substrate/substrate" -POST_DATA='{"extra_vars":{"artifact_path":"'${DOWNLOAD_URL}'","target_host":"'${TARGET_HOST}'"}}' - -JOB_ID=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" --header "Content-type: application/json" --post-data "${POST_DATA}" https://ansible-awx.parity.io/api/v2/job_templates/32/launch/ | jq .job) - -echo "Launched job: $JOB_ID" - - -while [ ${RETRY_ATTEMPT} -le ${RETRY_COUNT} ] ; do - export RETRY_RESULT=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/ | jq .status) - 
RETRY_ATTEMPT=$(( $RETRY_ATTEMPT +1 )) - sleep $SLEEP_TIME - if [ $(echo $RETRY_RESULT | egrep -e successful -e failed) ] ; then - break - fi -done - -AWX_OUTPUT=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/stdout?format=txt_download) - -echo "AWX job log:" -echo "${AWX_OUTPUT}" - - -JOB_STATUS=$(wget -O - --header "Authorization: Bearer ${AWX_TOKEN}" https://ansible-awx.parity.io/api/v2/jobs/${JOB_ID}/ | jq .status ) - -echo "===================================" -echo -e "Ansible AWX Remote Job: ${JOB_ID} \x1B[31mStatus: ${JOB_STATUS}\x1B[0m" -echo "===================================" diff --git a/.maintain/gitlab/check_labels.sh b/.maintain/github/check_labels.sh similarity index 76% rename from .maintain/gitlab/check_labels.sh rename to .maintain/github/check_labels.sh index 5ab099b382..75190db668 100755 --- a/.maintain/gitlab/check_labels.sh +++ b/.maintain/github/check_labels.sh @@ -1,11 +1,14 @@ #!/usr/bin/env bash -#shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" +#shellcheck source=../common/lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../common/lib.sh" + +repo="$GITHUB_REPOSITORY" +pr="$GITHUB_PR" ensure_labels() { for label in "$@"; do - if has_label 'paritytech/substrate' "$CI_COMMIT_BRANCH" "$label"; then + if has_label "$repo" "$pr" "$label"; then return 0 fi done @@ -27,7 +30,7 @@ criticality_labels=( 'C9-critical' ) -echo "[+] Checking release notes (B) labels for $CI_COMMIT_BRANCH" +echo "[+] Checking release notes (B) labels" if ensure_labels "${releasenotes_labels[@]}"; then echo "[+] Release notes label detected. All is well." else @@ -35,7 +38,7 @@ else exit 1 fi -echo "[+] Checking release criticality (C) labels for $CI_COMMIT_BRANCH" +echo "[+] Checking release criticality (C) labels" if ensure_labels "${criticality_labels[@]}"; then echo "[+] Release criticality label detected. 
All is well." else diff --git a/.maintain/gitlab/check_line_width.sh b/.maintain/gitlab/check_line_width.sh deleted file mode 100755 index ebab3013e4..0000000000 --- a/.maintain/gitlab/check_line_width.sh +++ /dev/null @@ -1,55 +0,0 @@ -#!/bin/sh -# -# check if line width of rust source files is not beyond x characters -# -set -e -set -o pipefail - -BASE_ORIGIN="origin" -BASE_BRANCH_NAME="master" -LINE_WIDTH="120" -GOOD_LINE_WIDTH="100" -BASE_BRANCH="${BASE_ORIGIN}/${BASE_BRANCH_NAME}" -git fetch ${BASE_ORIGIN} ${BASE_BRANCH_NAME} --depth 100 -BASE_HASH=$(git merge-base ${BASE_BRANCH} HEAD) - -git diff --name-only ${BASE_HASH} -- \*.rs | ( while read file -do - if [ ! -f ${file} ]; - then - echo "Skipping removed file." - elif git diff ${BASE_HASH} -- ${file} | grep -q "^+.\{$(( $LINE_WIDTH + 1 ))\}" - then - if [ -z "${FAIL}" ] - then - echo "| error!" - echo "| Lines must not be longer than ${LINE_WIDTH} characters." - echo "| " - echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" - echo "|" - FAIL="true" - fi - echo "| file: ${file}" - git diff ${BASE_HASH} -- ${file} \ - | grep -n "^+.\{$(( $LINE_WIDTH + 1))\}" - echo "|" - else - if git diff ${BASE_HASH} -- ${file} | grep -q "^+.\{$(( $GOOD_LINE_WIDTH + 1 ))\}" - then - if [ -z "${FAIL}" ] - then - echo "| warning!" - echo "| Lines should be longer than ${GOOD_LINE_WIDTH} characters only in exceptional circumstances!" 
- echo "| " - echo "| see more https://github.com/paritytech/substrate/blob/master/docs/STYLE_GUIDE.md" - echo "|" - fi - echo "| file: ${file}" - git diff ${BASE_HASH} -- ${file} | grep -n "^+.\{$(( $GOOD_LINE_WIDTH + 1 ))\}" - echo "|" - fi - fi -done - -test -z "${FAIL}" -) diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh deleted file mode 100755 index e5b308d038..0000000000 --- a/.maintain/gitlab/check_polkadot_companion_build.sh +++ /dev/null @@ -1,98 +0,0 @@ -#!/usr/bin/env sh -# -# check if a pr is compatible with polkadot companion pr or master if not -# available -# -# to override one that was just mentioned mark companion pr in the body of the -# polkadot pr like -# -# polkadot companion: paritytech/polkadot#567 -# - -set -e - -github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" -# use github api v3 in order to access the data without authentication -github_header="Authorization: token ${GITHUB_PR_TOKEN}" - -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } - - - -boldcat <<-EOT - - -check_polkadot_companion_build -============================== - -this job checks if there is a string in the description of the pr like - -polkadot companion: paritytech/polkadot#567 - - -it will then run cargo check from this polkadot's branch with substrate code -from this pull request. otherwise, it will uses master instead - - -EOT - -# Set the user name and email to make merging work -git config --global user.name 'CI system' -git config --global user.email '<>' - -# Merge master into our branch before building Polkadot to make sure we don't miss -# any commits that are required by Polkadot. -git fetch --depth 100 origin -git merge origin/master - -# Clone the current Polkadot master branch into ./polkadot. 
-# NOTE: we need to pull enough commits to be able to find a common -# ancestor for successfully performing merges below. -git clone --depth 20 https://github.com/paritytech/polkadot.git - -cargo install -f diener - -cd polkadot - -# either it's a pull request then check for a companion otherwise use -# polkadot:master -if expr match "${CI_COMMIT_REF_NAME}" '^[0-9]\+$' >/dev/null -then - boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" - - pr_data_file="$(mktemp)" - # get the last reference to a pr in polkadot - curl -sSL -H "${github_header}" -o "${pr_data_file}" \ - "${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME}" - - pr_body="$(sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p' "${pr_data_file}")" - - pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ - | tail -n 1)" - - if [ "${pr_companion}" ] - then - boldprint "companion pr specified/detected: #${pr_companion}" - git fetch origin refs/pull/${pr_companion}/head:pr/${pr_companion} - git checkout pr/${pr_companion} - git merge origin/master - else - boldprint "no companion branch found - building polkadot:master" - fi - rm -f "${pr_data_file}" -else - boldprint "this is not a pull request - building polkadot:master" -fi - -# Patch all Substrate crates in Polkadot -diener patch --crates-to-patch ../ --substrate - -# Test Polkadot pr or master branch with this Substrate commit. 
-cargo update -p sp-io -time cargo test --all --release --verbose --features=real-overseer - -cd parachain/test-parachains/adder/collator/ -time cargo test --release --verbose --locked --features=real-overseer diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh deleted file mode 100755 index 4714baf54f..0000000000 --- a/.maintain/gitlab/check_polkadot_companion_status.sh +++ /dev/null @@ -1,103 +0,0 @@ -#!/bin/sh -# -# check for a polkadot companion pr and ensure it has approvals and is -# mergeable -# - -github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" -github_api_polkadot_pull_url="https://api.github.com/repos/paritytech/polkadot/pulls" -# use github api v3 in order to access the data without authentication -github_header="Authorization: token ${GITHUB_PR_TOKEN}" - -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } - - - -boldcat <<-EOT - - -check_polkadot_companion_status -=============================== - -this job checks if there is a string in the description of the pr like - -polkadot companion: paritytech/polkadot#567 - -and checks its status. - - -EOT - - -if ! 
[ "${CI_COMMIT_REF_NAME}" -gt 0 2>/dev/null ] -then - boldprint "this doesn't seem to be a pull request" - exit 1 -fi - -boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" - -pr_body="$(curl -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME} \ - | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" - -# get companion if explicitly specified -pr_companion="$(echo "${pr_body}" | sed -n -r \ - -e 's;^.*[Cc]ompanion.*paritytech/polkadot#([0-9]+).*$;\1;p' \ - -e 's;^.*[Cc]ompanion.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ - | tail -n 1)" - -if [ -z "${pr_companion}" ] -then - boldprint "no companion pr found" - exit 0 -fi - -boldprint "companion pr: #${pr_companion}" - -# check the status of that pull request - needs to be -# mergable and approved - -curl -H "${github_header}" -sS -o companion_pr.json \ - ${github_api_polkadot_pull_url}/${pr_companion} - -pr_head_sha=$(jq -r -e '.head.sha' < companion_pr.json) -boldprint "Polkadot PR's HEAD SHA: $pr_head_sha" - -if jq -e .merged < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} already merged" - exit 0 -fi - -if jq -e '.mergeable' < companion_pr.json >/dev/null -then - boldprint "polkadot pr #${pr_companion} mergeable" -else - boldprint "polkadot pr #${pr_companion} not mergeable" - exit 1 -fi - -curl -H "${github_header}" -sS -o companion_pr_reviews.json \ - ${github_api_polkadot_pull_url}/${pr_companion}/reviews - -# If there are any 'CHANGES_REQUESTED' reviews for the *current* review -jq -r -e '.[] | select(.state == "CHANGES_REQUESTED").commit_id' \ - < companion_pr_reviews.json > companion_pr_reviews_current.json -while IFS= read -r line; do - if [ "$line" = "$pr_head_sha" ]; then - boldprint "polkadot pr #${pr_companion} has CHANGES_REQUESTED for the latest commit" - exit 1 - fi -done < companion_pr_reviews_current.json - -# Then we check for at least 1 APPROVED -if [ -z "$(jq -r -e '.[].state | select(. 
== "APPROVED")' < companion_pr_reviews.json)" ]; then - boldprint "polkadot pr #${pr_companion} not APPROVED" - exit 1 -fi - -boldprint "polkadot pr #${pr_companion} state APPROVED" -exit 0 - - diff --git a/.maintain/gitlab/check_runtime.sh b/.maintain/gitlab/check_runtime.sh deleted file mode 100755 index 6d009c5aaf..0000000000 --- a/.maintain/gitlab/check_runtime.sh +++ /dev/null @@ -1,124 +0,0 @@ -#!/bin/sh -# -# -# check for any changes in the node/src/runtime, frame/ and primitives/sr_* trees. if -# there are any changes found, it should mark the PR breaksconsensus and -# "auto-fail" the PR if there isn't a change in the runtime/src/lib.rs file -# that alters the version. - -set -e # fail on any error - - - -VERSIONS_FILE="bin/node/runtime/src/lib.rs" - -boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } -boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } - -github_label () { - echo - echo "# run github-api job for labeling it ${1}" - curl -sS -X POST \ - -F "token=${CI_JOB_TOKEN}" \ - -F "ref=master" \ - -F "variables[LABEL]=${1}" \ - -F "variables[PRNO]=${CI_COMMIT_REF_NAME}" \ - ${GITLAB_API}/projects/${GITHUB_API_PROJECT}/trigger/pipeline -} - - -boldprint "latest 10 commits of ${CI_COMMIT_REF_NAME}" -git log --graph --oneline --decorate=short -n 10 - -boldprint "make sure the master branch and release tag are available in shallow clones" -git fetch --depth=${GIT_DEPTH:-100} origin master -git fetch --depth=${GIT_DEPTH:-100} origin release -git tag -f release FETCH_HEAD -git log -n1 release - - -boldprint "check if the wasm sources changed" -if ! 
git diff --name-only origin/master...${CI_COMMIT_SHA} \ - | grep -v -e '^primitives/sr-arithmetic/fuzzer' \ - | grep -q -e '^bin/node/src/runtime' -e '^frame/' -e '^primitives/sr-' -then - boldcat <<-EOT - - no changes to the runtime source code detected - - EOT - - exit 0 -fi - - - -# check for spec_version updates: if the spec versions changed, then there is -# consensus-critical logic that has changed. the runtime wasm blobs must be -# rebuilt. - -add_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r "s/^\+[[:space:]]+spec_version: +([0-9]+),$/\1/p")" -sub_spec_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r "s/^\-[[:space:]]+spec_version: +([0-9]+),$/\1/p")" - - - -if [ "${add_spec_version}" != "${sub_spec_version}" ] -then - - github_label "D2-breaksapi" - - boldcat <<-EOT - - changes to the runtime sources and changes in the spec version. - - spec_version: ${sub_spec_version} -> ${add_spec_version} - - EOT - exit 0 - -else - # check for impl_version updates: if only the impl versions changed, we assume - # there is no consensus-critical logic that has changed. - - add_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r 's/^\+[[:space:]]+impl_version: +([0-9]+),$/\1/p')" - sub_impl_version="$(git diff tags/release...${CI_COMMIT_SHA} ${VERSIONS_FILE} \ - | sed -n -r 's/^\-[[:space:]]+impl_version: +([0-9]+),$/\1/p')" - - - # see if the impl version changed - if [ "${add_impl_version}" != "${sub_impl_version}" ] - then - boldcat <<-EOT - - changes to the runtime sources and changes in the impl version. - - impl_version: ${sub_impl_version} -> ${add_impl_version} - - EOT - exit 0 - fi - - - boldcat <<-EOT - - wasm source files changed but not the spec/impl version. If changes made do not alter logic, - just bump 'impl_version'. If they do change logic, bump 'spec_version'. 
- - source file directories: - - bin/node/src/runtime - - frame - - primitives/sr-* - - versions file: ${VERSIONS_FILE} - - EOT -fi - -# dropped through. there's something wrong; exit 1. - -exit 1 - -# vim: noexpandtab diff --git a/.maintain/gitlab/check_signed.sh b/.maintain/gitlab/check_signed.sh deleted file mode 100755 index 7c4cc47bab..0000000000 --- a/.maintain/gitlab/check_signed.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" - -version="$CI_COMMIT_TAG" - -echo '[+] Checking tag has been signed' -check_tag "paritytech/substrate" "$version" -case $? in - 0) echo '[+] Tag found and has been signed'; exit 0 - ;; - 1) echo '[!] Tag found but has not been signed. Aborting release.'; exit 1 - ;; - 2) echo '[!] Tag not found. Aborting release.'; exit 1 -esac diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh deleted file mode 100755 index c13871f50e..0000000000 --- a/.maintain/gitlab/generate_changelog.sh +++ /dev/null @@ -1,85 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" - -version="$2" -last_version="$1" - -all_changes="$(sanitised_git_logs "$last_version" "$version")" -runtime_changes="" -api_changes="" -client_changes="" -changes="" -migrations="" - -while IFS= read -r line; do - pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') - - # Skip if the PR has the silent label - this allows us to skip a few requests - if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then - continue - fi - if has_label 'paritytech/substrate' "$pr_id" 'B3-apinoteworthy' ; then - api_changes="$api_changes -$line" - fi - if has_label 'paritytech/substrate' "$pr_id" 'B5-clientnoteworthy'; then - client_changes="$client_changes -$line" - fi - if has_label 'paritytech/substrate' "$pr_id" 'B7-runtimenoteworthy'; then - 
runtime_changes="$runtime_changes -$line" - fi - if has_label 'paritytech/substrate' "$pr_id" 'D1-runtime-migration'; then - migrations="$migrations -$line" - fi -done <<< "$all_changes" - -# Make the substrate section if there are any substrate changes -if [ -n "$runtime_changes" ] || - [ -n "$api_changes" ] || - [ -n "$client_changes" ] || - [ -n "$migrations" ]; then - changes=$(cat << EOF -Substrate changes ------------------ - -EOF -) - if [ -n "$runtime_changes" ]; then - changes="$changes - -Runtime -------- -$runtime_changes" - fi - if [ -n "$client_changes" ]; then - changes="$changes - -Client ------- -$client_changes" - fi - if [ -n "$api_changes" ]; then - changes="$changes - -API ---- -$api_changes" - fi - release_text="$release_text - -$changes" -fi -if [ -n "$migrations" ]; then - changes="$changes - -Runtime Migrations ------------------- -$migrations" -fi - -echo "$changes" diff --git a/.maintain/gitlab/publish_draft_release.sh b/.maintain/gitlab/publish_draft_release.sh deleted file mode 100755 index c5813718a6..0000000000 --- a/.maintain/gitlab/publish_draft_release.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash - -# shellcheck source=lib.sh -source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" - -version="$CI_COMMIT_TAG" - -# Note that this is not the last *tagged* version, but the last *published* version -last_version=$(last_github_release 'paritytech/substrate') - -release_text="$(./generate_release_text.sh "$last_version" "$version")" - -echo "[+] Pushing release to github" -# Create release on github -release_name="Substrate $version" -data=$(jq -Rs --arg version "$version" \ - --arg release_name "$release_name" \ - --arg release_text "$release_text" \ -'{ - "tag_name": $version, - "target_commitish": "master", - "name": $release_name, - "body": $release_text, - "draft": true, - "prerelease": false -}' < /dev/null) - -out=$(curl -s -X POST --data "$data" -H "Authorization: token $GITHUB_RELEASE_TOKEN" 
"$api_base/paritytech/substrate/releases") - -html_url=$(echo "$out" | jq -r .html_url) - -if [ "$html_url" == "null" ] -then - echo "[!] Something went wrong posting:" - echo "$out" -else - echo "[+] Release draft created: $html_url" -fi - -echo '[+] Sending draft release URL to Matrix' - -msg_body=$(cat <Release pipeline for Substrate $version complete.
-Draft release created: $html_url -EOF -) -send_message "$(structure_message "$msg_body" "$formatted_msg_body")" "$MATRIX_ROOM_ID" "$MATRIX_ACCESS_TOKEN" - -echo "[+] Done! Maybe the release worked..." diff --git a/.maintain/gitlab/skip_if_draft.sh b/.maintain/gitlab/skip_if_draft.sh deleted file mode 100755 index cf6ea6a5b3..0000000000 --- a/.maintain/gitlab/skip_if_draft.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/sh -url="https://api.github.com/repos/paritytech/substrate/pulls/${CI_COMMIT_REF_NAME}" -echo "[+] API URL: $url" - -draft_state=$(curl -H "Authorization: token ${GITHUB_PR_TOKEN}" "$url" | jq -r .draft) -echo "[+] Draft state: $draft_state" - -if [ "$draft_state" = 'true' ]; then - echo "[!] PR is currently a draft, stopping pipeline" - exit 1 -else - echo "[+] PR is not a draft. Proceeding with CI pipeline" - exit 0 -fi diff --git a/.maintain/kubernetes/Chart.yaml b/.maintain/kubernetes/Chart.yaml deleted file mode 100644 index 8e000ae09f..0000000000 --- a/.maintain/kubernetes/Chart.yaml +++ /dev/null @@ -1,12 +0,0 @@ -name: substrate -version: 0.2 -appVersion: 0.9.1 -description: "Substrate: The platform for blockchain innovators" -home: https://substrate.network/ -icon: https://substrate.network/favicon.ico -sources: - - https://github.com/paritytech/substrate/ -maintainers: - - name: Paritytech Devops Team - email: devops-team@parity.io -tillerVersion: ">=2.8.0" diff --git a/.maintain/kubernetes/README.md b/.maintain/kubernetes/README.md deleted file mode 100644 index 0f3ec38990..0000000000 --- a/.maintain/kubernetes/README.md +++ /dev/null @@ -1,47 +0,0 @@ - - -# Substrate Kubernetes Helm Chart - -This [Helm Chart](https://helm.sh/) can be used for deploying containerized -**Substrate** to a [Kubernetes](https://kubernetes.io/) cluster. 
- - -## Prerequisites - -- Tested on Kubernetes 1.10.7-gke.6 - -## Installation - -To install the chart with the release name `my-release` into namespace -`my-namespace` from within this directory: - -```console -$ helm install --namespace my-namespace --name my-release --values values.yaml ./ -``` - -The command deploys Substrate on the Kubernetes cluster in the configuration -given in `values.yaml`. When the namespace is omitted it'll be installed in -the default one. - - -## Removal of the Chart - -To uninstall/delete the `my-release` deployment: - -```console -$ helm delete --namespace my-namespace my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - - -## Upgrading - -Once the chart is installed and a new version should be deployed helm takes -care of this by - -```console -$ helm upgrade --namespace my-namespace --values values.yaml my-release ./ -``` - - diff --git a/.maintain/kubernetes/templates/poddisruptionbudget.yaml b/.maintain/kubernetes/templates/poddisruptionbudget.yaml deleted file mode 100644 index 56958b1fba..0000000000 --- a/.maintain/kubernetes/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ .Values.GitlabEnvSlug | default .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - maxUnavailable: 1 - diff --git a/.maintain/kubernetes/templates/secrets.yaml b/.maintain/kubernetes/templates/secrets.yaml deleted file mode 100644 index 97e73ae7ff..0000000000 --- a/.maintain/kubernetes/templates/secrets.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if .Values.validator.keys }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .Values.app }}-secrets - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} -type: Opaque -data: - secrets: {{ .Values.validator.keys | default "" }} -{{- end }} diff --git 
a/.maintain/kubernetes/templates/service.yaml b/.maintain/kubernetes/templates/service.yaml deleted file mode 100644 index b14bb74c10..0000000000 --- a/.maintain/kubernetes/templates/service.yaml +++ /dev/null @@ -1,54 +0,0 @@ -# see: -# https://kubernetes.io/docs/tutorials/services/ -# https://kubernetes.io/docs/concepts/services-networking/service/ -# headless service for rpc -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }}-rpc -spec: - ports: - - port: 9933 - name: http-rpc - - port: 9944 - name: websocket-rpc - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: ClusterIP - clusterIP: None ---- -{{- if .Values.listen_node_port }} -apiVersion: v1 -kind: Service -metadata: - name: {{ .Values.app }} -spec: - ports: - - port: 30333 - name: p2p - nodePort: 30333 - protocol: TCP - selector: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - sessionAffinity: None - type: NodePort - # don't route external traffic to non-local pods - externalTrafficPolicy: Local -{{- else if .Values.validator.keys }} -{{- $root := . -}} -{{- range until (int .Values.nodes.replicas) }} ---- -kind: Service -apiVersion: v1 -metadata: - name: {{ $root.Values.app }}-{{ . }} -spec: - selector: - statefulset.kubernetes.io/pod-name: {{ $root.Values.app }}-{{ . 
}} - ports: - - port: 30333 - targetPort: 30333 - protocol: TCP -{{- end }} -{{- end }} diff --git a/.maintain/kubernetes/templates/serviceaccount.yaml b/.maintain/kubernetes/templates/serviceaccount.yaml deleted file mode 100644 index 53d016bffe..0000000000 --- a/.maintain/kubernetes/templates/serviceaccount.yaml +++ /dev/null @@ -1,10 +0,0 @@ -{{- if .Values.rbac.enable }} -# service account for substrate pods themselves -# no permissions for the api are required -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - name: {{ .Values.rbac.name }} -{{- end }} diff --git a/.maintain/kubernetes/templates/statefulset.yaml b/.maintain/kubernetes/templates/statefulset.yaml deleted file mode 100644 index 0f34b3507a..0000000000 --- a/.maintain/kubernetes/templates/statefulset.yaml +++ /dev/null @@ -1,139 +0,0 @@ -# https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/ -# https://cloud.google.com/kubernetes-engine/docs/concepts/statefulset -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ .Values.app }} -spec: - selector: - matchLabels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - serviceName: {{ .Values.app }} - replicas: {{ .Values.nodes.replicas }} - updateStrategy: - type: RollingUpdate - podManagementPolicy: Parallel - template: - metadata: - labels: - app: {{ .Values.GitlabEnvSlug | default .Values.app }} - spec: - {{- if .Values.rbac.enable }} - serviceAccountName: {{ .Values.rbac.name }} - {{- else }} - serviceAccountName: default - {{- end }} - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: node - operator: In - values: - - substrate - {{- if .Values.listen_node_port }} - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: "app" - operator: In - values: - - {{ .Values.app }} - topologyKey: 
"kubernetes.io/hostname" - {{- end }} - terminationGracePeriodSeconds: 300 - {{- if .Values.validator.keys }} - volumes: - - name: {{ .Values.app }}-validator-secrets - secret: - secretName: {{ .Values.app }}-secrets - initContainers: - - name: prepare-secrets - image: busybox - command: [ "/bin/sh" ] - args: - - -c - - sed -n -r "s/^${POD_NAME}-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/key; - sed -n -r "s/^${POD_NAME}-node-key ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/node-key; - sed -n -r "s/^${POD_NAME}-name ([^ ]+)$/\1/p" /etc/validator/secrets > {{ .Values.image.basepath }}/name; - test -s {{ .Values.image.basepath }}/name || echo "${POD_NAME}" > {{ .Values.image.basepath }}/name - env: - # from (workaround for hostname) - # https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}-validator-secrets - readOnly: true - mountPath: "/etc/validator" - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - {{- end }} - containers: - - name: {{ .Values.app }} - imagePullPolicy: "{{ .Values.image.pullPolicy }}" - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - {{- if .Values.resources }} - resources: - requests: - memory: {{ .Values.resources.memory }} - cpu: {{ .Values.resources.cpu }} - {{- end }} - ports: - - containerPort: 30333 - name: p2p - - containerPort: 9933 - name: http-rpc - - containerPort: 9944 - name: websocket-rpc - command: ["/bin/sh"] - args: - - -c - - exec /usr/local/bin/substrate - --base-path {{ .Values.image.basepath }} - {{- if .Values.validator.keys }} - --validator - --name $(cat {{ .Values.image.basepath }}/name) - --key $(cat {{ .Values.image.basepath }}/key) - --node-key $(cat {{ .Values.image.basepath }}/node-key) - {{- else }} - --name $(POD_NAME) - {{- end }} - {{- range 
.Values.nodes.args }} {{ . }} {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - volumeMounts: - - name: {{ .Values.app }}dir - mountPath: {{ .Values.image.basepath }} - readinessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - livenessProbe: - httpGet: - path: /health - port: http-rpc - initialDelaySeconds: 10 - periodSeconds: 10 - securityContext: - runAsUser: 1000 - fsGroup: 1000 - volumeClaimTemplates: - - metadata: - name: {{ .Values.app }}dir - spec: - accessModes: [ "ReadWriteOnce" ] - storageClassName: ssd - resources: - requests: - storage: 32Gi - diff --git a/.maintain/kubernetes/values.yaml b/.maintain/kubernetes/values.yaml deleted file mode 100644 index 4c3cb5c7d7..0000000000 --- a/.maintain/kubernetes/values.yaml +++ /dev/null @@ -1,59 +0,0 @@ -# set tag manually --set image.tag=latest -image: - repository: parity/substrate - tag: latest - pullPolicy: Always - basepath: /substrate - - -# if set to true a service account for substrate will be created -rbac: - enable: true - name: substrate - - -# name of the statefulset -app: substrate -listen_node_port: true - -nodes: - replicas: 2 - args: - # name and data directory are set by the chart itself - # key and node-key may be provided on commandline invocation - # - # - --chain - # - krummelanke - # serve rpc within the local network - # - fenced off the world via firewall - # - used for health checks - - --rpc-external - - --ws-external - # - --log - # - sub-libp2p=trace - - -validator: {} - # providing 'keys' string via --set commandline parameter will run the nodes - # in validator mode (--validator). - # - # name, key and node-key can be given in a base64 encoded keyfile string (at - # validator.keys) which has the following format: - # - # substrate-0-name - # substrate-0-key - # substrate-0-node-key - # substrate-1-name - # substrate-1-key - # substrate-1-node-key - # - # pod names are canonical. 
changing these or providing different amount of - # keys than the replicas count will lead to behavior no one ever has - # experienced before. - - -# maybe adopt resource limits here to the nodes of the pool -# resources: -# memory: "5Gi" -# cpu: "1.5" - diff --git a/.maintain/monitoring/alerting-rules/alerting-rules.yaml b/.maintain/monitoring/alerting-rules/alerting-rules.yaml index cf00d7e2b9..5ee2376677 100644 --- a/.maintain/monitoring/alerting-rules/alerting-rules.yaml +++ b/.maintain/monitoring/alerting-rules/alerting-rules.yaml @@ -108,6 +108,13 @@ groups: annotations: message: 'The node {{ $labels.instance }} has less than 3 peers for more than 15 minutes' + - alert: NoIncomingConnection + expr: increase(polkadot_sub_libp2p_incoming_connections_total[20m]) == 0 + labels: + severity: warning + annotations: + message: 'The node {{ $labels.instance }} has not received any new incoming + TCP connection in the past 20 minutes. Is it connected to the Internet?' ############################################################################## # System diff --git a/.maintain/monitoring/grafana-dashboards/substrate-networking.json b/.maintain/monitoring/grafana-dashboards/substrate-networking.json index d2abfd1cb8..0b157e7205 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-networking.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-networking.json @@ -74,7 +74,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1610462565248, + "iteration": 1613393276921, "links": [], "panels": [ { @@ -963,7 +963,8 @@ "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "irate(${metric_namespace}_sub_libp2p_requests_out_success_total_sum{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m]) + on(instance) 
sum(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance)", + "hide": false, "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -989,6 +990,7 @@ }, "yaxes": [ { + "$$hashKey": "object:209", "format": "reqps", "label": null, "logBase": 1, @@ -997,6 +999,7 @@ "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, @@ -1032,7 +1035,7 @@ "y": 51 }, "hiddenSeries": false, - "id": 151, + "id": 448, "legend": { "avg": false, "current": false, @@ -1060,9 +1063,11 @@ "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", + "expr": "sum(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[$__rate_interval])) by (instance, reason)", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "intervalFactor": 1, + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -1070,7 +1075,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Requests served per second", + "title": "Outbound requests failures", "tooltip": { "shared": true, "sort": 2, @@ -1086,6 +1091,7 @@ }, "yaxes": [ { + "$$hashKey": "object:209", "format": "reqps", "label": null, "logBase": 1, @@ -1094,6 +1100,7 @@ "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, @@ -1227,7 +1234,7 @@ "y": 59 }, "hiddenSeries": false, - "id": 258, + "id": 257, "legend": { "avg": false, "current": false, @@ -1239,7 +1246,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "null as zero", "options": { "alertThreshold": true }, @@ -1255,7 +1262,8 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.5, 
sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", + "instant": false, "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -1265,7 +1273,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Median request serving time", + "title": "99th percentile request answer time", "tooltip": { "shared": true, "sort": 2, @@ -1324,7 +1332,7 @@ "y": 63 }, "hiddenSeries": false, - "id": 257, + "id": 151, "legend": { "avg": false, "current": false, @@ -1336,7 +1344,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { "alertThreshold": true }, @@ -1352,8 +1360,7 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_out_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le)) > 0", - "instant": false, + "expr": "irate(${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])", "interval": "", "legendFormat": "{{instance}}", "refId": "A" @@ -1363,7 +1370,7 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request answer time", + "title": "Requests served per second", "tooltip": { "shared": true, "sort": 2, @@ -1379,7 +1386,7 @@ }, "yaxes": [ { - "format": "s", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -1422,7 +1429,7 @@ "y": 67 }, "hiddenSeries": false, - "id": 259, + "id": 449, "legend": { "avg": false, "current": false, @@ -1434,7 +1441,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", 
"options": { "alertThreshold": true }, @@ -1450,9 +1457,11 @@ "steppedLine": false, "targets": [ { - "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", + "expr": "sum(irate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[$__rate_interval])) by (instance, reason)", + "hide": false, "interval": "", - "legendFormat": "{{instance}}", + "intervalFactor": 1, + "legendFormat": "{{reason}}", "refId": "A" } ], @@ -1460,9 +1469,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "99th percentile request serving time", + "title": "Inbound requests failures", "tooltip": { - "shared": false, + "shared": true, "sort": 2, "value_type": "individual" }, @@ -1476,7 +1485,8 @@ }, "yaxes": [ { - "format": "s", + "$$hashKey": "object:209", + "format": "reqps", "label": null, "logBase": 1, "max": null, @@ -1484,6 +1494,7 @@ "show": true }, { + "$$hashKey": "object:210", "format": "short", "label": null, "logBase": 1, @@ -1519,7 +1530,7 @@ "y": 71 }, "hiddenSeries": false, - "id": 287, + "id": 258, "legend": { "avg": false, "current": false, @@ -1531,7 +1542,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { "alertThreshold": true }, @@ -1547,10 +1558,9 @@ "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_out_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "histogram_quantile(0.5, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -1558,7 +1568,7 @@ "timeFrom": 
null, "timeRegions": [], "timeShift": null, - "title": "Outgoing request failures per second", + "title": "Median request serving time", "tooltip": { "shared": true, "sort": 2, @@ -1574,7 +1584,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -1617,7 +1627,7 @@ "y": 75 }, "hiddenSeries": false, - "id": 286, + "id": 259, "legend": { "avg": false, "current": false, @@ -1645,10 +1655,9 @@ "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_requests_in_failure_total{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (reason)", - "instant": false, + "expr": "histogram_quantile(0.99, sum(rate(${metric_namespace}_sub_libp2p_requests_in_success_total_bucket{instance=~\"${nodename}\", protocol=\"${request_protocol}\"}[5m])) by (instance, le))", "interval": "", - "legendFormat": "{{reason}}", + "legendFormat": "{{instance}}", "refId": "A" } ], @@ -1656,9 +1665,9 @@ "timeFrom": null, "timeRegions": [], "timeShift": null, - "title": "Ingoing request failures per second", + "title": "99th percentile request serving time", "tooltip": { - "shared": true, + "shared": false, "sort": 2, "value_type": "individual" }, @@ -1672,7 +1681,7 @@ }, "yaxes": [ { - "format": "short", + "format": "s", "label": null, "logBase": 1, "max": null, @@ -1845,7 +1854,7 @@ "lines": true, "linewidth": 1, "maxPerRow": 12, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -1871,7 +1880,7 @@ "steppedLine": false, "targets": [ { - "expr": "avg by (direction) (irate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval]))", + "expr": "avg by (direction) (irate(${metric_namespace}_sub_libp2p_notifications_sizes_count{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval]))", "interval": "", "legendFormat": "{{direction}}", "refId": "A" @@ -1958,7 
+1967,7 @@ "lines": true, "linewidth": 1, "maxPerRow": 12, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -1984,7 +1993,7 @@ "steppedLine": false, "targets": [ { - "expr": "avg(irate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__interval])) by (direction)", + "expr": "avg(irate(${metric_namespace}_sub_libp2p_notifications_sizes_sum{instance=~\"${nodename}\", protocol=\"${notif_protocol}\"}[$__rate_interval])) by (direction)", "instant": false, "interval": "", "legendFormat": "{{direction}}", @@ -2674,7 +2683,7 @@ "allValue": null, "current": {}, "datasource": "$data_source", - "definition": "${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\"}", + "definition": "${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\"}", "error": null, "hide": 2, "includeAll": true, @@ -2682,7 +2691,7 @@ "multi": false, "name": "request_protocol", "options": [], - "query": "${metric_namespace}_sub_libp2p_requests_out_started_total{instance=~\"${nodename}\"}", + "query": "${metric_namespace}_sub_libp2p_requests_in_success_total_count{instance=~\"${nodename}\"}", "refresh": 1, "regex": "/protocol=\"(.*?)\"/", "skipUrlSync": false, @@ -2707,6 +2716,7 @@ "name": "data_source", "options": [], "query": "prometheus", + "queryValue": "", "refresh": 1, "regex": "", "skipUrlSync": false, @@ -2756,5 +2766,5 @@ "timezone": "utc", "title": "Substrate Networking", "uid": "vKVuiD9Zk", - "version": 147 -} + "version": 154 +} \ No newline at end of file diff --git a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json index a3db46ec6d..944c9fb50c 100644 --- a/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json +++ b/.maintain/monitoring/grafana-dashboards/substrate-service-tasks.json @@ -37,6 +37,7 @@ 
"annotations": { "list": [ { + "$$hashKey": "object:326", "builtIn": 1, "datasource": "-- Grafana --", "enable": true, @@ -48,6 +49,7 @@ "type": "dashboard" }, { + "$$hashKey": "object:327", "datasource": "$data_source", "enable": true, "expr": "increase(${metric_namespace}_tasks_ended_total{reason=\"panic\", instance=~\"${nodename}\"}[10m])", @@ -64,6 +66,7 @@ "type": "tags" }, { + "$$hashKey": "object:621", "datasource": "$data_source", "enable": true, "expr": "changes(${metric_namespace}_process_start_time_seconds{instance=~\"${nodename}\"}[10m])", @@ -81,7 +84,7 @@ "gnetId": null, "graphTooltip": 0, "id": null, - "iteration": 1610462629581, + "iteration": 1613393319015, "links": [], "panels": [ { @@ -164,7 +167,7 @@ }, "lines": false, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "null", "options": { "alertThreshold": true }, @@ -180,7 +183,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[10m])", + "expr": "irate(${metric_namespace}_tasks_polling_duration_sum{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -206,6 +209,7 @@ }, "yaxes": [ { + "$$hashKey": "object:2721", "format": "percentunit", "label": null, "logBase": 1, @@ -214,6 +218,7 @@ "show": true }, { + "$$hashKey": "object:2722", "format": "short", "label": null, "logBase": 1, @@ -266,7 +271,7 @@ }, "lines": true, "linewidth": 2, - "nullPointMode": "null", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -282,7 +287,7 @@ "steppedLine": true, "targets": [ { - "expr": "irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[10m])", + "expr": "irate(${metric_namespace}_tasks_polling_duration_count{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -308,6 +313,7 @@ }, "yaxes": [ { + "$$hashKey": "object:2571", "format": "cps", 
"label": null, "logBase": 1, @@ -316,6 +322,7 @@ "show": true }, { + "$$hashKey": "object:2572", "format": "short", "label": null, "logBase": 1, @@ -382,7 +389,7 @@ "steppedLine": true, "targets": [ { - "expr": "irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[10m])", + "expr": "irate(${metric_namespace}_tasks_spawned_total{instance=~\"${nodename}\"}[$__rate_interval])", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -408,6 +415,7 @@ }, "yaxes": [ { + "$$hashKey": "object:771", "format": "short", "label": null, "logBase": 10, @@ -416,6 +424,7 @@ "show": true }, { + "$$hashKey": "object:772", "format": "short", "label": null, "logBase": 1, @@ -466,7 +475,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null as zero", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -508,6 +517,7 @@ }, "yaxes": [ { + "$$hashKey": "object:919", "format": "short", "label": null, "logBase": 10, @@ -516,6 +526,7 @@ "show": true }, { + "$$hashKey": "object:920", "format": "short", "label": null, "logBase": 1, @@ -585,7 +596,7 @@ "steppedLine": true, "targets": [ { - "expr": "irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[10m])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[10m]) > 0", + "expr": "irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"+Inf\"}[$__rate_interval])\n - ignoring(le)\n irate(${metric_namespace}_tasks_polling_duration_bucket{instance=~\"${nodename}\", le=\"1.024\"}[$__rate_interval]) > 0", "interval": "", "legendFormat": "{{task_name}}", "refId": "A" @@ -611,6 +622,7 @@ }, "yaxes": [ { + "$$hashKey": "object:3040", "decimals": null, "format": "cps", "label": "Calls to `Future::poll`/second", @@ -620,6 +632,7 @@ "show": true }, { + "$$hashKey": "object:3041", "format": "short", "label": null, "logBase": 1, @@ -683,7 +696,7 @@ }, "lines": 
true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -725,6 +738,7 @@ }, "yaxes": [ { + "$$hashKey": "object:626", "format": "short", "label": null, "logBase": 1, @@ -733,6 +747,7 @@ "show": true }, { + "$$hashKey": "object:627", "format": "short", "label": null, "logBase": 1, @@ -782,7 +797,7 @@ }, "lines": true, "linewidth": 1, - "nullPointMode": "null", + "nullPointMode": "connected", "options": { "alertThreshold": true }, @@ -798,7 +813,7 @@ "steppedLine": false, "targets": [ { - "expr": "irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[10m])", + "expr": "irate(${metric_namespace}_unbounded_channel_len{instance=~\"${nodename}\", action = \"send\"}[$__rate_interval])", "interval": "", "legendFormat": "{{entity}}", "refId": "B" @@ -824,6 +839,7 @@ }, "yaxes": [ { + "$$hashKey": "object:626", "format": "cps", "label": null, "logBase": 1, @@ -832,6 +848,7 @@ "show": true }, { + "$$hashKey": "object:627", "format": "short", "label": null, "logBase": 1, @@ -938,5 +955,5 @@ "timezone": "utc", "title": "Substrate Service Tasks", "uid": "3LA6XNqZz", - "version": 59 -} + "version": 60 +} \ No newline at end of file diff --git a/.rustfmt.toml b/.rustfmt.toml new file mode 100644 index 0000000000..5438b7f75d --- /dev/null +++ b/.rustfmt.toml @@ -0,0 +1,4 @@ +# Conform to: https://wiki.parity.io/Substrate-Style-Guide +reorder_imports = true +hard_tabs = true +max_width = 120 diff --git a/Cargo.lock b/Cargo.lock index 6a7e638aee..3c4681ed35 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,11 +175,10 @@ dependencies = [ [[package]] name = "assert_cmd" -version = "1.0.3" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2475b58cd94eb4f70159f4fd8844ba3b807532fe3131b3373fae060bbe30396" +checksum = "3dc1679af9a1ab4bea16f228b05d18f8363f8327b1fa8db00d2760cfafc6b61e" dependencies = [ - "bstr", "doc-comment", "predicates", 
"predicates-core", @@ -193,6 +192,16 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "695579f0f2520f3774bb40461e5adb066459d4e0af4d59d20175484fb8e9edf1" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "async-channel" version = "1.5.1" @@ -294,6 +303,7 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9f06685bad74e0570f5213741bea82158279a4103d988e57bfada11ad230341" dependencies = [ + "async-attributes", "async-channel", "async-global-executor", "async-io", @@ -346,6 +356,19 @@ dependencies = [ "pin-project-lite 0.2.4", ] +[[package]] +name = "asynchronous-codec" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +dependencies = [ + "bytes 1.0.1", + "futures-sink", + "futures-util", + "memchr", + "pin-project-lite 0.2.4", +] + [[package]] name = "atomic" version = "0.5.0" @@ -1464,9 +1487,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "17392a012ea30ef05a610aa97dfb49496e71c9f676b27879922ea5bdf60d9d3f" +checksum = "f26ecb66b4bdca6c1409b40fb255eefc2bd4f6d135dab3c3124f80ffa2a9661e" dependencies = [ "atty", "humantime 2.1.0", @@ -1590,9 +1613,9 @@ dependencies = [ [[package]] name = "finality-grandpa" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cd795898c348a8ec9edc66ec9e014031c764d4c88cc26d09b492cd93eb41339" +checksum = "c6447e2f8178843749e8c8003206def83ec124a7859475395777a28b5338647c" dependencies = [ "either", "futures 0.3.12", @@ -1660,12 +1683,13 @@ 
dependencies = [ [[package]] name = "frame-benchmarking" -version = "3.0.0" +version = "3.1.0" dependencies = [ "frame-support", "frame-system", "hex-literal", "linregress", + "log", "parity-scale-codec", "paste 1.0.4", "serde", @@ -1746,7 +1770,6 @@ dependencies = [ "pretty_assertions", "serde", "smallvec 1.6.1", - "sp-api", "sp-arithmetic", "sp-core", "sp-inherents", @@ -1756,7 +1779,6 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-tracing", - "substrate-test-runtime-client", ] [[package]] @@ -1817,6 +1839,7 @@ dependencies = [ "criterion", "frame-support", "impl-trait-for-tuples", + "log", "parity-scale-codec", "serde", "sp-core", @@ -1851,6 +1874,17 @@ dependencies = [ "sp-api", ] +[[package]] +name = "frame-try-runtime" +version = "0.9.0" +dependencies = [ + "frame-support", + "parity-scale-codec", + "sp-api", + "sp-runtime", + "sp-std", +] + [[package]] name = "fs-swap" version = "0.2.5" @@ -2935,16 +2969,15 @@ checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" [[package]] name = "libp2p" -version = "0.34.0" +version = "0.35.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5133112ce42be9482f6a87be92a605dd6bbc9e93c297aee77d172ff06908f3a" +checksum = "adc225a49973cf9ab10d0cdd6a4b8f0cda299df9b760824bbb623f15f8f0c95a" dependencies = [ "atomic", "bytes 1.0.1", "futures 0.3.12", "lazy_static", "libp2p-core", - "libp2p-core-derive", "libp2p-deflate", "libp2p-dns", "libp2p-floodsub", @@ -2959,6 +2992,7 @@ dependencies = [ "libp2p-pnet", "libp2p-request-response", "libp2p-swarm", + "libp2p-swarm-derive", "libp2p-tcp", "libp2p-uds", "libp2p-wasm-ext", @@ -2973,9 +3007,9 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dad04d3cef6c1df366a6ab58c9cf8b06497699e335d83ac2174783946ff847d6" +checksum = "8a2d56aadc2c2bf22cd7797f86e56a65b5b3994a0136b65be3106938acae7a26" dependencies = [ 
"asn1_der", "bs58", @@ -3000,21 +3034,11 @@ dependencies = [ "sha2 0.9.3", "smallvec 1.6.1", "thiserror", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "void", "zeroize", ] -[[package]] -name = "libp2p-core-derive" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4bc40943156e42138d22ed3c57ff0e1a147237742715937622a99b10fbe0156" -dependencies = [ - "quote", - "syn", -] - [[package]] name = "libp2p-deflate" version = "0.27.1" @@ -3057,11 +3081,11 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.27.0" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12451ba9493e87c91baf2a6dffce9ddf1fbc807a0861532d7cf477954f8ebbee" +checksum = "502dc5fcbfec4aa1c63ef3f7307ffe20e90c1a1387bf23ed0bec087f2dde58a1" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.0", "base64 0.13.0", "byteorder", "bytes 1.0.1", @@ -3077,7 +3101,7 @@ dependencies = [ "regex", "sha2 0.9.3", "smallvec 1.6.1", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "wasm-timer", ] @@ -3099,12 +3123,12 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "456f5de8e283d7800ca848b9b9a4e2a578b790bd8ae582b885e831353cf0e5df" +checksum = "cf3da6c9acbcc05f93235d201d7d45ef4e8b88a45d8836f98becd8b4d443f066" dependencies = [ "arrayvec 0.5.2", - "asynchronous-codec", + "asynchronous-codec 0.6.0", "bytes 1.0.1", "either", "fnv", @@ -3118,16 +3142,16 @@ dependencies = [ "sha2 0.9.3", "smallvec 1.6.1", "uint", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "void", "wasm-timer", ] [[package]] name = "libp2p-mdns" -version = "0.28.0" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b974db63233fc0e199f4ede7794294aae285c96f4b6010f853eac4099ef08590" +checksum = 
"0e9e6374814d1b118d97ccabdfc975c8910bd16dc38a8bc058eeb08bf2080fe1" dependencies = [ "async-io", "data-encoding", @@ -3146,11 +3170,11 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2705dc94b01ab9e3779b42a09bbf3712e637ed213e875c30face247291a85af0" +checksum = "350ce8b3923594aedabd5d6e3f875d058435052a29c3f32df378bc70d10be464" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.12", "libp2p-core", @@ -3159,7 +3183,7 @@ dependencies = [ "parking_lot 0.11.1", "rand 0.7.3", "smallvec 1.6.1", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", ] [[package]] @@ -3201,18 +3225,18 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e8c1ec305c9949351925cdc7196b9570f4330477f5e47fbf5bb340b57e26ed" +checksum = "9d58defcadb646ae4b033e130b48d87410bf76394dc3335496cae99dac803e61" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures 0.3.12", "libp2p-core", "log", "prost", "prost-build", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "void", ] @@ -3232,9 +3256,9 @@ dependencies = [ [[package]] name = "libp2p-request-response" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37637a4b33b5390322ccc068a33897d0aa541daf4fec99f6a7efbf37295346e" +checksum = "10e5552827c33d8326502682da73a0ba4bfa40c1b55b216af3c303f32169dd89" dependencies = [ "async-trait", "bytes 1.0.1", @@ -3246,15 +3270,15 @@ dependencies = [ "minicbor", "rand 0.7.3", "smallvec 1.6.1", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "wasm-timer", ] [[package]] name = "libp2p-swarm" -version = "0.27.1" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d4f89ebb4d8953bda12623e9871959fe728dea3bf6eae0421dc9c42dc821e488" +checksum = "7955b973e1fd2bd61ffd43ce261c1223f61f4aacd5bae362a924993f9a25fd98" dependencies = [ "either", "futures 0.3.12", @@ -3266,11 +3290,21 @@ dependencies = [ "wasm-timer", ] +[[package]] +name = "libp2p-swarm-derive" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c564ebaa36a64839f51eaddb0243aaaa29ce64affb56129193cc3248b72af273" +dependencies = [ + "quote", + "syn", +] + [[package]] name = "libp2p-tcp" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbd3d7076a478ac5a6aca55e74bdc250ac539b95de09b9d09915e0b8d01a6b2" +checksum = "88a5aef80e519a6cb8e2663605142f97baaaea1a252eecbf8756184765f7471b" dependencies = [ "async-io", "futures 0.3.12", @@ -3329,9 +3363,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.30.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "490b8b27fc40fe35212df1b6a3d14bffaa4117cbff956fdc2892168a371102ad" +checksum = "4819358c542a86ff95f6ae691efb4b94ddaf477079b01a686f5705b79bfc232a" dependencies = [ "futures 0.3.12", "libp2p-core", @@ -3452,9 +3486,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.6.3" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3aae342b73d57ad0b8b364bd12584819f2c1fe9114285dfcf8b0722607671635" +checksum = "1f374d42cdfc1d7dbf3d3dec28afab2eb97ffbf43a3234d795b5986dbf4b90ba" dependencies = [ "hashbrown", ] @@ -3870,6 +3904,7 @@ name = "node-cli" version = "2.0.0" dependencies = [ "assert_cmd", + "async-std", "frame-benchmarking-cli", "frame-support", "frame-system", @@ -3919,6 +3954,7 @@ dependencies = [ "sc-transaction-pool", "serde", "serde_json", + "soketto", "sp-authority-discovery", "sp-consensus", "sp-consensus-babe", @@ -3938,6 +3974,7 @@ dependencies = [ "substrate-build-script-utils", 
"substrate-frame-cli", "tempfile", + "try-runtime-cli", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -3965,6 +4002,7 @@ dependencies = [ "parity-scale-codec", "sc-executor", "sp-application-crypto", + "sp-consensus-babe", "sp-core", "sp-externalities", "sp-io", @@ -4061,7 +4099,9 @@ dependencies = [ "frame-system", "frame-system-benchmarking", "frame-system-rpc-runtime-api", + "frame-try-runtime", "hex-literal", + "log", "node-primitives", "pallet-assets", "pallet-authority-discovery", @@ -4074,7 +4114,9 @@ dependencies = [ "pallet-contracts-primitives", "pallet-contracts-rpc-runtime-api", "pallet-democracy", + "pallet-election-provider-multi-phase", "pallet-elections-phragmen", + "pallet-gilt", "pallet-grandpa", "pallet-identity", "pallet-im-online", @@ -4488,6 +4530,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-authorship", "pallet-balances", "pallet-offences", @@ -4501,7 +4544,7 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-vrf", "sp-core", - "sp-inherents", + "sp-election-providers", "sp-io", "sp-runtime", "sp-session", @@ -4517,6 +4560,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-transaction-payment", "parity-scale-codec", "serde", @@ -4552,6 +4596,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "log", "pallet-balances", "parity-scale-codec", "serde", @@ -4563,13 +4608,14 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.1" +version = "3.0.0" dependencies = [ "assert_matches", "frame-benchmarking", "frame-support", "frame-system", "hex-literal", + "log", "pallet-balances", "pallet-contracts-primitives", "pallet-contracts-proc-macro", @@ -4594,7 +4640,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.1" +version = "3.0.0" dependencies = [ "bitflags", "parity-scale-codec", @@ -4604,7 +4650,7 @@ dependencies = [ [[package]] name = "pallet-contracts-proc-macro" -version = 
"0.1.0" +version = "3.0.0" dependencies = [ "proc-macro2", "quote", @@ -4613,7 +4659,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.1" +version = "3.0.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4632,7 +4678,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.1" +version = "3.0.0" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4661,6 +4707,33 @@ dependencies = [ "substrate-test-utils", ] +[[package]] +name = "pallet-election-provider-multi-phase" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "hex-literal", + "log", + "pallet-balances", + "parity-scale-codec", + "parking_lot 0.11.1", + "paste 1.0.4", + "rand 0.7.3", + "serde", + "sp-arithmetic", + "sp-core", + "sp-election-providers", + "sp-io", + "sp-npos-elections", + "sp-runtime", + "sp-std", + "sp-tracing", + "static_assertions", + "substrate-test-utils", +] + [[package]] name = "pallet-elections" version = "3.0.0" @@ -4685,6 +4758,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "log", "pallet-balances", "parity-scale-codec", "serde", @@ -4719,6 +4793,7 @@ dependencies = [ "frame-support", "frame-system", "lite-json", + "log", "parity-scale-codec", "serde", "sp-core", @@ -4743,6 +4818,23 @@ dependencies = [ "sp-tasks", ] +[[package]] +name = "pallet-gilt" +version = "3.0.0" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "parity-scale-codec", + "serde", + "sp-arithmetic", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-grandpa" version = "3.0.0" @@ -4751,6 +4843,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-authorship", "pallet-balances", "pallet-offences", @@ -4762,6 +4855,7 @@ dependencies = [ "serde", "sp-application-crypto", "sp-core", + "sp-election-providers", 
"sp-finality-grandpa", "sp-io", "sp-keyring", @@ -4795,6 +4889,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "pallet-authorship", "pallet-session", "parity-scale-codec", @@ -4859,7 +4954,7 @@ name = "pallet-mmr" version = "3.0.0" dependencies = [ "ckb-merkle-mountain-range", - "env_logger 0.8.3", + "env_logger 0.8.2", "frame-benchmarking", "frame-support", "frame-system", @@ -4880,6 +4975,7 @@ dependencies = [ "frame-support", "frame-system", "hex-literal", + "log", "parity-scale-codec", "serde", "sp-api", @@ -4925,6 +5021,7 @@ version = "2.0.0" dependencies = [ "frame-support", "frame-system", + "log", "parity-scale-codec", "serde", "sp-core", @@ -4939,6 +5036,7 @@ version = "3.0.0" dependencies = [ "frame-support", "frame-system", + "log", "pallet-balances", "parity-scale-codec", "serde", @@ -4968,6 +5066,7 @@ dependencies = [ "parity-scale-codec", "serde", "sp-core", + "sp-election-providers", "sp-io", "sp-runtime", "sp-staking", @@ -5029,6 +5128,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", + "log", "parity-scale-codec", "serde", "sp-core", @@ -5090,6 +5190,7 @@ dependencies = [ "rand 0.7.3", "serde", "sp-core", + "sp-election-providers", "sp-io", "sp-runtime", "sp-session", @@ -5120,6 +5221,7 @@ dependencies = [ "frame-support", "frame-system", "hex", + "log", "pallet-authorship", "pallet-balances", "pallet-session", @@ -5131,6 +5233,7 @@ dependencies = [ "serde", "sp-application-crypto", "sp-core", + "sp-election-providers", "sp-io", "sp-npos-elections", "sp-runtime", @@ -5158,6 +5261,7 @@ dependencies = [ "parity-scale-codec", "serde", "sp-core", + "sp-election-providers", "sp-io", "sp-npos-elections", "sp-runtime", @@ -5210,6 +5314,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", + "log", "parity-scale-codec", "serde", "sp-core", @@ -5276,6 +5381,7 @@ dependencies = [ name = "pallet-transaction-payment-rpc-runtime-api" version = "3.0.0" dependencies = [ + 
"frame-support", "pallet-transaction-payment", "parity-scale-codec", "sp-api", @@ -5353,9 +5459,9 @@ dependencies = [ [[package]] name = "parity-multiaddr" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bfda2e46fc5e14122649e2645645a81ee5844e0fb2e727ef560cc71a8b2d801" +checksum = "d2c6805f98667a3828afb2ec2c396a8d610497e8d546f5447188aae47c5a79ec" dependencies = [ "arrayref", "bs58", @@ -5365,7 +5471,7 @@ dependencies = [ "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint 0.6.0", + "unsigned-varint 0.7.0", "url 2.2.0", ] @@ -5871,7 +5977,7 @@ dependencies = [ [[package]] name = "prml-attestation" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5886,7 +5992,7 @@ dependencies = [ [[package]] name = "prml-generic-asset" -version = "2.0.0" +version = "3.0.0" dependencies = [ "frame-benchmarking", "frame-support", @@ -5902,7 +6008,7 @@ dependencies = [ [[package]] name = "prml-generic-asset-rpc" -version = "2.0.0" +version = "3.0.0" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -5920,7 +6026,7 @@ dependencies = [ [[package]] name = "prml-generic-asset-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" dependencies = [ "parity-scale-codec", "prml-generic-asset", @@ -6108,7 +6214,7 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ - "env_logger 0.8.3", + "env_logger 0.8.2", "log", "rand 0.8.3", ] @@ -6126,9 +6232,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.9" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "991431c3519a3f36861882da93630ce66b52918dcf1b8e2fd66b397fc96f28df" dependencies = [ "proc-macro2", ] @@ -6435,6 +6541,24 @@ dependencies 
= [ "winapi 0.3.9", ] +[[package]] +name = "remote-externalities" +version = "0.9.0" +dependencies = [ + "async-std", + "env_logger 0.8.2", + "futures 0.3.12", + "hex-literal", + "jsonrpc-core-client", + "log", + "parity-scale-codec", + "sc-rpc", + "sc-rpc-api", + "sp-core", + "sp-io", + "tokio 0.1.22", +] + [[package]] name = "remove_dir_all" version = "0.5.3" @@ -6677,7 +6801,6 @@ dependencies = [ "sp-api", "sp-block-builder", "sp-blockchain", - "sp-consensus", "sp-core", "sp-inherents", "sp-runtime", @@ -7165,6 +7288,7 @@ version = "0.9.0" dependencies = [ "assert_matches", "derive_more", + "dyn-clone", "finality-grandpa", "fork-tree", "futures 0.3.12", @@ -7239,21 +7363,28 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-warp-sync" -version = "0.8.0" +version = "0.9.0" dependencies = [ "derive_more", + "finality-grandpa", "futures 0.3.12", "log", "num-traits", "parity-scale-codec", "parking_lot 0.11.1", "prost", + "rand 0.8.3", + "sc-block-builder", "sc-client-api", "sc-finality-grandpa", "sc-network", "sc-service", "sp-blockchain", + "sp-consensus", + "sp-finality-grandpa", + "sp-keyring", "sp-runtime", + "substrate-test-runtime-client", ] [[package]] @@ -7318,7 +7449,7 @@ dependencies = [ "assert_matches", "async-std", "async-trait", - "asynchronous-codec", + "asynchronous-codec 0.5.0", "bitflags", "bs58", "bytes 1.0.1", @@ -7635,6 +7766,7 @@ dependencies = [ "tokio 0.2.25", "tracing", "tracing-futures", + "tracing-log", "tracing-subscriber", "wasm-timer", ] @@ -8218,6 +8350,7 @@ name = "sp-api" version = "3.0.0" dependencies = [ "hash-db", + "log", "parity-scale-codec", "sp-api-proc-macro", "sp-core", @@ -8245,6 +8378,7 @@ name = "sp-api-test" version = "2.0.1" dependencies = [ "criterion", + "log", "parity-scale-codec", "rustversion", "sc-block-builder", @@ -8254,6 +8388,7 @@ dependencies = [ "sp-core", "sp-runtime", "sp-state-machine", + "sp-tracing", "sp-version", "substrate-test-runtime-client", "trybuild", @@ -8698,12 +8833,15 @@ dependencies 
= [ "rand 0.7.3", "serde", "serde_json", + "sp-api", "sp-application-crypto", "sp-arithmetic", "sp-core", "sp-io", "sp-state-machine", "sp-std", + "sp-tracing", + "substrate-test-runtime-client", ] [[package]] @@ -9446,9 +9584,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8208a331e1cb318dd5bd76951d2b8fc48ca38a69f5f4e4af1b6a9f8c6236915" +checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" dependencies = [ "once_cell", ] @@ -9961,6 +10099,27 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" +[[package]] +name = "try-runtime-cli" +version = "0.9.0" +dependencies = [ + "frame-try-runtime", + "log", + "parity-scale-codec", + "remote-externalities", + "sc-cli", + "sc-client-api", + "sc-executor", + "sc-service", + "sp-api", + "sp-blockchain", + "sp-core", + "sp-externalities", + "sp-runtime", + "sp-state-machine", + "structopt", +] + [[package]] name = "trybuild" version = "1.0.39" @@ -10078,7 +10237,19 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35581ff83d4101e58b582e607120c7f5ffb17e632a980b1f38334d76b36908b2" dependencies = [ - "asynchronous-codec", + "asynchronous-codec 0.5.0", + "bytes 1.0.1", + "futures-io", + "futures-util", +] + +[[package]] +name = "unsigned-varint" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f8d425fafb8cd76bc3f22aace4af471d3156301d7508f2107e98fbeae10bc7f" +dependencies = [ + "asynchronous-codec 0.6.0", "bytes 1.0.1", "futures-io", "futures-util", @@ -10213,9 +10384,9 @@ checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasm-bindgen" -version = "0.2.69" +version = "0.2.70" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cd364751395ca0f68cafb17666eee36b63077fb5ecd972bbcd74c90c4bf736e" +checksum = "55c0f7123de74f0dab9b7d00fd614e7b19349cd1e2f5252bbe9b1754b59433be" dependencies = [ "cfg-if 1.0.0", "serde", @@ -10225,9 +10396,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1114f89ab1f4106e5b55e688b828c0ab0ea593a1ea7c094b141b14cbaaec2d62" +checksum = "7bc45447f0d4573f3d65720f636bbcc3dd6ce920ed704670118650bcd47764c7" dependencies = [ "bumpalo", "lazy_static", @@ -10252,9 +10423,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6ac8995ead1f084a8dea1e65f194d0973800c7f571f6edd70adf06ecf77084" +checksum = "3b8853882eef39593ad4174dd26fc9865a64e84026d223f63bb2c42affcbba2c" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -10262,9 +10433,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5a48c72f299d80557c7c62e37e7225369ecc0c963964059509fbafe917c7549" +checksum = "4133b5e7f2a531fa413b3a1695e925038a05a71cf67e87dafa295cb645a01385" dependencies = [ "proc-macro2", "quote", @@ -10275,9 +10446,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.69" +version = "0.2.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e7811dd7f9398f14cc76efd356f98f03aa30419dea46aa810d71e819fc97158" +checksum = "dd4945e4943ae02d15c13962b38a5b1e81eadd4b71214eee75af64a4d6a4fd64" [[package]] name = "wasm-bindgen-test" @@ -10693,9 +10864,9 @@ dependencies = [ [[package]] name = "yamux" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "9aeb8c4043cac71c3c299dff107171c220d179492350ea198e109a414981b83c" +checksum = "1cc7bd8c983209ed5d527f44b01c41b7dc146fd960c61cf9e1d25399841dc271" dependencies = [ "futures 0.3.12", "log", diff --git a/Cargo.toml b/Cargo.toml index 21244dae4b..f08a59a4a7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -73,11 +73,14 @@ members = [ "frame/contracts/rpc", "frame/contracts/rpc/runtime-api", "frame/democracy", + "frame/try-runtime", "frame/elections", + "frame/election-provider-multi-phase", "frame/example", "frame/example-offchain-worker", "frame/example-parallel", "frame/executive", + "frame/gilt", "frame/grandpa", "frame/identity", "frame/im-online", @@ -175,17 +178,6 @@ members = [ "primitives/utils", "primitives/version", "primitives/wasm-interface", - # plug modules - "prml/attestation", - "prml/support", - # "prml/consortium-permission", - # doughnut disabled - # "prml/doughnut", - "prml/generic-asset", - "prml/generic-asset/rpc", - "prml/generic-asset/rpc/runtime-api", - # "prml/validator-manager", - # end plug modules "test-utils/client", "test-utils/derive", "test-utils/runtime", @@ -196,11 +188,23 @@ members = [ "utils/build-script-utils", "utils/fork-tree", "utils/frame/benchmarking-cli", + "utils/frame/remote-externalities", "utils/frame/frame-utilities-cli", + "utils/frame/try-runtime/cli", "utils/frame/rpc/support", "utils/frame/rpc/system", "utils/prometheus", "utils/wasm-builder", + "prml/support", + "prml/attestation", + "prml/generic-asset", + "prml/generic-asset/rpc", + "prml/generic-asset/rpc/runtime-api", +] +exclude = [ + "prml/consortium-permission", + "prml/doughnut", + "prml/validator-manager", ] # The list of dependencies below (which can be both direct and indirect dependencies) are crates diff --git a/Process.json b/Process.json deleted file mode 100644 index 540bd64431..0000000000 --- a/Process.json +++ /dev/null @@ -1,29 +0,0 @@ -[{ - "project_name": "Networking", - "owner": "tomaka", - "matrix_room_id": 
"!vUADSGcyXmxhKLeDsW:matrix.parity.io" -}, -{ "project_name": "Client", - "owner": "gnunicorn", - "matrix_room_id": "!aenJixaHcSKbJOWxYk:matrix.parity.io" -}, -{ - "project_name": "Runtime", - "owner": "gavofyork", - "matrix_room_id": "!yBKstWVBkwzUkPslsp:matrix.parity.io" -}, -{ - "project_name": "Consensus", - "owner": "andresilva", - "matrix_room_id": "!XdNWDTfVNFVixljKZU:matrix.parity.io" -}, -{ - "project_name": "Smart Contracts", - "owner": "pepyakin", - "matrix_room_id": "!yBKstWVBkwzUkPslsp:matrix.parity.io" -}, -{ - "project_name": "Benchmarking and Weights", - "owner": "shawntabrizi", - "matrix_room_id": "!pZPWqCRLVtORZTEsEf:matrix.parity.io" -}] diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 464b07cb98..2d36d3c469 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -48,7 +48,7 @@ substrate-frame-rpc-system = { version = "3.0.0", path = "../../../utils/frame/r pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/transaction-payment/rpc/" } # These dependencies are used for runtime benchmarking -frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "3.1.0", path = "../../../frame/benchmarking" } frame-benchmarking-cli = { version = "3.0.0", path = "../../../utils/frame/benchmarking-cli" } node-template-runtime = { version = "2.0.0", path = "../runtime" } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 4061dce438..92518ef22d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -11,6 +11,7 @@ pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; use sc_finality_grandpa::SharedVoterState; use sc_keystore::LocalKeystore; +use sc_telemetry::TelemetrySpan; // Our native executor instance. 
native_executor_instance!( @@ -72,7 +73,7 @@ pub fn new_partial(config: &Configuration) -> Result Result }) }; + let telemetry_span = TelemetrySpan::new(); + let _telemetry_span_entered = telemetry_span.enter(); + let (_rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks( sc_service::SpawnTasksParams { network: network.clone(), @@ -176,6 +180,7 @@ pub fn new_full(mut config: Configuration) -> Result network_status_sinks, system_rpc_tx, config, + telemetry_span: Some(telemetry_span.clone()), }, )?; @@ -224,7 +229,7 @@ pub fn new_full(mut config: Configuration) -> Result name: Some(name), observer_enabled: false, keystore, - is_authority: role.is_network_authority(), + is_authority: role.is_authority(), }; if enable_grandpa { @@ -290,7 +295,7 @@ pub fn new_light(mut config: Configuration) -> Result Some(Box::new(grandpa_block_import)), client.clone(), InherentDataProviders::new(), - &task_manager.spawn_handle(), + &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::NeverCanAuthor, )?; @@ -312,6 +317,9 @@ pub fn new_light(mut config: Configuration) -> Result ); } + let telemetry_span = TelemetrySpan::new(); + let _telemetry_span_entered = telemetry_span.enter(); + sc_service::spawn_tasks(sc_service::SpawnTasksParams { remote_blockchain: Some(backend.remote_blockchain()), transaction_pool, @@ -325,6 +333,7 @@ pub fn new_light(mut config: Configuration) -> Result network, network_status_sinks, system_rpc_tx, + telemetry_span: Some(telemetry_span.clone()), })?; network_starter.start_network(); diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index a13d05082b..e6c0c5ac06 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -51,3 +51,4 @@ std = [ 'frame-support/std', 'frame-system/std' ] +try-runtime = ["frame-support/try-runtime"] diff --git a/bin/node-template/runtime/Cargo.toml 
b/bin/node-template/runtime/Cargo.toml index dd907f55fb..de69419b92 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -41,7 +41,7 @@ frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, pa pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } # Used for runtime benchmarking -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } hex-literal = { version = "0.3.1", optional = true } diff --git a/bin/node/bench/src/construct.rs b/bin/node/bench/src/construct.rs index a8a02f19c3..b64ffec641 100644 --- a/bin/node/bench/src/construct.rs +++ b/bin/node/bench/src/construct.rs @@ -48,7 +48,7 @@ use sp_transaction_pool::{ TransactionStatusStreamFor, TxHash, }; -use sp_consensus::{Environment, Proposer, RecordProof}; +use sp_consensus::{Environment, Proposer}; use crate::{ common::SizeType, @@ -170,7 +170,6 @@ impl core::Benchmark for ConstructionBenchmark { inherent_data_providers.create_inherent_data().expect("Create inherent data failed"), Default::default(), std::time::Duration::from_secs(20), - RecordProof::Yes, ), ).map(|r| r.block).expect("Proposing failed"); diff --git a/bin/node/browser-testing/Cargo.toml b/bin/node/browser-testing/Cargo.toml index 66e7b398dd..fe83cc65ba 100644 --- a/bin/node/browser-testing/Cargo.toml +++ b/bin/node/browser-testing/Cargo.toml @@ -8,14 +8,14 @@ license = "Apache-2.0" [dependencies] futures-timer = "3.0.2" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } jsonrpc-core = 
"15.0.0" serde = "1.0.106" serde_json = "1.0.48" -wasm-bindgen = { version = "=0.2.69", features = ["serde-serialize"] } +wasm-bindgen = { version = "=0.2.70", features = ["serde-serialize"] } wasm-bindgen-futures = "0.4.18" wasm-bindgen-test = "0.3.18" futures = "0.3.9" -node-cli = { path = "../cli", default-features = false, features = ["browser"] , version = "2.0.0"} -sc-rpc-api = { path = "../../../client/rpc-api" , version = "0.9.0"} +node-cli = { path = "../cli", default-features = false, features = ["browser"], version = "2.0.0"} +sc-rpc-api = { path = "../../../client/rpc-api", version = "0.9.0"} diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index f85a89bdc9..6c5fd9c3a1 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -75,12 +75,12 @@ sc-service = { version = "0.9.0", default-features = false, path = "../../../cli sc-tracing = { version = "3.0.0", path = "../../../client/tracing" } sc-telemetry = { version = "3.0.0", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.9.0", path = "../../../client/authority-discovery" } -sc-finality-grandpa-warp-sync = { version = "0.8.0", path = "../../../client/finality-grandpa-warp-sync", optional = true } +sc-finality-grandpa-warp-sync = { version = "0.9.0", path = "../../../client/finality-grandpa-warp-sync", optional = true } # frame dependencies pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } pallet-timestamp = { version = "3.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +pallet-contracts = { version = "3.0.0", path = "../../../frame/contracts" } frame-system = { version = "3.0.0", path = "../../../frame/system" } pallet-balances = { version = "3.0.0", path = "../../../frame/balances" } pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } @@ -100,6 +100,7 @@ node-executor = { version = 
"2.0.0", path = "../executor" } sc-cli = { version = "0.9.0", optional = true, path = "../../../client/cli" } frame-benchmarking-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } +try-runtime-cli = { version = "0.9.0", optional = true, path = "../../../utils/frame/try-runtime/cli" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } @@ -125,6 +126,8 @@ nix = "0.19" serde_json = "1.0" regex = "1" platforms = "1.1" +async-std = { version = "1.6.5", features = ["attributes"] } +soketto = "0.4.2" [build-dependencies] structopt = { version = "0.3.8", optional = true } @@ -132,6 +135,7 @@ node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } frame-benchmarking-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } substrate-build-script-utils = { version = "3.0.0", optional = true, path = "../../../utils/build-script-utils" } substrate-frame-cli = { version = "3.0.0", optional = true, path = "../../../utils/frame/frame-utilities-cli" } +try-runtime-cli = { version = "0.9.0", optional = true, path = "../../../utils/frame/try-runtime/cli" } [build-dependencies.sc-cli] version = "0.9.0" @@ -156,8 +160,15 @@ cli = [ "sc-finality-grandpa-warp-sync", "structopt", "substrate-build-script-utils", + "try-runtime-cli", ] runtime-benchmarks = [ "node-runtime/runtime-benchmarks", "frame-benchmarking-cli", ] +# Enable features that allow the runtime to be tried and debugged. Name might be subject to change +# in the near future. 
+try-runtime = [ + "node-runtime/try-runtime", + "try-runtime-cli", +] diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 7de9cfd0b6..db268ad105 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -326,6 +326,7 @@ pub fn testnet_genesis( max_members: 999, }), pallet_vesting: Some(Default::default()), + pallet_gilt: Some(Default::default()), } } diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 63a07e00e2..9b80a3e345 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -47,6 +47,11 @@ pub enum Subcommand { #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// Try some experimental command on the runtime. This includes migration and runtime-upgrade + /// testing. + #[cfg(feature = "try-runtime")] + TryRuntime(try_runtime_cli::TryRuntimeCmd), + /// Verify a signature for a message, provided on STDIN, with a given (public or secret) key. Verify(VerifyCmd), diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 461930a613..ece97436bf 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -149,5 +149,20 @@ pub fn run() -> Result<()> { Ok((cmd.run(client, backend), task_manager)) }) }, + #[cfg(feature = "try-runtime")] + Some(Subcommand::TryRuntime(cmd)) => { + let runner = cli.create_runner(cmd)?; + runner.async_run(|config| { + // we don't need any of the components of new_partial, just a runtime, or a task + // manager to do `async_run`. 
+ let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + let task_manager = sc_service::TaskManager::new( + config.task_executor.clone(), + registry, + ).map_err(|e| sc_cli::Error::Service(sc_service::Error::Prometheus(e)))?; + + Ok((cmd.run::(config), task_manager)) + }) + } } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 815ea243ce..970e8606c7 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -34,7 +34,7 @@ use sp_runtime::traits::Block as BlockT; use futures::prelude::*; use sc_client_api::{ExecutorProvider, RemoteBackend}; use node_executor::Executor; -use sc_telemetry::TelemetryConnectionNotifier; +use sc_telemetry::{TelemetryConnectionNotifier, TelemetrySpan}; type FullClient = sc_service::TFullClient; type FullBackend = sc_service::TFullBackend; @@ -95,7 +95,7 @@ pub fn new_partial(config: &Configuration) -> Result Result<( client.clone(), select_chain.clone(), inherent_data_providers.clone(), - &task_manager.spawn_handle(), + &task_manager.spawn_essential_handle(), config.prometheus_registry(), sp_consensus::NeverCanAuthor, )?; @@ -435,6 +444,9 @@ pub fn new_light_base(mut config: Configuration) -> Result<( let rpc_extensions = node_rpc::create_light(light_deps); + let telemetry_span = TelemetrySpan::new(); + let _telemetry_span_entered = telemetry_span.enter(); + let (rpc_handlers, telemetry_connection_notifier) = sc_service::spawn_tasks(sc_service::SpawnTasksParams { on_demand: Some(on_demand), @@ -446,6 +458,7 @@ pub fn new_light_base(mut config: Configuration) -> Result<( config, backend, network_status_sinks, system_rpc_tx, network: network.clone(), task_manager: &mut task_manager, + telemetry_span: Some(telemetry_span.clone()), })?; Ok(( @@ -472,7 +485,6 @@ mod tests { use sc_consensus_epochs::descendent_query; use sp_consensus::{ Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, - RecordProof, }; use node_primitives::{Block, 
DigestItem, Signature}; use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; @@ -605,7 +617,6 @@ mod tests { inherent_data, digest, std::time::Duration::from_secs(1), - RecordProof::Yes, ).await }).expect("Error making test block").block; @@ -619,9 +630,7 @@ mod tests { sp_consensus_babe::AuthorityId::ID, &alice.to_public_crypto_pair(), &to_sign, - ).unwrap() - .try_into() - .unwrap(); + ).unwrap().unwrap().try_into().unwrap(); let item = ::babe_seal( signature, ); diff --git a/bin/node/cli/tests/telemetry.rs b/bin/node/cli/tests/telemetry.rs new file mode 100644 index 0000000000..0b90f56a03 --- /dev/null +++ b/bin/node/cli/tests/telemetry.rs @@ -0,0 +1,102 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use assert_cmd::cargo::cargo_bin; +use nix::sys::signal::{kill, Signal::SIGINT}; +use nix::unistd::Pid; +use std::convert::TryInto; +use std::process; + +pub mod common; +pub mod websocket_server; + +#[async_std::test] +async fn telemetry_works() { + let config = websocket_server::Config { + capacity: 1, + max_frame_size: 1048 * 1024, + send_buffer_len: 32, + bind_address: "127.0.0.1:0".parse().unwrap(), + }; + let mut server = websocket_server::WsServer::new(config).await.unwrap(); + + let addr = server.local_addr().unwrap(); + + let server_task = async_std::task::spawn(async move { + loop { + use websocket_server::Event; + match server.next_event().await { + // New connection on the listener. + Event::ConnectionOpen { address } => { + println!("New connection from {:?}", address); + server.accept(); + } + + // Received a message from a connection. + Event::BinaryFrame { message, .. } => { + let json: serde_json::Value = serde_json::from_slice(&message).unwrap(); + let object = json + .as_object() + .unwrap() + .get("payload") + .unwrap() + .as_object() + .unwrap(); + if matches!(object.get("best"), Some(serde_json::Value::String(_))) { + break; + } + } + + Event::TextFrame { .. } => panic!("Got a TextFrame over the socket, this is a bug"), + + // Connection has been closed. + Event::ConnectionError { .. 
} => {} + } + } + }); + + let mut substrate = process::Command::new(cargo_bin("substrate")); + + let mut substrate = substrate + .args(&["--dev", "--tmp", "--telemetry-url"]) + .arg(format!("ws://{} 10", addr)) + .stdout(process::Stdio::piped()) + .stderr(process::Stdio::piped()) + .stdin(process::Stdio::null()) + .spawn() + .unwrap(); + + server_task.await; + + assert!( + substrate.try_wait().unwrap().is_none(), + "the process should still be running" + ); + + // Stop the process + kill(Pid::from_raw(substrate.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(common::wait_for(&mut substrate, 40) + .map(|x| x.success()) + .unwrap_or_default()); + + let output = substrate.wait_with_output().unwrap(); + + println!("{}", String::from_utf8(output.stdout).unwrap()); + eprintln!("{}", String::from_utf8(output.stderr).unwrap()); + assert!(output.status.success()); +} diff --git a/bin/node/cli/tests/websocket_server.rs b/bin/node/cli/tests/websocket_server.rs new file mode 100644 index 0000000000..a8af1c3599 --- /dev/null +++ b/bin/node/cli/tests/websocket_server.rs @@ -0,0 +1,281 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . 
+ +use async_std::net::{TcpListener, TcpStream}; +use core::pin::Pin; +use futures::prelude::*; +use soketto::handshake::{server::Response, Server}; +use std::{io, net::SocketAddr}; + +/// Configuration for a [`WsServer`]. +pub struct Config { + /// IP address to try to bind to. + pub bind_address: SocketAddr, + + /// Maximum size, in bytes, of a frame sent by the remote. + /// + /// Since the messages are entirely buffered before being returned, a maximum value is + /// necessary in order to prevent malicious clients from sending huge frames that would + /// occupy a lot of memory. + pub max_frame_size: usize, + + /// Number of pending messages to buffer up for sending before the socket is considered + /// unresponsive. + pub send_buffer_len: usize, + + /// Pre-allocated capacity for the list of connections. + pub capacity: usize, +} + +/// Identifier for a connection with regard to a [`WsServer`]. +/// +/// After a connection has been closed, its [`ConnectionId`] might be reused. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)] +pub struct ConnectionId(u64); + +/// A WebSocket message. +pub enum Message { + Text(String), + Binary(Vec), +} + +/// WebSockets listening socket and list of open connections. +pub struct WsServer { + /// Value passed through [`Config::max_frame_size`]. + max_frame_size: usize, + + /// Endpoint for incoming TCP sockets. + listener: TcpListener, + + /// Pending incoming connection to accept. Accepted by calling [`WsServer::accept`]. + pending_incoming: Option, + + /// List of TCP connections that are currently negotiating the WebSocket handshake. + /// + /// The output can be an error if the handshake fails. + negotiating: stream::FuturesUnordered< + Pin< + Box< + dyn Future, Box>> + + Send, + >, + >, + >, + + /// List of streams of incoming messages for all connections. + incoming_messages: stream::SelectAll< + Pin>> + Send>>, + >, + + /// Tasks dedicated to closing sockets that have been rejected. 
+ rejected_sockets: stream::FuturesUnordered + Send>>>, +} + +impl WsServer { + /// Try opening a TCP listening socket. + /// + /// Returns an error if the listening socket fails to open. + pub async fn new(config: Config) -> Result { + let listener = TcpListener::bind(config.bind_address).await?; + + Ok(WsServer { + max_frame_size: config.max_frame_size, + listener, + pending_incoming: None, + negotiating: stream::FuturesUnordered::new(), + incoming_messages: stream::SelectAll::new(), + rejected_sockets: stream::FuturesUnordered::new(), + }) + } + + /// Address of the local TCP listening socket, as provided by the operating system. + pub fn local_addr(&self) -> Result { + self.listener.local_addr() + } + + /// Accepts the pending connection. + /// + /// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a + /// [`Event::ConnectionOpen`] event is returned. + /// + /// # Panic + /// + /// Panics if no connection is pending. + /// + pub fn accept(&mut self) { + let pending_incoming = self.pending_incoming.take().expect("no pending socket"); + + self.negotiating.push(Box::pin(async move { + let mut server = Server::new(pending_incoming); + + let websocket_key = match server.receive_request().await { + Ok(req) => req.into_key(), + Err(err) => return Err(Box::new(err) as Box<_>), + }; + + match server + .send_response(&{ + Response::Accept { + key: &websocket_key, + protocol: None, + } + }) + .await + { + Ok(()) => {} + Err(err) => return Err(Box::new(err) as Box<_>), + }; + + Ok(server) + })); + } + + /// Reject the pending connection. + /// + /// Either [`WsServer::accept`] or [`WsServer::reject`] must be called after a + /// [`Event::ConnectionOpen`] event is returned. + /// + /// # Panic + /// + /// Panics if no connection is pending. + /// + pub fn reject(&mut self) { + let _ = self.pending_incoming.take().expect("no pending socket"); + } + + /// Returns the next event happening on the server. 
+ pub async fn next_event(&mut self) -> Event { + loop { + futures::select! { + // Only try to fetch a new incoming connection if none is pending. + socket = { + let listener = &self.listener; + let has_pending = self.pending_incoming.is_some(); + async move { + if !has_pending { + listener.accept().await + } else { + loop { futures::pending!() } + } + } + }.fuse() => { + let (socket, address) = match socket { + Ok(s) => s, + Err(_) => continue, + }; + debug_assert!(self.pending_incoming.is_none()); + self.pending_incoming = Some(socket); + return Event::ConnectionOpen { address }; + }, + + result = self.negotiating.select_next_some() => { + let server = match result { + Ok(s) => s, + Err(error) => return Event::ConnectionError { + error, + }, + }; + + let (mut _sender, receiver) = { + let mut builder = server.into_builder(); + builder.set_max_frame_size(self.max_frame_size); + builder.set_max_message_size(self.max_frame_size); + builder.finish() + }; + + // Spawn a task dedicated to receiving messages from the socket. + self.incoming_messages.push({ + // Turn `receiver` into a stream of received packets. 
+ let socket_packets = stream::unfold((receiver, Vec::new()), move |(mut receiver, mut buf)| async { + buf.clear(); + let ret = match receiver.receive_data(&mut buf).await { + Ok(soketto::Data::Text(len)) => String::from_utf8(buf[..len].to_vec()) + .map(Message::Text) + .map_err(|err| Box::new(err) as Box<_>), + Ok(soketto::Data::Binary(len)) => Ok(buf[..len].to_vec()) + .map(Message::Binary), + Err(err) => Err(Box::new(err) as Box<_>), + }; + Some((ret, (receiver, buf))) + }); + + Box::pin(socket_packets.map(move |msg| (msg))) + }); + }, + + result = self.incoming_messages.select_next_some() => { + let message = match result { + Ok(m) => m, + Err(error) => return Event::ConnectionError { + error, + }, + }; + + match message { + Message::Text(message) => { + return Event::TextFrame { + message, + } + } + Message::Binary(message) => { + return Event::BinaryFrame { + message, + } + } + } + }, + + _ = self.rejected_sockets.select_next_some() => { + } + } + } + } +} + +/// Event that has happened on a [`WsServer`]. +#[derive(Debug)] +pub enum Event { + /// A new TCP connection has arrived on the listening socket. + /// + /// The connection *must* be accepted or rejected using [`WsServer::accept`] or + /// [`WsServer::reject`]. + /// No other [`Event::ConnectionOpen`] event will be generated until the current pending + /// connection has been either accepted or rejected. + ConnectionOpen { + /// Address of the remote, as provided by the operating system. + address: SocketAddr, + }, + + /// An error has happened on a connection. The connection is now closed and its + /// [`ConnectionId`] is now invalid. + ConnectionError { error: Box }, + + /// A text frame has been received on a connection. + TextFrame { + /// Message sent by the remote. Its content is entirely decided by the client, and + /// nothing must be assumed about the validity of this message. + message: String, + }, + + /// A binary frame has been received on a connection. 
+ BinaryFrame { + /// Message sent by the remote. Its content is entirely decided by the client, and + /// nothing must be assumed about the validity of this message. + message: Vec, + }, +} diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index b67c29889d..fb7fc91911 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { version = "3.0.0", path = "../../../primitives/io" } sp-state-machine = { version = "0.9.0", path = "../../../primitives/state-machine" } sp-trie = { version = "3.0.0", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "3.1.0", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" @@ -30,7 +30,7 @@ frame-support = { version = "3.0.0", path = "../../../frame/support" } frame-system = { version = "3.0.0", path = "../../../frame/system" } node-testing = { version = "2.0.0", path = "../testing" } pallet-balances = { version = "3.0.0", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +pallet-contracts = { version = "3.0.0", path = "../../../frame/contracts" } pallet-grandpa = { version = "3.0.0", path = "../../../frame/grandpa" } pallet-im-online = { version = "3.0.0", path = "../../../frame/im-online" } pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } @@ -39,6 +39,7 @@ pallet-timestamp = { version = "3.0.0", path = "../../../frame/timestamp" } pallet-transaction-payment = { version = "3.0.0", path = "../../../frame/transaction-payment" } pallet-treasury = { version = "3.0.0", path = "../../../frame/treasury" } sp-application-crypto = { version = "3.0.0", path = "../../../primitives/application-crypto" } +sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } sp-runtime = { version = "3.0.0", path = 
"../../../primitives/runtime" } sp-externalities = { version = "0.9.0", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index d27954d3a7..c18f81bdc0 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -17,7 +17,6 @@ use codec::{Encode, Decode, Joiner}; use frame_support::{ - StorageMap, traits::Currency, weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, }; @@ -32,7 +31,7 @@ use frame_system::{self, EventRecord, Phase}; use node_runtime::{ Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, System, TransactionPayment, Event, - constants::currency::*, + constants::{time::SLOT_DURATION, currency::*}, }; use node_primitives::{Balance, Hash}; use wat; @@ -76,6 +75,7 @@ fn set_heap_pages(ext: &mut E, heap_pages: u64) { } fn changes_trie_block() -> (Vec, Hash) { + let time = 42 * 1000; construct_block( &mut new_test_ext(compact_code_unwrap(), true), 1, @@ -83,13 +83,14 @@ fn changes_trie_block() -> (Vec, Hash) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time)), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), }, - ] + ], + (time / SLOT_DURATION).into(), ) } @@ -98,6 +99,7 @@ fn changes_trie_block() -> (Vec, Hash) { /// from block1's execution to block2 to derive the correct storage_root. 
fn blocks() -> ((Vec, Hash), (Vec, Hash)) { let mut t = new_test_ext(compact_code_unwrap(), false); + let time1 = 42 * 1000; let block1 = construct_block( &mut t, 1, @@ -105,14 +107,16 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time1)), }, CheckedExtrinsic { signed: Some((alice(), signed_extra(0, 0))), function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), }, - ] + ], + (time1 / SLOT_DURATION).into(), ); + let time2 = 52 * 1000; let block2 = construct_block( &mut t, 2, @@ -120,7 +124,7 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time2)), }, CheckedExtrinsic { signed: Some((bob(), signed_extra(0, 0))), @@ -130,12 +134,13 @@ fn blocks() -> ((Vec, Hash), (Vec, Hash)) { signed: Some((alice(), signed_extra(1, 0))), function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 15 * DOLLARS)), } - ] + ], + (time2 / SLOT_DURATION).into(), ); // session change => consensus authorities change => authorities change digest item appears let digest = Header::decode(&mut &block2.0[..]).unwrap().digest; - assert_eq!(digest.logs().len(), 0); + assert_eq!(digest.logs().len(), 1 /* Just babe slot */); (block1, block2) } @@ -154,7 +159,8 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { signed: Some((alice(), signed_extra(nonce, 0))), function: Call::System(frame_system::Call::remark(vec![0; size])), } - ] + ], + (time * 1000 / SLOT_DURATION).into(), ) } @@ -590,6 +596,7 @@ fn deploying_wasm_contract_should_work() { let subsistence = pallet_contracts::Module::::subsistence_threshold(); + let time = 42 * 1000; let b = construct_block( &mut new_test_ext(compact_code_unwrap(), false), 1, @@ 
-597,7 +604,7 @@ fn deploying_wasm_contract_should_work() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time)), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), @@ -622,7 +629,8 @@ fn deploying_wasm_contract_should_work() { ) ), }, - ] + ], + (time / SLOT_DURATION).into(), ); let mut t = new_test_ext(compact_code_unwrap(), false); diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index b376ebc35b..635155b5d0 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -19,6 +19,7 @@ use codec::{Encode, Decode}; use frame_system::offchain::AppCrypto; use frame_support::Hashable; use sp_state_machine::TestExternalities as CoreTestExternalities; +use sp_consensus_babe::{BABE_ENGINE_ID, Slot, digests::{PreDigest, SecondaryPlainPreDigest}}; use sp_core::{ NeverNativeValue, NativeOrEncoded, crypto::KeyTypeId, @@ -29,6 +30,8 @@ use sp_runtime::{ ApplyExtrinsicResult, MultiSigner, MultiSignature, + Digest, + DigestItem, traits::{Header as HeaderT, BlakeTwo256}, }; use sc_executor::{NativeExecutor, WasmExecutionMethod}; @@ -99,7 +102,7 @@ pub fn executor() -> NativeExecutor { pub fn executor_call< R:Decode + Encode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe + NC: FnOnce() -> std::result::Result> + std::panic::UnwindSafe >( t: &mut TestExternalities, method: &str, @@ -145,6 +148,7 @@ pub fn construct_block( number: BlockNumber, parent_hash: Hash, extrinsics: Vec, + babe_slot: Slot, ) -> (Vec, Hash) { use sp_trie::{TrieConfiguration, trie_types::Layout}; @@ -162,7 +166,17 @@ pub fn construct_block( number, extrinsics_root, state_root: Default::default(), - digest: Default::default(), + digest: Digest { + logs: vec![ + DigestItem::PreRuntime( + BABE_ENGINE_ID, + PreDigest::SecondaryPlain(SecondaryPlainPreDigest { + slot: babe_slot, + 
authority_index: 42, + }).encode() + ), + ], + }, }; // execute the block to get the real header. diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 2e92077c4a..90b28539f7 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -25,7 +25,7 @@ use sp_runtime::{Perbill, FixedPointNumber}; use node_runtime::{ CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, Multiplier, TransactionByteFee, - constants::currency::*, + constants::{time::SLOT_DURATION, currency::*}, }; use node_primitives::Balance; use node_testing::keyring::*; @@ -46,6 +46,7 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { let mut tt = new_test_ext(compact_code_unwrap(), false); + let time1 = 42 * 1000; // big one in terms of weight. let block1 = construct_block( &mut tt, @@ -54,15 +55,17 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time1)), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(60))), } - ] + ], + (time1 / SLOT_DURATION).into(), ); + let time2 = 52 * 1000; // small one in terms of weight. 
let block2 = construct_block( &mut tt, @@ -71,13 +74,14 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { vec![ CheckedExtrinsic { signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + function: Call::Timestamp(pallet_timestamp::Call::set(time2)), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), function: Call::System(frame_system::Call::remark(vec![0; 1])), } - ] + ], + (time2 / SLOT_DURATION).into(), ); println!( @@ -219,7 +223,7 @@ fn block_weight_capacity_report() { let mut time = 10; let mut nonce: Index = 0; let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); + let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); loop { let num_transfers = block_number * factor; @@ -238,7 +242,8 @@ fn block_weight_capacity_report() { &mut tt, block_number, previous_hash, - xts + xts, + (time * 1000 / SLOT_DURATION).into(), ); let len = block.0.len(); @@ -286,7 +291,7 @@ fn block_length_capacity_report() { let mut time = 10; let mut nonce: Index = 0; let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); + let mut previous_hash: node_primitives::Hash = GENESIS_HASH.into(); loop { // NOTE: this is super slow. Can probably be improved. 
@@ -303,7 +308,8 @@ fn block_length_capacity_report() { signed: Some((charlie(), signed_extra(nonce, 0))), function: Call::System(frame_system::Call::remark(vec![0u8; (block_number * factor) as usize])), }, - ] + ], + (time * 1000 / SLOT_DURATION).into(), ); let len = block.0.len(); diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 2a55fdcda6..3abb9e9ff4 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -73,7 +73,7 @@ impl PrettyPrinter for DebugPrinter { } fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result { - writeln!(fmt, " {:?}", extrinsic)?; + writeln!(fmt, " {:#?}", extrinsic)?; writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; Ok(()) } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 9389557a57..e044dd5a27 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] jsonrpc-core = "15.1.0" node-primitives = { version = "2.0.0", path = "../primitives" } node-runtime = { version = "2.0.0", path = "../runtime" } -pallet-contracts-rpc = { version = "0.8.0", path = "../../../frame/contracts/rpc/" } +pallet-contracts-rpc = { version = "3.0.0", path = "../../../frame/contracts/rpc/" } pallet-transaction-payment-rpc = { version = "3.0.0", path = "../../../frame/transaction-payment/rpc/" } sc-client-api = { version = "3.0.0", path = "../../../client/api" } sc-consensus-babe = { version = "0.9.0", path = "../../../client/consensus/babe" } @@ -34,6 +34,6 @@ sp-keystore = { version = "0.9.0", path = "../../../primitives/keystore" } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sp-consensus-babe = { version = "0.9.0", path = "../../../primitives/consensus/babe" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } -sp-utils = { version = "3.0.0", path = "../../../primitives/utils" } sp-transaction-pool = { 
version = "3.0.0", path = "../../../primitives/transaction-pool" } substrate-frame-rpc-system = { version = "3.0.0", path = "../../../utils/frame/rpc/system" } +sp-utils = { version = "3.0.0", path = "../../../primitives/utils" } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index bd5c452ec4..4977eac75f 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -38,8 +38,7 @@ use sc_consensus_babe::{Config, Epoch}; use sc_consensus_babe_rpc::BabeRpcHandler; use sc_consensus_epochs::SharedEpochChanges; use sc_finality_grandpa::{ - FinalityProofProvider, GrandpaJustificationStream, SharedVoterState, SharedAuthoritySet, - VoterCommand, + SharedVoterState, SharedAuthoritySet, FinalityProofProvider, GrandpaJustificationStream, VoterCommand }; use sc_finality_grandpa_rpc::GrandpaRpcHandler; pub use sc_rpc_api::DenyUnsafe; diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index fef913dd20..bccac751d0 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -18,6 +18,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = serde = { version = "1.0.102", optional = true } static_assertions = "1.1.0" hex-literal = { version = "0.3.1", optional = true } +log = { version = "0.4.14", default-features = false } # primitives sp-authority-discovery = { version = "3.0.0", default-features = false, path = "../../../primitives/authority-discovery" } @@ -38,11 +39,12 @@ sp-version = { version = "3.0.0", default-features = false, path = "../../../pri # frame dependencies frame-executive = { version = "3.0.0", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../../frame/benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = 
"../../../frame/support" } frame-system = { version = "3.0.0", default-features = false, path = "../../../frame/system" } frame-system-benchmarking = { version = "3.0.0", default-features = false, path = "../../../frame/system/benchmarking", optional = true } frame-system-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +frame-try-runtime = { version = "0.9.0", default-features = false, path = "../../../frame/try-runtime", optional = true } pallet-assets = { version = "3.0.0", default-features = false, path = "../../../frame/assets" } pallet-authority-discovery = { version = "3.0.0", default-features = false, path = "../../../frame/authority-discovery" } pallet-authorship = { version = "3.0.0", default-features = false, path = "../../../frame/authorship" } @@ -50,11 +52,13 @@ pallet-babe = { version = "3.0.0", default-features = false, path = "../../../fr pallet-balances = { version = "3.0.0", default-features = false, path = "../../../frame/balances" } pallet-bounties = { version = "3.0.0", default-features = false, path = "../../../frame/bounties" } pallet-collective = { version = "3.0.0", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-contracts = { version = "3.0.0", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } pallet-democracy = { version = "3.0.0", 
default-features = false, path = "../../../frame/democracy" } +pallet-election-provider-multi-phase = { version = "3.0.0", default-features = false, path = "../../../frame/election-provider-multi-phase" } pallet-elections-phragmen = { version = "3.0.0", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-gilt = { version = "3.0.0", default-features = false, path = "../../../frame/gilt" } pallet-grandpa = { version = "3.0.0", default-features = false, path = "../../../frame/grandpa" } pallet-im-online = { version = "3.0.0", default-features = false, path = "../../../frame/im-online" } pallet-indices = { version = "3.0.0", default-features = false, path = "../../../frame/indices" } @@ -82,8 +86,8 @@ pallet-utility = { version = "3.0.0", default-features = false, path = "../../.. pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment" } pallet-transaction-payment-rpc-runtime-api = { version = "3.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } pallet-vesting = { version = "3.0.0", default-features = false, path = "../../../frame/vesting" } -prml-attestation = { version = "2.0.0", default-features = false, path = "../../../prml/attestation" } -prml-generic-asset = { version = "2.0.0", default-features = false, path = "../../../prml/generic-asset"} +prml-attestation = { version = "3.0.0", default-features = false, path = "../../../prml/attestation" } +prml-generic-asset = { version = "3.0.0", default-features = false, path = "../../../prml/generic-asset"} [build-dependencies] substrate-wasm-builder = { version = "4.0.0", path = "../../../utils/wasm-builder" } @@ -112,6 +116,7 @@ std = [ "pallet-democracy/std", "pallet-elections-phragmen/std", "frame-executive/std", + "pallet-gilt/std", "pallet-grandpa/std", "pallet-im-online/std", "pallet-indices/std", @@ -142,6 +147,7 @@ std = [ "frame-benchmarking/std", 
"frame-system-rpc-runtime-api/std", "frame-system/std", + "pallet-election-provider-multi-phase/std", "pallet-timestamp/std", "pallet-tips/std", "pallet-transaction-payment-rpc-runtime-api/std", @@ -153,6 +159,8 @@ std = [ "pallet-society/std", "pallet-recovery/std", "pallet-vesting/std", + "log/std", + "frame-try-runtime/std", "prml-attestation/std", "prml-generic-asset/std" ] @@ -160,6 +168,7 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", + "pallet-election-provider-multi-phase/runtime-benchmarks", "sp-runtime/runtime-benchmarks", "pallet-assets/runtime-benchmarks", "pallet-babe/runtime-benchmarks", @@ -169,6 +178,7 @@ runtime-benchmarks = [ "pallet-contracts/runtime-benchmarks", "pallet-democracy/runtime-benchmarks", "pallet-elections-phragmen/runtime-benchmarks", + "pallet-gilt/runtime-benchmarks", "pallet-grandpa/runtime-benchmarks", "pallet-identity/runtime-benchmarks", "pallet-im-online/runtime-benchmarks", @@ -192,3 +202,42 @@ runtime-benchmarks = [ "prml-attestation/runtime-benchmarks", "prml-generic-asset/runtime-benchmarks", ] +try-runtime = [ + "frame-executive/try-runtime", + "frame-try-runtime", + "frame-system/try-runtime", + "pallet-assets/try-runtime", + "pallet-authority-discovery/try-runtime", + "pallet-authorship/try-runtime", + "pallet-babe/try-runtime", + "pallet-balances/try-runtime", + "pallet-bounties/try-runtime", + "pallet-collective/try-runtime", + "pallet-contracts/try-runtime", + "pallet-democracy/try-runtime", + "pallet-elections-phragmen/try-runtime", + "pallet-grandpa/try-runtime", + "pallet-im-online/try-runtime", + "pallet-indices/try-runtime", + "pallet-lottery/try-runtime", + "pallet-membership/try-runtime", + "pallet-mmr/try-runtime", + "pallet-multisig/try-runtime", + "pallet-identity/try-runtime", + "pallet-scheduler/try-runtime", + "pallet-offences/try-runtime", + "pallet-proxy/try-runtime", + "pallet-randomness-collective-flip/try-runtime", + 
"pallet-session/try-runtime", + "pallet-staking/try-runtime", + "pallet-sudo/try-runtime", + "pallet-election-provider-multi-phase/try-runtime", + "pallet-timestamp/try-runtime", + "pallet-tips/try-runtime", + "pallet-transaction-payment/try-runtime", + "pallet-treasury/try-runtime", + "pallet-utility/try-runtime", + "pallet-society/try-runtime", + "pallet-recovery/try-runtime", + "pallet-vesting/try-runtime", +] diff --git a/bin/node/runtime/src/constants.rs b/bin/node/runtime/src/constants.rs index f447486c7f..c549b1977d 100644 --- a/bin/node/runtime/src/constants.rs +++ b/bin/node/runtime/src/constants.rs @@ -35,7 +35,7 @@ pub mod time { use node_primitives::{Moment, BlockNumber}; /// Since BABE is probabilistic this is the average expected block time that - /// we are targetting. Blocks will be produced at a minimum duration defined + /// we are targeting. Blocks will be produced at a minimum duration defined /// by `SLOT_DURATION`, but some slots will not be allocated to any /// authority and hence no block will be produced. We expect to have this /// block time on average following the defined slot duration and the value diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 35960bf83f..63166f0a9e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -22,66 +22,62 @@ // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. 
#![recursion_limit = "256"] - -use sp_std::prelude::*; +use codec::{Decode, Encode}; +use frame_support::traits::InstanceFilter; use frame_support::{ - construct_runtime, parameter_types, debug, RuntimeDebug, + construct_runtime, parameter_types, + traits::{Currency, Imbalance, KeyOwnerProofSystem, LockIdentifier, OnUnbalanced, Randomness, U128CurrencyToVote}, weights::{ - Weight, IdentityFee, - constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, DispatchClass, - }, - traits::{ - Currency, Imbalance, KeyOwnerProofSystem, OnUnbalanced, Randomness, LockIdentifier, - U128CurrencyToVote, + constants::{BlockExecutionWeight, ExtrinsicBaseWeight, RocksDbWeight, WEIGHT_PER_SECOND}, + DispatchClass, IdentityFee, Weight, }, + RuntimeDebug, }; use frame_system::{ - EnsureRoot, EnsureOneOf, - limits::{BlockWeights, BlockLength} -}; -use frame_support::traits::InstanceFilter; -use codec::{Encode, Decode}; -use sp_core::{ - crypto::KeyTypeId, - u32_trait::{_1, _2, _3, _4, _5}, - OpaqueMetadata, + limits::{BlockLength, BlockWeights}, + EnsureOneOf, EnsureRoot, }; pub use node_primitives::{AccountId, AssetId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; +use pallet_contracts::weights::WeightInfo; +use pallet_grandpa::fg_primitives; +use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_session::historical as pallet_session_historical; +pub use pallet_transaction_payment::{CurrencyAdapter, Multiplier, TargetedFeeAdjustment}; +use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; pub use prml_generic_asset::AssetInfo; use sp_api::impl_runtime_apis; -use sp_runtime::{ - Permill, Perbill, Perquintill, Percent, ApplyExtrinsicResult, - impl_opaque_keys, generic, create_runtime_str, ModuleId, FixedPointNumber, +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use 
sp_core::{ + crypto::KeyTypeId, + u32_trait::{_1, _2, _3, _4, _5}, + OpaqueMetadata, }; +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; use sp_runtime::traits::{ - self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, - ConvertInto, OpaqueKeys, NumberFor, + self, BlakeTwo256, Block as BlockT, ConvertInto, NumberFor, OpaqueKeys, SaturatedConversion, StaticLookup, }; -use sp_version::RuntimeVersion; +use sp_runtime::transaction_validity::{TransactionPriority, TransactionSource, TransactionValidity}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, ApplyExtrinsicResult, FixedPointNumber, ModuleId, Perbill, Percent, + Permill, Perquintill, +}; +use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use pallet_grandpa::{AuthorityId as GrandpaId, AuthorityList as GrandpaAuthorityList}; -use pallet_grandpa::fg_primitives; -use pallet_im_online::sr25519::AuthorityId as ImOnlineId; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use pallet_transaction_payment::{FeeDetails, RuntimeDispatchInfo}; -pub use pallet_transaction_payment::{Multiplier, TargetedFeeAdjustment, CurrencyAdapter}; -use pallet_session::{historical as pallet_session_historical}; -use sp_inherents::{InherentData, CheckInherentsResult}; +use sp_version::RuntimeVersion; use static_assertions::const_assert; -use pallet_contracts::WeightInfo; #[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; +pub use frame_system::Call as SystemCall; #[cfg(any(feature = "std", test))] pub use pallet_balances::Call as BalancesCall; #[cfg(any(feature = "std", test))] -pub use frame_system::Call as SystemCall; -#[cfg(any(feature = "std", test))] pub use pallet_staking::StakerStatus; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; /// Implementations 
of some helper traits passed into runtime modules as associated types. pub mod impls; @@ -89,7 +85,7 @@ use impls::Author; /// Constant values used within the runtime. pub mod constants; -use constants::{time::*, currency::*}; +use constants::{currency::*, time::*}; use sp_runtime::generic::Era; // Make the WASM binary available. @@ -99,9 +95,11 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { - WASM_BINARY.expect("Development wasm binary is not available. This means the client is \ + WASM_BINARY.expect( + "Development wasm binary is not available. This means the client is \ built with `SKIP_WASM_BUILD` flag and it is only usable for \ - production chains. Please rebuild with the flag disabled.") + production chains. Please rebuild with the flag disabled.", + ) } /// Runtime version. @@ -113,7 +111,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 264, + spec_version: 265, impl_version: 0, apis: RUNTIME_API_VERSIONS, transaction_version: 2, @@ -132,7 +130,7 @@ type NegativeImbalance = >::NegativeImbalance; pub struct DealWithFees; impl OnUnbalanced for DealWithFees { - fn on_unbalanceds(mut fees_then_tips: impl Iterator) { + fn on_unbalanceds(mut fees_then_tips: impl Iterator) { if let Some(fees) = fees_then_tips.next() { // for fees, 80% to treasury, 20% to author let mut split = fees.ration(80, 20); @@ -146,7 +144,7 @@ impl OnUnbalanced for DealWithFees { } } -/// We assume that ~10% of the block weight is consumed by `on_initalize` handlers. +/// We assume that ~10% of the block weight is consumed by `on_initialize` handlers. /// This is used to limit the maximal weight of a single extrinsic. 
const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used @@ -251,25 +249,28 @@ pub enum ProxyType { Governance, Staking, } -impl Default for ProxyType { fn default() -> Self { Self::Any } } +impl Default for ProxyType { + fn default() -> Self { + Self::Any + } +} impl InstanceFilter for ProxyType { fn filter(&self, c: &Call) -> bool { match self { ProxyType::Any => true, ProxyType::NonTransfer => !matches!( c, - Call::Balances(..) | - Call::Vesting(pallet_vesting::Call::vested_transfer(..)) | - Call::Indices(pallet_indices::Call::transfer(..)) + Call::Balances(..) + | Call::Vesting(pallet_vesting::Call::vested_transfer(..)) + | Call::Indices(pallet_indices::Call::transfer(..)) ), ProxyType::Governance => matches!( c, - Call::Democracy(..) | - Call::Council(..) | - Call::Society(..) | - Call::TechnicalCommittee(..) | - Call::Elections(..) | - Call::Treasury(..) + Call::Democracy(..) + | Call::Council(..) | Call::Society(..) + | Call::TechnicalCommittee(..) + | Call::Elections(..) + | Call::Treasury(..) ), ProxyType::Staking => matches!(c, Call::Staking(..)), } @@ -331,18 +332,13 @@ impl pallet_babe::Config for Runtime { type KeyOwnerProofSystem = Historical; - type KeyOwnerProof = >::Proof; + type KeyOwnerProof = + >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; + type KeyOwnerIdentification = + >::IdentificationTuple; - type HandleEquivocation = - pallet_babe::EquivocationHandler; + type HandleEquivocation = pallet_babe::EquivocationHandler; type WeightInfo = (); } @@ -387,8 +383,7 @@ impl pallet_transaction_payment::Config for Runtime { type OnChargeTransaction = CurrencyAdapter; type TransactionByteFee = TransactionByteFee; type WeightToFee = IdentityFee; - type FeeMultiplierUpdate = - TargetedFeeAdjustment; + type FeeMultiplierUpdate = TargetedFeeAdjustment; } parameter_types! 
{ @@ -486,23 +481,61 @@ impl pallet_staking::Config for Runtime { type SlashCancelOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>, >; type SessionInterface = Self; type RewardCurve = RewardCurve; type NextNewSession = Session; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type ElectionLookahead = ElectionLookahead; type Call = Call; type MaxIterations = MaxIterations; type MinSolutionScoreBump = MinSolutionScoreBump; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = StakingUnsignedPriority; // The unsigned solution weight targeted by the OCW. We set it to the maximum possible value of // a single extrinsic. type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; + type ElectionProvider = ElectionProviderMultiPhase; type WeightInfo = pallet_staking::weights::SubstrateWeight; } +parameter_types! { + // phase durations. 1/4 of the last session for each. + pub const SignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; + pub const UnsignedPhase: u32 = EPOCH_DURATION_IN_BLOCKS / 4; + + // fallback: no need to do on-chain phragmen initially. 
+ pub const Fallback: pallet_election_provider_multi_phase::FallbackStrategy = + pallet_election_provider_multi_phase::FallbackStrategy::Nothing; + + pub SolutionImprovementThreshold: Perbill = Perbill::from_rational_approximation(1u32, 10_000); + + // miner configs + pub const MultiPhaseUnsignedPriority: TransactionPriority = StakingUnsignedPriority::get() - 1u64; + pub const MinerMaxIterations: u32 = 10; + pub MinerMaxWeight: Weight = RuntimeBlockWeights::get() + .get(DispatchClass::Normal) + .max_extrinsic.expect("Normal extrinsics have a weight limit configured; qed") + .saturating_sub(BlockExecutionWeight::get()); +} + +impl pallet_election_provider_multi_phase::Config for Runtime { + type Event = Event; + type Currency = Balances; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SolutionImprovementThreshold = MinSolutionScoreBump; + type MinerMaxIterations = MinerMaxIterations; + type MinerMaxWeight = MinerMaxWeight; + type MinerTxPriority = MultiPhaseUnsignedPriority; + type DataProvider = Staking; + type OnChainAccuracy = Perbill; + type CompactSolution = pallet_staking::CompactAssignments; + type Fallback = Fallback; + type WeightInfo = pallet_election_provider_multi_phase::weights::SubstrateWeight; + type BenchmarkingConfig = (); +} + parameter_types! { pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; @@ -549,7 +582,7 @@ impl pallet_democracy::Config for Runtime { >; type BlacklistOrigin = EnsureRoot; // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. + // only do it once and it lasts only for the cool-off period. 
type VetoOrigin = pallet_collective::EnsureMember; type CooloffPeriod = CooloffPeriod; type PreimageByteDeposit = PreimageByteDeposit; @@ -636,7 +669,7 @@ impl pallet_collective::Config for Runtime { type EnsureRootOrHalfCouncil = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; impl pallet_membership::Config for Runtime { type Event = Event; @@ -673,12 +706,12 @@ impl pallet_treasury::Config for Runtime { type ApproveOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective> + pallet_collective::EnsureProportionAtLeast<_3, _5, AccountId, CouncilCollective>, >; type RejectOrigin = EnsureOneOf< AccountId, EnsureRoot, - pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective> + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>, >; type Event = Event; type OnSlash = (); @@ -736,6 +769,7 @@ parameter_types! { ::WeightInfo::on_initialize_per_queue_item(1) - ::WeightInfo::on_initialize_per_queue_item(0) )) / 5) as u32; + pub MaxCodeSize: u32 = 128 * 1024; } impl pallet_contracts::Config for Runtime { @@ -758,6 +792,7 @@ impl pallet_contracts::Config for Runtime { type ChainExtension = (); type DeletionQueueDepth = DeletionQueueDepth; type DeletionWeightLimit = DeletionWeightLimit; + type MaxCodeSize = MaxCodeSize; } impl pallet_sudo::Config for Runtime { @@ -773,8 +808,8 @@ parameter_types! 
{ } impl frame_system::offchain::CreateSignedTransaction for Runtime - where - Call: From, +where + Call: From, { fn create_transaction>( call: Call, @@ -805,13 +840,10 @@ impl frame_system::offchain::CreateSignedTransaction for R ); let raw_payload = SignedPayload::new(call, extra) .map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); + log::warn!("Unable to create signed payload: {:?}", e); }) .ok()?; - let signature = raw_payload - .using_encoded(|payload| { - C::sign(payload, public) - })?; + let signature = raw_payload.using_encoded(|payload| C::sign(payload, public))?; let address = Indices::unlookup(account); let (call, extra, _) = raw_payload.deconstruct(); Some((call, (address, signature.into(), extra))) @@ -823,7 +855,8 @@ impl frame_system::offchain::SigningTypes for Runtime { type Signature = Signature; } -impl frame_system::offchain::SendTransactionTypes for Runtime where +impl frame_system::offchain::SendTransactionTypes for Runtime +where Call: From, { type Extrinsic = UncheckedExtrinsic; @@ -860,13 +893,10 @@ impl pallet_grandpa::Config for Runtime { type KeyOwnerProofSystem = Historical; - type KeyOwnerProof = - >::Proof; + type KeyOwnerProof = >::Proof; - type KeyOwnerIdentification = >::IdentificationTuple; + type KeyOwnerIdentification = + >::IdentificationTuple; type HandleEquivocation = pallet_grandpa::EquivocationHandler; @@ -955,6 +985,18 @@ impl pallet_vesting::Config for Runtime { type WeightInfo = pallet_vesting::weights::SubstrateWeight; } +impl prml_attestation::Config for Runtime { + type Event = Event; + type WeightInfo = (); +} + +impl prml_generic_asset::Config for Runtime { + type AssetId = AssetId; + type Balance = Balance; + type Event = Event; + type WeightInfo = (); +} + impl pallet_mmr::Config for Runtime { const INDEXING_PREFIX: &'static [u8] = b"mmr"; type Hashing = ::Hashing; @@ -1005,16 +1047,30 @@ impl pallet_assets::Config for Runtime { type WeightInfo = pallet_assets::weights::SubstrateWeight; } 
-impl prml_attestation::Config for Runtime { - type Event = Event; - type WeightInfo = (); +parameter_types! { + pub const QueueCount: u32 = 300; + pub const MaxQueueLen: u32 = 1000; + pub const FifoQueueLen: u32 = 500; + pub const Period: BlockNumber = 30 * DAYS; + pub const MinFreeze: Balance = 100 * DOLLARS; + pub const IntakePeriod: BlockNumber = 10; + pub const MaxIntakeBids: u32 = 10; } -impl prml_generic_asset::Config for Runtime { - type AssetId = AssetId; - type Balance = Balance; +impl pallet_gilt::Config for Runtime { type Event = Event; - type WeightInfo = (); + type Currency = Balances; + type AdminOrigin = frame_system::EnsureRoot; + type Deficit = (); + type Surplus = (); + type QueueCount = QueueCount; + type MaxQueueLen = MaxQueueLen; + type FifoQueueLen = FifoQueueLen; + type Period = Period; + type MinFreeze = MinFreeze; + type IntakePeriod = IntakePeriod; + type MaxIntakeBids = MaxIntakeBids; + type WeightInfo = pallet_gilt::weights::SubstrateWeight; } construct_runtime!( @@ -1025,12 +1081,13 @@ construct_runtime!( { System: frame_system::{Module, Call, Config, Storage, Event}, Utility: pallet_utility::{Module, Call, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, + Babe: pallet_babe::{Module, Call, Storage, Config, ValidateUnsigned}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, Authorship: pallet_authorship::{Module, Call, Storage, Inherent}, Indices: pallet_indices::{Module, Call, Storage, Config, Event}, Balances: pallet_balances::{Module, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Module, Storage}, + ElectionProviderMultiPhase: pallet_election_provider_multi_phase::{Module, Call, Storage, Event, ValidateUnsigned}, Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, Session: pallet_session::{Module, Call, Storage, Event, Config}, Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, @@ -1059,9 +1116,9 
@@ construct_runtime!( Assets: pallet_assets::{Module, Call, Storage, Event}, Mmr: pallet_mmr::{Module, Storage}, Lottery: pallet_lottery::{Module, Call, Storage, Event}, - GenericAsset: prml_generic_asset::{Module, Call, Storage, Event}, + Gilt: pallet_gilt::{Module, Call, Storage, Event, Config}, Attestation: prml_attestation::{Module, Call, Storage, Event}, - + GenericAsset: prml_generic_asset::{Module, Call, Storage, Event} } ); @@ -1096,18 +1153,15 @@ pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive, Runtime, AllModules>; +pub type Executive = + frame_executive::Executive, Runtime, AllModules, ()>; /// MMR helper types. mod mmr { use super::Runtime; pub use pallet_mmr::primitives::*; - pub type Leaf = < - ::LeafData - as - LeafDataProvider - >::LeafData; + pub type Leaf = <::LeafData as LeafDataProvider>::LeafData; pub type Hash = ::Hash; pub type Hashing = ::Hashing; } @@ -1341,15 +1395,24 @@ impl_runtime_apis! { } } + #[cfg(feature = "try-runtime")] + impl frame_try_runtime::TryRuntime for Runtime { + fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString> { + frame_support::debug::RuntimeLogger::init(); + let weight = Executive::try_runtime_upgrade()?; + Ok((weight, RuntimeBlockWeights::get().max_block)) + } + } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. - // To get around that, we separated the Session benchmarks into its own crate, which is why - // we need these two lines below. 
+ // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency + // issues. To get around that, we separated the Session benchmarks into its own crate, + // which is why we need these two lines below. use pallet_session_benchmarking::Module as SessionBench; use pallet_offences_benchmarking::Module as OffencesBench; use frame_system_benchmarking::Module as SystemBench; @@ -1383,7 +1446,9 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_collective, Council); add_benchmark!(params, batches, pallet_contracts, Contracts); add_benchmark!(params, batches, pallet_democracy, Democracy); + add_benchmark!(params, batches, pallet_election_provider_multi_phase, ElectionProviderMultiPhase); add_benchmark!(params, batches, pallet_elections_phragmen, Elections); + add_benchmark!(params, batches, pallet_gilt, Gilt); add_benchmark!(params, batches, pallet_grandpa, Grandpa); add_benchmark!(params, batches, pallet_identity, Identity); add_benchmark!(params, batches, pallet_im_online, ImOnline); @@ -1418,9 +1483,11 @@ mod tests { #[test] fn validate_transaction_submitter_bounds() { - fn is_submit_signed_transaction() where + fn is_submit_signed_transaction() + where T: CreateSignedTransaction, - {} + { + } is_submit_signed_transaction::(); } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 95bc8abef6..5ae277b35b 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -18,7 +18,7 @@ sc-service = { version = "0.9.0", features = ["test-helpers", "db"], path = ".. 
sc-client-db = { version = "0.9.0", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "3.0.0", path = "../../../client/api/" } codec = { package = "parity-scale-codec", version = "2.0.0" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } +pallet-contracts = { version = "3.0.0", path = "../../../frame/contracts" } pallet-grandpa = { version = "3.0.0", path = "../../../frame/grandpa" } pallet-indices = { version = "3.0.0", path = "../../../frame/indices" } sp-keyring = { version = "3.0.0", path = "../../../primitives/keyring" } diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 75d0d18e6e..b026b9530e 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -119,5 +119,6 @@ pub fn config_endowed( max_members: 999, }), pallet_vesting: Some(Default::default()), + pallet_gilt: Some(Default::default()), } } diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 5f1e0134a5..3b725bf877 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -78,7 +78,7 @@ pub trait CallExecutor { Result, Self::Error> ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, initialize_block_fn: IB, diff --git a/client/api/src/in_mem.rs b/client/api/src/in_mem.rs index c108acc7b4..b7060cf1d9 100644 --- a/client/api/src/in_mem.rs +++ b/client/api/src/in_mem.rs @@ -488,7 +488,6 @@ impl ProvideChtRoots for Blockchain { /// In-memory operation. 
pub struct BlockImportOperation { pending_block: Option>, - pending_cache: HashMap>, old_state: InMemoryBackend>, new_state: Option<> as StateBackend>>::Transaction>, aux: Vec<(Vec, Option>)>, @@ -520,9 +519,7 @@ impl backend::BlockImportOperation for BlockImportOperatio Ok(()) } - fn update_cache(&mut self, cache: HashMap>) { - self.pending_cache = cache; - } + fn update_cache(&mut self, _cache: HashMap>) {} fn update_db_storage( &mut self, @@ -637,7 +634,6 @@ impl backend::Backend for Backend where Block::Hash let old_state = self.state_at(BlockId::Hash(Default::default()))?; Ok(BlockImportOperation { pending_block: None, - pending_cache: Default::default(), old_state, new_state: None, aux: Default::default(), diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 9a1b8c8dab..4de6b54790 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -23,7 +23,7 @@ derive_more = "0.99.2" either = "1.5.3" futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.34.0", default-features = false, features = ["kad"] } +libp2p = { version = "0.35.1", default-features = false, features = ["kad"] } log = "0.4.8" prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} prost = "0.7" diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 26d4396ca8..818eb1beb3 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -93,7 +93,7 @@ where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { new_worker_and_service_with_config( @@ -121,7 +121,7 @@ where Block: BlockT + Unpin + 'static, Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - 
>::Api: AuthorityDiscoveryApi, + >::Api: AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { let (to_worker, from_service) = mpsc::channel(0); diff --git a/client/authority-discovery/src/worker.rs b/client/authority-discovery/src/worker.rs index e47f42a445..b1fb89669b 100644 --- a/client/authority-discovery/src/worker.rs +++ b/client/authority-discovery/src/worker.rs @@ -132,7 +132,7 @@ where Network: NetworkProvider, Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, >::Api: - AuthorityDiscoveryApi, + AuthorityDiscoveryApi, DhtEventStream: Stream + Unpin, { /// Construct a [`Worker`]. @@ -296,10 +296,10 @@ where for (sign_result, key) in signatures.into_iter().zip(keys) { let mut signed_addresses = vec![]; - // sign_with_all returns Result signature - // is generated for a public key that is supported. // Verify that all signatures exist for all provided keys. - let signature = sign_result.map_err(|_| Error::MissingSignature(key.clone()))?; + let signature = sign_result.ok() + .flatten() + .ok_or_else(|| Error::MissingSignature(key.clone()))?; schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), signature, @@ -332,7 +332,7 @@ where .client .runtime_api() .authorities(&id) - .map_err(Error::CallingRuntime)? + .map_err(|e| Error::CallingRuntime(e.into()))? .into_iter() .filter(|id| !local_keys.contains(id.as_ref())) .collect(); @@ -546,7 +546,7 @@ where let id = BlockId::hash(client.info().best_hash); let authorities = client.runtime_api() .authorities(&id) - .map_err(Error::CallingRuntime)? + .map_err(|e| Error::CallingRuntime(e.into()))? 
.into_iter() .map(std::convert::Into::into) .collect::>(); diff --git a/client/authority-discovery/src/worker/tests.rs b/client/authority-discovery/src/worker/tests.rs index 20c4c93709..04f597aa26 100644 --- a/client/authority-discovery/src/worker/tests.rs +++ b/client/authority-discovery/src/worker/tests.rs @@ -100,8 +100,6 @@ pub(crate) struct RuntimeApi { sp_api::mock_impl_runtime_apis! { impl AuthorityDiscoveryApi for RuntimeApi { - type Error = sp_blockchain::Error; - fn authorities(&self) -> Vec { self.authorities.clone() } @@ -189,7 +187,7 @@ async fn build_dht_event( serialized_addresses.as_slice(), ) .await - .map_err(|_| Error::Signing) + .unwrap() .unwrap(); let mut signed_addresses = vec![]; @@ -197,9 +195,7 @@ async fn build_dht_event( addresses: serialized_addresses.clone(), signature, } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto) - .unwrap(); + .encode(&mut signed_addresses).unwrap(); let key = hash_authority_id(&public_key.to_raw_vec()); let value = signed_addresses; diff --git a/client/basic-authorship/README.md b/client/basic-authorship/README.md index 1a20593c09..d29ce258e5 100644 --- a/client/basic-authorship/README.md +++ b/client/basic-authorship/README.md @@ -20,7 +20,6 @@ let future = proposer.propose( Default::default(), Default::default(), Duration::from_secs(2), - RecordProof::Yes, ); // We wait until the proposition is performed. 
@@ -29,4 +28,4 @@ println!("Generated block: {:?}", block.block); ``` -License: GPL-3.0-or-later WITH Classpath-exception-2.0 \ No newline at end of file +License: GPL-3.0-or-later WITH Classpath-exception-2.0 diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index 73e6156615..0c5bb7abef 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -23,7 +23,7 @@ use std::{pin::Pin, time, sync::Arc}; use sc_client_api::backend; use codec::Decode; -use sp_consensus::{evaluation, Proposal, RecordProof}; +use sp_consensus::{evaluation, Proposal, ProofRecording, DisableProofRecording, EnableProofRecording}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use log::{error, info, debug, trace, warn}; @@ -52,7 +52,7 @@ use sc_proposer_metrics::MetricsLink as PrometheusMetrics; pub const DEFAULT_MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; /// Proposer factory. -pub struct ProposerFactory { +pub struct ProposerFactory { spawn_handle: Box, /// The client instance. client: Arc, @@ -60,12 +60,15 @@ pub struct ProposerFactory { transaction_pool: Arc, /// Prometheus Link, metrics: PrometheusMetrics, - /// phantom member to pin the `Backend` type. - _phantom: PhantomData, + /// phantom member to pin the `Backend`/`ProofRecording` type. + _phantom: PhantomData<(B, PR)>, max_block_size: usize, } -impl ProposerFactory { +impl ProposerFactory { + /// Create a new proposer factory. + /// + /// Proof recording will be disabled when using proposers built by this instance to build blocks. pub fn new( spawn_handle: impl SpawnNamed + 'static, client: Arc, @@ -81,7 +84,30 @@ impl ProposerFactory { max_block_size: DEFAULT_MAX_BLOCK_SIZE, } } +} + +impl ProposerFactory { + /// Create a new proposer factory with proof recording enabled. + /// + /// Each proposer created by this instance will record a proof while building a block. 
+ pub fn with_proof_recording( + spawn_handle: impl SpawnNamed + 'static, + client: Arc, + transaction_pool: Arc, + prometheus: Option<&PrometheusRegistry>, + ) -> Self { + ProposerFactory { + spawn_handle: Box::new(spawn_handle), + client, + transaction_pool, + metrics: PrometheusMetrics::new(prometheus), + _phantom: PhantomData, + max_block_size: DEFAULT_MAX_BLOCK_SIZE, + } + } +} +impl ProposerFactory { /// Set the maximum block size in bytes. /// /// The default value for the maximum block size is: @@ -91,7 +117,7 @@ impl ProposerFactory { } } -impl ProposerFactory +impl ProposerFactory where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, @@ -99,20 +125,20 @@ impl ProposerFactory C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, C::Api: ApiExt> - + BlockBuilderApi, + + BlockBuilderApi, { - pub fn init_with_now( + fn init_with_now( &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Proposer { + ) -> Proposer { let parent_hash = parent_header.hash(); let id = BlockId::hash(parent_hash); info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); - let proposer = Proposer { + let proposer = Proposer::<_, _, _, _, PR> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), parent_hash, @@ -129,8 +155,8 @@ impl ProposerFactory } } -impl sp_consensus::Environment for - ProposerFactory +impl sp_consensus::Environment for + ProposerFactory where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, @@ -138,10 +164,11 @@ impl sp_consensus::Environment for C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, C::Api: ApiExt> - + BlockBuilderApi, + + BlockBuilderApi, + PR: ProofRecording, { type CreateProposer = future::Ready>; - type Proposer = Proposer; + type Proposer = Proposer; type Error = sp_blockchain::Error; fn init( @@ -153,7 +180,7 @@ impl sp_consensus::Environment for } /// The proposer 
logic. -pub struct Proposer { +pub struct Proposer { spawn_handle: Box, client: Arc, parent_hash: ::Hash, @@ -162,12 +189,12 @@ pub struct Proposer { transaction_pool: Arc, now: Box time::Instant + Send + Sync>, metrics: PrometheusMetrics, - _phantom: PhantomData, + _phantom: PhantomData<(B, PR)>, max_block_size: usize, } -impl sp_consensus::Proposer for - Proposer +impl sp_consensus::Proposer for + Proposer where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, @@ -175,20 +202,22 @@ impl sp_consensus::Proposer for C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, C::Api: ApiExt> - + BlockBuilderApi, + + BlockBuilderApi, + PR: ProofRecording, { type Transaction = backend::TransactionFor; type Proposal = Pin, Self::Error> + Output = Result, Self::Error> > + Send>>; type Error = sp_blockchain::Error; + type ProofRecording = PR; + type Proof = PR::Proof; fn propose( self, inherent_data: InherentData, inherent_digests: DigestFor, max_duration: time::Duration, - record_proof: RecordProof, ) -> Self::Proposal { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); @@ -200,7 +229,6 @@ impl sp_consensus::Proposer for inherent_data, inherent_digests, deadline, - record_proof, ).await; if tx.send(res).is_err() { trace!("Could not send block production result to proposer!"); @@ -213,7 +241,7 @@ impl sp_consensus::Proposer for } } -impl Proposer +impl Proposer where A: TransactionPool, B: backend::Backend + Send + Sync + 'static, @@ -221,15 +249,15 @@ impl Proposer C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, C::Api: ApiExt> - + BlockBuilderApi, + + BlockBuilderApi, + PR: ProofRecording, { async fn propose_with( self, inherent_data: InherentData, inherent_digests: DigestFor, deadline: time::Instant, - record_proof: RecordProof, - ) -> Result>, sp_blockchain::Error> { + ) -> Result, PR::Proof>, sp_blockchain::Error> { /// If the block is full we will 
attempt to push at most /// this number of transactions before quitting for real. /// It allows us to increase block utilization. @@ -238,7 +266,7 @@ impl Proposer let mut block_builder = self.client.new_block_at( &self.parent_id, inherent_digests, - record_proof, + PR::ENABLED, )?; for inherent in block_builder.create_inherents(inherent_data)? { @@ -361,6 +389,8 @@ impl Proposer error!("Failed to evaluate authored block: {:?}", err); } + let proof = PR::into_proof(proof) + .map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; Ok(Proposal { block, proof, storage_changes }) } } @@ -452,7 +482,7 @@ mod tests { // when let deadline = time::Duration::from_secs(3); let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) + proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); // then @@ -497,7 +527,7 @@ mod tests { let deadline = time::Duration::from_secs(1); futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) + proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); } @@ -543,7 +573,7 @@ mod tests { let deadline = time::Duration::from_secs(9); let proposal = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No), + proposer.propose(Default::default(), Default::default(), deadline), ).unwrap(); assert_eq!(proposal.block.extrinsics().len(), 1); @@ -624,7 +654,7 @@ mod tests { // when let deadline = time::Duration::from_secs(9); let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) + proposer.propose(Default::default(), Default::default(), deadline) ).map(|r| r.block).unwrap(); // then diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 224dccd36b..ccf73cc93f 100644 --- 
a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -22,7 +22,7 @@ //! //! ``` //! # use sc_basic_authorship::ProposerFactory; -//! # use sp_consensus::{Environment, Proposer, RecordProof}; +//! # use sp_consensus::{Environment, Proposer}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{ @@ -61,7 +61,6 @@ //! Default::default(), //! Default::default(), //! Duration::from_secs(2), -//! RecordProof::Yes, //! ); //! //! // We wait until the proposition is performed. diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index dda5edde36..1019e2411c 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -17,7 +17,6 @@ targets = ["x86_64-unknown-linux-gnu"] sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" } sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } sp-api = { version = "3.0.0", path = "../../primitives/api" } -sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } sp-core = { version = "3.0.0", path = "../../primitives/core" } sp-block-builder = { version = "3.0.0", path = "../../primitives/block-builder" } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 5a7e0277d9..4893072a71 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -35,15 +35,50 @@ use sp_runtime::{ use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; use sp_api::{ - Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, - TransactionOutcome, + Core, ApiExt, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof, TransactionOutcome, }; -use sp_consensus::RecordProof; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client_api::backend; +/// Used 
as parameter to [`BlockBuilderProvider`] to express if proof recording should be enabled. +/// +/// When `RecordProof::Yes` is given, all accessed trie nodes should be saved. These recorded +/// trie nodes can be used by a third party to proof this proposal without having access to the +/// full storage. +#[derive(Copy, Clone, PartialEq)] +pub enum RecordProof { + /// `Yes`, record a proof. + Yes, + /// `No`, don't record any proof. + No, +} + +impl RecordProof { + /// Returns if `Self` == `Yes`. + pub fn yes(&self) -> bool { + matches!(self, Self::Yes) + } +} + +/// Will return [`RecordProof::No`] as default value. +impl Default for RecordProof { + fn default() -> Self { + Self::No + } +} + +impl From for RecordProof { + fn from(val: bool) -> Self { + if val { + Self::Yes + } else { + Self::No + } + } +} + /// A block that was build by [`BlockBuilder`] plus some additional data. /// /// This additional data includes the `storage_changes`, these changes can be applied to the @@ -106,8 +141,7 @@ impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> where Block: BlockT, A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt>, + A::Api: BlockBuilderApi + ApiExt>, B: backend::Backend, { /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. @@ -122,7 +156,7 @@ where record_proof: RecordProof, inherent_digests: DigestFor, backend: &'a B, - ) -> Result> { + ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), Default::default(), @@ -155,7 +189,7 @@ where /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). 
- pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { + pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), Error> { let block_id = &self.block_id; let extrinsics = &mut self.extrinsics; @@ -174,7 +208,7 @@ where Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), ) }, - Err(e) => TransactionOutcome::Rollback(Err(e)), + Err(e) => TransactionOutcome::Rollback(Err(Error::from(e))), } }) } @@ -184,10 +218,7 @@ where /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. - pub fn build(mut self) -> Result< - BuiltBlock>, - ApiErrorFor - > { + pub fn build(mut self) -> Result>, Error> { let header = self.api.finalize_block_with_context( &self.block_id, ExecutionContext::BlockConstruction )?; @@ -227,7 +258,7 @@ where pub fn create_inherents( &mut self, inherent_data: sp_inherents::InherentData, - ) -> Result, ApiErrorFor> { + ) -> Result, Error> { let block_id = self.block_id; self.api.execute_in_transaction(move |api| { // `create_inherents` should not change any state, to ensure this we always rollback @@ -237,7 +268,7 @@ where ExecutionContext::BlockConstruction, inherent_data )) - }) + }).map_err(|e| Error::Application(Box::new(e))) } } diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index d0c2008411..4f3484df31 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] proc-macro-crate = "0.1.4" proc-macro2 = "1.0.6" -quote = "1.0.9" +quote = "1.0.3" syn = "1.0.58" [dev-dependencies] diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 03d23c5aec..4617c2d790 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -18,7 +18,7 @@ regex = "1.4.2" tokio = { version = "0.2.21", features = [ "signal", "rt-core", "rt-threaded", "blocking" ] } 
futures = "0.3.9" fdlimit = "0.2.1" -libp2p = "0.34.0" +libp2p = "0.35.1" parity-scale-codec = "2.0.0" hex = "0.4.2" rand = "0.7.3" diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 2ebfa38925..4b1f197cf3 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -165,18 +165,35 @@ impl Into for RpcMethods { } } -arg_enum! { - /// Database backend - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - pub enum Database { - // Facebooks RocksDB - RocksDb, - // ParityDb. https://github.com/paritytech/parity-db/ - ParityDb, +/// Database backend +#[derive(Debug, Clone, Copy)] +pub enum Database { + /// Facebooks RocksDB + RocksDb, + /// ParityDb. + ParityDb, +} + +impl std::str::FromStr for Database { + type Err = String; + + fn from_str(s: &str) -> Result { + if s.eq_ignore_ascii_case("rocksdb") { + Ok(Self::RocksDb) + } else if s.eq_ignore_ascii_case("paritydb-experimental") { + Ok(Self::ParityDb) + } else { + Err(format!("Unknwon variant `{}`, known variants: {:?}", s, Self::variants())) + } } } +impl Database { + /// Returns all the variants of this enum to be shown in the cli. + pub fn variants() -> &'static [&'static str] { + &["rocksdb", "paritydb-experimental"] + } +} arg_enum! { /// Whether off-chain workers are enabled. diff --git a/client/cli/src/commands/run_cmd.rs b/client/cli/src/commands/run_cmd.rs index bbb8d6f68d..bb6f77819d 100644 --- a/client/cli/src/commands/run_cmd.rs +++ b/client/cli/src/commands/run_cmd.rs @@ -27,7 +27,7 @@ use crate::params::TransactionPoolParams; use crate::CliConfiguration; use regex::Regex; use sc_service::{ - config::{BasePath, MultiaddrWithPeerId, PrometheusConfig, TransactionPoolOptions}, + config::{BasePath, PrometheusConfig, TransactionPoolOptions}, ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; @@ -43,33 +43,16 @@ pub struct RunCmd { /// participate in any consensus task that it can (e.g. depending on /// availability of local keys). 
#[structopt( - long = "validator", - conflicts_with_all = &[ "sentry" ] + long = "validator" )] pub validator: bool, - /// Enable sentry mode. - /// - /// The node will be started with the authority role and participate in - /// consensus tasks as an "observer", it will never actively participate - /// regardless of whether it could (e.g. keys are available locally). This - /// mode is useful as a secure proxy for validators (which would run - /// detached from the network), since we want this node to participate in - /// the full consensus protocols in order to have all needed consensus data - /// available to relay to private nodes. - #[structopt( - long = "sentry", - conflicts_with_all = &[ "validator", "light" ], - parse(try_from_str) - )] - pub sentry: Vec, - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. #[structopt(long)] pub no_grandpa: bool, /// Experimental: Run in light client mode. - #[structopt(long = "light", conflicts_with = "sentry")] + #[structopt(long = "light")] pub light: bool, /// Listen to all RPC interfaces. @@ -245,17 +228,6 @@ pub struct RunCmd { #[structopt(long)] pub max_runtime_instances: Option, - /// Specify a list of sentry node public addresses. - /// - /// Can't be used with --public-addr as the sentry node would take precedence over the public address - /// specified there. - #[structopt( - long = "sentry-nodes", - value_name = "ADDR", - conflicts_with_all = &[ "sentry", "public-addr" ] - )] - pub sentry_nodes: Vec, - /// Run a temporary node. 
/// /// A temporary directory will be created to store the configuration and will be deleted @@ -366,13 +338,7 @@ impl CliConfiguration for RunCmd { Ok(if is_light { sc_service::Role::Light } else if is_authority { - sc_service::Role::Authority { - sentry_nodes: self.sentry_nodes.clone(), - } - } else if !self.sentry.is_empty() { - sc_service::Role::Sentry { - validators: self.sentry.clone(), - } + sc_service::Role::Authority } else { sc_service::Role::Full }) diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 247f6d2fdd..748e3b1012 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -33,7 +33,7 @@ use sc_service::config::{ TaskExecutor, TelemetryEndpoints, TransactionPoolOptions, WasmExecutionMethod, }; use sc_service::{ChainSpec, TracingReceiver, KeepBlocks, TransactionStorageMode}; -use sc_telemetry::{TelemetryHandle, TelemetrySpan}; +use sc_telemetry::TelemetryHandle; use sc_tracing::logging::LoggerBuilder; use std::net::SocketAddr; use std::path::PathBuf; @@ -486,7 +486,7 @@ pub trait CliConfiguration: Sized { let node_key = self.node_key(&net_config_dir)?; let role = self.role(is_dev)?; let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); - let is_validator = role.is_network_authority(); + let is_validator = role.is_authority(); let (keystore_remote, keystore) = self.keystore_config(&config_dir)?; let telemetry_endpoints = telemetry_handle .as_ref() @@ -494,7 +494,6 @@ pub trait CliConfiguration: Sized { .transpose()? 
// Don't initialise telemetry if `telemetry_endpoints` == Some([]) .filter(|x| !x.is_empty()); - let telemetry_span = telemetry_endpoints.as_ref().map(|_| TelemetrySpan::new()); let unsafe_pruning = self .import_params() @@ -534,7 +533,6 @@ pub trait CliConfiguration: Sized { rpc_cors: self.rpc_cors(is_dev)?, prometheus_config: self.prometheus_config(DCV::prometheus_listen_port())?, telemetry_endpoints, - telemetry_span, telemetry_external_transport: self.telemetry_external_transport()?, default_heap_pages: self.default_heap_pages()?, offchain_worker: self.offchain_worker(&role)?, diff --git a/client/cli/src/params/database_params.rs b/client/cli/src/params/database_params.rs index 23d2adc07f..3d5aca10d5 100644 --- a/client/cli/src/params/database_params.rs +++ b/client/cli/src/params/database_params.rs @@ -29,6 +29,7 @@ pub struct DatabaseParams { alias = "db", value_name = "DB", case_insensitive = true, + possible_values = &Database::variants(), )] pub database: Option, @@ -38,7 +39,7 @@ pub struct DatabaseParams { /// Enable storage chain mode /// - /// This changes the storage format for blocks bodys. + /// This changes the storage format for blocks bodies. /// If this is enabled, each transaction is stored separately in the /// transaction database column and is only referenced by hash /// in the block body column. diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index 467ca25353..987b8527e6 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -46,10 +46,10 @@ impl PruningParams { // unless `unsafe_pruning` is set. 
Ok(match &self.pruning { Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role.is_network_authority() => PruningMode::ArchiveAll, + None if role.is_authority() => PruningMode::ArchiveAll, None => PruningMode::default(), Some(s) => { - if role.is_network_authority() && !unsafe_pruning { + if role.is_authority() && !unsafe_pruning { return Err(error::Error::Input( "Validators should run with state pruning disabled (i.e. archive). \ You can ignore this check with `--unsafe-pruning`." diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 5b9e7c590b..746ee6597e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -98,9 +98,9 @@ pub fn slot_duration(client: &C) -> CResult where A: Codec, B: BlockT, C: AuxStore + ProvideRuntimeApi, - C::Api: AuraApi, + C::Api: AuraApi, { - SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)) + SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b).map_err(Into::into)) } /// Get slot author for given block along with authorities. @@ -179,7 +179,7 @@ pub fn start_aura( &inherent_data_providers, slot_duration.slot_duration() )?; - Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _>( + Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _, _>( slot_duration, select_chain, worker, @@ -255,8 +255,8 @@ where let expected_author = slot_author::

(slot, epoch_data); expected_author.and_then(|p| { if SyncCryptoStore::has_keys( - &*self.keystore, - &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], + &*self.keystore, + &[(p.to_raw_vec(), sp_application_crypto::key_types::AURA)], ) { Some(p.clone()) } else { @@ -299,6 +299,9 @@ where header_hash.as_ref() ).map_err(|e| sp_consensus::Error::CannotSign( public.clone(), e.to_string(), + ))? + .ok_or_else(|| sp_consensus::Error::CannotSign( + public.clone(), "Could not find key in keystore.".into(), ))?; let signature = signature.clone().try_into() .map_err(|_| sp_consensus::Error::InvalidSignature( @@ -515,7 +518,7 @@ impl AuraVerifier where inherent_data: InherentData, timestamp_now: u64, ) -> Result<(), Error> where - C: ProvideRuntimeApi, C::Api: BlockBuilderApi, + C: ProvideRuntimeApi, C::Api: BlockBuilderApi, CAW: CanAuthorWith, { const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; @@ -534,7 +537,7 @@ impl AuraVerifier where &block_id, block, inherent_data, - ).map_err(Error::Client)?; + ).map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { inherent_res @@ -578,7 +581,7 @@ impl Verifier for AuraVerifier where sc_client_api::backend::AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, DigestItemFor: CompatibleDigestItem

, P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, @@ -624,7 +627,7 @@ impl Verifier for AuraVerifier where // skip the inherents verification if the runtime API is old. if self.client .runtime_api() - .has_api_with::, _>( + .has_api_with::, _>( &BlockId::Hash(parent_hash), |v| v >= 2, ) @@ -680,7 +683,7 @@ impl Verifier for AuraVerifier where } fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - A: Codec, + A: Codec + Debug, B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, @@ -716,7 +719,7 @@ fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusErro #[allow(deprecated)] fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> where - A: Codec, + A: Codec + Debug, B: BlockT, C: ProvideRuntimeApi + BlockOf + ProvideCache, C::Api: AuraApi, @@ -842,14 +845,14 @@ pub fn import_queue( can_author_with: CAW, ) -> Result, sp_consensus::Error> where B: BlockT, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, + C::Api: BlockBuilderApi + AuraApi> + ApiExt, C: 'static + ProvideRuntimeApi + BlockOf + ProvideCache + Send + Sync + AuxStore + HeaderBackend, I: BlockImport> + Send + Sync + 'static, DigestItemFor: CompatibleDigestItem

, P: Pair + Send + Sync + 'static, P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, - S: sp_core::traits::SpawnNamed, + S: sp_core::traits::SpawnEssentialNamed, CAW: CanAuthorWith + Send + Sync + 'static, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; @@ -874,7 +877,9 @@ pub fn import_queue( #[cfg(test)] mod tests { use super::*; - use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor}; + use sp_consensus::{ + NoNetwork as DummyOracle, Proposal, AlwaysCanAuthor, DisableProofRecording, + }; use sc_network_test::{Block as TestBlock, *}; use sp_runtime::traits::{Block as BlockT, DigestFor}; use sc_network::config::ProtocolConfig; @@ -883,7 +888,7 @@ mod tests { use sc_client_api::BlockchainEvents; use sp_consensus_aura::sr25519::AuthorityPair; use sc_consensus_slots::{SimpleSlotWorker, BackoffAuthoringOnFinalizedHeadLagging}; - use std::task::Poll; + use std::{task::Poll, time::Instant}; use sc_block_builder::BlockBuilderProvider; use sp_runtime::traits::Header as _; use substrate_test_runtime_client::{TestClient, runtime::{Header, H256}}; @@ -913,20 +918,21 @@ mod tests { substrate_test_runtime_client::Backend, TestBlock >; - type Proposal = future::Ready, Error>>; + type Proposal = future::Ready, Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); fn propose( self, _: InherentData, digests: DigestFor, _: Duration, - _: RecordProof, ) -> Self::Proposal { let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into()); future::ready(r.map(|b| Proposal { block: b.block, - proof: b.proof, + proof: (), storage_changes: b.storage_changes, })) } @@ -1121,4 +1127,51 @@ mod tests { assert!(worker.claim_slot(&head, 6.into(), &authorities).is_none()); assert!(worker.claim_slot(&head, 7.into(), &authorities).is_some()); } + + #[test] + fn on_slot_returns_correct_block() { + let net = AuraTestNet::new(4); + + let keystore_path 
= tempfile::tempdir().expect("Creates keystore path"); + let keystore = LocalKeystore::open(keystore_path.path(), None) + .expect("Creates keystore."); + SyncCryptoStore::sr25519_generate_new( + &keystore, + AuthorityPair::ID, Some(&Keyring::Alice.to_seed()), + ).expect("Key should be created"); + + let net = Arc::new(Mutex::new(net)); + + let mut net = net.lock(); + let peer = net.peer(3); + let client = peer.client().as_full().expect("full clients are created").clone(); + let environ = DummyFactory(client.clone()); + + let mut worker = AuraWorker { + client: client.clone(), + block_import: Arc::new(Mutex::new(client.clone())), + env: environ, + keystore: keystore.into(), + sync_oracle: DummyOracle.clone(), + force_authoring: false, + backoff_authoring_blocks: Option::<()>::None, + _key_type: PhantomData::, + }; + + let head = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let res = futures::executor::block_on(worker.on_slot( + head, + SlotInfo { + slot: 0.into(), + timestamp: 0, + ends_at: Instant::now() + Duration::from_secs(100), + inherent_data: InherentData::new(), + duration: 1000, + }, + )).unwrap(); + + // The returned block should be imported and we should be able to get its header by now. 
+ assert!(client.header(&BlockId::Hash(res.block.hash())).unwrap().is_some()); + } } diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 1120f66061..cf75a4a43f 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -159,7 +159,7 @@ fn claim_secondary_slot( authority_id.as_ref(), transcript_data, ); - if let Ok(signature) = result { + if let Ok(Some(signature)) = result { Some(PreDigest::SecondaryVRF(SecondaryVRFPreDigest { slot, vrf_output: VRFOutput(signature.output), @@ -265,7 +265,7 @@ fn claim_primary_slot( authority_id.as_ref(), transcript_data, ); - if let Ok(signature) = result { + if let Ok(Some(signature)) = result { let public = PublicKey::from_bytes(&authority_id.to_raw_vec()).ok()?; let inout = match signature.output.attach_input_hash(&public, transcript) { Ok(inout) => inout, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 6ffa18c3cc..a8e533d2a8 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -273,6 +273,8 @@ pub enum Error { CheckInherents(String), /// Client error Client(sp_blockchain::Error), + /// Runtime Api error. + RuntimeApi(sp_api::ApiError), /// Runtime error Runtime(sp_inherents::Error), /// Fork tree error @@ -310,14 +312,14 @@ impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. 
pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, + C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, { trace!(target: "babe", "Getting slot duration"); match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| { - let has_api_v1 = a.has_api_with::, _>( + let has_api_v1 = a.has_api_with::, _>( &b, |v| v == 1, )?; - let has_api_v2 = a.has_api_with::, _>( + let has_api_v2 = a.has_api_with::, _>( &b, |v| v == 2, )?; @@ -326,7 +328,7 @@ impl Config { Ok(a.configuration_before_version_2(b)?.into()) } } else if has_api_v2 { - a.configuration(b) + a.configuration(b).map_err(Into::into) } else { Err(sp_blockchain::Error::VersionInvalid( "Unsupported or invalid BabeApi version".to_string() @@ -647,6 +649,9 @@ where ) .map_err(|e| sp_consensus::Error::CannotSign( public.clone(), e.to_string(), + ))? + .ok_or_else(|| sp_consensus::Error::CannotSign( + public.clone(), "Could not find key in keystore.".into(), ))?; let signature: AuthoritySignature = signature.clone().try_into() .map_err(|_| sp_consensus::Error::InvalidSignature( @@ -846,8 +851,7 @@ impl BabeVerifier + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi - + BabeApi, + Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith, { @@ -871,7 +875,7 @@ where &block_id, block, inherent_data, - ).map_err(Error::Client)?; + ).map_err(Error::RuntimeApi)?; if !inherent_res.ok() { inherent_res @@ -934,7 +938,7 @@ where self.client .runtime_api() .generate_key_ownership_proof(block_id, slot, equivocation_proof.offender.clone()) - .map_err(Error::Client) + .map_err(Error::RuntimeApi) }; let parent_id = BlockId::Hash(*header.parent_hash()); @@ -957,7 +961,7 @@ where equivocation_proof, key_owner_proof, ) - .map_err(Error::Client)?; + .map_err(Error::RuntimeApi)?; info!(target: "babe", "Submitted equivocation report for author {:?}", author); @@ -971,7 +975,7 @@ where Block: BlockT, Client: 
HeaderMetadata + HeaderBackend + ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, - Client::Api: BlockBuilderApi + BabeApi, + Client::Api: BlockBuilderApi + BabeApi, SelectChain: sp_consensus::SelectChain, CAW: CanAuthorWith + Send + Sync, { @@ -1490,7 +1494,7 @@ pub fn import_queue( client: Arc, select_chain: SelectChain, inherent_data_providers: InherentDataProviders, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, can_author_with: CAW, ) -> ClientResult> where @@ -1498,7 +1502,7 @@ pub fn import_queue( + Send + Sync + 'static, Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, Client: HeaderBackend + HeaderMetadata, - Client::Api: BlockBuilderApi + BabeApi + ApiExt, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, SelectChain: sp_consensus::SelectChain + 'static, CAW: CanAuthorWith + Send + Sync + 'static, { diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 9d03a3266d..a33a509ddc 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -32,11 +32,10 @@ use sp_consensus_babe::{AuthorityPair, Slot, AllowedSlots, make_transcript, make use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, AlwaysCanAuthor, + NoNetwork as DummyOracle, Proposal, DisableProofRecording, AlwaysCanAuthor, import_queue::{BoxBlockImport, BoxJustificationImport}, }; -use sc_network_test::*; -use sc_network_test::{Block as TestBlock, PeersClient}; +use sc_network_test::{Block as TestBlock, *}; use sc_network::config::ProtocolConfig; use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; use sc_client_api::{BlockchainEvents, backend::TransactionFor}; @@ -44,8 +43,7 @@ use log::debug; use std::{time::Duration, cell::RefCell, task::Poll}; use 
rand::RngCore; use rand_chacha::{ - rand_core::SeedableRng, - ChaChaRng, + rand_core::SeedableRng, ChaChaRng, }; use sc_keystore::LocalKeystore; use sp_application_crypto::key_types::BABE; @@ -112,7 +110,8 @@ impl DummyProposer { Result< Proposal< TestBlock, - sc_client_api::TransactionFor + sc_client_api::TransactionFor, + () >, Error > @@ -163,21 +162,22 @@ impl DummyProposer { // mutate the block header according to the mutator. (self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) } } impl Proposer for DummyProposer { type Error = Error; type Transaction = sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; + type Proposal = future::Ready, Error>>; + type ProofRecording = DisableProofRecording; + type Proof = (); fn propose( mut self, _: InherentData, pre_digests: DigestFor, _: Duration, - _: RecordProof, ) -> Self::Proposal { self.propose_with(pre_digests) } diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index fb1ca629f6..247a8d9091 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -73,7 +73,7 @@ impl BabeConsensusDataProvider where B: BlockT, C: AuxStore + HeaderBackend + ProvideRuntimeApi + HeaderMetadata, - C::Api: BabeApi, + C::Api: BabeApi, { pub fn new( client: Arc, @@ -131,7 +131,7 @@ impl ConsensusDataProvider for BabeConsensusDataProvider where B: BlockT, C: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - C::Api: BabeApi, + C::Api: BabeApi, { type Transaction = TransactionFor; @@ -259,7 +259,7 @@ impl SlotTimestampProvider { where B: BlockT, C: AuxStore + HeaderBackend + ProvideRuntimeApi, - C::Api: BabeApi, + C::Api: BabeApi, { let slot_duration = 
Config::get_or_compute(&*client)?.slot_duration; let info = client.info(); diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 3ec6858857..320f196c10 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -73,7 +73,7 @@ impl Verifier for ManualSealVerifier { /// Instantiate the import queue for the manual seal consensus engine. pub fn import_queue( block_import: BoxBlockImport, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, ) -> BasicQueue where diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index 59b99349bf..2176973f3a 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -123,8 +123,11 @@ pub async fn seal_block( Default::default() }; - let proposal = proposer.propose(id.clone(), digest, Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) - .map_err(|err| Error::StringError(format!("{:?}", err))).await?; + let proposal = proposer.propose( + id.clone(), + digest, + Duration::from_secs(MAX_PROPOSAL_DURATION), + ).map_err(|err| Error::StringError(format!("{:?}", err))).await?; if proposal.block.extrinsics().len() == inherents_len && !create_empty { return Err(Error::EmptyTransactionPool) diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 975a6f17e7..19f339cf10 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -52,8 +52,7 @@ use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{InherentDataProviders, InherentData}; use sp_consensus::{ BlockImportParams, BlockOrigin, ForkChoiceStrategy, SyncOracle, Environment, Proposer, - SelectChain, Error as ConsensusError, CanAuthorWith, RecordProof, BlockImport, - BlockCheckParams, ImportResult, + SelectChain, Error as 
ConsensusError, CanAuthorWith, BlockImport, BlockCheckParams, ImportResult, }; use sp_consensus::import_queue::{ BoxBlockImport, BasicQueue, Verifier, BoxJustificationImport, @@ -232,7 +231,7 @@ impl PowBlockImport wher I: BlockImport> + Send + Sync, I::Error: Into, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, + C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, CAW: CanAuthorWith, { @@ -284,7 +283,7 @@ impl PowBlockImport wher &block_id, block, inherent_data, - ).map_err(Error::Client)?; + ).map_err(|e| Error::Client(e.into()))?; if !inherent_res.ok() { inherent_res @@ -314,7 +313,7 @@ impl BlockImport for PowBlockImport, S: SelectChain, C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, + C::Api: BlockBuilderApi, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static, CAW: CanAuthorWith, @@ -505,7 +504,7 @@ pub fn import_queue( justification_import: Option>, algorithm: Algorithm, inherent_data_providers: InherentDataProviders, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, registry: Option<&Registry>, ) -> Result< PowImportQueue, @@ -549,7 +548,10 @@ pub fn start_mining_worker( timeout: Duration, build_time: Duration, can_author_with: CAW, -) -> (Arc>>, impl Future) where +) -> ( + Arc>::Proof>>>, + impl Future, +) where Block: BlockT, C: ProvideRuntimeApi + BlockchainEvents + 'static, S: SelectChain + 'static, @@ -566,7 +568,7 @@ pub fn start_mining_worker( } let timer = UntilImportedOrTimeout::new(client.import_notification_stream(), timeout); - let worker = Arc::new(Mutex::new(MiningWorker:: { + let worker = Arc::new(Mutex::new(MiningWorker:: { build: None, algorithm: algorithm.clone(), block_import, @@ -664,7 +666,6 @@ pub fn start_mining_worker( inherent_data, inherent_digest, build_time.clone(), - RecordProof::No, ).await { Ok(x) => x, Err(err) => { @@ -678,7 +679,7 @@ pub 
fn start_mining_worker( }, }; - let build = MiningBuild:: { + let build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index c19c5524d9..d64596e48c 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -40,21 +40,31 @@ pub struct MiningMetadata { } /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild, C: sp_api::ProvideRuntimeApi> { +pub struct MiningBuild< + Block: BlockT, + Algorithm: PowAlgorithm, + C: sp_api::ProvideRuntimeApi, + Proof +> { /// Mining metadata. pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal>, + pub proposal: Proposal, Proof>, } /// Mining worker that exposes structs to query the current mining build and submit mined blocks. -pub struct MiningWorker, C: sp_api::ProvideRuntimeApi> { - pub(crate) build: Option>, +pub struct MiningWorker< + Block: BlockT, + Algorithm: PowAlgorithm, + C: sp_api::ProvideRuntimeApi, + Proof +> { + pub(crate) build: Option>, pub(crate) algorithm: Algorithm, pub(crate) block_import: BoxBlockImport>, } -impl MiningWorker where +impl MiningWorker where Block: BlockT, C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, @@ -72,7 +82,7 @@ impl MiningWorker where pub(crate) fn on_build( &mut self, - build: MiningBuild, + build: MiningBuild, ) { self.build = Some(build); } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index d851753921..564d5c28c5 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -40,7 +40,7 @@ use log::{debug, error, info, warn}; use parking_lot::Mutex; use sp_api::{ProvideRuntimeApi, ApiRef}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; +use sp_consensus::{BlockImport, Proposer, 
SyncOracle, SelectChain, CanAuthorWith, SlotData}; use sp_consensus_slots::Slot; use sp_inherents::{InherentData, InherentDataProviders}; use sp_runtime::{ @@ -57,28 +57,27 @@ pub type StorageChanges = /// The result of [`SlotWorker::on_slot`]. #[derive(Debug, Clone)] -pub struct SlotResult { +pub struct SlotResult { /// The block that was built. pub block: Block, - /// The optional storage proof that was calculated while building the block. - /// - /// This needs to be enabled for the proposer to get this storage proof. - pub storage_proof: Option, + /// The storage proof that was recorded while building the block. + pub storage_proof: Proof, } /// A worker that should be invoked at every new slot. /// /// The implementation should not make any assumptions of the slot being bound to the time or /// similar. The only valid assumption is that the slot number is always increasing. -pub trait SlotWorker { - /// The type of the future that will be returned when a new slot is triggered. - type OnSlot: Future>>; - +pub trait SlotWorker { /// Called when a new slot is triggered. /// /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in /// the slot. Otherwise `None` is returned. 
- fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot; + fn on_slot( + &mut self, + chain_head: B::Header, + slot_info: SlotInfo, + ) -> Pin>> + Send>>; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at @@ -205,7 +204,7 @@ pub trait SimpleSlotWorker { &mut self, chain_head: B::Header, slot_info: SlotInfo, - ) -> Pin>> + Send>> + ) -> Pin>::Proof>>> + Send>> where >::Proposal: Unpin + Send + 'static, { @@ -306,7 +305,6 @@ pub trait SimpleSlotWorker { logs, }, slot_remaining_duration, - RecordProof::No, ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); let proposal_work = @@ -333,7 +331,7 @@ pub trait SimpleSlotWorker { proposal_work.and_then(move |(proposal, claim)| async move { let (block, storage_proof) = (proposal.block, proposal.proof); - let (header, body) = block.clone().deconstruct(); + let (header, body) = block.deconstruct(); let header_num = *header.number(); let header_hash = header.hash(); let parent_hash = *header.parent_hash(); @@ -341,7 +339,7 @@ pub trait SimpleSlotWorker { let block_import_params = block_import_params_maker( header, &header_hash, - body, + body.clone(), proposal.storage_changes, claim, epoch_data, @@ -360,6 +358,7 @@ pub trait SimpleSlotWorker { "hash_previously" => ?header_hash, ); + let header = block_import_params.post_header(); if let Err(err) = block_import.lock().import_block(block_import_params, Default::default()) { warn!( target: logging_target, @@ -375,17 +374,20 @@ pub trait SimpleSlotWorker { ); } - Ok(SlotResult { block, storage_proof }) + Ok(SlotResult { block: B::new(header, body), storage_proof }) }).then(|r| async move { r.map_err(|e| warn!(target: "slots", "Encountered consensus error: {:?}", e)).ok() }).boxed() } } -impl> SlotWorker for T { - type OnSlot = Pin>> + Send>>; - - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { +impl> SlotWorker>::Proof> for T +{ + fn on_slot( + &mut self, + chain_head: 
B::Header, + slot_info: SlotInfo, + ) -> Pin>::Proof>>> + Send>> { SimpleSlotWorker::on_slot(self, chain_head, slot_info) } } @@ -403,7 +405,7 @@ pub trait SlotCompatible { /// /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. -pub fn start_slot_worker( +pub fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, @@ -415,8 +417,7 @@ pub fn start_slot_worker( where B: BlockT, C: SelectChain, - W: SlotWorker, - W::OnSlot: Unpin, + W: SlotWorker, SO: SyncOracle + Send, SC: SlotCompatible + Unpin, T: SlotData + Clone, diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 0af148fd95..96329d1680 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -37,8 +37,8 @@ pub enum Error { #[error(transparent)] Wasmi(#[from] wasmi::Error), - #[error("API Error: {0}")] - ApiError(String), + #[error("Error calling api function: {0}")] + ApiError(Box), #[error("Method not found: '{0}'")] MethodNotFound(String), @@ -96,16 +96,16 @@ pub enum Error { #[error(transparent)] RuntimeConstruction(#[from] WasmError), - + #[error("Shared memory is not supported")] SharedMemUnsupported, - + #[error("Imported globals are not supported yet")] ImportedGlobalsUnsupported, - + #[error("initializer expression can have only up to 2 expressions in wasm 1.0")] InitializerHasTooManyExpressions, - + #[error("Invalid initializer expression provided {0}")] InvalidInitializerExpression(String), } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index cdfe349eda..42a7950593 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -456,7 +456,7 @@ impl CodeExecutor for NativeExecutor { fn call< R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, >( 
&self, ext: &mut dyn Externalities, @@ -514,7 +514,7 @@ impl CodeExecutor for NativeExecutor { let res = with_externalities_safe(&mut **ext, move || (call)()) .and_then(|r| r .map(NativeOrEncoded::Native) - .map_err(|s| Error::ApiError(s)) + .map_err(Error::ApiError) ); Ok(res) diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 4772471049..351a2b5f40 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -414,7 +414,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 1)]), + apis: sp_api::create_apis_vec!([(Core::::ID, 1)]), }; let version = decode_version(&old_runtime_version.encode()).unwrap(); @@ -429,7 +429,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), }; decode_version(&old_runtime_version.encode()).unwrap_err(); @@ -443,7 +443,7 @@ mod tests { authoring_version: 1, spec_version: 1, impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), transaction_version: 3, }; diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index ca3ea94f38..3557d543c9 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -1,11 +1,10 @@ [package] description = "A request-response protocol for handling grandpa warp sync requests" name = "sc-finality-grandpa-warp-sync" -version = "0.8.0" +version = "0.9.0" license = "GPL-3.0-or-later WITH Classpath-exception-2.0" authors = ["Parity Technologies "] edition = "2018" -publish = false homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" @@ -13,16 +12,25 @@ repository = "https://github.com/paritytech/substrate/" targets = ["x86_64-unknown-linux-gnu"] 
[dependencies] -sc-network = { version = "0.9.0", path = "../network" } -sc-finality-grandpa = { version = "0.9.0", path = "../finality-grandpa" } -sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } -sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } -sc-client-api = { version = "3.0.0", path = "../api" } -sc-service = { version = "0.9.0", path = "../service" } +codec = { package = "parity-scale-codec", version = "2.0.0" } +derive_more = "0.99.11" futures = "0.3.8" log = "0.4.11" -derive_more = "0.99.11" -codec = { package = "parity-scale-codec", version = "2.0.0" } -prost = "0.7" num-traits = "0.2.14" parking_lot = "0.11.1" +prost = "0.7" +sc-client-api = { version = "3.0.0", path = "../api" } +sc-finality-grandpa = { version = "0.9.0", path = "../finality-grandpa" } +sc-network = { version = "0.9.0", path = "../network" } +sc-service = { version = "0.9.0", path = "../service" } +sp-blockchain = { version = "3.0.0", path = "../../primitives/blockchain" } +sp-finality-grandpa = { version = "3.0.0", path = "../../primitives/finality-grandpa" } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" } + +[dev-dependencies] +finality-grandpa = { version = "0.14.0" } +rand = "0.8" +sc-block-builder = { version = "0.9.0", path = "../block-builder" } +sp-consensus = { version = "0.9.0", path = "../../primitives/consensus/common" } +sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/finality-grandpa-warp-sync/src/lib.rs b/client/finality-grandpa-warp-sync/src/lib.rs index f7ce59b1c1..54d06650bc 100644 --- a/client/finality-grandpa-warp-sync/src/lib.rs +++ b/client/finality-grandpa-warp-sync/src/lib.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Helper for handling (i.e. 
answering) grandpa warp sync requests from a remote peer via the -//! [`crate::request_responses::RequestResponsesBehaviour`]. +//! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. -use codec::Decode; +use codec::{Decode, Encode}; use sc_network::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; use sc_client_api::Backend; use sp_runtime::traits::NumberFor; @@ -28,13 +27,18 @@ use sp_runtime::traits::Block as BlockT; use std::time::Duration; use std::sync::Arc; use sc_service::{SpawnTaskHandle, config::{Configuration, Role}}; -use sc_finality_grandpa::WarpSyncFragmentCache; +use sc_finality_grandpa::SharedAuthoritySet; + +mod proof; + +pub use proof::{AuthoritySetChangeProof, WarpSyncProof}; /// Generates the appropriate [`RequestResponseConfig`] for a given chain configuration. pub fn request_response_config_for_chain + 'static>( config: &Configuration, spawn_handle: SpawnTaskHandle, backend: Arc, + authority_set: SharedAuthoritySet>, ) -> RequestResponseConfig where NumberFor: sc_finality_grandpa::BlockNumberOps, { @@ -48,6 +52,7 @@ pub fn request_response_config_for_chain String { s } -#[derive(codec::Decode)] +#[derive(Decode)] struct Request { - begin: B::Hash + begin: B::Hash, } -/// Setting a large fragment limit, allowing client -/// to define it is possible. -const WARP_SYNC_FRAGMENTS_LIMIT: usize = 100; - -/// Number of item with justification in warp sync cache. -/// This should be customizable, but setting it to the max number of fragments -/// we return seems like a good idea until then. -const WARP_SYNC_CACHE_SIZE: usize = WARP_SYNC_FRAGMENTS_LIMIT; - /// Handler for incoming grandpa warp sync requests from a remote peer. 
pub struct GrandpaWarpSyncRequestHandler { backend: Arc, - cache: Arc>>, + authority_set: SharedAuthoritySet>, request_receiver: mpsc::Receiver, - _phantom: std::marker::PhantomData + _phantom: std::marker::PhantomData, } impl> GrandpaWarpSyncRequestHandler { /// Create a new [`GrandpaWarpSyncRequestHandler`]. - pub fn new(protocol_id: ProtocolId, backend: Arc) -> (Self, RequestResponseConfig) { + pub fn new( + protocol_id: ProtocolId, + backend: Arc, + authority_set: SharedAuthoritySet>, + ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); let mut request_response_config = generate_request_response_config(protocol_id); request_response_config.inbound_queue = Some(tx); - let cache = Arc::new(parking_lot::RwLock::new(WarpSyncFragmentCache::new(WARP_SYNC_CACHE_SIZE))); - (Self { backend, request_receiver, cache, _phantom: std::marker::PhantomData }, request_response_config) + ( + Self { + backend, + request_receiver, + _phantom: std::marker::PhantomData, + authority_set, + }, + request_response_config, + ) } fn handle_request( @@ -119,13 +126,14 @@ impl> GrandpaWarpSyncRequestHandler::decode(&mut &payload[..])?; - let mut cache = self.cache.write(); - let response = sc_finality_grandpa::prove_warp_sync( - self.backend.blockchain(), request.begin, Some(WARP_SYNC_FRAGMENTS_LIMIT), Some(&mut cache) + let proof = WarpSyncProof::generate( + self.backend.blockchain(), + request.begin, + &self.authority_set.authority_set_changes(), )?; pending_response.send(OutgoingResponse { - result: Ok(response), + result: Ok(proof.encode()), reputation_changes: Vec::new(), }).map_err(|_| HandleRequestError::SendResponse) } @@ -149,8 +157,8 @@ impl> GrandpaWarpSyncRequestHandler. 
+ +use codec::{Decode, Encode}; + +use sc_finality_grandpa::{ + find_scheduled_change, AuthoritySetChanges, BlockNumberOps, GrandpaJustification, +}; +use sp_blockchain::Backend as BlockchainBackend; +use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, NumberFor}, +}; + +use crate::HandleRequestError; + +/// The maximum number of authority set change proofs to include in a single warp sync proof. +const MAX_CHANGES_PER_WARP_SYNC_PROOF: usize = 256; + +/// A proof of an authority set change. +#[derive(Decode, Encode)] +pub struct AuthoritySetChangeProof { + /// The last block that the given authority set finalized. This block should contain a digest + /// signaling an authority set change from which we can fetch the next authority set. + pub header: Block::Header, + /// A justification for the header above which proves its finality. In order to validate it the + /// verifier must be aware of the authorities and set id for which the justification refers to. + pub justification: GrandpaJustification, +} + +/// An accumulated proof of multiple authority set changes. +#[derive(Decode, Encode)] +pub struct WarpSyncProof { + proofs: Vec>, + is_finished: bool, +} + +impl WarpSyncProof { + /// Generates a warp sync proof starting at the given block. It will generate authority set + /// change proofs for all changes that happened from `begin` until the current authority set + /// (capped by MAX_CHANGES_PER_WARP_SYNC_PROOF). + pub fn generate( + backend: &Backend, + begin: Block::Hash, + set_changes: &AuthoritySetChanges>, + ) -> Result, HandleRequestError> + where + Backend: BlockchainBackend, + { + // TODO: cache best response (i.e. the one with lowest begin_number) + + let begin_number = backend + .block_number_from_id(&BlockId::Hash(begin))? 
+ .ok_or_else(|| HandleRequestError::InvalidRequest("Missing start block".to_string()))?; + + if begin_number > backend.info().finalized_number { + return Err(HandleRequestError::InvalidRequest( + "Start block is not finalized".to_string(), + )); + } + + let canon_hash = backend.hash(begin_number)?.expect( + "begin number is lower than finalized number; \ + all blocks below finalized number must have been imported; \ + qed.", + ); + + if canon_hash != begin { + return Err(HandleRequestError::InvalidRequest( + "Start block is not in the finalized chain".to_string(), + )); + } + + let mut proofs = Vec::new(); + + let mut proof_limit_reached = false; + + for (_, last_block) in set_changes.iter_from(begin_number) { + if proofs.len() >= MAX_CHANGES_PER_WARP_SYNC_PROOF { + proof_limit_reached = true; + break; + } + + let header = backend.header(BlockId::Number(*last_block))?.expect( + "header number comes from previously applied set changes; must exist in db; qed.", + ); + + // the last block in a set is the one that triggers a change to the next set, + // therefore the block must have a digest that signals the authority set change + if find_scheduled_change::(&header).is_none() { + // if it doesn't contain a signal for standard change then the set must have changed + // through a forced changed, in which case we stop collecting proofs as the chain of + // trust in authority handoffs was broken. + break; + } + + let justification = backend.justification(BlockId::Number(*last_block))?.expect( + "header is last in set and contains standard change signal; \ + must have justification; \ + qed.", + ); + + let justification = GrandpaJustification::::decode(&mut &justification[..])?; + + proofs.push(AuthoritySetChangeProof { + header: header.clone(), + justification, + }); + } + + Ok(WarpSyncProof { + proofs, + is_finished: !proof_limit_reached, + }) + } + + /// Verifies the warp sync proof starting at the given set id and with the given authorities. 
+ /// If the proof is valid the new set id and authorities is returned. + pub fn verify( + &self, + set_id: SetId, + authorities: AuthorityList, + ) -> Result<(SetId, AuthorityList), HandleRequestError> + where + NumberFor: BlockNumberOps, + { + let mut current_set_id = set_id; + let mut current_authorities = authorities; + + for proof in &self.proofs { + proof + .justification + .verify(current_set_id, ¤t_authorities) + .map_err(|err| HandleRequestError::InvalidProof(err.to_string()))?; + + let scheduled_change = find_scheduled_change::(&proof.header).ok_or( + HandleRequestError::InvalidProof( + "Header is missing authority set change digest".to_string(), + ), + )?; + + current_authorities = scheduled_change.next_authorities; + current_set_id += 1; + } + + Ok((current_set_id, current_authorities)) + } +} + +#[cfg(test)] +mod tests { + use crate::WarpSyncProof; + use codec::Encode; + use rand::prelude::*; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::Backend; + use sc_finality_grandpa::{AuthoritySetChanges, GrandpaJustification}; + use sp_blockchain::HeaderBackend; + use sp_consensus::BlockOrigin; + use sp_keyring::Ed25519Keyring; + use sp_runtime::{generic::BlockId, traits::Header as _}; + use std::sync::Arc; + use substrate_test_runtime_client::{ + ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + #[test] + fn warp_sync_proof_generate_verify() { + let mut rng = rand::rngs::StdRng::from_seed([0; 32]); + let builder = TestClientBuilder::new(); + let backend = builder.backend(); + let mut client = Arc::new(builder.build()); + + let available_authorities = Ed25519Keyring::iter().collect::>(); + let genesis_authorities = vec![(Ed25519Keyring::Alice.public().into(), 1)]; + + let mut current_authorities = vec![Ed25519Keyring::Alice]; + let mut current_set_id = 0; + let mut authority_set_changes = Vec::new(); + + for n in 1..=100 { + let mut block = client + 
.new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + let mut new_authorities = None; + + // we will trigger an authority set change every 10 blocks + if n != 0 && n % 10 == 0 { + // pick next authorities and add digest for the set change + let n_authorities = rng.gen_range(1..available_authorities.len()); + let next_authorities = available_authorities + .choose_multiple(&mut rng, n_authorities) + .cloned() + .collect::>(); + + new_authorities = Some(next_authorities.clone()); + + let next_authorities = next_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + let digest = sp_runtime::generic::DigestItem::Consensus( + sp_finality_grandpa::GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ScheduledChange( + sp_finality_grandpa::ScheduledChange { + delay: 0u64, + next_authorities, + }, + ) + .encode(), + ); + + block.header.digest_mut().logs.push(digest); + } + + client.import(BlockOrigin::Own, block).unwrap(); + + if let Some(new_authorities) = new_authorities { + // generate a justification for this block, finalize it and note the authority set + // change + let (target_hash, target_number) = { + let info = client.info(); + (info.best_hash, info.best_number) + }; + + let mut precommits = Vec::new(); + for keyring in ¤t_authorities { + let precommit = finality_grandpa::Precommit { + target_hash, + target_number, + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = sp_finality_grandpa::localized_payload(42, current_set_id, &msg); + let signature = keyring.sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: keyring.public().into(), + }; + + precommits.push(precommit); + } + + let commit = finality_grandpa::Commit { + target_hash, + target_number, + precommits, + }; + + let justification = GrandpaJustification::from_commit(&client, 42, commit).unwrap(); + + client + 
.finalize_block(BlockId::Hash(target_hash), Some(justification.encode())) + .unwrap(); + + authority_set_changes.push((current_set_id, n)); + + current_set_id += 1; + current_authorities = new_authorities; + } + } + + let authority_set_changes = AuthoritySetChanges::from(authority_set_changes); + + // generate a warp sync proof + let genesis_hash = client.hash(0).unwrap().unwrap(); + + let warp_sync_proof = + WarpSyncProof::generate(backend.blockchain(), genesis_hash, &authority_set_changes) + .unwrap(); + + // verifying the proof should yield the last set id and authorities + let (new_set_id, new_authorities) = warp_sync_proof.verify(0, genesis_authorities).unwrap(); + + let expected_authorities = current_authorities + .iter() + .map(|keyring| (keyring.public().into(), 1)) + .collect::>(); + + assert_eq!(new_set_id, current_set_id); + assert_eq!(new_authorities, expected_authorities); + } +} diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 38f6acda05..7ae5666c7b 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] derive_more = "0.99.2" +dyn-clone = "1.0" fork-tree = { version = "3.0.0", path = "../../utils/fork-tree" } futures = "0.3.9" futures-timer = "3.0.1" @@ -43,13 +44,13 @@ sc-network-gossip = { version = "0.9.0", path = "../network-gossip" } sp-finality-grandpa = { version = "3.0.0", path = "../../primitives/finality-grandpa" } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.9.0"} sc-block-builder = { version = "0.9.0", path = "../block-builder" } -finality-grandpa = { version = "0.13.0", features = ["derive-codec"] } +finality-grandpa = { version = "0.14.0", features = ["derive-codec"] } pin-project = "1.0.4" linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" -finality-grandpa = { version = "0.13.0", features = 
["derive-codec", "test-helpers"] } +finality-grandpa = { version = "0.14.0", features = ["derive-codec", "test-helpers"] } sc-network = { version = "0.9.0", path = "../network" } sc-network-test = { version = "0.8.0", path = "../network/test" } sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } @@ -59,4 +60,3 @@ sp-state-machine = { version = "0.9.0", path = "../../primitives/state-machine" sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "3.0.0", path = "../../primitives/api" } diff --git a/client/finality-grandpa/rpc/Cargo.toml b/client/finality-grandpa/rpc/Cargo.toml index 5b67ad9514..fc420ad754 100644 --- a/client/finality-grandpa/rpc/Cargo.toml +++ b/client/finality-grandpa/rpc/Cargo.toml @@ -15,7 +15,7 @@ sp-blockchain = { version = "3.0.0", path = "../../../primitives/blockchain" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-utils = { version = "3.0.0", path = "../../../primitives/utils" } -finality-grandpa = { version = "0.13.0", features = ["derive-codec"] } +finality-grandpa = { version = "0.14.0", features = ["derive-codec"] } jsonrpc-core = "15.1.0" jsonrpc-core-client = "15.1.0" jsonrpc-derive = "15.1.0" diff --git a/client/finality-grandpa/rpc/src/lib.rs b/client/finality-grandpa/rpc/src/lib.rs index f539a28624..6402e96efa 100644 --- a/client/finality-grandpa/rpc/src/lib.rs +++ b/client/finality-grandpa/rpc/src/lib.rs @@ -213,13 +213,12 @@ mod tests { use jsonrpc_core::{Notification, Output, types::Params}; use parity_scale_codec::{Encode, Decode}; - use sc_block_builder::BlockBuilder; + use sc_block_builder::{BlockBuilder, RecordProof}; use sc_finality_grandpa::{ report, AuthorityId, GrandpaJustificationSender, GrandpaJustification, FinalityProof, }; use sp_blockchain::HeaderBackend; - use sp_consensus::RecordProof; use 
sp_core::crypto::Public; use sp_keyring::Ed25519Keyring; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; @@ -325,7 +324,7 @@ mod tests { ) where VoterState: ReportVoterState + Send + Sync + 'static, { - setup_io_handler_with_finality_proofs(voter_state, None, deny_unsafe) + setup_io_handler_with_finality_proofs(voter_state, Default::default(), deny_unsafe) } fn setup_io_handler_with_finality_proofs( @@ -484,7 +483,7 @@ mod tests { &*client, client.info().best_hash, client.info().best_number, - RecordProof::Yes, + RecordProof::No, Default::default(), &*backend, ).unwrap().build().unwrap(); diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 067f6dfc1a..36684f2e1a 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -468,7 +468,6 @@ where let mut authority_set_changes = self.authority_set_changes.clone(); authority_set_changes.append(self.set_id, median_last_finalized.clone()); - new_set = Some(( median_last_finalized, AuthoritySet { @@ -650,10 +649,17 @@ impl + Clone> PendingChange { } } -// Tracks historical authority set changes. We store the block numbers for the first block of each -// authority set, once they have been finalized. +/// Tracks historical authority set changes. We store the block numbers for the last block +/// of each authority set, once they have been finalized. These blocks are guaranteed to +/// have a justification unless they were triggered by a forced change. 
#[derive(Debug, Encode, Decode, Clone, PartialEq)] -pub struct AuthoritySetChanges(pub Vec<(u64, N)>); +pub struct AuthoritySetChanges(Vec<(u64, N)>); + +impl From> for AuthoritySetChanges { + fn from(changes: Vec<(u64, N)>) -> AuthoritySetChanges { + AuthoritySetChanges(changes) + } +} impl AuthoritySetChanges { pub(crate) fn empty() -> Self { @@ -668,6 +674,7 @@ impl AuthoritySetChanges { let idx = self.0 .binary_search_by_key(&block_number, |(_, n)| n.clone()) .unwrap_or_else(|b| b); + if idx < self.0.len() { let (set_id, block_number) = self.0[idx].clone(); // To make sure we have the right set we need to check that the one before it also exists. @@ -687,6 +694,19 @@ impl AuthoritySetChanges { None } } + + /// Returns an iterator over all historical authority set changes starting at the given block + /// number (excluded). The iterator yields a tuple representing the set id and the block number + /// of the last block in that set. + pub fn iter_from(&self, block_number: N) -> impl Iterator { + let idx = self.0.binary_search_by_key(&block_number, |(_, n)| n.clone()) + // if there was a change at the given block number then we should start on the next + // index since we want to exclude the current block number + .map(|n| n + 1) + .unwrap_or_else(|b| b); + + self.0[idx..].iter() + } } #[cfg(test)] @@ -1627,4 +1647,32 @@ mod tests { assert_eq!(authority_set_changes.get_set_id(42), Some((3, 81))); assert_eq!(authority_set_changes.get_set_id(141), None); } + + #[test] + fn iter_from_works() { + let mut authority_set_changes = AuthoritySetChanges::empty(); + authority_set_changes.append(1, 41); + authority_set_changes.append(2, 81); + authority_set_changes.append(3, 121); + + assert_eq!( + vec![(1, 41), (2, 81), (3, 121)], + authority_set_changes.iter_from(40).cloned().collect::>(), + ); + + assert_eq!( + vec![(2, 81), (3, 121)], + authority_set_changes.iter_from(41).cloned().collect::>(), + ); + + assert_eq!( + 0, + authority_set_changes.iter_from(121).count(), + ); 
+ + assert_eq!( + 0, + authority_set_changes.iter_from(200).count(), + ); + } } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 1e616f3fa3..9f5582e5ce 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -563,12 +563,10 @@ impl Peers { } fn authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. self.inner.iter().filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)).count() } fn non_authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. self.inner .iter() .filter(|(_, info)| matches!(info.roles, ObservedRole::Full | ObservedRole::Light)) @@ -665,8 +663,7 @@ impl CatchUpConfig { match self { CatchUpConfig::Disabled => false, CatchUpConfig::Enabled { only_from_authorities, .. } => match peer.roles { - ObservedRole::Authority | ObservedRole::OurSentry | - ObservedRole::OurGuardedAuthority => true, + ObservedRole::Authority => true, _ => !only_from_authorities } } @@ -1158,7 +1155,6 @@ impl Inner { } match peer.roles { - ObservedRole::OurGuardedAuthority | ObservedRole::OurSentry => true, ObservedRole::Authority => { let authorities = self.peers.authorities(); @@ -1214,7 +1210,6 @@ impl Inner { }; match peer.roles { - ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority => true, ObservedRole::Authority => { let authorities = self.peers.authorities(); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 5e4203b2a4..7925a674c2 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -480,7 +480,7 @@ where Block: BlockT, BE: Backend, C: crate::ClientForGrandpa, - C::Api: GrandpaApi, + C::Api: GrandpaApi, N: NetworkT, SC: SelectChain + 'static, { @@ -549,7 +549,7 @@ where 
authority_set.set_id, equivocation.offender().clone(), ) - .map_err(Error::Client)? + .map_err(Error::RuntimeApi)? { Some(proof) => proof, None => { @@ -571,7 +571,7 @@ where equivocation_proof, key_owner_proof, ) - .map_err(Error::Client)?; + .map_err(Error::RuntimeApi)?; Ok(()) } @@ -592,100 +592,6 @@ where fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { ancestry(&self.client, base, block) } - - fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // NOTE: when we finalize an authority set change through the sync protocol the voter is - // signaled asynchronously. therefore the voter could still vote in the next round - // before activating the new set. the `authority_set` is updated immediately thus we - // restrict the voter based on that. - if self.set_id != self.authority_set.set_id() { - return None; - } - - let base_header = match self.client.header(BlockId::Hash(block)).ok()? { - Some(h) => h, - None => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); - return None; - } - }; - - // we refuse to vote beyond the current limit number where transitions are scheduled to - // occur. - // once blocks are finalized that make that transition irrelevant or activate it, - // we will proceed onwards. most of the time there will be no pending transition. - // the limit, if any, is guaranteed to be higher than or equal to the given base number. - let limit = self.authority_set.current_limit(*base_header.number()); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); - - match self.select_chain.finality_target(block, None) { - Ok(Some(best_hash)) => { - let best_header = self.client.header(BlockId::Hash(best_hash)).ok()? 
- .expect("Header known to exist after `finality_target` call; qed"); - - // check if our vote is currently being limited due to a pending change - let limit = limit.filter(|limit| limit < best_header.number()); - let target; - - let target_header = if let Some(target_number) = limit { - let mut target_header = best_header.clone(); - - // walk backwards until we find the target block - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a known block; \ - blocks are stored contiguously; \ - qed" - ); - } - - if *target_header.number() == target_number { - break; - } - - target_header = self.client.header(BlockId::Hash(*target_header.parent_hash())).ok()? - .expect("Header known to exist after `finality_target` call; qed"); - } - - target = target_header; - &target - } else { - // otherwise just use the given best as the target - &best_header - }; - - // restrict vote according to the given voting rule, if the - // voting rule doesn't restrict the vote then we keep the - // previous target. - // - // note that we pass the original `best_header`, i.e. before the - // authority set limit filter, which can be considered a - // mandatory/implicit voting rule. - // - // we also make sure that the restricted vote is higher than the - // round base (i.e. last finalized), otherwise the value - // returned by the given voting rule is ignored and the original - // target is used instead. 
- self.voting_rule - .restrict_vote(&*self.client, &base_header, &best_header, target_header) - .filter(|(_, restricted_number)| { - // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() && - restricted_number < target_header.number() - }) - .or_else(|| Some((target_header.hash(), *target_header.number()))) - }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - } - Err(e) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); - None - } - } - } } @@ -726,13 +632,21 @@ where Block: 'static, B: Backend, C: crate::ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, N: NetworkT + 'static + Send + Sync, SC: SelectChain + 'static, VR: VotingRule, NumberFor: BlockNumberOps, { type Timer = Pin> + Send + Sync>>; + type BestChain = Pin< + Box< + dyn Future)>, Self::Error>> + + Send + + Sync + >, + >; + type Id = AuthorityId; type Signature = AuthoritySignature; @@ -747,6 +661,119 @@ where type Error = CommandOrError>; + fn best_chain_containing(&self, block: Block::Hash) -> Self::BestChain { + let find_best_chain = || { + // NOTE: when we finalize an authority set change through the sync protocol the voter is + // signaled asynchronously. therefore the voter could still vote in the next round + // before activating the new set. the `authority_set` is updated immediately thus we + // restrict the voter based on that. + if self.set_id != self.authority_set.set_id() { + return None; + } + + let base_header = match self.client.header(BlockId::Hash(block)).ok()? { + Some(h) => h, + None => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); + return None; + } + }; + + // we refuse to vote beyond the current limit number where transitions are scheduled to + // occur. 
+ // once blocks are finalized that make that transition irrelevant or activate it, + // we will proceed onwards. most of the time there will be no pending transition. + // the limit, if any, is guaranteed to be higher than or equal to the given base number. + let limit = self.authority_set.current_limit(*base_header.number()); + debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + + match self.select_chain.finality_target(block, None) { + Ok(Some(best_hash)) => { + let best_header = self + .client + .header(BlockId::Hash(best_hash)) + .ok()? + .expect("Header known to exist after `finality_target` call; qed"); + + // check if our vote is currently being limited due to a pending change + let limit = limit.filter(|limit| limit < best_header.number()); + + if let Some(target_number) = limit { + let mut target_header = best_header.clone(); + + // walk backwards until we find the target block + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ + blocks are stored contiguously; \ + qed" + ); + } + + if *target_header.number() == target_number { + break; + } + + target_header = self + .client + .header(BlockId::Hash(*target_header.parent_hash())) + .ok()? 
+ .expect("Header known to exist after `finality_target` call; qed"); + } + + Some((base_header, best_header, target_header)) + } else { + // otherwise just use the given best as the target + Some((base_header, best_header.clone(), best_header)) + } + } + Ok(None) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); + None + } + Err(e) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); + None + } + } + }; + + if let Some((base_header, best_header, target_header)) = find_best_chain() { + // restrict vote according to the given voting rule, if the + // voting rule doesn't restrict the vote then we keep the + // previous target. + // + // note that we pass the original `best_header`, i.e. before the + // authority set limit filter, which can be considered a + // mandatory/implicit voting rule. + // + // we also make sure that the restricted vote is higher than the + // round base (i.e. last finalized), otherwise the value + // returned by the given voting rule is ignored and the original + // target is used instead. 
+ let rule_fut = self.voting_rule.restrict_vote( + self.client.clone(), + &base_header, + &best_header, + &target_header, + ); + + Box::pin(async move { + Ok(rule_fut + .await + .filter(|(_, restricted_number)| { + // we can only restrict votes within the interval [base, target] + restricted_number >= base_header.number() + && restricted_number < target_header.number() + }) + .or_else(|| Some((target_header.hash(), *target_header.number())))) + }) + } else { + Box::pin(future::ok(None)) + } + } + fn round_data( &self, round: RoundNumber, diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index e1e424472f..c88faa2498 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -44,10 +44,10 @@ use parity_scale_codec::{Encode, Decode}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sp_runtime::{ Justification, generic::BlockId, - traits::{NumberFor, Block as BlockT, Header as HeaderT, Zero, One}, + traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, }; use sc_client_api::backend::Backend; -use sp_finality_grandpa::{AuthorityId, AuthorityList}; +use sp_finality_grandpa::AuthorityId; use crate::authorities::AuthoritySetChanges; use crate::justification::GrandpaJustification; @@ -151,23 +151,6 @@ pub enum FinalityProofError { Client(sp_blockchain::Error), } -/// Single fragment of authority set proof. -/// -/// Finality for block B is proved by providing: -/// 1) headers of this block; -/// 2) the justification for the block containing a authority set change digest; -#[derive(Debug, PartialEq, Clone, Encode, Decode)] -pub(crate) struct AuthoritySetProofFragment { - /// The header of the given block. - pub header: Header, - /// Justification of the block F. 
- pub justification: Vec, -} - -/// Proof of authority set is the ordered set of authority set fragments, where: -/// - last fragment match target block. -type AuthoritySetProof

= Vec>; - fn prove_finality( blockchain: &B, authority_set_changes: AuthoritySetChanges>, @@ -242,238 +225,6 @@ where )) } -/// Prepare authority proof for the best possible block starting at a given trusted block. -/// -/// Started block should be in range of bonding duration. -/// We only return proof for finalized blocks (with justification). -/// -/// It is assumed that the caller already have a proof-of-finality for the block 'begin'. -pub fn prove_warp_sync>( - blockchain: &B, - begin: Block::Hash, - max_fragment_limit: Option, - mut cache: Option<&mut WarpSyncFragmentCache>, -) -> ::sp_blockchain::Result> { - - let begin = BlockId::Hash(begin); - let begin_number = blockchain.block_number_from_id(&begin)? - .ok_or_else(|| ClientError::Backend("Missing start block".to_string()))?; - let end = BlockId::Hash(blockchain.last_finalized()?); - let end_number = blockchain.block_number_from_id(&end)? - // This error should not happen, we could also panic. - .ok_or_else(|| ClientError::Backend("Missing last finalized block".to_string()))?; - - if begin_number > end_number { - return Err(ClientError::Backend("Unfinalized start for authority proof".to_string())); - } - - let mut result = Vec::new(); - let mut last_apply = None; - - let header = blockchain.expect_header(begin)?; - let mut index = *header.number(); - - // Find previous change in case there is a delay. - // This operation is a costy and only for the delay corner case. - while index > Zero::zero() { - index = index - One::one(); - if let Some((fragment, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? 
{ - if last_apply.map(|next| &next > header.number()).unwrap_or(false) { - result.push(fragment); - last_apply = Some(apply_block); - } else { - break; - } - } - } - - let mut index = *header.number(); - while index <= end_number { - if max_fragment_limit.map(|limit| result.len() >= limit).unwrap_or(false) { - break; - } - - if let Some((fragement, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? { - if last_apply.map(|next| apply_block < next).unwrap_or(false) { - // Previous delayed will not apply, do not include it. - result.pop(); - } - result.push(fragement); - last_apply = Some(apply_block); - } - - index = index + One::one(); - } - - let at_limit = max_fragment_limit.map(|limit| result.len() >= limit).unwrap_or(false); - - // add last finalized block if reached and not already included. - if !at_limit && result.last().as_ref().map(|head| head.header.number()) != Some(&end_number) { - let header = blockchain.expect_header(end)?; - if let Some(justification) = blockchain.justification(BlockId::Number(end_number.clone()))? { - result.push(AuthoritySetProofFragment { - header: header.clone(), - justification, - }); - } else { - // no justification, don't include it. - } - } - - Ok(result.encode()) -} - -/// Try get a warp sync proof fragment a a given finalized block. -fn get_warp_sync_proof_fragment>( - blockchain: &B, - index: NumberFor, - cache: &mut Option<&mut WarpSyncFragmentCache>, -) -> sp_blockchain::Result, NumberFor)>> { - if let Some(cache) = cache.as_mut() { - if let Some(result) = cache.get_item(index) { - return Ok(result); - } - } - - let mut result = None; - let header = blockchain.expect_header(BlockId::number(index))?; - - if let Some((block_number, sp_finality_grandpa::ScheduledChange { - next_authorities: _, - delay, - })) = crate::import::find_forced_change::(&header) { - let dest = block_number + delay; - if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? 
{ - result = Some((AuthoritySetProofFragment { - header: header.clone(), - justification, - }, dest)); - } else { - return Err(ClientError::Backend("Unjustified block with authority set change".to_string())); - } - } - - if let Some(sp_finality_grandpa::ScheduledChange { - next_authorities: _, - delay, - }) = crate::import::find_scheduled_change::(&header) { - let dest = index + delay; - if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? { - result = Some((AuthoritySetProofFragment { - header: header.clone(), - justification, - }, dest)); - } else { - return Err(ClientError::Backend("Unjustified block with authority set change".to_string())); - } - } - - cache.as_mut().map(|cache| cache.new_item(index, result.clone())); - Ok(result) -} - -/// Check GRANDPA authority change sequence to assert finality of a target block. -/// -/// Returns the header of the target block. -#[allow(unused)] -pub(crate) fn check_warp_sync_proof( - current_set_id: u64, - current_authorities: AuthorityList, - remote_proof: Vec, -) -> ClientResult<(Block::Header, u64, AuthorityList)> -where - NumberFor: BlockNumberOps, - J: Decode + ProvableJustification + BlockJustification, -{ - // decode finality proof - let proof = AuthoritySetProof::::decode(&mut &remote_proof[..]) - .map_err(|_| ClientError::BadJustification("failed to decode authority proof".into()))?; - - let last = proof.len() - 1; - - let mut result = (current_set_id, current_authorities, NumberFor::::zero()); - - for (ix, fragment) in proof.into_iter().enumerate() { - let is_last = ix == last; - result = check_warp_sync_proof_fragment::( - result.0, - &result.1, - &result.2, - is_last, - &fragment, - )?; - - if is_last { - return Ok((fragment.header, result.0, result.1)) - } - } - - // empty proof can't prove anything - return Err(ClientError::BadJustification("empty proof of authority".into())); -} - -/// Check finality authority set sequence. 
-fn check_warp_sync_proof_fragment( - current_set_id: u64, - current_authorities: &AuthorityList, - previous_checked_block: &NumberFor, - is_last: bool, - authorities_proof: &AuthoritySetProofFragment, -) -> ClientResult<(u64, AuthorityList, NumberFor)> -where - NumberFor: BlockNumberOps, - J: Decode + ProvableJustification + BlockJustification, -{ - let justification: J = Decode::decode(&mut authorities_proof.justification.as_slice()) - .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(current_set_id, ¤t_authorities)?; - - // assert justification is for this header - if &justification.number() != authorities_proof.header.number() - || justification.hash().as_ref() != authorities_proof.header.hash().as_ref() { - return Err(ClientError::Backend("Invalid authority warp proof, justification do not match header".to_string())); - } - - if authorities_proof.header.number() <= previous_checked_block { - return Err(ClientError::Backend("Invalid authority warp proof".to_string())); - } - let current_block = authorities_proof.header.number(); - let mut at_block = None; - if let Some(sp_finality_grandpa::ScheduledChange { - next_authorities, - delay, - }) = crate::import::find_scheduled_change::(&authorities_proof.header) { - let dest = *current_block + delay; - at_block = Some((dest, next_authorities)); - } - if let Some((block_number, sp_finality_grandpa::ScheduledChange { - next_authorities, - delay, - })) = crate::import::find_forced_change::(&authorities_proof.header) { - let dest = block_number + delay; - at_block = Some((dest, next_authorities)); - } - - // Fragment without change only allowed for proof last block. 
- if at_block.is_none() && !is_last { - return Err(ClientError::Backend("Invalid authority warp proof".to_string())); - } - if let Some((at_block, next_authorities)) = at_block { - Ok((current_set_id + 1, next_authorities, at_block)) - } else { - Ok((current_set_id, current_authorities.clone(), current_block.clone())) - } -} - -/// Block info extracted from the justification. -pub(crate) trait BlockJustification { - /// Block number justified. - fn number(&self) -> Header::Number; - - /// Block hash justified. - fn hash(&self) -> Header::Hash; -} - /// Check GRANDPA proof-of-finality for the given block. /// /// Returns the vector of headers that MUST be validated + imported @@ -483,7 +234,7 @@ pub(crate) trait BlockJustification { #[cfg(test)] fn check_finality_proof( current_set_id: u64, - current_authorities: AuthorityList, + current_authorities: sp_finality_grandpa::AuthorityList, remote_proof: Vec, ) -> ClientResult> where @@ -529,70 +280,7 @@ where ClientError::Consensus(sp_consensus::Error::InvalidAuthoritiesSet), )?; - GrandpaJustification::verify(self, set_id, &authorities) - } -} - -impl BlockJustification for GrandpaJustification { - fn number(&self) -> NumberFor { - self.commit.target_number.clone() - } - fn hash(&self) -> Block::Hash { - self.commit.target_hash.clone() - } -} - -/// Simple cache for warp sync queries. -pub struct WarpSyncFragmentCache { - header_has_proof_fragment: std::collections::HashMap, - cache: linked_hash_map::LinkedHashMap< - Header::Number, - (AuthoritySetProofFragment
, Header::Number), - >, - limit: usize, -} - -impl WarpSyncFragmentCache
{ - /// Instantiate a new cache for the warp sync prover. - pub fn new(size: usize) -> Self { - WarpSyncFragmentCache { - header_has_proof_fragment: Default::default(), - cache: Default::default(), - limit: size, - } - } - - fn new_item( - &mut self, - at: Header::Number, - item: Option<(AuthoritySetProofFragment
, Header::Number)>, - ) { - self.header_has_proof_fragment.insert(at, item.is_some()); - - if let Some(item) = item { - if self.cache.len() == self.limit { - self.pop_one(); - } - - self.cache.insert(at, item); - } - } - - fn pop_one(&mut self) { - if let Some((header_number, _)) = self.cache.pop_front() { - self.header_has_proof_fragment.remove(&header_number); - } - } - - fn get_item( - &mut self, - block: Header::Number, - ) -> Option, Header::Number)>> { - match self.header_has_proof_fragment.get(&block) { - Some(true) => Some(self.cache.get_refresh(&block).cloned()), - Some(false) => Some(None), - None => None - } + GrandpaJustification::verify_with_voter_set(self, set_id, &authorities) } } @@ -624,15 +312,6 @@ pub(crate) mod tests { #[derive(Debug, PartialEq, Encode, Decode)] pub struct TestBlockJustification(TestJustification, u64, H256); - impl BlockJustification
for TestBlockJustification { - fn number(&self) ->
::Number { - self.1 - } - fn hash(&self) ->
::Hash { - self.2.clone() - } - } - impl ProvableJustification
for TestBlockJustification { fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { self.0.verify(set_id, authorities) @@ -826,161 +505,4 @@ pub(crate) mod tests { } ); } - - #[test] - fn warp_sync_proof_encoding_decoding() { - fn test_blockchain( - nb_blocks: u64, - mut set_change: &[(u64, Vec)], - mut justifications: &[(u64, Vec)], - ) -> (InMemoryBlockchain, Vec) { - let blockchain = InMemoryBlockchain::::new(); - let mut hashes = Vec::::new(); - let mut set_id = 0; - for i in 0..nb_blocks { - let mut set_id_next = set_id; - let mut header = header(i); - set_change.first() - .map(|j| if i == j.0 { - set_change = &set_change[1..]; - let next_authorities: Vec<_> = j.1.iter().map(|i| (AuthorityId::from_slice(&[*i; 32]), 1u64)).collect(); - set_id_next += 1; - header.digest_mut().logs.push( - sp_runtime::generic::DigestItem::Consensus( - sp_finality_grandpa::GRANDPA_ENGINE_ID, - sp_finality_grandpa::ConsensusLog::ScheduledChange( - sp_finality_grandpa::ScheduledChange { delay: 0u64, next_authorities } - ).encode(), - )); - }); - - if let Some(parent) = hashes.last() { - header.set_parent_hash(parent.clone()); - } - let header_hash = header.hash(); - - let justification = justifications.first() - .and_then(|j| if i == j.0 { - justifications = &justifications[1..]; - - let authority = j.1.iter().map(|j| - (AuthorityId::from_slice(&[*j; 32]), 1u64) - ).collect(); - let justification = TestBlockJustification( - TestJustification((set_id, authority), vec![i as u8]), - i, - header_hash, - ); - Some(justification.encode()) - } else { - None - }); - hashes.push(header_hash.clone()); - set_id = set_id_next; - - blockchain.insert(header_hash, header, justification, None, NewBlockState::Final) - .unwrap(); - } - (blockchain, hashes) - } - - let (blockchain, hashes) = test_blockchain( - 7, - vec![(3, vec![9])].as_slice(), - vec![ - (1, vec![1, 2, 3]), - (2, vec![1, 2, 3]), - (3, vec![1, 2, 3]), - (4, vec![9]), - (6, vec![9]), - ].as_slice(), - 
); - - // proof after set change - let mut cache = WarpSyncFragmentCache::new(5); - let proof_no_cache = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); - let proof = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); - assert_eq!(proof_no_cache, proof); - - let initial_authorities: Vec<_> = [1u8, 2, 3].iter().map(|i| - (AuthorityId::from_slice(&[*i; 32]), 1u64) - ).collect(); - - let authorities_next: Vec<_> = [9u8].iter().map(|i| - (AuthorityId::from_slice(&[*i; 32]), 1u64) - ).collect(); - - assert!(check_warp_sync_proof::( - 0, - initial_authorities.clone(), - proof.clone(), - ).is_err()); - assert!(check_warp_sync_proof::( - 0, - authorities_next.clone(), - proof.clone(), - ).is_err()); - assert!(check_warp_sync_proof::( - 1, - initial_authorities.clone(), - proof.clone(), - ).is_err()); - let ( - _header, - current_set_id, - current_set, - ) = check_warp_sync_proof::( - 1, - authorities_next.clone(), - proof.clone(), - ).unwrap(); - - assert_eq!(current_set_id, 1); - assert_eq!(current_set, authorities_next); - - // proof before set change - let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); - let ( - _header, - current_set_id, - current_set, - ) = check_warp_sync_proof::( - 0, - initial_authorities.clone(), - proof.clone(), - ).unwrap(); - - assert_eq!(current_set_id, 1); - assert_eq!(current_set, authorities_next); - - // two changes - let (blockchain, hashes) = test_blockchain( - 13, - vec![(3, vec![7]), (8, vec![9])].as_slice(), - vec![ - (1, vec![1, 2, 3]), - (2, vec![1, 2, 3]), - (3, vec![1, 2, 3]), - (4, vec![7]), - (6, vec![7]), - (8, vec![7]), // warning, requires a justification on change set - (10, vec![9]), - ].as_slice(), - ); - - // proof before set change - let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); - let ( - _header, - current_set_id, - current_set, - ) = check_warp_sync_proof::( - 0, - initial_authorities.clone(), - proof.clone(), - 
).unwrap(); - - assert_eq!(current_set_id, 2); - assert_eq!(current_set, authorities_next); - } } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index c598c03549..64e828f157 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -189,9 +189,11 @@ impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { } } -pub(crate) fn find_scheduled_change(header: &B::Header) - -> Option>> -{ +/// Checks the given header for a consensus digest signalling a **standard** scheduled change and +/// extracts it. +pub fn find_scheduled_change( + header: &B::Header, +) -> Option>> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); let filter_log = |log: ConsensusLog>| match log { @@ -204,9 +206,11 @@ pub(crate) fn find_scheduled_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -pub(crate) fn find_forced_change(header: &B::Header) - -> Option<(NumberFor, ScheduledChange>)> -{ +/// Checks the given header for a consensus digest signalling a **forced** scheduled change and +/// extracts it. 
+pub fn find_forced_change( + header: &B::Header, +) -> Option<(NumberFor, ScheduledChange>)> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); let filter_log = |log: ConsensusLog>| match log { @@ -670,6 +674,7 @@ where Error::Safety(error) => ConsensusError::ClientImport(error), Error::Signing(error) => ConsensusError::ClientImport(error), Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), + Error::RuntimeApi(error) => ConsensusError::ClientImport(error.to_string()), }); }, Ok(_) => { diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 9429acff06..69ca703860 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -19,15 +19,16 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use finality_grandpa::{voter_set::VoterSet, Error as GrandpaError}; +use parity_scale_codec::{Decode, Encode}; use sp_blockchain::{Error as ClientError, HeaderBackend}; -use parity_scale_codec::{Encode, Decode}; -use finality_grandpa::voter_set::VoterSet; -use finality_grandpa::{Error as GrandpaError}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, +}; -use crate::{Commit, Error}; +use crate::{AuthorityList, Commit, Error}; /// A GRANDPA justification for block finality, it includes a commit message and /// an ancestry proof including all headers routing all precommit target blocks @@ -105,12 +106,30 @@ impl GrandpaJustification { let msg = "invalid commit target in grandpa justification".to_string(); Err(ClientError::BadJustification(msg)) } else { - justification.verify(set_id, voters).map(|_| justification) + justification + .verify_with_voter_set(set_id, voters) + .map(|_| justification) } } /// Validate the commit and the votes' 
ancestry proofs. - pub(crate) fn verify(&self, set_id: u64, voters: &VoterSet) -> Result<(), ClientError> + pub fn verify(&self, set_id: u64, authorities: &AuthorityList) -> Result<(), ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + let voters = VoterSet::new(authorities.iter().cloned()).ok_or(ClientError::Consensus( + sp_consensus::Error::InvalidAuthoritiesSet, + ))?; + + self.verify_with_voter_set(set_id, &voters) + } + + /// Validate the commit and the votes' ancestry proofs. + pub(crate) fn verify_with_voter_set( + &self, + set_id: u64, + voters: &VoterSet, + ) -> Result<(), ClientError> where NumberFor: finality_grandpa::BlockNumberOps, { @@ -217,8 +236,4 @@ impl finality_grandpa::Chain> for A Ok(route) } - - fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - None - } } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 8266631176..e5535f46b6 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -121,16 +121,16 @@ mod observer; mod until_imported; mod voting_rule; -pub use authorities::{SharedAuthoritySet, AuthoritySet}; +pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; pub use finality_proof::{FinalityProof, FinalityProofProvider, FinalityProofError}; pub use notification::{GrandpaJustificationSender, GrandpaJustificationStream}; -pub use import::GrandpaBlockImport; +pub use import::{find_scheduled_change, find_forced_change, GrandpaBlockImport}; pub use justification::GrandpaJustification; pub use voting_rule::{ - BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder + BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRuleResult, + VotingRulesBuilder, }; pub use finality_grandpa::voter::report; -pub use finality_proof::{prove_warp_sync, WarpSyncFragmentCache}; use aux_schema::PersistentData; use environment::{Environment, 
VoterSetState}; @@ -295,6 +295,8 @@ pub enum Error { Safety(String), /// A timer failed to fire. Timer(io::Error), + /// A runtime api request failed. + RuntimeApi(sp_api::ApiError), } impl From for Error { @@ -705,7 +707,7 @@ where NumberFor: BlockNumberOps, DigestFor: Encode, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, { let GrandpaParams { mut config, @@ -831,7 +833,7 @@ where Block: BlockT, B: Backend + 'static, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, N: NetworkT + Sync, NumberFor: BlockNumberOps, SC: SelectChain + 'static, @@ -1054,7 +1056,7 @@ where NumberFor: BlockNumberOps, SC: SelectChain + 'static, C: ClientForGrandpa + 'static, - C::Api: GrandpaApi, + C::Api: GrandpaApi, VR: VotingRule + Clone + 'static, { type Output = Result<(), Error>; diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 3db0e7be48..e0aad72790 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -57,11 +57,6 @@ impl<'a, Block, Client> finality_grandpa::Chain> fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { environment::ancestry(&self.client, base, block) } - - fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // only used by voter - None - } } fn grandpa_observer( diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index b949818381..921b49db61 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -174,8 +174,6 @@ impl ProvideRuntimeApi for TestApi { sp_api::mock_impl_runtime_apis! 
{ impl GrandpaApi for RuntimeApi { - type Error = sp_blockchain::Error; - fn grandpa_authorities(&self) -> AuthorityList { self.inner.genesis_authorities.clone() } @@ -1357,7 +1355,7 @@ where #[test] fn grandpa_environment_respects_voting_rules() { - use finality_grandpa::Chain; + use finality_grandpa::voter::Environment; let peers = &[Ed25519Keyring::Alice]; let voters = make_ids(peers); @@ -1392,25 +1390,25 @@ fn grandpa_environment_respects_voting_rules() { // the unrestricted environment should just return the best block assert_eq!( - unrestricted_env.best_chain_containing( + futures::executor::block_on(unrestricted_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 21, ); // both the other environments should return block 16, which is 3/4 of the // way in the unfinalized chain assert_eq!( - three_quarters_env.best_chain_containing( + futures::executor::block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 16, ); assert_eq!( - default_env.best_chain_containing( + futures::executor::block_on(default_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 16, ); @@ -1419,18 +1417,18 @@ fn grandpa_environment_respects_voting_rules() { // the 3/4 environment should propose block 21 for voting assert_eq!( - three_quarters_env.best_chain_containing( + futures::executor::block_on(three_quarters_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 21, ); // while the default environment will always still make sure we don't vote // on the best block (2 behind) assert_eq!( - default_env.best_chain_containing( + futures::executor::block_on(default_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 19, ); @@ -1441,9 +1439,9 @@ fn grandpa_environment_respects_voting_rules() { // best block, 
there's a hard rule that we can't cast any votes lower than // the given base (#21). assert_eq!( - default_env.best_chain_containing( + futures::executor::block_on(default_env.best_chain_containing( peer.client().info().finalized_hash - ).unwrap().1, + )).unwrap().unwrap().1, 21, ); } diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index a861e79275..9b3fb9b328 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -22,14 +22,22 @@ //! restrictions that are taken into account by the GRANDPA environment when //! selecting a finality target to vote on. +use std::future::Future; use std::sync::Arc; +use std::pin::Pin; + +use dyn_clone::DynClone; use sc_client_api::blockchain::HeaderBackend; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; +/// A future returned by a `VotingRule` to restrict a given vote, if any restriction is necessary. +pub type VotingRuleResult = + Pin::Hash, NumberFor)>> + Send + Sync>>; + /// A trait for custom voting rules in GRANDPA. -pub trait VotingRule: Send + Sync where +pub trait VotingRule: DynClone + Send + Sync where Block: BlockT, B: HeaderBackend, { @@ -47,11 +55,11 @@ pub trait VotingRule: Send + Sync where /// execution of voting rules wherein `current_target <= best_target`. 
fn restrict_vote( &self, - backend: &B, + backend: Arc, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)>; + ) -> VotingRuleResult; } impl VotingRule for () where @@ -60,12 +68,12 @@ impl VotingRule for () where { fn restrict_vote( &self, - _backend: &B, + _backend: Arc, _base: &Block::Header, _best_target: &Block::Header, _current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - None + ) -> VotingRuleResult { + Box::pin(async { None }) } } @@ -80,15 +88,15 @@ impl VotingRule for BeforeBestBlockBy> wher { fn restrict_vote( &self, - backend: &B, + backend: Arc, _base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { + ) -> VotingRuleResult { use sp_arithmetic::traits::Saturating; if current_target.number().is_zero() { - return None; + return Box::pin(async { None }); } // find the target number restricted by this rule @@ -96,21 +104,24 @@ impl VotingRule for BeforeBestBlockBy> wher // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return None; + return Box::pin(async { None }); } + let current_target = current_target.clone(); + // find the block at the given target height - find_target( - backend, - target_number, - current_target, - ) + Box::pin(std::future::ready(find_target( + &*backend, + target_number.clone(), + ¤t_target, + ))) } } /// A custom voting rule that limits votes towards 3/4 of the unfinalized chain, /// using the given `base` and `best_target` to figure where the 3/4 target /// should fall. 
+#[derive(Clone)] pub struct ThreeQuartersOfTheUnfinalizedChain; impl VotingRule for ThreeQuartersOfTheUnfinalizedChain where @@ -119,11 +130,11 @@ impl VotingRule for ThreeQuartersOfTheUnfinalizedChain where { fn restrict_vote( &self, - backend: &B, + backend: Arc, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { + ) -> VotingRuleResult { // target a vote towards 3/4 of the unfinalized chain (rounding up) let target_number = { let two = NumberFor::::one() + One::one(); @@ -138,15 +149,15 @@ impl VotingRule for ThreeQuartersOfTheUnfinalizedChain where // our current target is already lower than this rule would restrict if target_number >= *current_target.number() { - return None; + return Box::pin(async { None }); } // find the block at the given target height - find_target( - backend, + Box::pin(std::future::ready(find_target( + &*backend, target_number, current_target, - ) + ))) } } @@ -195,37 +206,47 @@ impl Clone for VotingRules { impl VotingRule for VotingRules where Block: BlockT, - B: HeaderBackend, + B: HeaderBackend + 'static, { fn restrict_vote( &self, - backend: &B, + backend: Arc, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - let restricted_target = self.rules.iter().fold( - current_target.clone(), - |current_target, rule| { - rule.restrict_vote( - backend, - base, - best_target, - ¤t_target, - ) + ) -> VotingRuleResult { + let rules = self.rules.clone(); + let base = base.clone(); + let best_target = best_target.clone(); + let current_target = current_target.clone(); + + Box::pin(async move { + let mut restricted_target = current_target.clone(); + + for rule in rules.iter() { + if let Some(header) = rule + .restrict_vote(backend.clone(), &base, &best_target, &restricted_target) + .await + .filter(|(_, restricted_number)| { + // NOTE: we can only restrict votes within the interval [base, target) + 
restricted_number >= base.number() + && restricted_number < restricted_target.number() + }) .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) .and_then(std::convert::identity) - .unwrap_or(current_target) - }, - ); - - let restricted_hash = restricted_target.hash(); - - if restricted_hash != current_target.hash() { - Some((restricted_hash, *restricted_target.number())) - } else { - None - } + { + restricted_target = header; + } + } + + let restricted_hash = restricted_target.hash(); + + if restricted_hash != current_target.hash() { + Some((restricted_hash, *restricted_target.number())) + } else { + None + } + }) } } @@ -237,7 +258,7 @@ pub struct VotingRulesBuilder { impl Default for VotingRulesBuilder where Block: BlockT, - B: HeaderBackend, + B: HeaderBackend + 'static, { fn default() -> Self { VotingRulesBuilder::new() @@ -248,7 +269,7 @@ impl Default for VotingRulesBuilder where impl VotingRulesBuilder where Block: BlockT, - B: HeaderBackend, + B: HeaderBackend + 'static, { /// Return a new voting rule builder using the given backend. pub fn new() -> Self { @@ -285,14 +306,109 @@ impl VotingRulesBuilder where impl VotingRule for Box> where Block: BlockT, B: HeaderBackend, + Self: Clone, { fn restrict_vote( &self, - backend: &B, + backend: Arc, base: &Block::Header, best_target: &Block::Header, current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { + ) -> VotingRuleResult { (**self).restrict_vote(backend, base, best_target, current_target) } } + +#[cfg(test)] +mod tests { + use super::*; + use sc_block_builder::BlockBuilderProvider; + use sp_consensus::BlockOrigin; + use sp_runtime::traits::Header as _; + + use substrate_test_runtime_client::{ + runtime::{Block, Header}, + Backend, Client, ClientBlockImportExt, DefaultTestClientBuilderExt, TestClientBuilder, + TestClientBuilderExt, + }; + + /// A mock voting rule that subtracts a static number of block from the `current_target`. 
+ #[derive(Clone)] + struct Subtract(u64); + impl VotingRule> for Subtract { + fn restrict_vote( + &self, + backend: Arc>, + _base: &Header, + _best_target: &Header, + current_target: &Header, + ) -> VotingRuleResult { + let target_number = current_target.number() - self.0; + let res = backend + .hash(target_number) + .unwrap() + .map(|target_hash| (target_hash, target_number)); + + Box::pin(std::future::ready(res)) + } + } + + #[test] + fn multiple_voting_rules_cannot_restrict_past_base() { + // setup an aggregate voting rule composed of two voting rules + // where each subtracts 50 blocks from the current target + let rule = VotingRulesBuilder::new() + .add(Subtract(50)) + .add(Subtract(50)) + .build(); + + let mut client = Arc::new(TestClientBuilder::new().build()); + + for _ in 0..200 { + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + client.import(BlockOrigin::Own, block).unwrap(); + } + + let genesis = client + .header(&BlockId::Number(0u32.into())) + .unwrap() + .unwrap(); + + let best = client + .header(&BlockId::Hash(client.info().best_hash)) + .unwrap() + .unwrap(); + + let (_, number) = + futures::executor::block_on(rule.restrict_vote(client.clone(), &genesis, &best, &best)) + .unwrap(); + + // we apply both rules which should subtract 100 blocks from best block (#200) + // which means that we should be voting for block #100 + assert_eq!(number, 100); + + let block110 = client + .header(&BlockId::Number(110u32.into())) + .unwrap() + .unwrap(); + + let (_, number) = futures::executor::block_on(rule.restrict_vote( + client.clone(), + &block110, + &best, + &best, + )) + .unwrap(); + + // base block is #110 while best block is #200, applying both rules would make + // would make the target block (#100) be lower than the base block, therefore + // only one of the rules is applied. 
+ assert_eq!(number, 150); + } +} diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 9cad56efac..38ab640d2e 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -46,9 +46,6 @@ pub enum Error { /// Public key type is not supported #[display(fmt="Key crypto type is not supported")] KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - #[display(fmt="Pair not found for {} public key", "_0")] - PairNotFound(String), /// Keystore unavailable #[display(fmt="Keystore unavailable")] Unavailable, @@ -61,7 +58,6 @@ impl From for TraitError { fn from(error: Error) -> Self { match error { Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::PairNotFound(e) => TraitError::PairNotFound(e), Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => { TraitError::ValidationError(error.to_string()) }, diff --git a/client/keystore/src/local.rs b/client/keystore/src/local.rs index 866a50ae4c..482ef40760 100644 --- a/client/keystore/src/local.rs +++ b/client/keystore/src/local.rs @@ -60,9 +60,9 @@ impl LocalKeystore { /// Get a key pair for the given public key. /// - /// This function is only available for a local keystore. If your application plans to work with - /// remote keystores, you do not want to depend on it. - pub fn key_pair(&self, public: &::Public) -> Result { + /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists and + /// `Err(_)` when something failed. 
+ pub fn key_pair(&self, public: &::Public) -> Result> { self.0.read().key_pair::(public) } } @@ -130,7 +130,7 @@ impl CryptoStore for LocalKeystore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> std::result::Result, TraitError> { + ) -> std::result::Result>, TraitError> { SyncCryptoStore::sign_with(self, id, key, msg) } @@ -139,7 +139,7 @@ impl CryptoStore for LocalKeystore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> std::result::Result { + ) -> std::result::Result, TraitError> { SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) } } @@ -175,28 +175,28 @@ impl SyncCryptoStore for LocalKeystore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> std::result::Result, TraitError> { + ) -> std::result::Result>, TraitError> { match key.0 { ed25519::CRYPTO_ID => { let pub_key = ed25519::Public::from_slice(key.1.as_slice()); - let key_pair: ed25519::Pair = self.0.read() + let key_pair = self.0.read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() } sr25519::CRYPTO_ID => { let pub_key = sr25519::Public::from_slice(key.1.as_slice()); - let key_pair: sr25519::Pair = self.0.read() + let key_pair = self.0.read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() }, ecdsa::CRYPTO_ID => { let pub_key = ecdsa::Public::from_slice(key.1.as_slice()); - let key_pair: ecdsa::Pair = self.0.read() + let key_pair = self.0.read() .key_pair_by_type::(&pub_key, id) .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() } _ => Err(TraitError::KeyNotSupported(id)) } @@ -232,7 +232,7 @@ impl SyncCryptoStore for LocalKeystore { .map(|k| ed25519::Public::from_slice(k.as_slice())) .collect() }) 
- .unwrap_or_default() + .unwrap_or_default() } fn ed25519_generate_new( @@ -278,7 +278,8 @@ impl SyncCryptoStore for LocalKeystore { } fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(p, t)| self.0.read().key_phrase_by_type(&p, *t).is_ok()) + public_keys.iter() + .all(|(p, t)| self.0.read().key_phrase_by_type(&p, *t).ok().flatten().is_some()) } fn sr25519_vrf_sign( @@ -286,16 +287,19 @@ impl SyncCryptoStore for LocalKeystore { key_type: KeyTypeId, public: &Sr25519Public, transcript_data: VRFTranscriptData, - ) -> std::result::Result { + ) -> std::result::Result, TraitError> { let transcript = make_transcript(transcript_data); - let pair = self.0.read().key_pair_by_type::(public, key_type) - .map_err(|e| TraitError::PairNotFound(e.to_string()))?; + let pair = self.0.read().key_pair_by_type::(public, key_type)?; - let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(VRFSignature { - output: inout.to_output(), - proof, - }) + if let Some(pair) = pair { + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); + Ok(Some(VRFSignature { + output: inout.to_output(), + proof, + })) + } else { + Ok(None) + } } } @@ -411,36 +415,53 @@ impl KeystoreInner { } /// Get the key phrase for a given public key and key type. 
- fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result { + fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result> { if let Some(phrase) = self.get_additional_pair(public, key_type) { - return Ok(phrase.clone()) + return Ok(Some(phrase.clone())) } - let path = self.key_file_path(public, key_type).ok_or_else(|| Error::Unavailable)?; - let file = File::open(path)?; + let path = if let Some(path) = self.key_file_path(public, key_type) { + path + } else { + return Ok(None); + }; + + if path.exists() { + let file = File::open(path)?; - serde_json::from_reader(&file).map_err(Into::into) + serde_json::from_reader(&file).map_err(Into::into).map(Some) + } else { + Ok(None) + } } /// Get a key pair for the given public key and key type. - fn key_pair_by_type(&self, + fn key_pair_by_type( + &self, public: &Pair::Public, key_type: KeyTypeId, - ) -> Result { - let phrase = self.key_phrase_by_type(public.as_slice(), key_type)?; + ) -> Result> { + let phrase = if let Some(p) = self.key_phrase_by_type(public.as_slice(), key_type)? { + p + } else { + return Ok(None) + }; + let pair = Pair::from_string( &phrase, self.password(), ).map_err(|_| Error::InvalidPhrase)?; if &pair.public() == public { - Ok(pair) + Ok(Some(pair)) } else { Err(Error::InvalidPassword) } } - /// Returns the file path for the given public key and key type. + /// Get the file path for the given public key and key type. + /// + /// Returns `None` if the keystore only exists in-memory and there isn't any path to provide. fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { let mut buf = self.path.as_ref()?.clone(); let key_type = hex::encode(key_type.0); @@ -481,8 +502,12 @@ impl KeystoreInner { } /// Get a key pair for the given public key. 
- pub fn key_pair(&self, public: &::Public) -> Result { - self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) + /// + /// Returns `Ok(None)` if the key doesn't exist, `Ok(Some(_))` if the key exists or `Err(_)` when + /// something failed. + pub fn key_pair(&self, public: &::Public) -> Result> { + self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID) + .map(|v| v.map(Into::into)) } } @@ -531,13 +556,40 @@ mod tests { assert!(store.public_keys::().unwrap().is_empty()); let key: ed25519::AppPair = store.generate().unwrap(); - let key2: ed25519::AppPair = store.key_pair(&key.public()).unwrap(); + let key2: ed25519::AppPair = store.key_pair(&key.public()).unwrap().unwrap(); assert_eq!(key.public(), key2.public()); assert_eq!(store.public_keys::().unwrap()[0], key.public()); } + #[test] + fn has_keys_works() { + let temp_dir = TempDir::new().unwrap(); + let store = LocalKeystore::open(temp_dir.path(), None).unwrap(); + + let key: ed25519::AppPair = store.0.write().generate().unwrap(); + let key2 = ed25519::Pair::generate().0; + + assert!( + !SyncCryptoStore::has_keys(&store, &[(key2.public().to_vec(), ed25519::AppPublic::ID)]) + ); + + assert!( + !SyncCryptoStore::has_keys( + &store, + &[ + (key2.public().to_vec(), ed25519::AppPublic::ID), + (key.public().to_raw_vec(), ed25519::AppPublic::ID), + ], + ) + ); + + assert!( + SyncCryptoStore::has_keys(&store, &[(key.public().to_raw_vec(), ed25519::AppPublic::ID)]) + ); + } + #[test] fn test_insert_ephemeral_from_seed() { let temp_dir = TempDir::new().unwrap(); @@ -554,7 +606,7 @@ mod tests { drop(store); let store = KeystoreInner::open(temp_dir.path(), None).unwrap(); // Keys generated from seed should not be persisted! 
- assert!(store.key_pair::(&pair.public()).is_err()); + assert!(store.key_pair::(&pair.public()).unwrap().is_none()); } #[test] @@ -569,7 +621,7 @@ mod tests { let pair: ed25519::AppPair = store.generate().unwrap(); assert_eq!( pair.public(), - store.key_pair::(&pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().unwrap().public(), ); // Without the password the key should not be retrievable @@ -582,7 +634,7 @@ mod tests { ).unwrap(); assert_eq!( pair.public(), - store.key_pair::(&pair.public()).unwrap().public(), + store.key_pair::(&pair.public()).unwrap().unwrap().public(), ); } @@ -626,7 +678,7 @@ mod tests { let store_key_pair = store.key_pair_by_type::( &key_pair.public(), SR25519, - ).expect("Gets key pair from keystore"); + ).expect("Gets key pair from keystore").unwrap(); assert_eq!(key_pair.public(), store_key_pair.public()); } diff --git a/client/light/src/call_executor.rs b/client/light/src/call_executor.rs index 8b403823b0..ae83807dc9 100644 --- a/client/light/src/call_executor.rs +++ b/client/light/src/call_executor.rs @@ -104,7 +104,7 @@ impl CallExecutor for Result, Self::Error> ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, initialize_block_fn: IB, diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 4da356f92d..b5f3b754af 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -17,9 +17,9 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" futures-timer = "3.0.1" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } log = "0.4.8" -lru = "0.6.1" +lru = "0.6.5" prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.9.0", path = "../../utils/prometheus" } sc-network = { version = "0.9.0", path = "../network" } sp-runtime = { version = 
"3.0.0", path = "../../primitives/runtime" } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 87e960fb64..3d8c33eae0 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -36,7 +36,7 @@ hex = "0.4.0" ip_network = "0.3.4" linked-hash-map = "0.5.2" linked_hash_set = "0.1.3" -lru = "0.6.3" +lru = "0.6.5" log = "0.4.8" nohash-hasher = "0.2.0" parking_lot = "0.11.1" @@ -63,17 +63,17 @@ wasm-timer = "0.2" zeroize = "1.2.0" [dependencies.libp2p] -version = "0.34.0" +version = "0.35.1" [target.'cfg(target_os = "unknown")'.dependencies.libp2p] -version = "0.34.0" +version = "0.35.1" default-features = false features = ["identify", "kad", "mdns", "mplex", "noise", "ping", "request-response", "tcp-async-io", "websocket", "yamux"] [dev-dependencies] assert_matches = "1.3" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } quickcheck = "1.0.3" rand = "0.7.2" sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 06c91de886..0eebd1713c 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -17,12 +17,12 @@ // along with this program. If not, see . use crate::{ - config::{ProtocolId, Role}, + config::ProtocolId, bitswap::Bitswap, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, protocol::{message::Roles, CustomMessageOutcome, NotificationsSink, Protocol}, peer_info, request_responses, light_client_requests, - ObservedRole, DhtEvent, ExHashT, + ObservedRole, DhtEvent, }; use bytes::Bytes; @@ -54,9 +54,9 @@ pub use crate::request_responses::{ /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] -pub struct Behaviour { +pub struct Behaviour { /// All the substrate-specific protocols. 
- substrate: Protocol, + substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a /// cache. peer_info: peer_info::PeerInfoBehaviour, @@ -71,10 +71,6 @@ pub struct Behaviour { #[behaviour(ignore)] events: VecDeque>, - /// Role of our local node, as originally passed from the configuration. - #[behaviour(ignore)] - role: Role, - /// Light client request handling. #[behaviour(ignore)] light_client_request_sender: light_client_requests::sender::LightClientRequestSender, @@ -176,11 +172,10 @@ pub enum BehaviourOut { Dht(DhtEvent, Duration), } -impl Behaviour { +impl Behaviour { /// Builds a new `Behaviour`. pub fn new( - substrate: Protocol, - role: Role, + substrate: Protocol, user_agent: String, local_public_key: PublicKey, light_client_request_sender: light_client_requests::sender::LightClientRequestSender, @@ -206,7 +201,6 @@ impl Behaviour { request_responses::RequestResponsesBehaviour::new(request_response_protocols.into_iter())?, light_client_request_sender, events: VecDeque::new(), - role, block_request_protocol_name, }) @@ -262,12 +256,12 @@ impl Behaviour { } /// Returns a shared reference to the user protocol. - pub fn user_protocol(&self) -> &Protocol { + pub fn user_protocol(&self) -> &Protocol { &self.substrate } /// Returns a mutable reference to the user protocol. 
- pub fn user_protocol_mut(&mut self) -> &mut Protocol { + pub fn user_protocol_mut(&mut self) -> &mut Protocol { &mut self.substrate } @@ -290,15 +284,9 @@ impl Behaviour { } } -fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Roles) -> ObservedRole { +fn reported_roles_to_observed_role(roles: Roles) -> ObservedRole { if roles.is_authority() { - match local_role { - Role::Authority { sentry_nodes } - if sentry_nodes.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurSentry, - Role::Sentry { validators } - if validators.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurGuardedAuthority, - _ => ObservedRole::Authority - } + ObservedRole::Authority } else if roles.is_full() { ObservedRole::Full } else { @@ -306,15 +294,15 @@ fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Ro } } -impl NetworkBehaviourEventProcess for -Behaviour { +impl NetworkBehaviourEventProcess for +Behaviour { fn inject_event(&mut self, event: void::Void) { void::unreachable(event) } } -impl NetworkBehaviourEventProcess> for -Behaviour { +impl NetworkBehaviourEventProcess> for +Behaviour { fn inject_event(&mut self, event: CustomMessageOutcome) { match event { CustomMessageOutcome::BlockImport(origin, blocks) => @@ -337,11 +325,10 @@ Behaviour { ); }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, roles, notifications_sink } => { - let role = reported_roles_to_observed_role(&self.role, &remote, roles); self.events.push_back(BehaviourOut::NotificationStreamOpened { remote, protocol, - role: role.clone(), + role: reported_roles_to_observed_role(roles), notifications_sink: notifications_sink.clone(), }); }, @@ -375,7 +362,7 @@ Behaviour { } } -impl NetworkBehaviourEventProcess for Behaviour { +impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: request_responses::Event) { match event { request_responses::Event::InboundRequest { peer, protocol, result } => { @@ -399,8 +386,8 @@ 
impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: peer_info::PeerInfoEvent) { let peer_info::PeerInfoEvent::Identified { peer_id, @@ -429,8 +416,8 @@ impl NetworkBehaviourEventProcess NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, out: DiscoveryOut) { match out { DiscoveryOut::UnroutablePeer(_peer_id) => { @@ -463,7 +450,7 @@ impl NetworkBehaviourEventProcess } } -impl Behaviour { +impl Behaviour { fn poll( &mut self, cx: &mut Context, diff --git a/client/network/src/block_request_handler.rs b/client/network/src/block_request_handler.rs index 92f21f44f9..8faa6a7f6c 100644 --- a/client/network/src/block_request_handler.rs +++ b/client/network/src/block_request_handler.rs @@ -72,7 +72,7 @@ impl BlockRequestHandler { pub fn new(protocol_id: &ProtocolId, client: Arc>) -> (Self, ProtocolConfig) { // Rate of arrival multiplied with the waiting time in the queue equals the queue length. // - // An average Polkadot sentry node serves less than 5 requests per second. The 95th percentile + // An average Polkadot node serves less than 5 requests per second. The 95th percentile // serving a request is less than 2 second. Thus one would estimate the queue length to be // below 10. // diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 5a2327dda1..d6d4d9d716 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -68,6 +68,9 @@ pub struct Params { /// default. pub executor: Option + Send>>) + Send>>, + /// How to spawn the background task dedicated to the transactions handler. + pub transactions_handler_executor: Box + Send>>) + Send>, + /// Network layer configuration. pub network_config: NetworkConfiguration, @@ -128,18 +131,8 @@ pub enum Role { Full, /// Regular light node. Light, - /// Sentry node that guards an authority. 
Will be reported as "authority" on the wire protocol. - Sentry { - /// Address and identity of the validator nodes that we're guarding. - /// - /// The nodes will be granted some priviledged status. - validators: Vec, - }, /// Actual authority. - Authority { - /// List of public addresses and identities of our sentry nodes. - sentry_nodes: Vec, - } + Authority, } impl Role { @@ -147,12 +140,6 @@ impl Role { pub fn is_authority(&self) -> bool { matches!(self, Role::Authority { .. }) } - - /// True for `Role::Authority` and `Role::Sentry` since they're both - /// announced as having the authority role to the network. - pub fn is_network_authority(&self) -> bool { - matches!(self, Role::Authority { .. } | Role::Sentry { .. }) - } } impl fmt::Display for Role { @@ -160,7 +147,6 @@ impl fmt::Display for Role { match self { Role::Full => write!(f, "FULL"), Role::Light => write!(f, "LIGHT"), - Role::Sentry { .. } => write!(f, "SENTRY"), Role::Authority { .. } => write!(f, "AUTHORITY"), } } diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index c0b8c5e730..c35159168d 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -116,6 +116,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, + transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), network_config, chain: client.clone(), on_demand: None, diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index 5bd2092786..556e71da23 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -264,6 +264,7 @@ pub mod config; pub mod error; pub mod gossip; pub mod network_state; +pub mod transactions; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 4997bc36e5..bddd79269f 100644 --- 
a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -17,9 +17,8 @@ // along with this program. If not, see . use crate::{ - ExHashT, chain::Client, - config::{self, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + config::{self, ProtocolId}, error, request_responses::RequestFailure, utils::{interval, LruHashSet}, @@ -27,7 +26,7 @@ use crate::{ use bytes::{Bytes, BytesMut}; use codec::{Decode, DecodeAll, Encode}; -use futures::{channel::oneshot, prelude::*, stream::FuturesUnordered}; +use futures::{channel::oneshot, prelude::*}; use generic_proto::{GenericProto, GenericProtoOut}; use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::request_response::OutboundFailure; @@ -37,10 +36,7 @@ use libp2p::{Multiaddr, PeerId}; use log::{log, Level, trace, debug, warn, error}; use message::{BlockAnnounce, Message}; use message::generic::{Message as GenericMessage, Roles}; -use prometheus_endpoint::{ - Registry, Gauge, Counter, GaugeVec, - PrometheusError, Opts, register, U64 -}; +use prometheus_endpoint::{Registry, Gauge, GaugeVec, PrometheusError, Opts, register, U64}; use prost::Message as _; use sp_consensus::{ BlockOrigin, @@ -55,7 +51,7 @@ use sp_arithmetic::traits::SaturatedConversion; use sync::{ChainSync, SyncState}; use std::borrow::Cow; use std::convert::TryFrom as _; -use std::collections::{HashMap, HashSet, VecDeque, hash_map::Entry}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; use std::{io, iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; @@ -69,28 +65,16 @@ pub use generic_proto::{NotificationsSink, Ready, NotifsHandlerError}; /// Interval at which we perform time based maintenance const TICK_TIMEOUT: time::Duration = time::Duration::from_millis(1100); -/// Interval at which we propagate transactions; -const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); /// Maximum number of known block hashes to keep for a peer. 
const MAX_KNOWN_BLOCKS: usize = 1024; // ~32kb per peer + LruHashSet overhead -/// Maximum number of known transaction hashes to keep for a peer. -/// -/// This should be approx. 2 blocks full of transactions for the network to function properly. -const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. - /// Maximum allowed size for a block announce. const MAX_BLOCK_ANNOUNCE_SIZE: u64 = 1024 * 1024; -/// Maximum allowed size for a transactions notification. -const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; /// Maximum size used for notifications in the block announce and transaction protocols. // Must be equal to `max(MAX_BLOCK_ANNOUNCE_SIZE, MAX_TRANSACTIONS_SIZE)`. pub(crate) const BLOCK_ANNOUNCES_TRANSACTIONS_SUBSTREAM_SIZE: u64 = 16 * 1024 * 1024; -/// Maximum number of transaction validation request we keep at any moment. -const MAX_PENDING_TRANSACTIONS: usize = 8192; - /// Current protocol version. pub(crate) const CURRENT_VERSION: u32 = 6; /// Lowest version we support @@ -98,11 +82,9 @@ pub(crate) const MIN_VERSION: u32 = 3; /// Identifier of the peerset for the block announces protocol. const HARDCODED_PEERSETS_SYNC: sc_peerset::SetId = sc_peerset::SetId::from(0); -/// Identifier of the peerset for the transactions protocol. -const HARDCODED_PEERSETS_TX: sc_peerset::SetId = sc_peerset::SetId::from(1); /// Number of hardcoded peersets (the constants right above). Any set whose identifier is equal or /// superior to this value corresponds to a user-defined protocol. -const NUM_HARDCODED_PEERSETS: usize = 2; +const NUM_HARDCODED_PEERSETS: usize = 1; /// When light node connects to the full node and the full node is behind light node /// for at least `LIGHT_MAXIMAL_BLOCKS_DIFFERENCE` blocks, we consider it not useful @@ -117,21 +99,8 @@ mod rep { pub const REFUSED: Rep = Rep::new(-(1 << 10), "Request refused"); /// Reputation change when we are a light client and a peer is behind us. 
pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// Reputation change when a peer sends us any transaction. - /// - /// This forces node to verify it, thus the negative value here. Once transaction is verified, - /// reputation change should be refunded with `ANY_TRANSACTION_REFUND` - pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); - /// Reputation change when a peer sends us any transaction that is not invalid. - pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); - /// Reputation change when a peer sends us an transaction that we didn't know about. - pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); - /// Reputation change when a peer sends us a bad transaction. - pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); /// We received a message that failed to decode. pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - /// We received an unexpected transaction packet. - pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); /// Peer has different genesis. pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); /// Peer is on unsupported protocol version. @@ -147,7 +116,6 @@ struct Metrics { queued_blocks: Gauge, fork_targets: Gauge, justifications: GaugeVec, - propagated_transactions: Counter, } impl Metrics { @@ -175,66 +143,38 @@ impl Metrics { )?; register(g, r)? 
}, - propagated_transactions: register(Counter::new( - "sync_propagated_transactions", - "Number of transactions propagated to at least one peer", - )?, r)?, }) } } -#[pin_project::pin_project] -struct PendingTransaction { - #[pin] - validation: TransactionImportFuture, - tx_hash: H, -} - -impl Future for PendingTransaction { - type Output = (H, TransactionImport); - - fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { - let mut this = self.project(); - - if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { - return Poll::Ready((this.tx_hash.clone(), import_result)); - } - - Poll::Pending - } -} - // Lock must always be taken in order declared here. -pub struct Protocol { +pub struct Protocol { /// Interval at which we call `tick`. tick_timeout: Pin + Send>>, - /// Interval at which we call `propagate_transactions`. - propagate_timeout: Pin + Send>>, /// Pending list of messages to return from `poll` as a priority. pending_messages: VecDeque>, - /// Pending transactions verification tasks. - pending_transactions: FuturesUnordered>, - /// As multiple peers can send us the same transaction, we group - /// these peers using the transaction hash while the transaction is - /// imported. This prevents that we import the same transaction - /// multiple times concurrently. - pending_transactions_peers: HashMap>, config: ProtocolConfig, genesis_hash: B::Hash, sync: ChainSync, // All connected peers - peers: HashMap>, + peers: HashMap>, chain: Arc>, /// List of nodes for which we perform additional logging because they are important for the /// user. important_peers: HashSet, /// Used to report reputation changes. peerset_handle: sc_peerset::PeersetHandle, - transaction_pool: Arc>, /// Handles opening the unique substream and sending and receiving raw messages. behaviour: GenericProto, /// List of notifications protocols that have been registered. 
notification_protocols: Vec>, + /// If we receive a new "substream open" event that contains an invalid handshake, we ask the + /// inner layer to force-close the substream. Force-closing the substream will generate a + /// "substream closed" event. This is a problem: since we can't propagate the "substream open" + /// event to the outer layers, we also shouldn't propagate this "substream closed" event. To + /// solve this, an entry is added to this map whenever an invalid handshake is received. + /// Entries are removed when the corresponding "substream closed" is later received. + bad_handshake_substreams: HashSet<(PeerId, sc_peerset::SetId)>, /// Prometheus metrics. metrics: Option, /// The `PeerId`'s of all boot nodes. @@ -245,15 +185,13 @@ pub struct Protocol { /// Peer information #[derive(Debug)] -struct Peer { +struct Peer { info: PeerInfo, /// Current block request, if any. Started by emitting [`CustomMessageOutcome::BlockRequest`]. block_request: Option<( message::BlockRequest, oneshot::Receiver, RequestFailure>>, )>, - /// Holds a set of transactions known to this peer. - known_transactions: LruHashSet, /// Holds a set of blocks known to this peer. known_blocks: LruHashSet, } @@ -336,18 +274,17 @@ fn build_status_message( Message::::Status(status).encode() } -impl Protocol { +impl Protocol { /// Create a new instance. 
pub fn new( config: ProtocolConfig, chain: Arc>, - transaction_pool: Arc>, protocol_id: ProtocolId, - config_role: &config::Role, network_config: &config::NetworkConfiguration, + notifications_protocols_handshakes: Vec>, block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); let sync = ChainSync::new( config.roles, @@ -384,21 +321,6 @@ impl Protocol { let mut sets = Vec::with_capacity(NUM_HARDCODED_PEERSETS + network_config.extra_sets.len()); let mut default_sets_reserved = HashSet::new(); - match config_role { - config::Role::Sentry { validators } => { - for validator in validators { - default_sets_reserved.insert(validator.peer_id.clone()); - known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); - } - } - config::Role::Authority { sentry_nodes } => { - for sentry_node in sentry_nodes { - default_sets_reserved.insert(sentry_node.peer_id.clone()); - known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); - } - } - _ => {} - }; for reserved in network_config.default_peers_set.reserved_nodes.iter() { default_sets_reserved.insert(reserved.peer_id.clone()); known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); @@ -420,17 +342,6 @@ impl Protocol { == config::NonReservedPeerMode::Deny, }); - // Set number 1 is used for transactions. - // The `reserved_nodes` of this set are later kept in sync with the peers we connect - // to through set 0. 
- sets.push(sc_peerset::SetConfig { - in_peers: 0, - out_peers: 0, - bootnodes: Vec::new(), - reserved_nodes: HashSet::new(), - reserved_only: true, - }); - for set_cfg in &network_config.extra_sets { let mut reserved_nodes = HashSet::new(); for reserved in set_cfg.set_config.reserved_nodes.iter() { @@ -455,14 +366,6 @@ impl Protocol { }) }; - let transactions_protocol: Cow<'static, str> = Cow::from({ - let mut proto = String::new(); - proto.push_str("/"); - proto.push_str(protocol_id.as_ref()); - proto.push_str("/transactions/1"); - proto - }); - let block_announces_protocol: Cow<'static, str> = Cow::from({ let mut proto = String::new(); proto.push_str("/"); @@ -473,7 +376,6 @@ impl Protocol { let behaviour = { let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let handshake_message = Roles::from(config_role).encode(); let best_number = info.best_number; let best_hash = info.best_hash; @@ -492,12 +394,10 @@ impl Protocol { build_status_message::(&config, best_number, best_hash, genesis_hash), peerset, iter::once((block_announces_protocol, block_announces_handshake, MAX_BLOCK_ANNOUNCE_SIZE)) - .chain(iter::once((transactions_protocol, vec![], MAX_TRANSACTIONS_SIZE))) - .chain(network_config.extra_sets.iter().map(|s| ( - s.notifications_protocol.clone(), - handshake_message.clone(), - s.max_notification_size - ))), + .chain(network_config.extra_sets.iter() + .zip(notifications_protocols_handshakes) + .map(|(s, hs)| (s.notifications_protocol.clone(), hs, s.max_notification_size)) + ), ) }; @@ -508,21 +408,18 @@ impl Protocol { let protocol = Protocol { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), - propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), pending_messages: VecDeque::new(), - pending_transactions: FuturesUnordered::new(), - pending_transactions_peers: HashMap::new(), config, peers: HashMap::new(), chain, genesis_hash: info.genesis_hash, sync, important_peers, - transaction_pool, peerset_handle: peerset_handle.clone(), 
behaviour, notification_protocols: network_config.extra_sets.iter().map(|s| s.notifications_protocol.clone()).collect(), + bad_handshake_substreams: Default::default(), metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) } else { @@ -667,8 +564,8 @@ impl Protocol { debug!(target: "sub-libp2p", "Received unexpected Status"), GenericMessage::BlockAnnounce(announce) => self.push_block_announce_validation(who.clone(), announce), - GenericMessage::Transactions(m) => - self.on_transactions(who, m), + GenericMessage::Transactions(_) => + warn!(target: "sub-libp2p", "Received unexpected Transactions"), GenericMessage::BlockResponse(_) => warn!(target: "sub-libp2p", "Received unexpected BlockResponse"), GenericMessage::RemoteCallResponse(_) => @@ -705,7 +602,7 @@ impl Protocol { who: PeerId, request: message::BlockRequest, ) -> CustomMessageOutcome { - prepare_block_request::(&mut self.peers, who, request) + prepare_block_request::(&mut self.peers, who, request) } /// Called by peer when it is disconnecting. 
@@ -911,8 +808,6 @@ impl Protocol { best_number: status.best_number }, block_request: None, - known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) - .expect("Constant is nonzero")), known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) .expect("Constant is nonzero")), }; @@ -943,144 +838,6 @@ impl Protocol { Ok(()) } - /// Called when peer sends us new transactions - fn on_transactions( - &mut self, - who: PeerId, - transactions: message::Transactions, - ) { - // sending transaction to light node is considered a bad behavior - if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); - self.behaviour.disconnect_peer(&who, HARDCODED_PEERSETS_TX); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); - return; - } - - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - trace!(target: "sync", "{} Ignoring transactions while syncing", who); - return; - } - - trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); - if let Some(ref mut peer) = self.peers.get_mut(&who) { - for t in transactions { - if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { - debug!( - target: "sync", - "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", - MAX_PENDING_TRANSACTIONS, - ); - break; - } - - let hash = self.transaction_pool.hash_of(&t); - peer.known_transactions.insert(hash.clone()); - - self.peerset_handle.report_peer(who.clone(), rep::ANY_TRANSACTION); - - match self.pending_transactions_peers.entry(hash.clone()) { - Entry::Vacant(entry) => { - self.pending_transactions.push(PendingTransaction { - validation: self.transaction_pool.import(t), - tx_hash: hash, - }); - entry.insert(vec![who.clone()]); - }, - Entry::Occupied(mut entry) => { - entry.get_mut().push(who.clone()); - } - } - } - } - } - - fn on_handle_transaction_import(&mut self, who: 
PeerId, import: TransactionImport) { - match import { - TransactionImport::KnownGood => self.peerset_handle.report_peer(who, rep::ANY_TRANSACTION_REFUND), - TransactionImport::NewGood => self.peerset_handle.report_peer(who, rep::GOOD_TRANSACTION), - TransactionImport::Bad => self.peerset_handle.report_peer(who, rep::BAD_TRANSACTION), - TransactionImport::None => {}, - } - } - - /// Propagate one transaction. - pub fn propagate_transaction( - &mut self, - hash: &H, - ) { - debug!(target: "sync", "Propagating transaction [{:?}]", hash); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - if let Some(transaction) = self.transaction_pool.transaction(hash) { - let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); - self.transaction_pool.on_broadcasted(propagated_to); - } - } - - fn do_propagate_transactions( - &mut self, - transactions: &[(H, B::Extrinsic)], - ) -> HashMap> { - let mut propagated_to = HashMap::<_, Vec<_>>::new(); - let mut propagated_transactions = 0; - - for (who, peer) in self.peers.iter_mut() { - // never send transactions to the light node - if !peer.info.roles.is_full() { - continue; - } - - if !self.behaviour.is_open(who, HARDCODED_PEERSETS_TX) { - continue; - } - - let (hashes, to_send): (Vec<_>, Vec<_>) = transactions - .iter() - .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) - .cloned() - .unzip(); - - propagated_transactions += hashes.len(); - - if !to_send.is_empty() { - for hash in hashes { - propagated_to - .entry(hash) - .or_default() - .push(who.to_base58()); - } - trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - self.behaviour.write_notification( - who, - HARDCODED_PEERSETS_TX, - to_send.encode() - ); - } - } - - if let Some(ref metrics) = self.metrics { - metrics.propagated_transactions.inc_by(propagated_transactions as _) - } - - propagated_to - } - - /// Call when we must propagate ready 
transactions to peers. - pub fn propagate_transactions(&mut self) { - debug!(target: "sync", "Propagating transactions"); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - let transactions = self.transaction_pool.transactions(); - let propagated_to = self.do_propagate_transactions(&transactions); - self.transaction_pool.on_broadcasted(propagated_to); - } - /// Make sure an important block is propagated to peers. /// /// In chain-based consensus, we often need to make sure non-best forks are @@ -1332,25 +1089,21 @@ impl Protocol { /// Set whether the syncing peers set is in reserved-only mode. pub fn set_reserved_only(&self, reserved_only: bool) { self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_SYNC, reserved_only); - self.peerset_handle.set_reserved_only(HARDCODED_PEERSETS_TX, reserved_only); } /// Removes a `PeerId` from the list of reserved peers for syncing purposes. pub fn remove_reserved_peer(&self, peer: PeerId) { self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); - self.peerset_handle.remove_reserved_peer(HARDCODED_PEERSETS_TX, peer); } /// Adds a `PeerId` to the list of reserved peers for syncing purposes. pub fn add_reserved_peer(&self, peer: PeerId) { self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_SYNC, peer.clone()); - self.peerset_handle.add_reserved_peer(HARDCODED_PEERSETS_TX, peer); } /// Sets the list of reserved peers for syncing purposes. pub fn set_reserved_peers(&self, peers: HashSet) { self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_SYNC, peers.clone()); - self.peerset_handle.set_reserved_peers(HARDCODED_PEERSETS_TX, peers); } /// Removes a `PeerId` from the list of reserved peers. 
@@ -1436,8 +1189,8 @@ impl Protocol { } } -fn prepare_block_request( - peers: &mut HashMap>, +fn prepare_block_request( + peers: &mut HashMap>, who: PeerId, request: message::BlockRequest, ) -> CustomMessageOutcome { @@ -1505,7 +1258,7 @@ pub enum CustomMessageOutcome { None, } -impl NetworkBehaviour for Protocol { +impl NetworkBehaviour for Protocol { type ProtocolsHandler = ::ProtocolsHandler; type OutEvent = CustomMessageOutcome; @@ -1634,10 +1387,6 @@ impl NetworkBehaviour for Protocol { self.tick(); } - while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { - self.propagate_transactions(); - } - for (id, request) in self.sync.block_requests() { let event = prepare_block_request(&mut self.peers, id.clone(), request); self.pending_messages.push_back(event); @@ -1646,13 +1395,6 @@ impl NetworkBehaviour for Protocol { let event = prepare_block_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } - if let Poll::Ready(Some((tx_hash, result))) = self.pending_transactions.poll_next_unpin(cx) { - if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { - peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); - } else { - warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); - } - } // Check if there is any block announcement validation finished. while let Poll::Ready(result) = self.sync.poll_block_announce_validation(cx) { @@ -1696,11 +1438,6 @@ impl NetworkBehaviour for Protocol { }; if self.on_sync_peer_connected(peer_id.clone(), handshake).is_ok() { - // Set 1 is kept in sync with the connected peers of set 0. 
- self.peerset_handle.add_reserved_peer( - HARDCODED_PEERSETS_TX, - peer_id.clone() - ); CustomMessageOutcome::SyncConnected(peer_id) } else { CustomMessageOutcome::None @@ -1720,11 +1457,6 @@ impl NetworkBehaviour for Protocol { match as DecodeAll>::decode_all(&mut &received_handshake[..]) { Ok(handshake) => { if self.on_sync_peer_connected(peer_id.clone(), handshake).is_ok() { - // Set 1 is kept in sync with the connected peers of set 0. - self.peerset_handle.add_reserved_peer( - HARDCODED_PEERSETS_TX, - peer_id.clone() - ); CustomMessageOutcome::SyncConnected(peer_id) } else { CustomMessageOutcome::None @@ -1746,20 +1478,30 @@ impl NetworkBehaviour for Protocol { } } - } else if set_id == HARDCODED_PEERSETS_TX { - // Nothing to do. - CustomMessageOutcome::None } else { - match message::Roles::decode_all(&received_handshake[..]) { - Ok(roles) => + match (message::Roles::decode_all(&received_handshake[..]), self.peers.get(&peer_id)) { + (Ok(roles), _) => CustomMessageOutcome::NotificationStreamOpened { remote: peer_id, protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), roles, notifications_sink, }, - Err(err) => { + (Err(_), Some(peer)) if received_handshake.is_empty() => { + // As a convenience, we allow opening substreams for "external" + // notification protocols with an empty handshake. This fetches the + // roles from the locally-known roles. 
+ // TODO: remove this after https://github.com/paritytech/substrate/issues/5685 + CustomMessageOutcome::NotificationStreamOpened { + remote: peer_id, + protocol: self.notification_protocols[usize::from(set_id) - NUM_HARDCODED_PEERSETS].clone(), + roles: peer.info.roles, + notifications_sink, + } + }, + (Err(err), _) => { debug!(target: "sync", "Failed to parse remote handshake: {}", err); + self.bad_handshake_substreams.insert((peer_id.clone(), set_id)); self.behaviour.disconnect_peer(&peer_id, set_id); self.peerset_handle.report_peer(peer_id, rep::BAD_MESSAGE); CustomMessageOutcome::None @@ -1768,7 +1510,9 @@ impl NetworkBehaviour for Protocol { } } GenericProtoOut::CustomProtocolReplaced { peer_id, notifications_sink, set_id } => { - if set_id == HARDCODED_PEERSETS_SYNC || set_id == HARDCODED_PEERSETS_TX { + if set_id == HARDCODED_PEERSETS_SYNC { + CustomMessageOutcome::None + } else if self.bad_handshake_substreams.contains(&(peer_id.clone(), set_id)) { CustomMessageOutcome::None } else { CustomMessageOutcome::NotificationStreamReplaced { @@ -1782,11 +1526,6 @@ impl NetworkBehaviour for Protocol { // Set number 0 is hardcoded the default set of peers we sync from. if set_id == HARDCODED_PEERSETS_SYNC { if self.on_sync_peer_disconnected(peer_id.clone()).is_ok() { - // Set 1 is kept in sync with the connected peers of set 0. - self.peerset_handle.remove_reserved_peer( - HARDCODED_PEERSETS_TX, - peer_id.clone() - ); CustomMessageOutcome::SyncDisconnected(peer_id) } else { log::debug!( @@ -1796,7 +1535,10 @@ impl NetworkBehaviour for Protocol { ); CustomMessageOutcome::None } - } else if set_id == HARDCODED_PEERSETS_TX { + } else if self.bad_handshake_substreams.remove(&(peer_id.clone(), set_id)) { + // The substream that has just been closed had been opened with a bad + // handshake. The outer layers have never received an opening event about this + // substream, and consequently shouldn't receive a closing event either. 
CustomMessageOutcome::None } else { CustomMessageOutcome::NotificationStreamClosed { @@ -1830,20 +1572,10 @@ impl NetworkBehaviour for Protocol { CustomMessageOutcome::None } } - HARDCODED_PEERSETS_TX if self.peers.contains_key(&peer_id) => { - if let Ok(m) = as Decode>::decode( - &mut message.as_ref(), - ) { - self.on_transactions(peer_id, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } - CustomMessageOutcome::None - } - HARDCODED_PEERSETS_SYNC | HARDCODED_PEERSETS_TX => { + HARDCODED_PEERSETS_SYNC => { debug!( target: "sync", - "Received sync or transaction for peer earlier refused by sync layer: {}", + "Received sync for peer earlier refused by sync layer: {}", peer_id ); CustomMessageOutcome::None diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index e20dbcb9ee..fb2e3b33dd 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -92,16 +92,16 @@ pub enum Event { /// Role that the peer sent to us during the handshake, with the addition of what our local node /// knows about that peer. +/// +/// > **Note**: This enum is different from the `Role` enum. The `Role` enum indicates what a +/// > node says about itself, while `ObservedRole` is a `Role` merged with the +/// > information known locally about that node. #[derive(Debug, Clone)] pub enum ObservedRole { /// Full node. Full, /// Light node. Light, - /// When we are a validator node, this is a sentry that protects us. - OurSentry, - /// When we are a sentry node, this is the authority we are protecting. - OurGuardedAuthority, /// Third-party authority. 
Authority, } diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index cd77852c91..3283ea33a0 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -1467,13 +1467,14 @@ impl NetworkBehaviour for GenericProto { if let ConnectionState::Closed = *connec_state { *connec_state = ConnectionState::OpenDesiredByRemote; } else { - // Connections in `OpeningThenClosing` state are in a Closed phase, - // and as such can emit `OpenDesiredByRemote` messages. - // Since an `Open` and a `Close` messages have already been sent, + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. + // Since an `Open` and/or a `Close` message have already been sent, // there is nothing much that can be done about this anyway. debug_assert!(matches!( connec_state, - ConnectionState::OpeningThenClosing + ConnectionState::OpeningThenClosing | ConnectionState::Closing )); } } else { @@ -1502,13 +1503,15 @@ impl NetworkBehaviour for GenericProto { }); *connec_state = ConnectionState::Opening; } else { - // Connections in `OpeningThenClosing` and `Opening` are in a Closed - // phase, and as such can emit `OpenDesiredByRemote` messages. + // Connections in `OpeningThenClosing`, `Opening`, and `Closing` + // state can be in a Closed phase, and as such can emit + // `OpenDesiredByRemote` messages. // Since an `Open` message haS already been sent, there is nothing // more to do. 
debug_assert!(matches!( connec_state, - ConnectionState::OpenDesiredByRemote | ConnectionState::Opening + ConnectionState::OpenDesiredByRemote | + ConnectionState::Closing | ConnectionState::Opening )); } } else { @@ -1544,12 +1547,13 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = PeerState::Incoming { connections, backoff_until }; } else { - // Connections in `OpeningThenClosing` are in a Closed phase, and - // as such can emit `OpenDesiredByRemote` messages. + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. // We ignore them. debug_assert!(matches!( connec_state, - ConnectionState::OpeningThenClosing + ConnectionState::OpeningThenClosing | ConnectionState::Closing )); *entry.into_mut() = PeerState::Disabled { connections, backoff_until }; } @@ -1578,12 +1582,13 @@ impl NetworkBehaviour for GenericProto { *entry.into_mut() = PeerState::Enabled { connections }; } else { - // Connections in `OpeningThenClosing` are in a Closed phase, and - // as such can emit `OpenDesiredByRemote` messages. + // Connections in `OpeningThenClosing` and `Closing` state can be + // in a Closed phase, and as such can emit `OpenDesiredByRemote` + // messages. // We ignore them. debug_assert!(matches!( connec_state, - ConnectionState::OpeningThenClosing + ConnectionState::OpeningThenClosing | ConnectionState::Closing )); *entry.into_mut() = PeerState::DisabledPendingEnable { connections, diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index 3161f91e53..ed27210328 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -191,7 +191,6 @@ pub mod generic { match roles { crate::config::Role::Full => Roles::FULL, crate::config::Role::Light => Roles::LIGHT, - crate::config::Role::Sentry { .. } => Roles::AUTHORITY, crate::config::Role::Authority { .. 
} => Roles::AUTHORITY, } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 39eaa606d0..74ce9316fc 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -30,7 +30,7 @@ use crate::{ ExHashT, NetworkStateInfo, NetworkStatus, behaviour::{self, Behaviour, BehaviourOut}, - config::{parse_str_addr, Params, Role, TransportConfig}, + config::{parse_str_addr, Params, TransportConfig}, DhtEvent, discovery::DiscoveryConfig, error::Error, @@ -41,6 +41,7 @@ use crate::{ light_client_requests, protocol::{ self, + message::generic::Roles, NotifsHandlerError, NotificationsSink, PeerInfo, @@ -49,9 +50,13 @@ use crate::{ event::Event, sync::SyncState, }, + transactions, transport, ReputationChange, + bitswap::Bitswap, }; + +use codec::Encode as _; use futures::{channel::oneshot, prelude::*}; use libp2p::{PeerId, multiaddr, Multiaddr}; use libp2p::core::{ @@ -140,7 +145,7 @@ impl NetworkWorker { /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(params: Params) -> Result, Error> { + pub fn new(mut params: Params) -> Result, Error> { // Ensure the listen addresses are consistent with the transport. ensure_addresses_consistent_with_transport( params.network_config.listen_addresses.iter(), @@ -171,6 +176,11 @@ impl NetworkWorker { fs::create_dir_all(path)?; } + let transactions_handler_proto = transactions::TransactionsHandlerPrototype::new( + params.protocol_id.clone() + ); + params.network_config.extra_sets.insert(0, transactions_handler_proto.set_config()); + // Private and public keys configuration. 
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); @@ -181,16 +191,17 @@ impl NetworkWorker { local_peer_id.to_base58(), ); + let default_notif_handshake_message = Roles::from(¶ms.role).encode(); let (protocol, peerset_handle, mut known_addresses) = Protocol::new( protocol::ProtocolConfig { roles: From::from(¶ms.role), max_parallel_downloads: params.network_config.max_parallel_downloads, }, params.chain.clone(), - params.transaction_pool, params.protocol_id.clone(), - ¶ms.role, ¶ms.network_config, + iter::once(Vec::new()).chain((0..params.network_config.extra_sets.len() - 1) + .map(|_| default_notif_handshake_message.clone())).collect(), params.block_announce_validator, params.metrics_registry.as_ref(), )?; @@ -225,22 +236,6 @@ impl NetworkWorker { } )?; - // Print a message about the deprecation of sentry nodes. - let print_deprecated_message = match ¶ms.role { - Role::Sentry { .. } => true, - Role::Authority { sentry_nodes } if !sentry_nodes.is_empty() => true, - _ => false, - }; - if print_deprecated_message { - log::warn!( - "🙇 Sentry nodes are deprecated, and the `--sentry` and `--sentry-nodes` \ - CLI options will eventually be removed in a future version. The Substrate \ - and Polkadot networking protocol require validators to be \ - publicly-accessible. Please do not block access to your validator nodes. \ - For details, see https://github.com/paritytech/substrate/issues/6845." - ); - } - let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) .unwrap_or_else(|| Arc::new(AlwaysBadChecker)); @@ -250,7 +245,7 @@ impl NetworkWorker { // Build the swarm. 
let client = params.chain.clone(); - let (mut swarm, bandwidth): (Swarm, _) = { + let (mut swarm, bandwidth): (Swarm, _) = { let user_agent = format!( "{} ({})", params.network_config.client_version, @@ -339,7 +334,6 @@ impl NetworkWorker { let bitswap = if params.network_config.ipfs_server { Some(Bitswap::new(client)) } else { None }; let result = Behaviour::new( protocol, - params.role, user_agent, local_public, light_client_request_sender, @@ -394,14 +388,14 @@ impl NetworkWorker { // Listen on multiaddresses. for addr in ¶ms.network_config.listen_addresses { - if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { + if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite); + Swarm::::add_external_address(&mut swarm, addr.clone(), AddressScore::Infinite); } let external_addresses = Arc::new(Mutex::new(Vec::new())); @@ -421,6 +415,14 @@ impl NetworkWorker { _marker: PhantomData, }); + let (tx_handler, tx_handler_controller) = transactions_handler_proto.build( + service.clone(), + params.role, + params.transaction_pool, + params.metrics_registry.as_ref() + )?; + (params.transactions_handler_executor)(tx_handler.run().boxed()); + Ok(NetworkWorker { external_addresses, num_connected, @@ -432,6 +434,7 @@ impl NetworkWorker { light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, peers_notifications_sinks, + tx_handler_controller, metrics, boot_node_ids, }) @@ -523,14 +526,14 @@ impl NetworkWorker { /// Returns the local `PeerId`. pub fn local_peer_id(&self) -> &PeerId { - Swarm::::local_peer_id(&self.network_service) + Swarm::::local_peer_id(&self.network_service) } /// Returns the list of addresses we are listening on. 
/// /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. pub fn listen_addresses(&self) -> impl Iterator { - Swarm::::listeners(&self.network_service) + Swarm::::listeners(&self.network_service) } /// Get network state. @@ -581,9 +584,9 @@ impl NetworkWorker { .collect() }; - let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); - let listened_addresses = Swarm::::listeners(&swarm).cloned().collect(); - let external_addresses = Swarm::::external_addresses(&swarm) + let peer_id = Swarm::::local_peer_id(&swarm).to_base58(); + let listened_addresses = Swarm::::listeners(&swarm).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&swarm) .map(|r| &r.addr) .cloned() .collect(); @@ -1310,7 +1313,7 @@ pub struct NetworkWorker { /// The network service that can be extracted and shared through the codebase. service: Arc>, /// The *actual* network. - network_service: Swarm, + network_service: Swarm, /// The import queue that was passed at initialization. import_queue: Box>, /// Messages from the [`NetworkService`] that must be processed. @@ -1326,6 +1329,8 @@ pub struct NetworkWorker { /// For each peer and protocol combination, an object that allows sending notifications to /// that peer. Shared with the [`NetworkService`]. peers_notifications_sinks: Arc), NotificationsSink>>>, + /// Controller for the handler of incoming and outgoing transactions. 
+ tx_handler_controller: transactions::TransactionsHandlerController, } impl Future for NetworkWorker { @@ -1385,9 +1390,9 @@ impl Future for NetworkWorker { ServiceToWorkerMsg::RequestJustification(hash, number) => this.network_service.user_protocol_mut().request_justification(&hash, number), ServiceToWorkerMsg::PropagateTransaction(hash) => - this.network_service.user_protocol_mut().propagate_transaction(&hash), + this.tx_handler_controller.propagate_transaction(hash), ServiceToWorkerMsg::PropagateTransactions => - this.network_service.user_protocol_mut().propagate_transactions(), + this.tx_handler_controller.propagate_transactions(), ServiceToWorkerMsg::GetValue(key) => this.network_service.get_value(&key), ServiceToWorkerMsg::PutValue(key, value) => @@ -1766,7 +1771,7 @@ impl Future for NetworkWorker { // Update the variables shared with the `NetworkService`. this.num_connected.store(num_connected_peers, Ordering::Relaxed); { - let external_addresses = Swarm::::external_addresses(&this.network_service) + let external_addresses = Swarm::::external_addresses(&this.network_service) .map(|r| &r.addr) .cloned() .collect(); @@ -1778,6 +1783,8 @@ impl Future for NetworkWorker { SyncState::Downloading => true, }; + this.tx_handler_controller.set_gossip_enabled(!is_major_syncing); + this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); if let Some(metrics) = this.metrics.as_ref() { @@ -1809,14 +1816,14 @@ impl Unpin for NetworkWorker { } /// The libp2p swarm, customized for our needs. -type Swarm = libp2p::swarm::Swarm>; +type Swarm = libp2p::swarm::Swarm>; // Implementation of `import_queue::Link` trait using the available local variables. 
-struct NetworkLink<'a, B: BlockT, H: ExHashT> { - protocol: &'a mut Swarm, +struct NetworkLink<'a, B: BlockT> { + protocol: &'a mut Swarm, } -impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { +impl<'a, B: BlockT> Link for NetworkLink<'a, B> { fn blocks_processed( &mut self, imported: usize, diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index f88854963f..defb9213a3 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -116,6 +116,7 @@ fn build_test_full_node(config: config::NetworkConfiguration) let worker = NetworkWorker::new(config::Params { role: config::Role::Full, executor: None, + transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), network_config: config, chain: client.clone(), on_demand: None, diff --git a/client/network/src/transactions.rs b/client/network/src/transactions.rs new file mode 100644 index 0000000000..20ac8314b7 --- /dev/null +++ b/client/network/src/transactions.rs @@ -0,0 +1,488 @@ +// This file is part of Substrate. + +// Copyright (C) 2017-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +//! Transactions handling to plug on top of the network service. +//! +//! Usage: +//! +//! 
- Use [`TransactionsHandlerPrototype::new`] to create a prototype. +//! - Pass the return value of [`TransactionsHandlerPrototype::set_config`] to the network +//! configuration as an extra peers set. +//! - Use [`TransactionsHandlerPrototype::build`] then [`TransactionsHandler::run`] to obtain a +//! `Future` that processes transactions. +//! + +use crate::{ + ExHashT, Event, ObservedRole, + config::{self, ProtocolId, TransactionPool, TransactionImportFuture, TransactionImport}, + error, protocol::message, service::NetworkService, utils::{interval, LruHashSet}, +}; + +use codec::{Decode, Encode}; +use futures::{channel::mpsc, prelude::*, stream::FuturesUnordered}; +use libp2p::{multiaddr, PeerId}; +use log::{trace, debug, warn}; +use prometheus_endpoint::{ + Registry, Counter, PrometheusError, register, U64 +}; +use sp_runtime::traits::Block as BlockT; +use std::borrow::Cow; +use std::collections::{HashMap, hash_map::Entry}; +use std::sync::{atomic::{AtomicBool, Ordering}, Arc}; +use std::{iter, num::NonZeroUsize, pin::Pin, task::Poll, time}; + +/// Interval at which we propagate transactions; +const PROPAGATE_TIMEOUT: time::Duration = time::Duration::from_millis(2900); + +/// Maximum number of known transaction hashes to keep for a peer. +/// +/// This should be approx. 2 blocks full of transactions for the network to function properly. +const MAX_KNOWN_TRANSACTIONS: usize = 10240; // ~300kb per peer + overhead. + +/// Maximum allowed size for a transactions notification. +const MAX_TRANSACTIONS_SIZE: u64 = 16 * 1024 * 1024; + +/// Maximum number of transaction validation request we keep at any moment. +const MAX_PENDING_TRANSACTIONS: usize = 8192; + +mod rep { + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer sends us any transaction. + /// + /// This forces node to verify it, thus the negative value here. 
Once transaction is verified, + /// reputation change should be refunded with `ANY_TRANSACTION_REFUND` + pub const ANY_TRANSACTION: Rep = Rep::new(-(1 << 4), "Any transaction"); + /// Reputation change when a peer sends us any transaction that is not invalid. + pub const ANY_TRANSACTION_REFUND: Rep = Rep::new(1 << 4, "Any transaction (refund)"); + /// Reputation change when a peer sends us an transaction that we didn't know about. + pub const GOOD_TRANSACTION: Rep = Rep::new(1 << 7, "Good transaction"); + /// Reputation change when a peer sends us a bad transaction. + pub const BAD_TRANSACTION: Rep = Rep::new(-(1 << 12), "Bad transaction"); + /// We received an unexpected transaction packet. + pub const UNEXPECTED_TRANSACTIONS: Rep = Rep::new_fatal("Unexpected transactions packet"); +} + +struct Metrics { + propagated_transactions: Counter, +} + +impl Metrics { + fn register(r: &Registry) -> Result { + Ok(Metrics { + propagated_transactions: register(Counter::new( + "sync_propagated_transactions", + "Number of transactions propagated to at least one peer", + )?, r)?, + }) + } +} + +#[pin_project::pin_project] +struct PendingTransaction { + #[pin] + validation: TransactionImportFuture, + tx_hash: H, +} + +impl Future for PendingTransaction { + type Output = (H, TransactionImport); + + fn poll(self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll { + let mut this = self.project(); + + if let Poll::Ready(import_result) = Pin::new(&mut this.validation).poll_unpin(cx) { + return Poll::Ready((this.tx_hash.clone(), import_result)); + } + + Poll::Pending + } +} + +/// Prototype for a [`TransactionsHandler`]. +pub struct TransactionsHandlerPrototype { + protocol_name: Cow<'static, str>, +} + +impl TransactionsHandlerPrototype { + /// Create a new instance. 
+ pub fn new(protocol_id: ProtocolId) -> Self { + TransactionsHandlerPrototype { + protocol_name: Cow::from({ + let mut proto = String::new(); + proto.push_str("/"); + proto.push_str(protocol_id.as_ref()); + proto.push_str("/transactions/1"); + proto + }) + } + } + + /// Returns the configuration of the set to put in the network configuration. + pub fn set_config(&self) -> config::NonDefaultSetConfig { + config::NonDefaultSetConfig { + notifications_protocol: self.protocol_name.clone(), + max_notification_size: MAX_TRANSACTIONS_SIZE, + set_config: config::SetConfig { + in_peers: 0, + out_peers: 0, + reserved_nodes: Vec::new(), + non_reserved_mode: config::NonReservedPeerMode::Deny, + } + } + } + + /// Turns the prototype into the actual handler. Returns a controller that allows controlling + /// the behaviour of the handler while it's running. + /// + /// Important: the transactions handler is initially disabled and doesn't gossip transactions. + /// You must call [`TransactionsHandlerController::set_gossip_enabled`] to enable it. + pub fn build( + self, + service: Arc>, + local_role: config::Role, + transaction_pool: Arc>, + metrics_registry: Option<&Registry>, + ) -> error::Result<(TransactionsHandler, TransactionsHandlerController)> { + let event_stream = service.event_stream("transactions-handler").boxed(); + let (to_handler, from_controller) = mpsc::unbounded(); + let gossip_enabled = Arc::new(AtomicBool::new(false)); + + let handler = TransactionsHandler { + protocol_name: self.protocol_name, + propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), + pending_transactions: FuturesUnordered::new(), + pending_transactions_peers: HashMap::new(), + gossip_enabled: gossip_enabled.clone(), + service, + event_stream, + peers: HashMap::new(), + transaction_pool, + local_role, + from_controller, + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) 
+ } else { + None + }, + }; + + let controller = TransactionsHandlerController { + to_handler, + gossip_enabled, + }; + + Ok((handler, controller)) + } +} + +/// Controls the behaviour of a [`TransactionsHandler`] it is connected to. +pub struct TransactionsHandlerController { + to_handler: mpsc::UnboundedSender>, + gossip_enabled: Arc, +} + +impl TransactionsHandlerController { + /// Controls whether transactions are being gossiped on the network. + pub fn set_gossip_enabled(&mut self, enabled: bool) { + self.gossip_enabled.store(enabled, Ordering::Relaxed); + } + + /// You may call this when new transactions are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transactions(&self) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransactions); + } + + /// You must call when new a transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_transaction(&self, hash: H) { + let _ = self.to_handler.unbounded_send(ToHandler::PropagateTransaction(hash)); + } +} + +enum ToHandler { + PropagateTransactions, + PropagateTransaction(H), +} + +/// Handler for transactions. Call [`TransactionsHandler::run`] to start the processing. +pub struct TransactionsHandler { + protocol_name: Cow<'static, str>, + /// Interval at which we call `propagate_transactions`. + propagate_timeout: Pin + Send>>, + /// Pending transactions verification tasks. + pending_transactions: FuturesUnordered>, + /// As multiple peers can send us the same transaction, we group + /// these peers using the transaction hash while the transaction is + /// imported. This prevents that we import the same transaction + /// multiple times concurrently. 
+ pending_transactions_peers: HashMap>, + /// Network service to use to send messages and manage peers. + service: Arc>, + /// Stream of networking events. + event_stream: Pin + Send>>, + // All connected peers + peers: HashMap>, + transaction_pool: Arc>, + gossip_enabled: Arc, + local_role: config::Role, + from_controller: mpsc::UnboundedReceiver>, + /// Prometheus metrics. + metrics: Option, +} + +/// Peer information +#[derive(Debug)] +struct Peer { + /// Holds a set of transactions known to this peer. + known_transactions: LruHashSet, + role: ObservedRole, +} + +impl TransactionsHandler { + /// Turns the [`TransactionsHandler`] into a future that should run forever and not be + /// interrupted. + pub async fn run(mut self) { + loop { + futures::select!{ + _ = self.propagate_timeout.next().fuse() => { + self.propagate_transactions(); + }, + (tx_hash, result) = self.pending_transactions.select_next_some() => { + if let Some(peers) = self.pending_transactions_peers.remove(&tx_hash) { + peers.into_iter().for_each(|p| self.on_handle_transaction_import(p, result)); + } else { + warn!(target: "sub-libp2p", "Inconsistent state, no peers for pending transaction!"); + } + }, + network_event = self.event_stream.next().fuse() => { + if let Some(network_event) = network_event { + self.handle_network_event(network_event).await; + } else { + // Networking has seemingly closed. Closing as well. 
+ return; + } + }, + message = self.from_controller.select_next_some().fuse() => { + match message { + ToHandler::PropagateTransaction(hash) => self.propagate_transaction(&hash), + ToHandler::PropagateTransactions => self.propagate_transactions(), + } + }, + } + } + } + + async fn handle_network_event(&mut self, event: Event) { + match event { + Event::Dht(_) => {}, + Event::SyncConnected { remote } => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::(); + let result = self.service.add_peers_to_reserved_set( + self.protocol_name.clone(), + iter::once(addr).collect() + ); + if let Err(err) = result { + log::error!(target: "sync", "Add reserved peer failed: {}", err); + } + }, + Event::SyncDisconnected { remote } => { + let addr = iter::once(multiaddr::Protocol::P2p(remote.into())) + .collect::(); + let result = self.service.remove_peers_from_reserved_set( + self.protocol_name.clone(), + iter::once(addr).collect() + ); + if let Err(err) = result { + log::error!(target: "sync", "Removing reserved peer failed: {}", err); + } + }, + + Event::NotificationStreamOpened { remote, protocol, role } if protocol == self.protocol_name => { + let _was_in = self.peers.insert(remote, Peer { + known_transactions: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_TRANSACTIONS) + .expect("Constant is nonzero")), + role, + }); + debug_assert!(_was_in.is_none()); + } + Event::NotificationStreamClosed { remote, protocol } if protocol == self.protocol_name => { + let _peer = self.peers.remove(&remote); + debug_assert!(_peer.is_some()); + } + + Event::NotificationsReceived { remote, messages } => { + for (protocol, message) in messages { + if protocol != self.protocol_name { + continue; + } + + if let Ok(m) = as Decode>::decode( + &mut message.as_ref(), + ) { + self.on_transactions(remote, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + } + }, + + // Not our concern. + Event::NotificationStreamOpened { .. 
} | Event::NotificationStreamClosed { .. } => {} + } + } + + /// Called when peer sends us new transactions + fn on_transactions( + &mut self, + who: PeerId, + transactions: message::Transactions, + ) { + // sending transaction to light node is considered a bad behavior + if matches!(self.local_role, config::Role::Light) { + trace!(target: "sync", "Peer {} is trying to send transactions to the light node", who); + self.service.disconnect_peer(who, self.protocol_name.clone()); + self.service.report_peer(who, rep::UNEXPECTED_TRANSACTIONS); + return; + } + + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + trace!(target: "sync", "{} Ignoring transactions while disabled", who); + return; + } + + trace!(target: "sync", "Received {} transactions from {}", transactions.len(), who); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + for t in transactions { + if self.pending_transactions.len() > MAX_PENDING_TRANSACTIONS { + debug!( + target: "sync", + "Ignoring any further transactions that exceed `MAX_PENDING_TRANSACTIONS`({}) limit", + MAX_PENDING_TRANSACTIONS, + ); + break; + } + + let hash = self.transaction_pool.hash_of(&t); + peer.known_transactions.insert(hash.clone()); + + self.service.report_peer(who.clone(), rep::ANY_TRANSACTION); + + match self.pending_transactions_peers.entry(hash.clone()) { + Entry::Vacant(entry) => { + self.pending_transactions.push(PendingTransaction { + validation: self.transaction_pool.import(t), + tx_hash: hash, + }); + entry.insert(vec![who.clone()]); + }, + Entry::Occupied(mut entry) => { + entry.get_mut().push(who.clone()); + } + } + } + } + } + + fn on_handle_transaction_import(&mut self, who: PeerId, import: TransactionImport) { + match import { + TransactionImport::KnownGood => self.service.report_peer(who, rep::ANY_TRANSACTION_REFUND), + TransactionImport::NewGood => self.service.report_peer(who, rep::GOOD_TRANSACTION), + TransactionImport::Bad => self.service.report_peer(who, 
rep::BAD_TRANSACTION), + TransactionImport::None => {}, + } + } + + /// Propagate one transaction. + pub fn propagate_transaction( + &mut self, + hash: &H, + ) { + debug!(target: "sync", "Propagating transaction [{:?}]", hash); + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + return; + } + if let Some(transaction) = self.transaction_pool.transaction(hash) { + let propagated_to = self.do_propagate_transactions(&[(hash.clone(), transaction)]); + self.transaction_pool.on_broadcasted(propagated_to); + } + } + + fn do_propagate_transactions( + &mut self, + transactions: &[(H, B::Extrinsic)], + ) -> HashMap> { + let mut propagated_to = HashMap::<_, Vec<_>>::new(); + let mut propagated_transactions = 0; + + for (who, peer) in self.peers.iter_mut() { + // never send transactions to the light node + if matches!(peer.role, ObservedRole::Light) { + continue; + } + + let (hashes, to_send): (Vec<_>, Vec<_>) = transactions + .iter() + .filter(|&(ref hash, _)| peer.known_transactions.insert(hash.clone())) + .cloned() + .unzip(); + + propagated_transactions += hashes.len(); + + if !to_send.is_empty() { + for hash in hashes { + propagated_to + .entry(hash) + .or_default() + .push(who.to_base58()); + } + trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + self.service.write_notification( + who.clone(), + self.protocol_name.clone(), + to_send.encode() + ); + } + } + + if let Some(ref metrics) = self.metrics { + metrics.propagated_transactions.inc_by(propagated_transactions as _) + } + + propagated_to + } + + /// Call when we must propagate ready transactions to peers. 
+ fn propagate_transactions(&mut self) { + // Accept transactions only when enabled + if !self.gossip_enabled.load(Ordering::Relaxed) { + return; + } + debug!(target: "sync", "Propagating transactions"); + let transactions = self.transaction_pool.transactions(); + let propagated_to = self.do_propagate_transactions(&transactions); + self.transaction_pool.on_broadcasted(propagated_to); + } +} diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 5a799ad829..009315084c 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -20,7 +20,7 @@ parking_lot = "0.11.1" futures = "0.3.9" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } sp-consensus = { version = "0.9.0", path = "../../../primitives/consensus/common" } sc-consensus = { version = "0.9.0", path = "../../consensus/common" } sc-client-api = { version = "3.0.0", path = "../../api" } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index f523be8575..6e2380b284 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -741,6 +741,7 @@ pub trait TestNetFactory: Sized { let network = NetworkWorker::new(sc_network::config::Params { role: Role::Full, executor: None, + transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), network_config, chain: client.clone(), on_demand: None, @@ -831,6 +832,7 @@ pub trait TestNetFactory: Sized { let network = NetworkWorker::new(sc_network::config::Params { role: Role::Light, executor: None, + transactions_handler_executor: Box::new(|task| { async_std::task::spawn(task); }), network_config, chain: client.clone(), on_demand: None, diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index b82f89cb95..f456efb755 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -132,10 +132,10 @@ impl OffchainWorkers< ) -> 
impl Future { let runtime = self.client.runtime_api(); let at = BlockId::hash(header.hash()); - let has_api_v1 = runtime.has_api_with::, _>( + let has_api_v1 = runtime.has_api_with::, _>( &at, |v| v == 1 ); - let has_api_v2 = runtime.has_api_with::, _>( + let has_api_v2 = runtime.has_api_with::, _>( &at, |v| v == 2 ); let version = match (has_api_v1, has_api_v2) { diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 90f7820017..536ec6b681 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] futures = "0.3.9" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } sp-utils = { version = "3.0.0", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index b2b793a8ee..c2fc807471 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -82,8 +82,6 @@ pub enum NodeRole { LightClient, /// The node is an authority Authority, - /// The node is a sentry - Sentry, } /// The state of the syncing of the node. diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 7cd9805445..4181206fdd 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -24,12 +24,9 @@ mod tests; use std::{sync::Arc, convert::TryInto}; use log::warn; -use sp_blockchain::{Error as ClientError, HeaderBackend}; +use sp_blockchain::HeaderBackend; -use rpc::futures::{ - Sink, Future, - future::result, -}; +use rpc::futures::{Sink, Future, future::result}; use futures::{StreamExt as _, compat::Compat}; use futures::future::{ready, FutureExt, TryFutureExt}; use sc_rpc_api::DenyUnsafe; @@ -93,7 +90,7 @@ impl AuthorApi, BlockHash

> for Author where P: TransactionPool + Sync + Send + 'static, Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: SessionKeys, + Client::Api: SessionKeys, { type Metadata = crate::Metadata; diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 52a4ed1d75..a3d83ae250 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -178,9 +178,8 @@ pub fn new_full( BE: Backend + 'static, Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents - + CallApiAt - + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, + + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: Metadata, { let child_backend = Box::new( self::state_full::FullState::new(client.clone(), subscriptions.clone()) diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 8d93d445b0..a55903484a 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -223,9 +223,9 @@ impl StateBackend for FullState + 'static, Client: ExecutorProvider + StorageProvider + ProofProvider + HeaderBackend + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi + + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, + Client::Api: Metadata, { fn call( &self, @@ -344,17 +344,23 @@ impl StateBackend for FullState) -> FutureResult { Box::new(result( self.block_or_best(block) + .map_err(client_err) .and_then(|block| - self.client.runtime_api().metadata(&BlockId::Hash(block)).map(Into::into) - ) - .map_err(client_err))) + self.client.runtime_api().metadata(&BlockId::Hash(block)) + .map(Into::into) + .map_err(|e| Error::Client(Box::new(e)))) + )) } fn runtime_version(&self, block: Option) -> FutureResult { Box::new(result( self.block_or_best(block) - .and_then(|block| self.client.runtime_version_at(&BlockId::Hash(block))) - .map_err(client_err))) + .map_err(client_err) + 
.and_then(|block| + self.client.runtime_version_at(&BlockId::Hash(block)) + .map_err(|e| Error::Client(Box::new(e))) + ) + )) } fn query_storage( @@ -432,7 +438,7 @@ impl StateBackend for FullState ChildStateBackend for FullState + 'static, Client: ExecutorProvider + StorageProvider + HeaderBackend + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi + + CallApiAt + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, + Client::Api: Metadata, { fn storage_keys( &self, diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 0a9be763b2..c6119695ac 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -92,3 +92,4 @@ grandpa-primitives = { version = "3.0.0", package = "sp-finality-grandpa", path tokio = { version = "0.2.25", default-features = false } async-std = { version = "1.6.5", default-features = false } tracing-subscriber = "0.2.15" +tracing-log = "0.1.1" diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 882a6c4062..103e499a58 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -56,6 +56,7 @@ use sc_telemetry::{ telemetry, ConnectionMessage, TelemetryConnectionNotifier, + TelemetrySpan, SUBSTRATE_INFO, }; use sp_transaction_pool::MaintainedTransactionPool; @@ -308,7 +309,7 @@ pub fn new_full_parts( let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())? + TaskManager::new(config.task_executor.clone(), registry)? }; let executor = NativeExecutor::::new( @@ -377,7 +378,7 @@ pub fn new_light_parts( let keystore_container = KeystoreContainer::new(&config.keystore)?; let task_manager = { let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManager::new(config.task_executor.clone(), registry, config.telemetry_span.clone())? + TaskManager::new(config.task_executor.clone(), registry)? 
}; let executor = NativeExecutor::::new( @@ -491,6 +492,10 @@ pub struct SpawnTasksParams<'a, TBl: BlockT, TCl, TExPool, TRpc, Backend> { pub network_status_sinks: NetworkStatusSinks, /// A Sender for RPC requests. pub system_rpc_tx: TracingUnboundedSender>, + /// Telemetry span. + /// + /// This span needs to be entered **before** calling [`spawn_tasks()`]. + pub telemetry_span: Option, } /// Build a shared offchain workers instance. @@ -542,14 +547,13 @@ pub fn spawn_tasks( TCl: ProvideRuntimeApi + HeaderMetadata + Chain + BlockBackend + BlockIdTo + ProofProvider + HeaderBackend + BlockchainEvents + ExecutorProvider + UsageProvider + - StorageProvider + CallApiAt + + StorageProvider + CallApiAt + Send + 'static, >::Api: sp_api::Metadata + sc_offchain::OffchainWorkerApi + sp_transaction_pool::runtime_api::TaggedTransactionQueue + sp_session::SessionKeys + - sp_api::ApiErrorExt + sp_api::ApiExt, TBl: BlockT, TBackend: 'static + sc_client_api::backend::Backend + Send, @@ -570,6 +574,7 @@ pub fn spawn_tasks( network, network_status_sinks, system_rpc_tx, + telemetry_span, } = params; let chain_info = client.usage_info().chain; @@ -578,10 +583,11 @@ pub fn spawn_tasks( client.clone(), &BlockId::Hash(chain_info.best_hash), config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; + ).map_err(|e| Error::Application(Box::new(e)))?; let telemetry_connection_notifier = init_telemetry( &mut config, + telemetry_span, network.clone(), client.clone(), ); @@ -682,10 +688,11 @@ async fn transaction_notifications( fn init_telemetry>( config: &mut Configuration, + telemetry_span: Option, network: Arc::Hash>>, client: Arc, ) -> Option { - let telemetry_span = config.telemetry_span.clone()?; + let telemetry_span = telemetry_span?; let endpoints = config.telemetry_endpoints.clone()?; let genesis_hash = client.block_hash(Zero::zero()).ok().flatten().unwrap_or_default(); let connection_message = ConnectionMessage { @@ -729,14 +736,14 @@ fn gen_handler( TBl: BlockT, TCl: 
ProvideRuntimeApi + BlockchainEvents + HeaderBackend + HeaderMetadata + ExecutorProvider + - CallApiAt + ProofProvider + + CallApiAt + ProofProvider + StorageProvider + BlockBackend + Send + Sync + 'static, TExPool: MaintainedTransactionPool::Hash> + 'static, TBackend: sc_client_api::backend::Backend + 'static, TRpc: sc_rpc::RpcExtension, >::Api: sp_session::SessionKeys + - sp_api::Metadata, + sp_api::Metadata, { use sc_rpc::{chain, state, author, system, offchain}; @@ -905,6 +912,12 @@ pub fn build_network( spawn_handle.spawn("libp2p-node", fut); })) }, + transactions_handler_executor: { + let spawn_handle = Clone::clone(&spawn_handle); + Box::new(move |fut| { + spawn_handle.spawn("network-transactions-handler", fut); + }) + }, network_config: config.network.clone(), chain: client.clone(), on_demand: on_demand, diff --git a/client/service/src/client/call_executor.rs b/client/service/src/client/call_executor.rs index cc196f67a3..8c7ca645b0 100644 --- a/client/service/src/client/call_executor.rs +++ b/client/service/src/client/call_executor.rs @@ -161,7 +161,7 @@ where Result, Self::Error> ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result + UnwindSafe, >( &self, initialize_block_fn: IB, @@ -226,7 +226,10 @@ where ); // TODO: https://github.com/paritytech/substrate/issues/4455 // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); @@ -245,7 +248,10 @@ where &runtime_code, self.spawn_handle.clone(), ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); - 
state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) + state_machine.execute_using_consensus_failure_handler( + execution_manager, + native_call.map(|n| || (n)().map_err(|e| Box::new(e) as Box<_>)), + ) } }.map_err(Into::into) } diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 8cb0e304cd..263ff7b9c5 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -52,7 +52,7 @@ use sp_state_machine::{ use sc_executor::RuntimeVersion; use sp_consensus::{ Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, - ImportResult, BlockOrigin, ForkChoiceStrategy, RecordProof, + ImportResult, BlockOrigin, ForkChoiceStrategy, }; use sp_blockchain::{ self as blockchain, @@ -66,7 +66,7 @@ use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, CallApiAtParams, }; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider, RecordProof}; use sc_client_api::{ backend::{ self, BlockImportOperation, PrunableStateChangesTrieStorage, @@ -604,7 +604,7 @@ impl Client where new_cache: HashMap>, ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, - >::Api: CoreApi + + >::Api: CoreApi + ApiExt, { let BlockImportParams { @@ -696,7 +696,7 @@ impl Client where import_existing: bool, ) -> sp_blockchain::Result where Self: ProvideRuntimeApi, - >::Api: CoreApi + + >::Api: CoreApi + ApiExt, { let parent_hash = import_headers.post().parent_hash().clone(); @@ -838,7 +838,7 @@ impl Client where ) -> sp_blockchain::Result> where Self: ProvideRuntimeApi, - >::Api: CoreApi + + >::Api: CoreApi + ApiExt, { let parent_hash = import_block.header.parent_hash(); @@ -1272,7 +1272,7 @@ impl BlockBuilderProvider for Client + ProvideRuntimeApi, >::Api: ApiExt> - + BlockBuilderApi, + + BlockBuilderApi, { fn new_block_at>( &self, @@ -1628,18 +1628,17 @@ impl CallApiAt for 
Client where E: CallExecutor + Send + Sync, Block: BlockT, { - type Error = Error; type StateBackend = B::State; fn call_api_at< 'a, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: CoreApi, + NC: FnOnce() -> result::Result + UnwindSafe, + C: CoreApi, >( &self, params: CallApiAtParams<'a, Block, C, NC, B::State>, - ) -> sp_blockchain::Result> { + ) -> Result, sp_api::ApiError> { let core_api = params.core_api; let at = params.at; @@ -1649,7 +1648,9 @@ impl CallApiAt for Client where ); self.executor.contextual_call::<_, fn(_,_) -> _,_,_>( - || core_api.initialize_block(at, &self.prepare_environment_block(at)?), + || core_api + .initialize_block(at, &self.prepare_environment_block(at)?) + .map_err(Error::RuntimeApiError), at, params.function, ¶ms.arguments, @@ -1660,11 +1661,14 @@ impl CallApiAt for Client where params.native_call, params.recorder, Some(extensions), - ) + ).map_err(Into::into) } - fn runtime_version_at(&self, at: &BlockId) -> sp_blockchain::Result { - self.runtime_version_at(at) + fn runtime_version_at( + &self, + at: &BlockId, + ) -> Result { + self.runtime_version_at(at).map_err(Into::into) } } @@ -1676,7 +1680,7 @@ impl sp_consensus::BlockImport for &Client + Send + Sync, Block: BlockT, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi + + as ProvideRuntimeApi>::Api: CoreApi + ApiExt, { type Error = ConsensusError; @@ -1776,7 +1780,7 @@ impl sp_consensus::BlockImport for Client + Send + Sync, Block: BlockT, Self: ProvideRuntimeApi, - >::Api: CoreApi + + >::Api: CoreApi + ApiExt, { type Error = ConsensusError; @@ -1935,7 +1939,7 @@ impl backend::AuxStore for Client E: CallExecutor, Block: BlockT, Self: ProvideRuntimeApi, - >::Api: CoreApi, + >::Api: CoreApi, { /// Insert auxiliary data into key-value store. 
fn insert_aux< @@ -1965,7 +1969,7 @@ impl backend::AuxStore for &Client E: CallExecutor, Block: BlockT, Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi, + as ProvideRuntimeApi>::Api: CoreApi, { fn insert_aux< 'a, diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 1e316c37dc..4f0d426bdb 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -101,10 +101,6 @@ pub struct Configuration { /// This is a handle to a `TelemetryWorker` instance. It is used to initialize the telemetry for /// a substrate node. pub telemetry_handle: Option, - /// Telemetry span. - /// - /// This span is entered for every background task spawned using the TaskManager. - pub telemetry_span: Option, /// The default number of 64KB pages to allocate for Wasm execution pub default_heap_pages: Option, /// Should offchain workers be executed. diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 31c3cea4ef..caa54700da 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -33,13 +33,13 @@ pub type Result = std::result::Result; pub enum Error { #[error(transparent)] Client(#[from] sp_blockchain::Error), - + #[error(transparent)] Io(#[from] std::io::Error), - + #[error(transparent)] Consensus(#[from] sp_consensus::Error), - + #[error(transparent)] Network(#[from] sc_network::error::Error), diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 4880b8cffd..39bad8f2f3 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -309,7 +309,6 @@ async fn build_network_future< Role::Authority { .. } => NodeRole::Authority, Role::Light => NodeRole::LightClient, Role::Full => NodeRole::Full, - Role::Sentry { .. 
} => NodeRole::Sentry, }; let _ = sender.send(vec![node_role]); diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 446cce9527..4fbfa4d77f 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -135,7 +135,6 @@ impl MetricsService { let role_bits = match config.role { Role::Full => 1u64, Role::Light => 2u64, - Role::Sentry { .. } => 3u64, Role::Authority { .. } => 4u64, }; diff --git a/client/service/src/task_manager/mod.rs b/client/service/src/task_manager/mod.rs index 9a1fd15952..c7254f1f89 100644 --- a/client/service/src/task_manager/mod.rs +++ b/client/service/src/task_manager/mod.rs @@ -24,7 +24,7 @@ use log::{debug, error}; use futures::{ Future, FutureExt, StreamExt, future::{select, Either, BoxFuture, join_all, try_join_all, pending}, - sink::SinkExt, task::{Context, Poll}, + sink::SinkExt, }; use prometheus_endpoint::{ exponential_buckets, register, @@ -34,43 +34,11 @@ use prometheus_endpoint::{ use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; use tracing_futures::Instrument; use crate::{config::{TaskExecutor, TaskType, JoinFuture}, Error}; -use sc_telemetry::TelemetrySpan; mod prometheus_future; #[cfg(test)] mod tests; -/// A wrapper around a `[Option]` and a [`Future`]. -/// -/// The telemetry in Substrate uses a span to identify the telemetry context. The span "infrastructure" -/// is provided by the tracing-crate. Now it is possible to have your own spans as well. To support -/// this with the [`TaskManager`] we have this wrapper. This wrapper enters the telemetry span every -/// time the future is polled and polls the inner future. 
So, the inner future can still have its -/// own span attached and we get our telemetry span ;) -struct WithTelemetrySpan { - span: Option, - inner: T, -} - -impl WithTelemetrySpan { - fn new(span: Option, inner: T) -> Self { - Self { - span, - inner, - } - } -} - -impl + Unpin> Future for WithTelemetrySpan { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, ctx: &mut Context) -> Poll { - let span = self.span.clone(); - let _enter = span.as_ref().map(|s| s.enter()); - Pin::new(&mut self.inner).poll(ctx) - } -} - /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { @@ -78,7 +46,6 @@ pub struct SpawnTaskHandle { executor: TaskExecutor, metrics: Option, task_notifier: TracingUnboundedSender, - telemetry_span: Option, } impl SpawnTaskHandle { @@ -155,11 +122,7 @@ impl SpawnTaskHandle { } }; - let future = future.in_current_span().boxed(); - let join_handle = self.executor.spawn( - WithTelemetrySpan::new(self.telemetry_span.clone(), future).boxed(), - task_type, - ); + let join_handle = self.executor.spawn(future.in_current_span().boxed(), task_type); let mut task_notifier = self.task_notifier.clone(); self.executor.spawn( @@ -187,6 +150,7 @@ impl sp_core::traits::SpawnNamed for SpawnTaskHandle { /// task spawned through it fails. The service should be on the receiver side /// and will shut itself down whenever it receives any message, i.e. an /// essential task has failed. 
+#[derive(Clone)] pub struct SpawnEssentialTaskHandle { essential_failed_tx: TracingUnboundedSender<()>, inner: SpawnTaskHandle, @@ -240,6 +204,16 @@ impl SpawnEssentialTaskHandle { } } +impl sp_core::traits::SpawnEssentialNamed for SpawnEssentialTaskHandle { + fn spawn_essential_blocking(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn_blocking(name, future); + } + + fn spawn_essential(&self, name: &'static str, future: BoxFuture<'static, ()>) { + self.spawn(name, future); + } +} + /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { /// A future that resolves when the service has exited, this is useful to @@ -266,17 +240,14 @@ pub struct TaskManager { /// terminates and gracefully shutdown. Also ends the parent `future()` if a child's essential /// task fails. children: Vec, - /// A `TelemetrySpan` used to enter the telemetry span when a task is spawned. - telemetry_span: Option, } impl TaskManager { /// If a Prometheus registry is passed, it will be used to report statistics about the /// service tasks. 
- pub(super) fn new( + pub fn new( executor: TaskExecutor, prometheus_registry: Option<&Registry>, - telemetry_span: Option, ) -> Result { let (signal, on_exit) = exit_future::signal(); @@ -305,7 +276,6 @@ impl TaskManager { task_notifier, completion_future, children: Vec::new(), - telemetry_span, }) } @@ -316,7 +286,6 @@ impl TaskManager { executor: self.executor.clone(), metrics: self.metrics.clone(), task_notifier: self.task_notifier.clone(), - telemetry_span: self.telemetry_span.clone(), } } diff --git a/client/service/src/task_manager/tests.rs b/client/service/src/task_manager/tests.rs index 257f7db198..762348ba9f 100644 --- a/client/service/src/task_manager/tests.rs +++ b/client/service/src/task_manager/tests.rs @@ -20,10 +20,14 @@ use crate::config::TaskExecutor; use crate::task_manager::TaskManager; use futures::{future::FutureExt, pin_mut, select}; use parking_lot::Mutex; -use std::{any::Any, sync::Arc, time::Duration}; -use tracing_subscriber::{layer::{SubscriberExt, Context}, Layer}; -use tracing::{subscriber::Subscriber, span::{Attributes, Id, Record, Span}, event::Event}; use sc_telemetry::TelemetrySpan; +use std::{any::Any, env, sync::Arc, time::Duration}; +use tracing::{event::Event, span::Id, subscriber::Subscriber}; +use tracing_subscriber::{ + layer::{Context, SubscriberExt}, + registry::LookupSpan, + Layer, +}; #[derive(Clone, Debug)] struct DropTester(Arc>); @@ -83,7 +87,7 @@ async fn run_background_task_blocking(duration: Duration, _keep_alive: impl Any) } fn new_task_manager(task_executor: TaskExecutor) -> TaskManager { - TaskManager::new(task_executor, None, None).unwrap() + TaskManager::new(task_executor, None).unwrap() } #[test] @@ -315,92 +319,92 @@ fn ensure_task_manager_future_continues_when_childs_not_essential_task_fails() { } struct TestLayer { - spans_entered: Arc>>, - spans: Arc>>, + spans_found: Arc>>>, } -impl Layer for TestLayer { - fn new_span(&self, attrs: &Attributes<'_>, id: &Id, _ctx: Context) { - 
self.spans.lock().insert(id.clone(), attrs.metadata().name().to_string()); - } - - fn on_record(&self, _: &Id, _: &Record<'_>, _: Context) {} +impl Layer for TestLayer +where + S: Subscriber + for<'a> LookupSpan<'a>, +{ + fn on_event(&self, _: &Event<'_>, ctx: Context) { + let mut spans_found = self.spans_found.lock(); - fn on_event(&self, _: &Event<'_>, _: Context) {} + if spans_found.is_some() { + panic!("on_event called multiple times"); + } - fn on_enter(&self, span: &Id, _: Context) { - let name = self.spans.lock().get(span).unwrap().clone(); - self.spans_entered.lock().push(name); + *spans_found = Some(ctx.scope().map(|x| x.id()).collect()); } - - fn on_exit(&self, _: &Id, _: Context) {} - - fn on_close(&self, _: Id, _: Context) {} } -type TestSubscriber = tracing_subscriber::layer::Layered< - TestLayer, - tracing_subscriber::fmt::Subscriber ->; - fn setup_subscriber() -> ( - TestSubscriber, - Arc>>, + impl Subscriber + for<'a> LookupSpan<'a>, + Arc>>>, ) { - let spans_entered = Arc::new(Mutex::new(Default::default())); + let spans_found = Arc::new(Mutex::new(Default::default())); let layer = TestLayer { - spans: Arc::new(Mutex::new(Default::default())), - spans_entered: spans_entered.clone(), + spans_found: spans_found.clone(), }; let subscriber = tracing_subscriber::fmt().finish().with(layer); - (subscriber, spans_entered) + (subscriber, spans_found) } +/// This is not an actual test, it is used by the `telemetry_span_is_forwarded_to_task` test. +/// The given test will call the test executable and only execute this one test that +/// test that the telemetry span and the prefix span are forwarded correctly. This needs to be done +/// in a separate process to avoid interfering with the other tests. 
#[test] -fn telemetry_span_is_forwarded_to_task() { - let (subscriber, spans_entered) = setup_subscriber(); +fn subprocess_telemetry_span_is_forwarded_to_task() { + if env::var("SUBPROCESS_TEST").is_err() { + return; + } + + let (subscriber, spans_found) = setup_subscriber(); + tracing_log::LogTracer::init().unwrap(); let _sub_guard = tracing::subscriber::set_global_default(subscriber); - let telemetry_span = TelemetrySpan::new(); + let mut runtime = tokio::runtime::Runtime::new().unwrap(); - let span = tracing::info_span!("test"); - let _enter = span.enter(); + let prefix_span = tracing::info_span!("prefix"); + let _enter_prefix_span = prefix_span.enter(); + + let telemetry_span = TelemetrySpan::new(); + let _enter_telemetry_span = telemetry_span.enter(); - let mut runtime = tokio::runtime::Runtime::new().unwrap(); let handle = runtime.handle().clone(); let task_executor = TaskExecutor::from(move |fut, _| handle.spawn(fut).map(|_| ())); - let task_manager = TaskManager::new(task_executor, None, Some(telemetry_span.clone())).unwrap(); + let task_manager = new_task_manager(task_executor); let (sender, receiver) = futures::channel::oneshot::channel(); - let spawn_handle = task_manager.spawn_handle(); - let span = span.clone(); task_manager.spawn_handle().spawn( - "test", + "log-something", async move { - assert_eq!(span, Span::current()); - spawn_handle.spawn("test-nested", async move { - assert_eq!(span, Span::current()); - sender.send(()).unwrap(); - }.boxed()); - }.boxed(), + log::info!("boo!"); + sender.send(()).unwrap(); + } + .boxed(), ); - // We need to leave exit the span here. If tokio is not running with multithreading, this - // would lead to duplicate spans being "active" and forwarding the wrong one. 
- drop(_enter); runtime.block_on(receiver).unwrap(); runtime.block_on(task_manager.clean_shutdown()); - drop(runtime); - - let spans = spans_entered.lock(); - // We entered the telemetry span and the "test" in the future, the nested future and - // the "test" span outside of the future. So, we should have recorded 3 spans. - assert_eq!(5, spans.len()); - - assert_eq!(spans[0], "test"); - assert_eq!(spans[1], telemetry_span.span().metadata().unwrap().name()); - assert_eq!(spans[2], "test"); - assert_eq!(spans[3], telemetry_span.span().metadata().unwrap().name()); - assert_eq!(spans[4], "test"); + + let spans = spans_found.lock().take().unwrap(); + assert_eq!(2, spans.len()); + + assert_eq!(spans[0], prefix_span.id().unwrap()); + assert_eq!(spans[1], telemetry_span.span().id().unwrap()); +} + +#[test] +fn telemetry_span_is_forwarded_to_task() { + let executable = env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("SUBPROCESS_TEST", "1") + .args(&["--nocapture", "subprocess_telemetry_span_is_forwarded_to_task"]) + .output() + .unwrap(); + println!("{}", String::from_utf8(output.stdout).unwrap()); + eprintln!("{}", String::from_utf8(output.stderr).unwrap()); + assert!(output.status.success()); } diff --git a/client/service/test/src/client/light.rs b/client/service/test/src/client/light.rs index b6287741fd..3b20f16387 100644 --- a/client/service/test/src/client/light.rs +++ b/client/service/test/src/client/light.rs @@ -215,7 +215,7 @@ impl CallExecutor for DummyCallExecutor { Result, Self::Error> ) -> Result, Self::Error>, R: Encode + Decode + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, + NC: FnOnce() -> Result + UnwindSafe, >( &self, _initialize_block_fn: IB, diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index 7498289c7b..66b6aae12c 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1327,7 +1327,9 @@ fn 
doesnt_import_blocks_that_revert_finality() { let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() + sp_blockchain::Error::RuntimeApiError( + sp_api::ApiError::Application(Box::new(sp_blockchain::Error::NotInFinalizedChain)) + ).to_string() ); assert_eq!( diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index a42dba84df..6c99f83d4c 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -268,7 +268,6 @@ fn node_config TestNet where let node_config = node_config( self.nodes, &self.chain_spec, - Role::Authority { sentry_nodes: Vec::new() }, + Role::Authority, task_executor.clone(), Some(key), self.base_port, diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index dd2baf9d18..1f73f3cca3 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -24,12 +24,12 @@ //! Canonicalization window tracks a tree of blocks identified by header hash. The in-memory //! overlay allows to get any node that was inserted in any of the blocks within the window. //! The tree is journaled to the backing database and rebuilt on startup. -//! Canonicalization function selects one root from the top of the tree and discards all other roots and -//! their subtrees. +//! Canonicalization function selects one root from the top of the tree and discards all other roots +//! and their subtrees. //! //! # Pruning. -//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until pruning -//! constraints are satisfied. +//! See `RefWindow` for pruning algorithm details. `StateDb` prunes on each canonicalization until +//! pruning constraints are satisfied. 
mod noncanonical; mod pruning; diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 23b6936ff4..0d29fbca6f 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -18,7 +18,7 @@ targets = ["x86_64-unknown-linux-gnu"] parking_lot = "0.11.1" futures = "0.3.9" wasm-timer = "0.2.5" -libp2p = { version = "0.34.0", default-features = false, features = ["dns", "tcp-async-io", "wasm-ext", "websocket"] } +libp2p = { version = "0.35.1", default-features = false, features = ["dns", "tcp-async-io", "wasm-ext", "websocket"] } log = "0.4.8" pin-project = "1.0.4" rand = "0.7.2" diff --git a/client/tracing/proc-macro/Cargo.toml b/client/tracing/proc-macro/Cargo.toml index 7c7e0bd141..ac06dc45a9 100644 --- a/client/tracing/proc-macro/Cargo.toml +++ b/client/tracing/proc-macro/Cargo.toml @@ -17,5 +17,5 @@ proc-macro = true [dependencies] proc-macro-crate = "0.1.4" proc-macro2 = "1.0.6" -quote = { version = "1.0.9", features = ["proc-macro"] } +quote = { version = "1.0.3", features = ["proc-macro"] } syn = { version = "1.0.58", features = ["proc-macro", "full", "extra-traits", "parsing"] } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index fc14a5a0cb..2ebf038844 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -84,7 +84,6 @@ where Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { type Block = Block; type Error = error::Error; @@ -166,14 +165,13 @@ where Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { sp_tracing::within_span!(sp_tracing::Level::TRACE, "validate_transaction"; { let runtime_api = client.runtime_api(); let has_v2 = sp_tracing::within_span! 
{ sp_tracing::Level::TRACE, "check_version"; runtime_api - .has_api_with::, _>(&at, |v| v >= 2) + .has_api_with::, _>(&at, |v| v >= 2) .unwrap_or_default() }; @@ -198,7 +196,6 @@ where Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, Client: Send + Sync + 'static, Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { /// Validates a transaction by calling into the runtime, same as /// `validate_transaction` but blocks the current thread when performing diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index 32525065b9..b6f19ba376 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -360,7 +360,6 @@ where + sp_runtime::traits::BlockIdTo, Client: sc_client_api::ExecutorProvider + Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { /// Create new basic transaction pool for a full node with the provided api. pub fn new_full( @@ -391,7 +390,6 @@ where + sp_runtime::traits::BlockIdTo, Client: Send + Sync + 'static, Client::Api: sp_transaction_pool::runtime_api::TaggedTransactionQueue, - sp_api::ApiErrorFor: Send + std::fmt::Display, { type Block = Block; type Hash = sc_transaction_graph::ExtrinsicHash>; diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index a3837e1677..865c8d56df 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -18,6 +18,11 @@ # are more recognizable on GitHub, you can use them for mentioning unlike an email. # - The latest matching rule, if multiple, takes precedence. 
+# CI +/.maintain/ @paritytech/ci +/.github/ @paritytech/ci +/.gitlab-ci.yml @paritytech/ci + # Block production /client/basic-authorship/ @NikVolf @@ -56,10 +61,3 @@ # Transaction weight stuff /frame/support/src/weights.rs @shawntabrizi - -# Authority discovery -/client/authority-discovery/ @mxinden -/frame/authority-discovery/ @mxinden - -# Prometheus endpoint -/utils/prometheus/ @mxinden diff --git a/docs/Upgrading-2.0-to-3.0.md b/docs/Upgrading-2.0-to-3.0.md index fa61da5959..bc4a15eb15 100644 --- a/docs/Upgrading-2.0-to-3.0.md +++ b/docs/Upgrading-2.0-to-3.0.md @@ -51,16 +51,16 @@ The new version of wasm-builder has gotten a bit smarter and a lot faster (you s The new FRAME 2.0 macros are a lot nicer to use and easier to read. While we were on that change though, we also cleaned up some mainly internal names and traits. The old `macro`'s still work and also produce the new structure, however, when plugging all that together as a Runtime, there's some things we have to adapt now: -##### `::Config for Runtime` becomes `::Config for Runtime` +##### `::Trait for Runtime` becomes `::Config for Runtime` -The most visible and significant change is that the macros no longer generate the `$pallet::Config` but now a much more aptly named `$pallet::Config`. Thus, we need to rename all `::Config for Runtime` into`::Config for Runtime`, e.g. for the `sudo` pallet we must do: +The most visible and significant change is that the macros no longer generate the `$pallet::Trait` but now a much more aptly named `$pallet::Config`. Thus, we need to rename all `::Trait for Runtime` into`::Config for Runtime`, e.g. for the `sudo` pallet we must do: ```diff --impl pallet_sudo::Config for Runtime { +-impl pallet_sudo::Trait for Runtime { +impl pallet_sudo::Config for Runtime { ``` -The same goes for all `` and alike, which simply becomes ``. +The same goes for all `` and alike, which simply becomes ``. 
#### SS58 Prefix is now a runtime param @@ -117,7 +117,7 @@ And update the overall definition for weights on frame and a few related types a - -const_assert!(AvailableBlockRatio::get().deconstruct() >= AVERAGE_ON_INITIALIZE_WEIGHT.deconstruct()); - --impl frame_system::Config for Runtime { +-impl frame_system::Trait for Runtime { + pub RuntimeBlockLength: BlockLength = + BlockLength::max_with_normal_ratio(5 * 1024 * 1024, NORMAL_DISPATCH_RATIO); + pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -150,7 +150,7 @@ And update the overall definition for weights on frame and a few related types a type Origin = Origin; type Call = Call; type Index = Index; -@@ -171,25 +198,19 @@ impl frame_system::Config for Runtime { +@@ -171,25 +198,19 @@ impl frame_system::Trait for Runtime { type Header = generic::Header; type Event = Event; type BlockHashCount = BlockHashCount; @@ -258,7 +258,7 @@ The pallet has been moved to a new system in which the exact amount of deposit f pub const DesiredMembers: u32 = 13; pub const DesiredRunnersUp: u32 = 7; -@@ -559,16 +600,16 @@ impl pallet_elections_phragmen::Config for Runtime { +@@ -559,16 +600,16 @@ impl pallet_elections_phragmen::Trait for Runtime { // NOTE: this implies that council's genesis members cannot be set directly and must come from // this module. type InitializeMembers = Council; @@ -283,7 +283,7 @@ The pallet has been moved to a new system in which the exact amount of deposit f Democracy brings three new settings with this release, all to allow for better influx- and spam-control. Namely these allow to specify the maximum number of proposals at a time, who can blacklist and who can cancel proposals. 
This diff acts as a good starting point: ```diff= -@@ -508,6 +537,14 @@ impl pallet_democracy::Config for Runtime { +@@ -508,6 +537,14 @@ impl pallet_democracy::Trait for Runtime { type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; @@ -298,7 +298,7 @@ Democracy brings three new settings with this release, all to allow for better i // Any single technical committee member may veto a coming council proposal, however they can // only do it once and it lasts only for the cooloff period. type VetoOrigin = pallet_collective::EnsureMember; -@@ -518,7 +555,8 @@ impl pallet_democracy::Config for Runtime { +@@ -518,7 +555,8 @@ impl pallet_democracy::Trait for Runtime { type Scheduler = Scheduler; type PalletsOrigin = OriginCaller; type MaxVotes = MaxVotes; diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 67fa0af3d6..b62e8bac8c 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit frame-support = { version = "3.0.0", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. 
frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } @@ -46,3 +46,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/assets/README.md b/frame/assets/README.md index 941e82103f..44c4eedc31 100644 --- a/frame/assets/README.md +++ b/frame/assets/README.md @@ -11,7 +11,7 @@ with a fixed supply, including: * Asset Transfer * Asset Destruction -To use it in your runtime, you need to implement the assets [`assets::Config`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). +To use it in your runtime, you need to implement the assets [`assets::Trait`](https://docs.rs/pallet-assets/latest/pallet_assets/trait.Trait.html). The supported dispatchable functions are documented in the [`assets::Call`](https://docs.rs/pallet-assets/latest/pallet_assets/enum.Call.html) enum. diff --git a/frame/assets/src/benchmarking.rs b/frame/assets/src/benchmarking.rs index 986eedfb6a..86a0c48e79 100644 --- a/frame/assets/src/benchmarking.rs +++ b/frame/assets/src/benchmarking.rs @@ -20,7 +20,7 @@ use super::*; use sp_runtime::traits::Bounded; use frame_system::RawOrigin as SystemOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use frame_support::traits::Get; use crate::Module as Assets; @@ -233,120 +233,4 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - - #[test] - fn create() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_create::().is_ok()); - }); - } - - #[test] - fn force_create() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_force_create::().is_ok()); - }); - } - - #[test] - fn destroy() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_destroy::().is_ok()); - }); - } - - #[test] - fn force_destroy() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_force_destroy::().is_ok()); - }); - } - - #[test] - fn mint() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_mint::().is_ok()); - }); - } - - #[test] - fn burn() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_burn::().is_ok()); - }); - } - - #[test] - fn transfer() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_transfer::().is_ok()); - }); - } - - #[test] - fn force_transfer() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_force_transfer::().is_ok()); - }); - } - - #[test] - fn freeze() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_freeze::().is_ok()); - }); - } - - #[test] - fn thaw() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_thaw::().is_ok()); - }); - } - - #[test] - fn freeze_asset() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_freeze_asset::().is_ok()); - }); - } - - #[test] - fn thaw_asset() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_thaw_asset::().is_ok()); - }); - } - - #[test] - fn transfer_ownership() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_transfer_ownership::().is_ok()); - }); - } - - #[test] - fn set_team() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_set_team::().is_ok()); - }); - } - - #[test] - fn set_max_zombies() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_set_max_zombies::().is_ok()); - }); - } - - #[test] - fn 
set_metadata() { - new_test_ext().execute_with(|| { - assert!(test_benchmark_set_metadata::().is_ok()); - }); - } -} +impl_benchmark_test_suite!(Assets, crate::tests::new_test_ext(), crate::tests::Test); diff --git a/frame/atomic-swap/Cargo.toml b/frame/atomic-swap/Cargo.toml index 99ce41f399..a3b62d65e5 100644 --- a/frame/atomic-swap/Cargo.toml +++ b/frame/atomic-swap/Cargo.toml @@ -37,3 +37,4 @@ std = [ "sp-io/std", "sp-core/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 80ea164cf0..5f299cfbe0 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -45,3 +45,4 @@ std = [ "sp-timestamp/std", "pallet-timestamp/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index 43a09b01fd..85844cf716 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -41,3 +41,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index ab48fbec8f..3bbbe9749c 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -38,3 +38,4 @@ std = [ "frame-system/std", "sp-authorship/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 9bde935062..2d7467d82e 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = 
"../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } @@ -24,21 +24,21 @@ serde = { version = "1.0.101", optional = true } sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } sp-consensus-babe = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/babe" } sp-consensus-vrf = { version = "0.9.0", default-features = false, path = "../../primitives/consensus/vrf" } -sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-session = { version = "3.0.0", default-features = false, path = "../../primitives/session" } sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-timestamp = { version = "3.0.0", default-features = false, path = "../../primitives/timestamp" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } pallet-balances = { version = "3.0.0", path = "../balances" } pallet-offences = { version = "3.0.0", path = "../offences" } pallet-staking = { version = "3.0.0", path = "../staking" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-election-providers = { version = "3.0.0", path = "../../primitives/election-providers" } [features] default = ["std"] @@ -54,12 +54,13 @@ std = [ "sp-application-crypto/std", "sp-consensus-babe/std", "sp-consensus-vrf/std", - "sp-inherents/std", "sp-io/std", 
"sp-runtime/std", "sp-session/std", "sp-staking/std", "sp-std/std", "sp-timestamp/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/babe/src/default_weights.rs b/frame/babe/src/default_weights.rs index c7c87b5837..f16f589a77 100644 --- a/frame/babe/src/default_weights.rs +++ b/frame/babe/src/default_weights.rs @@ -23,6 +23,10 @@ use frame_support::weights::{ }; impl crate::WeightInfo for () { + fn plan_config_change() -> Weight { + DbWeight::get().writes(1) + } + fn report_equivocation(validator_count: u32) -> Weight { // we take the validator set count from the membership proof to // calculate the weight but we set a floor of 100 validators. diff --git a/frame/babe/src/equivocation.rs b/frame/babe/src/equivocation.rs index b7275d0473..14ba0f16cb 100644 --- a/frame/babe/src/equivocation.rs +++ b/frame/babe/src/equivocation.rs @@ -35,10 +35,7 @@ //! definition. //! -use frame_support::{ - debug, - traits::{Get, KeyOwnerProofSystem}, -}; +use frame_support::traits::{Get, KeyOwnerProofSystem}; use sp_consensus_babe::{EquivocationProof, Slot}; use sp_runtime::transaction_validity::{ InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, @@ -163,8 +160,15 @@ where let call = Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof); match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => debug::info!("Submitted BABE equivocation report."), - Err(e) => debug::error!("Error submitting equivocation report: {:?}", e), + Ok(()) => log::info!( + target: "runtime::babe", + "Submitted BABE equivocation report.", + ), + Err(e) => log::error!( + target: "runtime::babe", + "Error submitting equivocation report: {:?}", + e, + ), } Ok(()) @@ -186,9 +190,9 @@ impl frame_support::unsigned::ValidateUnsigned for Module { match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } _ => { - debug::warn!( - target: 
"babe", - "rejecting unsigned report equivocation transaction because it is not local/in-block." + log::warn!( + target: "runtime::babe", + "rejecting unsigned report equivocation transaction because it is not local/in-block.", ); return InvalidTransaction::Call.into(); diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 0afa0e1d09..9fdb080574 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -29,24 +29,22 @@ use frame_support::{ weights::{Pays, Weight}, Parameter, }; -use frame_system::{ensure_none, ensure_signed}; +use frame_system::{ensure_none, ensure_root, ensure_signed}; use sp_application_crypto::Public; use sp_runtime::{ generic::DigestItem, - traits::{Hash, IsMember, One, SaturatedConversion, Saturating}, + traits::{Hash, IsMember, One, SaturatedConversion, Saturating, Zero}, ConsensusEngineId, KeyTypeId, }; use sp_session::{GetSessionNumber, GetValidatorCount}; -use sp_std::{prelude::*, result}; +use sp_std::prelude::*; use sp_timestamp::OnTimestampSet; use sp_consensus_babe::{ digests::{NextConfigDescriptor, NextEpochDescriptor, PreDigest}, - inherents::{BabeInherentData, INHERENT_IDENTIFIER}, BabeAuthorityWeight, ConsensusLog, Epoch, EquivocationProof, Slot, BABE_ENGINE_ID, }; use sp_consensus_vrf::schnorrkel; -use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; @@ -110,6 +108,7 @@ pub trait Config: pallet_timestamp::Config { } pub trait WeightInfo { + fn plan_config_change() -> Weight; fn report_equivocation(validator_count: u32) -> Weight; } @@ -316,6 +315,19 @@ decl_module! { key_owner_proof, ) } + + /// Plan an epoch config change. The epoch config change is recorded and will be enacted on + /// the next call to `enact_epoch_change`. The config will be activated one epoch after. 
+ /// Multiple calls to this method will replace any existing planned config change that had + /// not been enacted yet. + #[weight = ::WeightInfo::plan_config_change()] + fn plan_config_change( + origin, + config: NextConfigDescriptor, + ) { + ensure_root(origin)?; + NextEpochConfig::put(config); + } } } @@ -415,12 +427,14 @@ impl Module { /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot /// number will grow while the block number will not. Hence, the result can be interpreted as an /// upper bound. - // -------------- IMPORTANT NOTE -------------- + // + // ## IMPORTANT NOTE + // // This implementation is linked to how [`should_epoch_change`] is working. This might need to // be updated accordingly, if the underlying mechanics of slot and epochs change. // - // WEIGHT NOTE: This function is tied to the weight of `EstimateNextSessionRotation`. If you update - // this function, you must also update the corresponding weight. + // WEIGHT NOTE: This function is tied to the weight of `EstimateNextSessionRotation`. If you + // update this function, you must also update the corresponding weight. pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); next_slot @@ -432,15 +446,6 @@ impl Module { }) } - /// Plan an epoch config change. The epoch config change is recorded and will be enacted on the - /// next call to `enact_epoch_change`. The config will be activated one epoch after. Multiple calls to this - /// method will replace any existing planned config change that had not been enacted yet. - pub fn plan_config_change( - config: NextConfigDescriptor, - ) { - NextEpochConfig::put(config); - } - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, /// and the caller is the only caller of this function. 
/// @@ -744,10 +749,22 @@ impl Module { } impl OnTimestampSet for Module { - fn on_timestamp_set(_moment: T::Moment) { } + fn on_timestamp_set(moment: T::Moment) { + let slot_duration = Self::slot_duration(); + assert!(!slot_duration.is_zero(), "Babe slot duration cannot be zero."); + + let timestamp_slot = moment / slot_duration; + let timestamp_slot = Slot::from(timestamp_slot.saturated_into::()); + + assert!(CurrentSlot::get() == timestamp_slot, "Timestamp slot must match `CurrentSlot`"); + } } impl frame_support::traits::EstimateNextSessionRotation for Module { + fn average_session_length() -> T::BlockNumber { + T::EpochDuration::get().saturated_into() + } + fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { Self::next_expected_epoch_change(now) } @@ -818,29 +835,3 @@ fn compute_randomness( sp_io::hashing::blake2_256(&s) } - -impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_: &InherentData) -> Option { - None - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; - - let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); - let seal_slot = data.babe_inherent_data()?; - - if timestamp_based_slot == *seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) - } - } -} diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index e3d2eb19ef..412f13f6a2 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -37,6 +37,7 @@ use sp_consensus_babe::{AuthorityId, AuthorityPair, Slot}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_staking::SessionIndex; use pallet_staking::EraIndex; +use 
sp_election_providers::onchain; use pallet_session::historical as pallet_session_historical; type DummyValidatorId = u64; @@ -54,7 +55,7 @@ frame_support::construct_runtime!( Balances: pallet_balances::{Module, Call, Storage, Config, Event}, Historical: pallet_session_historical::{Module}, Offences: pallet_offences::{Module, Call, Storage, Event}, - Babe: pallet_babe::{Module, Call, Storage, Config, Inherent, ValidateUnsigned}, + Babe: pallet_babe::{Module, Call, Storage, Config, ValidateUnsigned}, Staking: pallet_staking::{Module, Call, Storage, Config, Event}, Session: pallet_session::{Module, Call, Storage, Event, Config}, Timestamp: pallet_timestamp::{Module, Call, Storage, Inherent}, @@ -183,6 +184,13 @@ parameter_types! { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } +impl onchain::Config for Test { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Accuracy = Perbill; + type DataProvider = Staking; +} + impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -205,6 +213,7 @@ impl pallet_staking::Config for Test { type MaxIterations = (); type MinSolutionScoreBump = (); type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 8576389af3..c7261d7f1f 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -231,10 +231,13 @@ fn can_enact_next_config() { assert_eq!(Babe::epoch_index(), 0); go_to_block(2, 7); - Babe::plan_config_change(NextConfigDescriptor::V1 { - c: (1, 4), - allowed_slots: AllowedSlots::PrimarySlots, - }); + Babe::plan_config_change( + Origin::root(), + NextConfigDescriptor::V1 { + c: (1, 4), + allowed_slots: AllowedSlots::PrimarySlots, + }, + ).unwrap(); progress_to_block(4); Babe::on_finalize(9); @@ -252,6 +255,39 @@ fn can_enact_next_config() { }); } +#[test] +fn 
only_root_can_enact_config_change() { + use sp_runtime::DispatchError; + + new_test_ext(1).execute_with(|| { + let next_config = NextConfigDescriptor::V1 { + c: (1, 4), + allowed_slots: AllowedSlots::PrimarySlots, + }; + + let res = Babe::plan_config_change( + Origin::none(), + next_config.clone(), + ); + + assert_eq!(res, Err(DispatchError::BadOrigin)); + + let res = Babe::plan_config_change( + Origin::signed(1), + next_config.clone(), + ); + + assert_eq!(res, Err(DispatchError::BadOrigin)); + + let res = Babe::plan_config_change( + Origin::root(), + next_config, + ); + + assert!(res.is_ok()); + }); +} + #[test] fn can_fetch_current_and_next_epoch_data() { new_test_ext(5).execute_with(|| { diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 39b7fda77f..22c4ef0976 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -17,9 +17,10 @@ serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-io = { version = "3.0.0", path = "../../primitives/io" } @@ -36,5 +37,7 @@ std = [ "frame-benchmarking/std", "frame-support/std", "frame-system/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 53cf273d85..c7cb67403d 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks_instance_pallet, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Module as Balances; @@ -32,7 +32,7 @@ const SEED: u32 = 0; const ED_MULTIPLIER: u32 = 10; -benchmarks! { +benchmarks_instance_pallet! { // Benchmark `transfer` extrinsic with the worst possible conditions: // * Transfer will kill the sender account. // * Transfer will create the recipient account. @@ -42,7 +42,7 @@ benchmarks! { // Give some multiple of the existential deposit + creation fee + transfer fee let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); @@ -50,8 +50,8 @@ benchmarks! { let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert_eq!(Balances::::free_balance(&caller), Zero::zero()); - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + assert_eq!(Balances::::free_balance(&caller), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } // Benchmark `transfer` with the best possible condition: @@ -63,16 +63,16 @@ benchmarks! 
{ let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds for transfer (their account will never reasonably be killed). - let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); // Give the recipient account existential deposit (thus their account already exists). let existential_deposit = T::ExistentialDeposit::get(); - let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); + let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); }: transfer(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert!(!Balances::::free_balance(&caller).is_zero()); - assert!(!Balances::::free_balance(&recipient).is_zero()); + assert!(!Balances::::free_balance(&caller).is_zero()); + assert!(!Balances::::free_balance(&recipient).is_zero()); } // Benchmark `transfer_keep_alive` with the worst possible condition: @@ -83,13 +83,13 @@ benchmarks! { let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); // Give the sender account max funds, thus a transfer will not kill account. 
- let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); let existential_deposit = T::ExistentialDeposit::get(); let transfer_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); }: _(RawOrigin::Signed(caller.clone()), recipient_lookup, transfer_amount) verify { - assert!(!Balances::::free_balance(&caller).is_zero()); - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + assert!(!Balances::::free_balance(&caller).is_zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } // Benchmark `set_balance` coming from ROOT account. This always creates an account. @@ -100,11 +100,11 @@ benchmarks! { // Give the user some initial balance. let existential_deposit = T::ExistentialDeposit::get(); let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); }: set_balance(RawOrigin::Root, user_lookup, balance_amount, balance_amount) verify { - assert_eq!(Balances::::free_balance(&user), balance_amount); - assert_eq!(Balances::::reserved_balance(&user), balance_amount); + assert_eq!(Balances::::free_balance(&user), balance_amount); + assert_eq!(Balances::::reserved_balance(&user), balance_amount); } // Benchmark `set_balance` coming from ROOT account. This always kills an account. @@ -115,10 +115,10 @@ benchmarks! { // Give the user some initial balance. 
let existential_deposit = T::ExistentialDeposit::get(); let balance_amount = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); }: set_balance(RawOrigin::Root, user_lookup, Zero::zero(), Zero::zero()) verify { - assert!(Balances::::free_balance(&user).is_zero()); + assert!(Balances::::free_balance(&user).is_zero()); } // Benchmark `force_transfer` extrinsic with the worst possible conditions: @@ -131,7 +131,7 @@ benchmarks! { // Give some multiple of the existential deposit + creation fee + transfer fee let balance = existential_deposit.saturating_mul(ED_MULTIPLIER.into()); - let _ = as Currency<_>>::make_free_balance_be(&source, balance); + let _ = as Currency<_>>::make_free_balance_be(&source, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. let recipient: T::AccountId = account("recipient", 0, SEED); @@ -139,56 +139,13 @@ benchmarks! 
{ let transfer_amount = existential_deposit.saturating_mul((ED_MULTIPLIER - 1).into()) + 1u32.into(); }: force_transfer(RawOrigin::Root, source_lookup, recipient_lookup, transfer_amount) verify { - assert_eq!(Balances::::free_balance(&source), Zero::zero()); - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + assert_eq!(Balances::::free_balance(&source), Zero::zero()); + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests_composite::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn transfer() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer::()); - }); - } - - #[test] - fn transfer_best_case() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer_best_case::()); - }); - } - - #[test] - fn transfer_keep_alive() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer_keep_alive::()); - }); - } - - #[test] - fn transfer_set_balance_creating() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance_creating::()); - }); - } - - #[test] - fn transfer_set_balance_killing() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance_killing::()); - }); - } - - #[test] - fn force_transfer() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_force_transfer::()); - }); - } -} +impl_benchmark_test_suite!( + Balances, + crate::tests_composite::ExtBuilder::default().build(), + crate::tests_composite::Test, +); diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index e3eb9478b6..cc7b6351c2 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -151,6 +151,7 @@ mod tests; mod tests_local; mod tests_composite; +mod tests_reentrancy; mod benchmarking; pub mod weights; @@ -618,6 +619,17 @@ impl Default for Releases { } } +pub 
struct DustCleaner, I: 'static = ()>(Option<(T::AccountId, NegativeImbalance)>); + +impl, I: 'static> Drop for DustCleaner { + fn drop(&mut self) { + if let Some((who, dust)) = self.0.take() { + Module::::deposit_event(Event::DustLost(who, dust.peek())); + T::DustRemoval::on_unbalanced(dust); + } + } +} + impl, I: 'static> Pallet { /// Get the free balance of an account. pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { @@ -646,25 +658,27 @@ impl, I: 'static> Pallet { T::AccountStore::get(&who) } - /// Places the `free` and `reserved` parts of `new` into `account`. Also does any steps needed - /// after mutating an account. This includes DustRemoval unbalancing, in the case than the `new` - /// account's total balance is non-zero but below ED. + /// Handles any steps needed after mutating an account. + /// + /// This includes DustRemoval unbalancing, in the case than the `new` account's total balance + /// is non-zero but below ED. /// - /// Returns the final free balance, iff the account was previously of total balance zero, known - /// as its "endowment". + /// Returns two values: + /// - `Some` containing the the `new` account, iff the account has sufficient balance. + /// - `Some` containing the dust to be dropped, iff some dust should be dropped. 
fn post_mutation( - who: &T::AccountId, + _who: &T::AccountId, new: AccountData, - ) -> Option> { + ) -> (Option>, Option>) { let total = new.total(); if total < T::ExistentialDeposit::get() { - if !total.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); - Self::deposit_event(Event::DustLost(who.clone(), total)); + if total.is_zero() { + (None, None) + } else { + (None, Some(NegativeImbalance::new(total))) } - None } else { - Some(new) + (Some(new), None) } } @@ -696,26 +710,54 @@ impl, I: 'static> Pallet { who: &T::AccountId, f: impl FnOnce(&mut AccountData, bool) -> Result ) -> Result { - T::AccountStore::try_mutate_exists(who, |maybe_account| { + Self::try_mutate_account_with_dust(who, f) + .map(|(result, dust_cleaner)| { + drop(dust_cleaner); + result + }) + } + + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the + /// result of `f` is an `Err`. + /// + /// It returns both the result from the closure, and an optional `DustCleaner` instance which + /// should be dropped once it is known that all nested mutates that could affect storage items + /// what the dust handler touches have completed. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. 
+ fn try_mutate_account_with_dust>( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData, bool) -> Result + ) -> Result<(R, DustCleaner), E> { + let result = T::AccountStore::try_mutate_exists(who, |maybe_account| { let is_new = maybe_account.is_none(); let mut account = maybe_account.take().unwrap_or_default(); f(&mut account, is_new).map(move |result| { let maybe_endowed = if is_new { Some(account.free) } else { None }; - *maybe_account = Self::post_mutation(who, account); - (maybe_endowed, result) + let maybe_account_maybe_dust = Self::post_mutation(who, account); + *maybe_account = maybe_account_maybe_dust.0; + (maybe_endowed, maybe_account_maybe_dust.1, result) }) - }).map(|(maybe_endowed, result)| { + }); + result.map(|(maybe_endowed, maybe_dust, result)| { if let Some(endowed) = maybe_endowed { Self::deposit_event(Event::Endowed(who.clone(), endowed)); } - result + let dust_cleaner = DustCleaner(maybe_dust.map(|dust| (who.clone(), dust))); + (result, dust_cleaner) }) } /// Update the account entry for `who`, given the locks. fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { if locks.len() as u32 > T::MaxLocks::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::balances", "Warning: A user has more currency locks than expected. \ A runtime configuration adjustment may be needed." ); @@ -749,7 +791,8 @@ impl, I: 'static> Pallet { // No providers for the locks. This is impossible under normal circumstances // since the funds that are under the lock will themselves be stored in the // account and therefore will need a reference. - frame_support::debug::warn!( + log::warn!( + target: "runtime::balances", "Warning: Attempt to introduce lock consumer reference, yet no providers. \ This is unexpected but should be safe." ); @@ -772,7 +815,7 @@ mod imbalances { /// funds have been created without any equal and opposite accounting. 
#[must_use] #[derive(RuntimeDebug, PartialEq, Eq)] - pub struct PositiveImbalance, I: 'static>(T::Balance); + pub struct PositiveImbalance, I: 'static = ()>(T::Balance); impl, I: 'static> PositiveImbalance { /// Create a new positive imbalance from a balance. @@ -785,7 +828,7 @@ mod imbalances { /// funds have been destroyed without any equal and opposite accounting. #[must_use] #[derive(RuntimeDebug, PartialEq, Eq)] - pub struct NegativeImbalance, I: 'static>(T::Balance); + pub struct NegativeImbalance, I: 'static = ()>(T::Balance); impl, I: 'static> NegativeImbalance { /// Create a new negative imbalance from a balance. @@ -1001,34 +1044,40 @@ impl, I: 'static> Currency for Pallet where ) -> DispatchResult { if value.is_zero() || transactor == dest { return Ok(()) } - Self::try_mutate_account(dest, |to_account, _| -> DispatchResult { - Self::try_mutate_account(transactor, |from_account, _| -> DispatchResult { - from_account.free = from_account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - - // NOTE: total stake being stored in the same type means that this could never overflow - // but better to be safe than sorry. - to_account.free = to_account.free.checked_add(&value).ok_or(Error::::Overflow)?; - - let ed = T::ExistentialDeposit::get(); - ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); - - Self::ensure_can_withdraw( + Self::try_mutate_account_with_dust( + dest, + |to_account, _| -> Result, DispatchError> { + Self::try_mutate_account_with_dust( transactor, - value, - WithdrawReasons::TRANSFER, - from_account.free, - ).map_err(|_| Error::::LiquidityRestrictions)?; - - // TODO: This is over-conservative. There may now be other providers, and this pallet - // may not even be a provider. 
- let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = allow_death && !system::Pallet::::is_provider_required(transactor); - ensure!(allow_death || from_account.free >= ed, Error::::KeepAlive); - - Ok(()) - }) - })?; + |from_account, _| -> DispatchResult { + from_account.free = from_account.free.checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + + // NOTE: total stake being stored in the same type means that this could never overflow + // but better to be safe than sorry. + to_account.free = to_account.free.checked_add(&value).ok_or(Error::::Overflow)?; + + let ed = T::ExistentialDeposit::get(); + ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); + + Self::ensure_can_withdraw( + transactor, + value, + WithdrawReasons::TRANSFER, + from_account.free, + ).map_err(|_| Error::::LiquidityRestrictions)?; + + // TODO: This is over-conservative. There may now be other providers, and this pallet + // may not even be a provider. + let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; + let allow_death = allow_death && !system::Pallet::::is_provider_required(transactor); + ensure!(allow_death || from_account.total() >= ed, Error::::KeepAlive); + + Ok(()) + } + ).map(|(_, maybe_dust_cleaner)| maybe_dust_cleaner) + } + )?; // Emit transfer event. 
Self::deposit_event(Event::Transfer(transactor.clone(), dest.clone(), value)); @@ -1322,18 +1371,28 @@ impl, I: 'static> ReservableCurrency for Pallet }; } - let actual = Self::try_mutate_account(beneficiary, |to_account, is_new|-> Result { - ensure!(!is_new, Error::::DeadAccount); - Self::try_mutate_account(slashed, |from_account, _| -> Result { - let actual = cmp::min(from_account.reserved, value); - match status { - Status::Free => to_account.free = to_account.free.checked_add(&actual).ok_or(Error::::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved.checked_add(&actual).ok_or(Error::::Overflow)?, - } - from_account.reserved -= actual; - Ok(actual) - }) - })?; + let ((actual, _maybe_one_dust), _maybe_other_dust) = Self::try_mutate_account_with_dust( + beneficiary, + |to_account, is_new| -> Result<(Self::Balance, DustCleaner), DispatchError> { + ensure!(!is_new, Error::::DeadAccount); + Self::try_mutate_account_with_dust( + slashed, + |from_account, _| -> Result { + let actual = cmp::min(from_account.reserved, value); + match status { + Status::Free => to_account.free = to_account.free + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + Status::Reserved => to_account.reserved = to_account.reserved + .checked_add(&actual) + .ok_or(Error::::Overflow)?, + } + from_account.reserved -= actual; + Ok(actual) + } + ) + } + )?; Self::deposit_event(Event::ReserveRepatriated(slashed.clone(), beneficiary.clone(), actual, status)); Ok(value - actual) diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index c860a0364d..776cda140e 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -732,8 +732,8 @@ macro_rules! decl_tests { assert_eq!( events(), [ + Event::frame_system(system::Event::KilledAccount(1)), Event::pallet_balances(crate::Event::DustLost(1, 99)), - Event::frame_system(system::Event::KilledAccount(1)) ] ); }); @@ -961,5 +961,18 @@ macro_rules! 
decl_tests { assert_storage_noop!(assert_eq!(Balances::slash(&1337, 42).1, 42)); }); } + + #[test] + fn transfer_keep_alive_all_free_succeed() { + <$ext_builder>::default() + .existential_deposit(100) + .build() + .execute_with(|| { + assert_ok!(Balances::set_balance(Origin::root(), 1, 100, 100)); + assert_ok!(Balances::transfer_keep_alive(Some(1).into(), 2, 100)); + assert_eq!(Balances::total_balance(&1), 100); + assert_eq!(Balances::total_balance(&2), 100); + }); + } } } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index ffefc6c4d8..02088e88b9 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -184,8 +184,8 @@ fn emit_events_with_no_existential_deposit_suicide_with_dust() { assert_eq!( events(), [ + Event::frame_system(system::Event::KilledAccount(1)), Event::pallet_balances(crate::Event::DustLost(1, 1)), - Event::frame_system(system::Event::KilledAccount(1)) ] ); }); diff --git a/frame/balances/src/tests_reentrancy.rs b/frame/balances/src/tests_reentrancy.rs new file mode 100644 index 0000000000..020c514b63 --- /dev/null +++ b/frame/balances/src/tests_reentrancy.rs @@ -0,0 +1,310 @@ +// This file is part of Substrate. + +// Copyright (C) 2018-2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test setup for potential reentracy and lost updates of nested mutations. 
+ +#![cfg(test)] + +use sp_runtime::{ + traits::IdentityLookup, + testing::Header, +}; +use sp_core::H256; +use sp_io; +use frame_support::parameter_types; +use frame_support::traits::StorageMapShim; +use frame_support::weights::{IdentityFee}; +use crate::{ + self as pallet_balances, + Module, Config, +}; +use pallet_transaction_payment::CurrencyAdapter; + +use crate::*; +use frame_support::{ + assert_ok, + traits::{ + Currency, ReservableCurrency, + } +}; +use frame_system::RawOrigin; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +fn last_event() -> Event { + system::Module::::events().pop().expect("Event expected").event +} + +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Storage, Config, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub BlockWeights: frame_system::limits::BlockWeights = + frame_system::limits::BlockWeights::simple_max(1024); + pub static ExistentialDeposit: u64 = 0; +} +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = BlockWeights; + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = (); +} +parameter_types! 
{ + pub const TransactionByteFee: u64 = 1; +} +impl pallet_transaction_payment::Config for Test { + type OnChargeTransaction = CurrencyAdapter, ()>; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = IdentityFee; + type FeeMultiplierUpdate = (); +} + +pub struct OnDustRemoval; +impl OnUnbalanced> for OnDustRemoval { + fn on_nonzero_unbalanced(amount: NegativeImbalance) { + let _ = Balances::resolve_into_existing(&1, amount); + } +} +parameter_types! { + pub const MaxLocks: u32 = 50; +} +impl Config for Test { + type Balance = u64; + type DustRemoval = OnDustRemoval; + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = StorageMapShim< + super::Account, + system::Provider, + u64, + super::AccountData, + >; + type MaxLocks = MaxLocks; + type WeightInfo = (); +} + +pub struct ExtBuilder { + existential_deposit: u64, +} +impl Default for ExtBuilder { + fn default() -> Self { + Self { + existential_deposit: 1, + } + } +} +impl ExtBuilder { + + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + + pub fn set_associated_consts(&self) { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + } + + pub fn build(self) -> sp_io::TestExternalities { + self.set_associated_consts(); + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![], + }.assimilate_storage(&mut t).unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } +} + +#[test] +fn transfer_dust_removal_tst1_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .build() + .execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // In this 
transaction, account 2 free balance + // drops below existential balance + // and dust balance is removed from account 2 + assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 3, 450)); + + // As expected dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // As expected beneficiary account 3 + // received the transfered fund. + assert_eq!(Balances::free_balance(&3), 450); + + // Dust balance is deposited to account 1 + // during the process of dust removal. + assert_eq!(Balances::free_balance(&1), 1050); + + // Verify the events + // Number of events expected is 8 + assert_eq!(System::events().len(), 11); + + assert!( + System::events().iter().any( + |er| + er.event == Event::pallet_balances( + crate::Event::Transfer(2, 3, 450), + ), + ), + ); + + assert!( + System::events().iter().any( + |er| + er.event == Event::pallet_balances( + crate::Event::DustLost(2, 50) + ), + ), + ); + } + ); +} + +#[test] +fn transfer_dust_removal_tst2_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .build() + .execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // In this transaction, account 2 free balance + // drops below existential balance + // and dust balance is removed from account 2 + assert_ok!(Balances::transfer(RawOrigin::Signed(2).into(), 1, 450)); + + // As expected dust balance is removed. + assert_eq!(Balances::free_balance(&2), 0); + + // Dust balance is deposited to account 1 + // during the process of dust removal. 
+ assert_eq!(Balances::free_balance(&1), 1500); + + // Verify the events + // Number of events expected is 8 + assert_eq!(System::events().len(), 9); + + assert!( + System::events().iter().any( + |er| + er.event == Event::pallet_balances( + crate::Event::Transfer(2, 1, 450), + ), + ), + ); + + assert!( + System::events().iter().any( + |er| + er.event == Event::pallet_balances( + crate::Event::DustLost(2, 50), + ), + ), + ); + } + ); +} + +#[test] +fn repatriating_reserved_balance_dust_removal_should_work() { + ExtBuilder::default() + .existential_deposit(100) + .build() + .execute_with(|| { + // Verification of reentrancy in dust removal + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 1, 1000, 0)); + assert_ok!(Balances::set_balance(RawOrigin::Root.into(), 2, 500, 0)); + + // Reserve a value on account 2, + // Such that free balance is lower than + // Exestintial deposit. + assert_ok!(Balances::reserve(&2, 450)); + + // Transfer of reserved fund from slashed account 2 to + // beneficiary account 1 + assert_ok!(Balances::repatriate_reserved(&2, &1, 450, Status::Free), 0); + + // Since free balance of account 2 is lower than + // existential deposit, dust amount is + // removed from the account 2 + assert_eq!(Balances::reserved_balance(2), 0); + assert_eq!(Balances::free_balance(2), 0); + + // account 1 is credited with reserved amount + // together with dust balance during dust + // removal. 
+ assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 1500); + + // Verify the events + // Number of events expected is 10 + assert_eq!(System::events().len(), 10); + + assert!( + System::events().iter().any( + |er| + er.event == Event::pallet_balances( + crate::Event::ReserveRepatriated(2, 1, 450, Status::Free), + ), + ), + ); + + assert_eq!( + last_event(), + Event::pallet_balances(crate::Event::DustLost(2, 50)), + ); + + } + ); +} diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index 2b69c9c11d..463ac7dd35 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -18,7 +18,7 @@ //! Autogenerated weights for pallet_balances //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2021-01-06, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! DATE: 2021-01-06, STEPS: \[50, \], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index 41ab9efece..3b20cf7dd0 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "3.0.0" +version = "3.1.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -24,6 +24,7 @@ sp-io = { version = "3.0.0", path = "../../primitives/io", default-features = fa sp-storage = { version = "3.0.0", path = "../../primitives/storage", default-features = false } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] hex-literal = "0.3.1" @@ -40,4 +41,5 @@ std = [ "frame-support/std", "frame-system/std", "linregress", + "log/std", ] diff --git 
a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index d2cba9cc70..1ff8cc8e57 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -29,10 +29,16 @@ pub use utils::*; pub use analysis::{Analysis, BenchmarkSelector, RegressionModel}; #[doc(hidden)] pub use sp_io::storage::root as storage_root; +#[doc(hidden)] pub use sp_runtime::traits::Zero; +#[doc(hidden)] pub use frame_support; +#[doc(hidden)] pub use paste; +#[doc(hidden)] pub use sp_storage::TrackedStorageKey; +#[doc(hidden)] +pub use log; /// Construct pallet benchmarks for weighing dispatchables. /// @@ -175,13 +181,33 @@ macro_rules! benchmarks { } /// Same as [`benchmarks`] but for instantiable module. +/// +/// NOTE: For pallet declared with [`frame_support::pallet`], use [`benchmarks_instance_pallet`]. #[macro_export] macro_rules! benchmarks_instance { ( $( $rest:tt )* ) => { $crate::benchmarks_iter!( - { I } + { I: Instance } + { } + ( ) + ( ) + $( $rest )* + ); + } +} + +/// Same as [`benchmarks`] but for instantiable pallet declared [`frame_support::pallet`]. +/// +/// NOTE: For pallet declared with `decl_module!`, use [`benchmarks_instance`]. +#[macro_export] +macro_rules! benchmarks_instance_pallet { + ( + $( $rest:tt )* + ) => { + $crate::benchmarks_iter!( + { I: 'static } { } ( ) ( ) @@ -195,16 +221,16 @@ macro_rules! benchmarks_instance { macro_rules! benchmarks_iter { // detect and extract where clause: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) - where_clause { where $( $where_ty:ty: $where_bound:path ),* $(,)? } + where_clause { where $( $where_bound:tt )* } $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } - { $( $where_ty: $where_bound ),* } + { $( $instance: $instance_bound)? } + { $( $where_bound )* } ( $( $names )* ) ( $( $names_extra )* ) $( $rest )* @@ -212,7 +238,7 @@ macro_rules! 
benchmarks_iter { }; // detect and extract extra tag: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -221,7 +247,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* $name ) @@ -231,7 +257,7 @@ macro_rules! benchmarks_iter { }; // mutation arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) // This contains $( $( { $instance } )? $name:ident )* ( $( $names_extra:tt )* ) @@ -240,7 +266,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) @@ -251,7 +277,7 @@ macro_rules! benchmarks_iter { }; // mutation arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -260,7 +286,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) @@ -277,7 +303,7 @@ macro_rules! benchmarks_iter { }; // iteration arm: ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -286,7 +312,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { } @@ -298,12 +324,12 @@ macro_rules! benchmarks_iter { #[cfg(test)] $crate::impl_benchmark_test!( { $( $where_clause )* } - { $( $instance)? } + { $( $instance: $instance_bound )? 
} $name ); $crate::benchmarks_iter!( - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* { $( $instance )? } $name ) ( $( $names_extra )* ) @@ -312,26 +338,26 @@ macro_rules! benchmarks_iter { }; // iteration-exit arm ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) ) => { $crate::selected_benchmark!( { $( $where_clause)* } - { $( $instance)? } + { $( $instance: $instance_bound )? } $( $names )* ); $crate::impl_benchmark!( { $( $where_clause )* } - { $( $instance)? } + { $( $instance: $instance_bound )? } ( $( $names )* ) ( $( $names_extra ),* ) ); }; // add verify block to _() format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -339,7 +365,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) @@ -350,7 +376,7 @@ macro_rules! benchmarks_iter { }; // add verify block to name() format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -358,7 +384,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $instance)? } + { $( $instance: $instance_bound )? } { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) @@ -369,7 +395,7 @@ macro_rules! benchmarks_iter { }; // add verify block to {} format ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } { $( $where_clause:tt )* } ( $( $names:tt )* ) ( $( $names_extra:tt )* ) @@ -377,7 +403,7 @@ macro_rules! benchmarks_iter { $( $rest:tt )* ) => { $crate::benchmarks_iter!( - { $( $instance)? } + { $( $instance: $instance_bound )? 
} { $( $where_clause )* } ( $( $names )* ) ( $( $names_extra )* ) @@ -393,7 +419,7 @@ macro_rules! benchmarks_iter { macro_rules! benchmark_backend { // parsing arms ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { $( PRE { $( $pre_parsed:tt )* } )* } @@ -405,7 +431,7 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { @@ -418,7 +444,7 @@ macro_rules! benchmark_backend { } }; ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { $( $parsed:tt )* } @@ -430,7 +456,7 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { @@ -444,7 +470,7 @@ macro_rules! benchmark_backend { }; // mutation arm to look after a single tt for param_from. ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { $( $parsed:tt )* } @@ -456,7 +482,7 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { $( $parsed )* } @@ -470,7 +496,7 @@ macro_rules! benchmark_backend { }; // mutation arm to look after the default tail of `=> ()` ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { $( $parsed:tt )* } @@ -482,7 +508,7 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { $( $parsed )* } @@ -496,7 +522,7 @@ macro_rules! benchmark_backend { }; // mutation arm to look after `let _ =` ( - { $( $instance:ident )? 
} + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { $( $parsed:tt )* } @@ -508,7 +534,7 @@ macro_rules! benchmark_backend { $postcode:block ) => { $crate::benchmark_backend! { - { $( $instance)? } + { $( $instance: $instance_bound )? } $name { $( $where_clause )* } { $( $parsed )* } @@ -522,7 +548,7 @@ macro_rules! benchmark_backend { }; // actioning arm ( - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident { $( $where_clause:tt )* } { @@ -536,7 +562,7 @@ macro_rules! benchmark_backend { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] - impl, I: Instance)? > + impl, $instance: $instance_bound )? > $crate::BenchmarkingSetup for $name where $( $where_clause )* { @@ -597,7 +623,7 @@ macro_rules! benchmark_backend { macro_rules! selected_benchmark { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $( { $( $bench_inst:ident )? } $bench:ident )* ) => { // The list of available benchmarks for this pallet. @@ -607,7 +633,7 @@ macro_rules! selected_benchmark { } // Allow us to select a benchmark from the list of available benchmarks. - impl, I: Instance )? > + impl, $instance: $instance_bound )? > $crate::BenchmarkingSetup for SelectedBenchmark where $( $where_clause )* { @@ -643,11 +669,11 @@ macro_rules! selected_benchmark { macro_rules! impl_benchmark { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } ( $( { $( $name_inst:ident )? } $name:ident )* ) ( $( $name_extra:ident ),* ) ) => { - impl, I: Instance)? > + impl, $instance: $instance_bound )? > $crate::Benchmarking<$crate::BenchmarkResults> for Module where T: frame_system::Config, $( $where_clause )* { @@ -731,7 +757,7 @@ macro_rules! impl_benchmark { closure_to_benchmark()?; } else { // Time the extrinsic logic. 
- frame_support::debug::trace!( + $crate::log::trace!( target: "benchmark", "Start Benchmark: {:?}", c ); @@ -744,12 +770,12 @@ macro_rules! impl_benchmark { let elapsed_extrinsic = finish_extrinsic - start_extrinsic; // Commit the changes to get proper write count $crate::benchmarking::commit_db(); - frame_support::debug::trace!( + $crate::log::trace!( target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic ); let read_write_count = $crate::benchmarking::read_write_count(); - frame_support::debug::trace!( + $crate::log::trace!( target: "benchmark", "Read/Write Count {:?}", read_write_count ); @@ -830,6 +856,31 @@ macro_rules! impl_benchmark { return Ok(results); } } + + /// Test a particular benchmark by name. + /// + /// This isn't called `test_benchmark_by_name` just in case some end-user eventually + /// writes a benchmark, itself called `by_name`; the function would be shadowed in + /// that case. + /// + /// This is generally intended to be used by child test modules such as those created + /// by the `impl_benchmark_test_suite` macro. However, it is not an error if a pallet + /// author chooses not to implement benchmarks. + #[cfg(test)] + #[allow(unused)] + fn test_bench_by_name(name: &[u8]) -> Result<(), &'static str> + where + T: Config + frame_system::Config, $( $where_clause )* + { + let name = sp_std::str::from_utf8(name) + .map_err(|_| "`name` is not a valid utf8 string!")?; + match name { + $( stringify!($name) => { + $crate::paste::paste! { [< test_benchmark_ $name >]::() } + } )* + _ => Err("Could not find test for requested benchmark."), + } + } }; } @@ -841,7 +892,7 @@ macro_rules! impl_benchmark { macro_rules! impl_benchmark_test { ( { $( $where_clause:tt )* } - { $( $instance:ident )? } + { $( $instance:ident: $instance_bound:tt )? } $name:ident ) => { $crate::paste::item! { @@ -903,6 +954,246 @@ macro_rules! impl_benchmark_test { }; } +/// This creates a test suite which runs the module's benchmarks. 
+/// +/// When called in `pallet_example` as +/// +/// ```rust,ignore +/// impl_benchmark_test_suite!(Module, crate::tests::new_test_ext(), crate::tests::Test); +/// ``` +/// +/// It expands to the equivalent of: +/// +/// ```rust,ignore +/// #[cfg(test)] +/// mod tests { +/// use super::*; +/// use crate::tests::{new_test_ext, Test}; +/// use frame_support::assert_ok; +/// +/// #[test] +/// fn test_benchmarks() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_accumulate_dummy::()); +/// assert_ok!(test_benchmark_set_dummy::()); +/// assert_ok!(test_benchmark_another_set_dummy::()); +/// assert_ok!(test_benchmark_sort_vector::()); +/// }); +/// } +/// } +/// ``` +/// +/// ## Arguments +/// +/// The first argument, `module`, must be the path to this crate's module. +/// +/// The second argument, `new_test_ext`, must be a function call which returns either a +/// `sp_io::TestExternalities`, or some other type with a similar interface. +/// +/// Note that this function call is _not_ evaluated at compile time, but is instead copied textually +/// into each appropriate invocation site. +/// +/// The third argument, `test`, must be the path to the runtime. The item to which this must refer +/// will generally take the form: +/// +/// ```rust,ignore +/// frame_support::construct_runtime!( +/// pub enum Test where ... +/// { ... } +/// ); +/// ``` +/// +/// There is an optional fourth argument, with keyword syntax: `benchmarks_path = path_to_benchmarks_invocation`. +/// In the typical case in which this macro is in the same module as the `benchmarks!` invocation, +/// you don't need to supply this. However, if the `impl_benchmark_test_suite!` invocation is in a +/// different module than the `benchmarks!` invocation, then you should provide the path to the +/// module containing the `benchmarks!` invocation: +/// +/// ```rust,ignore +/// mod benches { +/// benchmarks!{ +/// ... 
+/// } +/// } +/// +/// mod tests { +/// // because of macro syntax limitations, neither Module nor benches can be paths, but both have +/// // to be idents in the scope of `impl_benchmark_test_suite`. +/// use crate::{benches, Module}; +/// +/// impl_benchmark_test_suite!(Module, new_test_ext(), Test, benchmarks_path = benches); +/// +/// // new_test_ext and the Test item are defined later in this module +/// } +/// ``` +/// +/// There is an optional fifth argument, with keyword syntax: `extra = true` or `extra = false`. +/// By default, this generates a test suite which iterates over all benchmarks, including those +/// marked with the `#[extra]` annotation. Setting `extra = false` excludes those. +/// +/// There is an optional sixth argument, with keyword syntax: `exec_name = custom_exec_name`. +/// By default, this macro uses `execute_with` for this parameter. This argument, if set, is subject +/// to these restrictions: +/// +/// - It must be the name of a method applied to the output of the `new_test_ext` argument. +/// - That method must have a signature capable of receiving a single argument of the form `impl FnOnce()`. +/// +// ## Notes (not for rustdoc) +// +// The biggest challenge for this macro is communicating the actual test functions to be run. We +// can't just build an array of function pointers to each test function and iterate over it, because +// the test functions are parameterized by the `Test` type. That's incompatible with +// monomorphization: if it were legal, then even if the compiler detected and monomorphized the +// functions into only the types of the callers, which implementation would the function pointer +// point to? There would need to be some kind of syntax for selecting the destination of the pointer +// according to a generic argument, and in general it would be a huge mess and not worth it. 
+// +// Instead, we're going to steal a trick from `fn run_benchmark`: generate a function which is +// itself parametrized by `Test`, which accepts a `&[u8]` parameter containing the name of the +// benchmark, and dispatches based on that to the appropriate real test implementation. Then, we can +// just iterate over the `Benchmarking::benchmarks` list to run the actual implementations. +#[macro_export] +macro_rules! impl_benchmark_test_suite { + // user might or might not have set some keyword arguments; set the defaults + // + // The weird syntax indicates that `rest` comes only after a comma, which is otherwise optional + ( + $bench_module:ident, + $new_test_ext:expr, + $test:path + $(, $( $rest:tt )* )? + ) => { + impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = super, + extra = true, + exec_name = execute_with, + @user: + $( $( $rest )* )? + ); + }; + // pick off the benchmarks_path keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $old:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + benchmarks_path = $benchmarks_path:ident + $(, $( $rest:tt )* )? + ) => { + impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? + ); + }; + // pick off the extra keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $benchmarks_path:ident, + extra = $old:expr, + exec_name = $exec_name:ident, + @user: + extra = $extra:expr + $(, $( $rest:tt )* )? + ) => { + impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? 
+ ); + }; + // pick off the exec_name keyword argument + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $benchmarks_path:ident, + extra = $extra:expr, + exec_name = $old:ident, + @user: + exec_name = $exec_name:ident + $(, $( $rest:tt )* )? + ) => { + impl_benchmark_test_suite!( + @selected: + $bench_module, + $new_test_ext, + $test, + benchmarks_path = $benchmarks_path, + extra = $extra, + exec_name = $exec_name, + @user: + $( $( $rest )* )? + ); + }; + // all options set; nothing else in user-provided keyword arguments + ( + @selected: + $bench_module:ident, + $new_test_ext:expr, + $test:path, + benchmarks_path = $path_to_benchmarks_invocation:ident, + extra = $extra:expr, + exec_name = $exec_name:ident, + @user: + $(,)? + ) => { + #[cfg(test)] + mod benchmark_tests { + use $path_to_benchmarks_invocation::test_bench_by_name; + use super::$bench_module; + + #[test] + fn test_benchmarks() { + $new_test_ext.$exec_name(|| { + use $crate::Benchmarking; + + let mut anything_failed = false; + println!("failing benchmark tests:"); + for benchmark_name in $bench_module::<$test>::benchmarks($extra) { + match std::panic::catch_unwind(|| test_bench_by_name::<$test>(benchmark_name)) { + Err(err) => { + println!("{}: {:?}", String::from_utf8_lossy(benchmark_name), err); + anything_failed = true; + }, + Ok(Err(err)) => { + println!("{}: {}", String::from_utf8_lossy(benchmark_name), err); + anything_failed = true; + }, + Ok(Ok(_)) => (), + } + } + assert!(!anything_failed); + }); + } + } + }; +} + /// show error message and debugging info for the case of an error happening /// during a benchmark pub fn show_benchmark_debug_info( @@ -1031,7 +1322,7 @@ macro_rules! add_benchmark { *repeat, whitelist, *verify, - ).map_err(|e| { + ).map_err(|e| { $crate::show_benchmark_debug_info( instance_string, benchmark, @@ -1058,7 +1349,7 @@ macro_rules! 
add_benchmark { *repeat, whitelist, *verify, - ).map_err(|e| { + ).map_err(|e| { $crate::show_benchmark_debug_info( instance_string, benchmark, diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 53093fdf06..8431f3e46c 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -138,7 +138,8 @@ mod benchmarks { crate::benchmarks!{ where_clause { where - ::OtherEvent: Into<::Event> + ::OtherEvent: Into<::Event> + Clone, + ::Event: Clone, } set_value { diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 1574e47454..cdd4042616 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -228,4 +228,4 @@ macro_rules! whitelist_account { frame_system::Account::::hashed_key_for(&$acc).into() ); } -} +} \ No newline at end of file diff --git a/frame/bounties/Cargo.toml b/frame/bounties/Cargo.toml index ec4f1b94cd..ff1a3a6807 100644 --- a/frame/bounties/Cargo.toml +++ b/frame/bounties/Cargo.toml @@ -21,7 +21,7 @@ frame-support = { version = "3.0.0", default-features = false, path = "../suppor frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-treasury = { version = "3.0.0", default-features = false, path = "../treasury" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-io ={ version = "3.0.0", path = "../../primitives/io" } @@ -45,3 +45,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/bounties/src/benchmarking.rs b/frame/bounties/src/benchmarking.rs index f6fc11ad0b..632f951f05 100644 --- a/frame/bounties/src/benchmarking.rs +++ b/frame/bounties/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use 
sp_runtime::traits::Bounded; use frame_system::{EventRecord, RawOrigin}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use frame_support::traits::OnInitialize; use crate::Module as Bounties; @@ -220,26 +220,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_bounty::()); - assert_ok!(test_benchmark_approve_bounty::()); - assert_ok!(test_benchmark_propose_curator::()); - assert_ok!(test_benchmark_unassign_curator::()); - assert_ok!(test_benchmark_accept_curator::()); - assert_ok!(test_benchmark_award_bounty::()); - assert_ok!(test_benchmark_claim_bounty::()); - assert_ok!(test_benchmark_close_bounty_proposed::()); - assert_ok!(test_benchmark_close_bounty_active::()); - assert_ok!(test_benchmark_extend_bounty_expiry::()); - assert_ok!(test_benchmark_spend_funds::()); - }); - } -} +impl_benchmark_test_suite!( + Bounties, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 0c58f41640..b8f825cc52 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -19,9 +19,10 @@ sp-core = { version = "3.0.0", default-features = false, path = "../../primitive sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = 
"3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] hex-literal = "0.3.1" @@ -38,9 +39,11 @@ std = [ "frame-support/std", "sp-runtime/std", "frame-system/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index bff7dad59d..1afdd14b1a 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -21,7 +21,12 @@ use super::*; use frame_system::RawOrigin as SystemOrigin; use frame_system::EventRecord; -use frame_benchmarking::{benchmarks_instance, account, whitelisted_caller}; +use frame_benchmarking::{ + benchmarks_instance, + account, + whitelisted_caller, + impl_benchmark_test_suite, +}; use sp_runtime::traits::Bounded; use sp_std::mem::size_of; @@ -42,7 +47,6 @@ fn assert_last_event, I: Instance>(generic_event: >: } benchmarks_instance! { - set_members { let m in 1 .. T::MaxMembers::get(); let n in 1 .. T::MaxMembers::get(); @@ -634,79 +638,8 @@ benchmarks_instance! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn set_members() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_members::()); - }); - } - - #[test] - fn execute() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_execute::()); - }); - } - - #[test] - fn propose_execute() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_execute::()); - }); - } - - #[test] - fn propose_proposed() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_proposed::()); - }); - } - - #[test] - fn vote() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_vote::()); - }); - } - - #[test] - fn close_early_disapproved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_early_disapproved::()); - }); - } - - #[test] - fn close_early_approved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_early_approved::()); - }); - } - - #[test] - fn close_disapproved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_disapproved::()); - }); - } - - #[test] - fn close_approved() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_close_approved::()); - }); - } - - #[test] - fn disapprove_proposal() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_disapprove_proposal::()); - }); - } -} +impl_benchmark_test_suite!( + Collective, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index 50beb8607d..a8184b8dd5 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -49,7 +49,7 @@ use sp_runtime::{RuntimeDebug, traits::Hash}; use frame_support::{ codec::{Decode, Encode}, - debug, decl_error, decl_event, decl_module, decl_storage, + decl_error, decl_event, decl_module, decl_storage, dispatch::{ DispatchError, DispatchResult, 
DispatchResultWithPostInfo, Dispatchable, Parameter, PostDispatchInfo, @@ -320,19 +320,21 @@ decl_module! { ) -> DispatchResultWithPostInfo { ensure_root(origin)?; if new_members.len() > T::MaxMembers::get() as usize { - debug::error!( - "New members count exceeds maximum amount of members expected. (expected: {}, actual: {})", + log::error!( + target: "runtime::collective", + "New members count ({}) exceeds maximum amount of members expected ({}).", + new_members.len(), T::MaxMembers::get(), - new_members.len() ); } let old = Members::::get(); if old.len() > old_count as usize { - debug::warn!( - "Wrong count used to estimate set_members weight. (expected: {}, actual: {})", + log::warn!( + target: "runtime::collective", + "Wrong count used to estimate set_members weight. expected ({}) vs actual ({})", old_count, - old.len() + old.len(), ); } let mut new_members = new_members; @@ -811,10 +813,11 @@ impl, I: Instance> ChangeMembers for Module { new: &[T::AccountId], ) { if new.len() > T::MaxMembers::get() as usize { - debug::error!( - "New members count exceeds maximum amount of members expected. (expected: {}, actual: {})", + log::error!( + target: "runtime::collective", + "New members count ({}) exceeds maximum amount of members expected ({}).", + new.len(), T::MaxMembers::get(), - new.len() ); } // remove accounts from all current voting in motions. diff --git a/frame/contracts/CHANGELOG.md b/frame/contracts/CHANGELOG.md new file mode 100644 index 0000000000..ce35abbd86 --- /dev/null +++ b/frame/contracts/CHANGELOG.md @@ -0,0 +1,78 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +The semantic versioning guarantees cover the interface to the substrate runtime which +includes this pallet as a dependency. 
This module will also add storage migrations whenever +changes require it. Stability with regard to offchain tooling is explicitly excluded from +this guarantee: For example adding a new field to an in-storage data structure will require +changes to frontends to properly display it. However, those changes will still be regarded +as a minor version bump. + +The interface provided to smart contracts will adhere to semver with one exception: Even +major version bumps will be backwards compatible with regard to already deployed contracts. +In other words: Upgrading this pallet will not break pre-existing contracts. + +## [v3.0.0] + +This version constitutes the first release that brings any stability guarantees (see above). + +### Added + +- Emit an event when a contract terminates (self-destructs). +[1](https://github.com/paritytech/substrate/pull/8014) + +- Charge rent for code stored on the chain in addition to the already existing +rent that is paid for data storage. +[1](https://github.com/paritytech/substrate/pull/7935) + +- Allow the runtime to configure per storage item costs in addition +to the already existing per byte costs. +[1](https://github.com/paritytech/substrate/pull/7819) + +- Contracts are now deleted lazily so that the user who removes a contract +does not need to pay for the deletion of the contract storage. +[1](https://github.com/paritytech/substrate/pull/7740) + +- Allow runtime authors to define chain extensions in order to provide custom +functionality to contracts. +[1](https://github.com/paritytech/substrate/pull/7548) +[2](https://github.com/paritytech/substrate/pull/8003) + +- Proper weights which are fully automated by benchmarking. +[1](https://github.com/paritytech/substrate/pull/6715) +[2](https://github.com/paritytech/substrate/pull/7017) +[3](https://github.com/paritytech/substrate/pull/7361) + +### Changes + +- Collect the rent for one block during instantiation.
+[1](https://github.com/paritytech/substrate/pull/7847) + +- Instantiation takes a `salt` argument to allow for easier instantiation of the +same code by the same sender. +[1](https://github.com/paritytech/substrate/pull/7482) + +- Improve the information returned by the `contracts_call` RPC. +[1](https://github.com/paritytech/substrate/pull/7468) + +- Simplify the node configuration necessary to add this module. +[1](https://github.com/paritytech/substrate/pull/7409) + +### Fixed + +- Consider the code size of a contract in the weight that is charged for +loading a contract from storage. +[1](https://github.com/paritytech/substrate/pull/8086) + +- Fix possible overflow in storage size calculation +[1](https://github.com/paritytech/substrate/pull/7885) + +- Cap the surcharge reward that can be claimed. +[1](https://github.com/paritytech/substrate/pull/7870) + +- Fix a possible DoS vector where contracts could allocate too large buffers. +[1](https://github.com/paritytech/substrate/pull/7818) diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index c5ba615504..018a8a5df6 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts" -version = "2.0.1" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -9,19 +9,16 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for WASM contracts" readme = "README.md" -# Prevent publish until we are ready to release 3.0.0 -publish = false - [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { 
version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "common" } -pallet-contracts-proc-macro = { version = "0.1.0", path = "proc-macro" } +pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "common" } +pallet-contracts-proc-macro = { version = "3.0.0", path = "proc-macro" } parity-wasm = { version = "0.41.0", default-features = false } pwasm-utils = { version = "0.16", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } @@ -31,6 +28,7 @@ sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/ sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-sandbox = { version = "0.9.0", default-features = false, path = "../../primitives/sandbox" } wasmi-validation = { version = "0.3.0", default-features = false } +log = { version = "0.4.14", default-features = false } # Only used in benchmarking to generate random contract code rand = { version = "0.7.0", optional = true, default-features = false } @@ -63,9 +61,11 @@ std = [ "wasmi-validation/std", "pallet-contracts-primitives/std", "pallet-contracts-proc-macro/full", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "rand", "rand_pcg", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/contracts/README.md b/frame/contracts/README.md index 6ec5eba2df..1cb384e14c 100644 --- a/frame/contracts/README.md +++ b/frame/contracts/README.md @@ -2,8 +2,10 @@ The Contract module provides functionality for the runtime to deploy and execute WebAssembly smart-contracts. 
-- [`contract::Config`](https://docs.rs/pallet-contracts/latest/pallet_contracts/trait.Trait.html) - [`Call`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Call.html) +- [`Config`](https://docs.rs/pallet-contracts/latest/pallet_contracts/trait.Config.html) +- [`Error`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Error.html) +- [`Event`](https://docs.rs/pallet-contracts/latest/pallet_contracts/enum.Event.html) ## Overview @@ -32,6 +34,9 @@ reverted at the current call's contract level. For example, if contract A calls then all of B's calls are reverted. Assuming correct error handling by contract A, A's other calls and state changes still persist. +One gas is equivalent to one [weight](https://substrate.dev/docs/en/knowledgebase/learn-substrate/weight) +which is defined as one picosecond of execution time on the runtime's reference machine. + ### Notable Scenarios Contract call failures are not always cascading. When failures occur in a sub-call, they do not "bubble up", @@ -42,19 +47,14 @@ fails, A can decide how to handle that failure, either proceeding or reverting A ### Dispatchable functions -Those are documented in the reference documentation of the `Module`. +Those are documented in the [reference documentation](https://docs.rs/pallet-contracts/latest/pallet_contracts/#dispatchable-functions). ## Usage -The Contract module is a work in progress. The following examples show how this Contract module -can be used to instantiate and call contracts. - -- [`ink`](https://github.com/paritytech/ink) is -an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables writing -WebAssembly based smart contracts in the Rust programming language. This is a work in progress. - -## Related Modules - -- [Balances](https://docs.rs/pallet-balances/latest/pallet_balances/) +This module executes WebAssembly smart contracts. These can potentially be written in any language +that compiles to web assembly. 
However, using a language that specifically targets this module +will make things a lot easier. One such language is [`ink`](https://github.com/paritytech/ink) +which is an [`eDSL`](https://wiki.haskell.org/Embedded_domain_specific_language) that enables +writing WebAssembly based smart contracts in the Rust programming language. License: Apache-2.0 diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index f385a7ae9f..050e18fc44 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.1" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,7 +8,6 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." readme = "README.md" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/proc-macro/Cargo.toml b/frame/contracts/proc-macro/Cargo.toml index 56ef855335..2bdde32e0b 100644 --- a/frame/contracts/proc-macro/Cargo.toml +++ b/frame/contracts/proc-macro/Cargo.toml @@ -1,13 +1,12 @@ [package] name = "pallet-contracts-proc-macro" -version = "0.1.0" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Procedural macros used in pallet_contracts" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 06c3c7d243..d0068e3e42 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.1" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" 
license = "Apache-2.0" @@ -8,7 +8,6 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with contracts." readme = "README.md" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -24,8 +23,8 @@ sp-rpc = { version = "3.0.0", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } sp-api = { version = "3.0.0", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } +pallet-contracts-primitives = { version = "3.0.0", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "3.0.0", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 0794fee292..32de637f10 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.1" +version = "3.0.0" authors = ["Parity Technologies "] edition = "2018" license = "Apache-2.0" @@ -8,7 +8,6 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by Contracts RPC extensions." 
readme = "README.md" -publish = false [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] @@ -18,7 +17,7 @@ sp-api = { version = "3.0.0", default-features = false, path = "../../../../prim codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../common" } +pallet-contracts-primitives = { version = "3.0.0", default-features = false, path = "../../common" } [features] default = ["std"] diff --git a/frame/contracts/src/benchmarking/code.rs b/frame/contracts/src/benchmarking/code.rs index 01ca7d3aac..64d2a0cf01 100644 --- a/frame/contracts/src/benchmarking/code.rs +++ b/frame/contracts/src/benchmarking/code.rs @@ -27,12 +27,14 @@ use crate::Config; use crate::Module as Contracts; -use parity_wasm::elements::{Instruction, Instructions, FuncBody, ValueType, BlockType}; +use parity_wasm::elements::{ + Instruction, Instructions, FuncBody, ValueType, BlockType, Section, CustomSection, +}; use pwasm_utils::stack_height::inject_limiter; use sp_core::crypto::UncheckedFrom; use sp_runtime::traits::Hash; use sp_sandbox::{EnvironmentDefinitionBuilder, Memory}; -use sp_std::{prelude::*, convert::TryFrom}; +use sp_std::{prelude::*, convert::TryFrom, borrow::ToOwned}; /// Pass to `create_code` in order to create a compiled `WasmModule`. /// @@ -66,6 +68,10 @@ pub struct ModuleDefinition { pub inject_stack_metering: bool, /// Create a table containing function pointers. pub table: Option, + /// Create a section named "dummy" of the specified size. This is useful in order to + /// benchmark the overhead of loading and storing codes of specified sizes. 
The dummy + /// section only contributes to the size of the contract but does not affect execution. + pub dummy_section: u32, } pub struct TableSegment { @@ -204,6 +210,15 @@ where .build(); } + // Add the dummy section + if def.dummy_section > 0 { + contract = contract.with_section( + Section::Custom( + CustomSection::new("dummy".to_owned(), vec![42; def.dummy_section as usize]) + ) + ); + } + let mut code = contract.build(); // Inject stack height metering @@ -235,10 +250,11 @@ where ModuleDefinition::default().into() } - /// Same as `dummy` but with maximum sized linear memory. - pub fn dummy_with_mem() -> Self { + /// Same as `dummy` but with maximum sized linear memory and a dummy section of specified size. + pub fn dummy_with_bytes(dummy_bytes: u32) -> Self { ModuleDefinition { memory: Some(ImportedMemory::max::()), + dummy_section: dummy_bytes, .. Default::default() } .into() diff --git a/frame/contracts/src/benchmarking/mod.rs b/frame/contracts/src/benchmarking/mod.rs index a5dcc40d71..d01a2bce2c 100644 --- a/frame/contracts/src/benchmarking/mod.rs +++ b/frame/contracts/src/benchmarking/mod.rs @@ -36,12 +36,13 @@ use self::{ }, sandbox::Sandbox, }; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use frame_system::{Module as System, RawOrigin}; use parity_wasm::elements::{Instruction, ValueType, BlockType}; use sp_runtime::traits::{Hash, Bounded, Zero}; use sp_std::{default::Default, convert::{TryInto}, vec::Vec, vec}; use pallet_contracts_primitives::RentProjection; +use frame_support::weights::Weight; /// How many batches we do per API benchmark. const API_BENCHMARK_BATCHES: u32 = 20; @@ -304,6 +305,19 @@ benchmarks! 
{ Storage::::process_deletion_queue_batch(Weight::max_value()) } + // This benchmarks the additional weight that is charged when a contract is executed the + // first time after a new schedule was deployed: For every new schedule a contract needs + // to re-run the instrumentation once. + instrument { + let c in 0 .. T::MaxCodeSize::get() / 1024; + let WasmModule { code, hash, .. } = WasmModule::::sized(c * 1024); + Contracts::::store_code_raw(code)?; + let mut module = PrefabWasmModule::from_storage_noinstr(hash)?; + let schedule = Contracts::::current_schedule(); + }: { + Contracts::::reinstrument_module(&mut module, &schedule)?; + } + // This extrinsic is pretty much constant as it is only a simple setter. update_schedule { let schedule = Schedule { @@ -318,8 +332,13 @@ benchmarks! { // determine the contract address. // `c`: Size of the code in kilobytes. // `s`: Size of the salt in kilobytes. + // + // # Note + // + // We cannot let `c` grow to the maximum code size because the code is not allowed + // to be larger than the maximum size **after instrumentation**. instantiate_with_code { - let c in 0 .. Contracts::::current_schedule().limits.code_size / 1024; + let c in 0 .. Perbill::from_percent(50).mul_ceil(T::MaxCodeSize::get() / 1024); let s in 0 .. code::max_pages::() * 64; let salt = vec![42u8; (s * 1024) as usize]; let endowment = caller_funding::() / 3u32.into(); @@ -339,14 +358,16 @@ benchmarks! { } // Instantiate uses a dummy contract constructor to measure the overhead of the instantiate. + // `c`: Size of the code in kilobytes. // `s`: Size of the salt in kilobytes. instantiate { + let c in 0 .. T::MaxCodeSize::get() / 1024; let s in 0 .. code::max_pages::() * 64; let salt = vec![42u8; (s * 1024) as usize]; let endowment = caller_funding::() / 3u32.into(); let caller = whitelisted_caller(); T::Currency::make_free_balance_be(&caller, caller_funding::()); - let WasmModule { code, hash, .. 
} = WasmModule::::dummy_with_mem(); + let WasmModule { code, hash, .. } = WasmModule::::dummy_with_bytes(c * 1024); let origin = RawOrigin::Signed(caller.clone()); let addr = Contracts::::contract_address(&caller, &hash, &salt); Contracts::::store_code_raw(code)?; @@ -365,10 +386,12 @@ benchmarks! { // won't call `seal_input` in its constructor to copy the data to contract memory. // The dummy contract used here does not do this. The costs for the data copy is billed as // part of `seal_input`. + // `c`: Size of the code in kilobytes. call { + let c in 0 .. T::MaxCodeSize::get() / 1024; let data = vec![42u8; 1024]; let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy_with_mem(), vec![], Endow::CollectRent + whitelisted_caller(), WasmModule::dummy_with_bytes(c * 1024), vec![], Endow::CollectRent )?; let value = T::Currency::minimum_balance() * 100u32.into(); let origin = RawOrigin::Signed(instance.caller.clone()); @@ -396,9 +419,11 @@ benchmarks! { // will be distributed over multiple blocks using a scheduler. Otherwise there is // no incentive to remove large contracts when the removal is more expensive than // the reward for removing them. + // `c`: Size of the code of the contract that should be evicted. claim_surcharge { + let c in 0 .. T::MaxCodeSize::get() / 1024; let instance = Contract::::with_caller( - whitelisted_caller(), WasmModule::dummy(), vec![], Endow::CollectRent + whitelisted_caller(), WasmModule::dummy_with_bytes(c * 1024), vec![], Endow::CollectRent )?; let origin = RawOrigin::Signed(instance.caller.clone()); let account_id = instance.account_id.clone(); @@ -694,6 +719,42 @@ benchmarks! { } } + seal_terminate_per_code_kb { + let c in 0 .. 
T::MaxCodeSize::get() / 1024; + let beneficiary = account::("beneficiary", 0, 0); + let beneficiary_bytes = beneficiary.encode(); + let beneficiary_len = beneficiary_bytes.len(); + let code = WasmModule::::from(ModuleDefinition { + memory: Some(ImportedMemory::max::()), + imported_functions: vec![ImportedFunction { + name: "seal_terminate", + params: vec![ValueType::I32, ValueType::I32], + return_type: None, + }], + data_segments: vec![ + DataSegment { + offset: 0, + value: beneficiary_bytes, + }, + ], + call_body: Some(body::repeated(1, &[ + Instruction::I32Const(0), // beneficiary_ptr + Instruction::I32Const(beneficiary_len as i32), // beneficiary_len + Instruction::Call(0), + ])), + dummy_section: c * 1024, + .. Default::default() + }); + let instance = Contract::::new(code, vec![], Endow::Max)?; + let origin = RawOrigin::Signed(instance.caller.clone()); + assert_eq!(T::Currency::total_balance(&beneficiary), 0u32.into()); + assert_eq!(T::Currency::total_balance(&instance.account_id), Endow::max::()); + }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) + verify { + assert_eq!(T::Currency::total_balance(&instance.account_id), 0u32.into()); + assert_eq!(T::Currency::total_balance(&beneficiary), Endow::max::()); + } + seal_restore_to { let r in 0 .. 1; @@ -772,9 +833,16 @@ benchmarks! { } } - seal_restore_to_per_delta { + // `c`: Code size of caller contract + // `t`: Code size of tombstone contract + // `d`: Number of supplied delta keys + seal_restore_to_per_code_kb_delta { + let c in 0 .. T::MaxCodeSize::get() / 1024; + let t in 0 .. T::MaxCodeSize::get() / 1024; let d in 0 .. API_BENCHMARK_BATCHES; - let mut tombstone = ContractWithStorage::::new(0, 0)?; + let mut tombstone = ContractWithStorage::::with_code( + WasmModule::::dummy_with_bytes(t * 1024), 0, 0 + )?; tombstone.evict()?; let delta = create_storage::(d * API_BENCHMARK_BATCH_SIZE, T::MaxValueSize::get())?; @@ -837,6 +905,7 @@ benchmarks! 
{ Instruction::Call(0), Instruction::End, ])), + dummy_section: c * 1024, .. Default::default() }); @@ -1225,7 +1294,7 @@ benchmarks! { // We call unique accounts. seal_call { let r in 0 .. API_BENCHMARK_BATCHES; - let dummy_code = WasmModule::::dummy_with_mem(); + let dummy_code = WasmModule::::dummy_with_bytes(0); let callees = (0..r * API_BENCHMARK_BATCH_SIZE) .map(|i| Contract::with_index(i + 1, dummy_code.clone(), vec![], Endow::Max)) .collect::, _>>()?; @@ -1280,7 +1349,8 @@ benchmarks! { let origin = RawOrigin::Signed(instance.caller.clone()); }: call(origin, instance.addr, 0u32.into(), Weight::max_value(), vec![]) - seal_call_per_transfer_input_output_kb { + seal_call_per_code_transfer_input_output_kb { + let c in 0 .. T::MaxCodeSize::get() / 1024; let t in 0 .. 1; let i in 0 .. code::max_pages::() * 64; let o in 0 .. (code::max_pages::() - 1) * 64; @@ -1302,6 +1372,7 @@ benchmarks! { Instruction::Call(0), Instruction::End, ])), + dummy_section: c * 1024, .. Default::default() }); let callees = (0..API_BENCHMARK_BATCH_SIZE) @@ -1475,7 +1546,8 @@ benchmarks! { } } - seal_instantiate_per_input_output_salt_kb { + seal_instantiate_per_code_input_output_salt_kb { + let c in 0 .. T::MaxCodeSize::get() / 1024; let i in 0 .. (code::max_pages::() - 1) * 64; let o in 0 .. (code::max_pages::() - 1) * 64; let s in 0 .. (code::max_pages::() - 1) * 64; @@ -1497,6 +1569,7 @@ benchmarks! { Instruction::Call(0), Instruction::End, ])), + dummy_section: c * 1024, .. Default::default() }); let hash = callee_code.hash.clone(); @@ -2440,127 +2513,8 @@ benchmarks! { }: {} } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - use paste::paste; - - macro_rules! 
create_test { - ($name:ident) => { - #[test] - fn $name() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(paste!{ - []::() - }); - }); - } - } - } - - create_test!(on_initialize); - create_test!(on_initialize_per_trie_key); - create_test!(on_initialize_per_queue_item); - - create_test!(update_schedule); - create_test!(instantiate_with_code); - create_test!(instantiate); - create_test!(call); - create_test!(claim_surcharge); - - create_test!(seal_caller); - create_test!(seal_address); - create_test!(seal_gas_left); - create_test!(seal_balance); - create_test!(seal_value_transferred); - create_test!(seal_minimum_balance); - create_test!(seal_tombstone_deposit); - create_test!(seal_rent_allowance); - create_test!(seal_block_number); - create_test!(seal_now); - create_test!(seal_weight_to_fee); - create_test!(seal_gas); - create_test!(seal_input); - create_test!(seal_input_per_kb); - create_test!(seal_return); - create_test!(seal_return_per_kb); - create_test!(seal_terminate); - create_test!(seal_restore_to); - create_test!(seal_restore_to_per_delta); - create_test!(seal_random); - create_test!(seal_deposit_event); - create_test!(seal_deposit_event_per_topic_and_kb); - create_test!(seal_set_rent_allowance); - create_test!(seal_set_storage); - create_test!(seal_set_storage_per_kb); - create_test!(seal_get_storage); - create_test!(seal_get_storage_per_kb); - create_test!(seal_transfer); - create_test!(seal_call); - create_test!(seal_call_per_transfer_input_output_kb); - create_test!(seal_instantiate); - create_test!(seal_instantiate_per_input_output_salt_kb); - create_test!(seal_clear_storage); - create_test!(seal_hash_sha2_256); - create_test!(seal_hash_sha2_256_per_kb); - create_test!(seal_hash_keccak_256); - create_test!(seal_hash_keccak_256_per_kb); - create_test!(seal_hash_blake2_256); - create_test!(seal_hash_blake2_256_per_kb); - create_test!(seal_hash_blake2_128); - create_test!(seal_hash_blake2_128_per_kb); - - create_test!(instr_i64const); - 
create_test!(instr_i64load); - create_test!(instr_i64store); - create_test!(instr_select); - create_test!(instr_if); - create_test!(instr_br); - create_test!(instr_br_if); - create_test!(instr_br_table); - create_test!(instr_br_table_per_entry); - create_test!(instr_call); - create_test!(instr_call_indirect); - create_test!(instr_call_indirect_per_param); - create_test!(instr_local_get); - create_test!(instr_local_set); - create_test!(instr_local_tee); - create_test!(instr_global_get); - create_test!(instr_global_set); - create_test!(instr_memory_current); - create_test!(instr_memory_grow); - create_test!(instr_i64clz); - create_test!(instr_i64ctz); - create_test!(instr_i64popcnt); - create_test!(instr_i64eqz); - create_test!(instr_i64extendsi32); - create_test!(instr_i64extendui32); - create_test!(instr_i32wrapi64); - create_test!(instr_i64eq); - create_test!(instr_i64ne); - create_test!(instr_i64lts); - create_test!(instr_i64ltu); - create_test!(instr_i64gts); - create_test!(instr_i64gtu); - create_test!(instr_i64les); - create_test!(instr_i64leu); - create_test!(instr_i64ges); - create_test!(instr_i64geu); - create_test!(instr_i64add); - create_test!(instr_i64sub); - create_test!(instr_i64mul); - create_test!(instr_i64divs); - create_test!(instr_i64divu); - create_test!(instr_i64rems); - create_test!(instr_i64remu); - create_test!(instr_i64and); - create_test!(instr_i64or); - create_test!(instr_i64xor); - create_test!(instr_i64shl); - create_test!(instr_i64shrs); - create_test!(instr_i64shru); - create_test!(instr_i64rotl); - create_test!(instr_i64rotr); -} +impl_benchmark_test_suite!( + Contracts, + crate::tests::ExtBuilder::default().build(), + crate::tests::Test, +); diff --git a/frame/contracts/src/chain_extension.rs b/frame/contracts/src/chain_extension.rs index ef6e034791..dc6e977177 100644 --- a/frame/contracts/src/chain_extension.rs +++ b/frame/contracts/src/chain_extension.rs @@ -18,7 +18,7 @@ //! 
A mechanism for runtime authors to augment the functionality of contracts. //! //! The runtime is able to call into any contract and retrieve the result using -//! [`bare_call`](crate::Module::bare_call). This already allows customization of runtime +//! [`bare_call`](crate::Pallet::bare_call). This already allows customization of runtime //! behaviour by user generated code (contracts). However, often it is more straightforward //! to allow the reverse behaviour: The contract calls into the runtime. We call the latter //! one a "chain extension" because it allows the chain to extend the set of functions that are @@ -37,7 +37,7 @@ //! [`charge_weight`](Environment::charge_weight) function must be called **before** //! carrying out any action that causes the consumption of the chargeable weight. //! It cannot be overstated how delicate of a process the creation of a chain extension -//! is. Check whether using [`bare_call`](crate::Module::bare_call) suffices for the +//! is. Check whether using [`bare_call`](crate::Pallet::bare_call) suffices for the //! use case at hand. //! //! # Benchmarking @@ -328,7 +328,7 @@ where /// /// If the contract supplied buffer is smaller than the passed `buffer` an `Err` is returned. /// If `allow_skip` is set to true the contract is allowed to skip the copying of the buffer - /// by supplying the guard value of [`u32::max_value()`] as `out_ptr`. The + /// by supplying the guard value of `u32::max_value()` as `out_ptr`. The /// `weight_per_byte` is only charged when the write actually happens and is not skipped or /// failed due to a too small output buffer. pub fn write( diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index bbb972b2ed..745384a867 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -16,7 +16,7 @@ // limitations under the License. 
use crate::{ - CodeHash, Event, RawEvent, Config, Module as Contracts, + CodeHash, Event, Config, Module as Contracts, TrieId, BalanceOf, ContractInfo, gas::GasMeter, rent::Rent, storage::{self, Storage}, Error, ContractInfoOf, Schedule, }; @@ -30,7 +30,7 @@ use frame_support::{ dispatch::{DispatchResult, DispatchError}, traits::{ExistenceRequirement, Currency, Time, Randomness, Get}, weights::Weight, - ensure, StorageMap, + ensure, }; use pallet_contracts_primitives::{ErrorOrigin, ExecError, ExecReturnValue, ExecResult, ReturnFlags}; @@ -57,7 +57,11 @@ pub enum TransactorKind { /// /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. -pub trait Ext { +/// +/// # Note +/// +/// This trait is sealed and cannot be implemented by downstream crates. +pub trait Ext: sealing::Sealed { type T: Config; /// Returns the storage entry of the executing account by the given `key`. @@ -72,8 +76,13 @@ pub trait Ext { /// Instantiate a contract from the given code. /// + /// Returns the original code size of the called contract. /// The newly created account will be associated with `code`. `value` specifies the amount of value /// transferred from this to the newly created account (also known as endowment). + /// + /// # Return Value + /// + /// Result<(AccountId, ExecReturnValue, CodeSize), (ExecError, CodeSize)> fn instantiate( &mut self, code: CodeHash, @@ -81,7 +90,7 @@ pub trait Ext { gas_meter: &mut GasMeter, input_data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; + ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)>; /// Transfer some amount of funds into the specified account. fn transfer( @@ -92,24 +101,35 @@ pub trait Ext { /// Transfer all funds to `beneficiary` and delete the contract. /// + /// Returns the original code size of the terminated contract. 
/// Since this function removes the self contract eagerly, if succeeded, no further actions should /// be performed on this `Ext` instance. /// /// This function will fail if the same contract is present on the contract /// call stack. + /// + /// # Return Value + /// + /// Result fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> DispatchResult; + ) -> Result; /// Call (possibly transferring some amount of funds) into the specified account. + /// + /// Returns the original code size of the called contract. + /// + /// # Return Value + /// + /// Result<(ExecReturnValue, CodeSize), (ExecError, CodeSize)> fn call( &mut self, to: &AccountIdOf, value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> ExecResult; + ) -> Result<(ExecReturnValue, u32), (ExecError, u32)>; /// Restores the given destination contract sacrificing the current one. /// @@ -118,13 +138,17 @@ pub trait Ext { /// /// This function will fail if the same contract is present /// on the contract call stack. + /// + /// # Return Value + /// + /// Result<(CallerCodeSize, DestCodeSize), (DispatchError, CallerCodeSize, DestCodesize)> fn restore_to( &mut self, dest: AccountIdOf, code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> DispatchResult; + ) -> Result<(u32, u32), (DispatchError, u32, u32)>; /// Returns a reference to the account id of the caller. fn caller(&self) -> &AccountIdOf; @@ -190,7 +214,11 @@ pub enum ExportedFunction { /// order to be able to mock the wasm logic for testing. pub trait Executable: Sized { /// Load the executable from storage. - fn from_storage(code_hash: CodeHash, schedule: &Schedule) -> Result; + fn from_storage( + code_hash: CodeHash, + schedule: &Schedule, + gas_meter: &mut GasMeter, + ) -> Result; /// Load the module from storage without re-instrumenting it. /// @@ -203,10 +231,14 @@ pub trait Executable: Sized { fn drop_from_storage(self); /// Increment the refcount by one. Fails if the code does not exist on-chain. 
- fn add_user(code_hash: CodeHash) -> DispatchResult; + /// + /// Returns the size of the original code. + fn add_user(code_hash: CodeHash) -> Result; /// Decrement the refcount by one and remove the code when it drops to zero. - fn remove_user(code_hash: CodeHash); + /// + /// Returns the size of the original code. + fn remove_user(code_hash: CodeHash) -> u32; /// Execute the specified exported function and return the result. /// @@ -238,6 +270,9 @@ pub trait Executable: Sized { /// without refetching this from storage the result can be inaccurate as it might be /// working with a stale value. Usually this inaccuracy is tolerable. fn occupied_storage(&self) -> u32; + + /// Size of the instrumented code in bytes. + fn code_len(&self) -> u32; } pub struct ExecutionContext<'a, T: Config + 'a, E> { @@ -290,35 +325,42 @@ where } /// Make a call to the specified address, optionally transferring some funds. + /// + /// # Return Value + /// + /// Result<(ExecReturnValue, CodeSize), (ExecError, CodeSize)> pub fn call( &mut self, dest: T::AccountId, value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> ExecResult { + ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { if self.depth == T::MaxDepth::get() as usize { - Err(Error::::MaxCallDepthReached)? + return Err((Error::::MaxCallDepthReached.into(), 0)); } let contract = >::get(&dest) .and_then(|contract| contract.get_alive()) - .ok_or(Error::::NotCallable)?; + .ok_or((Error::::NotCallable.into(), 0))?; - let executable = E::from_storage(contract.code_hash, &self.schedule)?; + let executable = E::from_storage(contract.code_hash, &self.schedule, gas_meter) + .map_err(|e| (e.into(), 0))?; + let code_len = executable.code_len(); // This charges the rent and denies access to a contract that is in need of // eviction by returning `None`. We cannot evict eagerly here because those // changes would be rolled back in case this contract is called by another // contract. 
// See: https://github.com/paritytech/substrate/issues/6439#issuecomment-648754324 - let contract = Rent::::charge(&dest, contract, executable.occupied_storage())? - .ok_or(Error::::NotCallable)?; + let contract = Rent::::charge(&dest, contract, executable.occupied_storage()) + .map_err(|e| (e.into(), code_len))? + .ok_or((Error::::NotCallable.into(), code_len))?; let transactor_kind = self.transactor_kind(); let caller = self.self_account.clone(); - self.with_nested_context(dest.clone(), contract.trie_id.clone(), |nested| { + let result = self.with_nested_context(dest.clone(), contract.trie_id.clone(), |nested| { if value > BalanceOf::::zero() { transfer::( TransferCause::Call, @@ -336,7 +378,8 @@ where gas_meter, ).map_err(|e| ExecError { error: e.error, origin: ErrorOrigin::Callee })?; Ok(output) - }) + }).map_err(|e| (e, code_len))?; + Ok((result, code_len)) } pub fn instantiate( @@ -407,7 +450,7 @@ where .ok_or(Error::::NewContractNotFunded)?; // Deposit an instantiation event. - deposit_event::(vec![], RawEvent::Instantiated(caller.clone(), dest.clone())); + deposit_event::(vec![], Event::Instantiated(caller.clone(), dest.clone())); Ok(output) }); @@ -581,10 +624,13 @@ where gas_meter: &mut GasMeter, input_data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - let executable = E::from_storage(code_hash, &self.ctx.schedule)?; - let result = self.ctx.instantiate(endowment, gas_meter, executable, input_data, salt)?; - Ok(result) + ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)> { + let executable = E::from_storage(code_hash, &self.ctx.schedule, gas_meter) + .map_err(|e| (e.into(), 0))?; + let code_len = executable.code_len(); + self.ctx.instantiate(endowment, gas_meter, executable, input_data, salt) + .map(|r| (r.0, r.1, code_len)) + .map_err(|e| (e, code_len)) } fn transfer( @@ -604,12 +650,12 @@ where fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> DispatchResult { + ) -> Result { let self_id = 
self.ctx.self_account.clone(); let value = T::Currency::free_balance(&self_id); if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self_id) { - return Err(Error::::ReentranceDenied.into()); + return Err((Error::::ReentranceDenied.into(), 0)); } } transfer::( @@ -618,12 +664,12 @@ where &self_id, beneficiary, value, - )?; + ).map_err(|e| (e, 0))?; if let Some(ContractInfo::Alive(info)) = ContractInfoOf::::take(&self_id) { - Storage::::queue_trie_for_deletion(&info)?; - E::remove_user(info.code_hash); - Contracts::::deposit_event(RawEvent::Terminated(self_id, beneficiary.clone())); - Ok(()) + Storage::::queue_trie_for_deletion(&info).map_err(|e| (e, 0))?; + let code_len = E::remove_user(info.code_hash); + Contracts::::deposit_event(Event::Terminated(self_id, beneficiary.clone())); + Ok(code_len) } else { panic!( "this function is only invoked by in the context of a contract;\ @@ -639,7 +685,7 @@ where value: BalanceOf, gas_meter: &mut GasMeter, input_data: Vec, - ) -> ExecResult { + ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { self.ctx.call(to.clone(), value, gas_meter, input_data) } @@ -649,10 +695,10 @@ where code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> DispatchResult { + ) -> Result<(u32, u32), (DispatchError, u32, u32)> { if let Some(caller_ctx) = self.ctx.caller { if caller_ctx.is_live(&self.ctx.self_account) { - return Err(Error::::ReentranceDenied.into()); + return Err((Error::::ReentranceDenied.into(), 0, 0)); } } @@ -666,7 +712,7 @@ where if let Ok(_) = result { deposit_event::( vec![], - RawEvent::Restored( + Event::Restored( self.ctx.self_account.clone(), dest, code_hash, @@ -712,7 +758,7 @@ where fn deposit_event(&mut self, topics: Vec, data: Vec) { deposit_event::( topics, - RawEvent::ContractEmitted(self.ctx.self_account.clone(), data) + Event::ContractEmitted(self.ctx.self_account.clone(), data) ); } @@ -757,6 +803,20 @@ fn deposit_event( ) } +mod sealing { + use super::*; + + pub trait Sealed {} + + 
impl<'a, 'b: 'a, T: Config, E> Sealed for CallContext<'a, 'b, T, E> {} + + #[cfg(test)] + impl Sealed for crate::wasm::MockExt {} + + #[cfg(test)] + impl Sealed for &mut crate::wasm::MockExt {} +} + /// These tests exercise the executive layer. /// /// In these tests the VM/loader are mocked. Instead of dealing with wasm bytecode they use simple closures. @@ -767,13 +827,12 @@ mod tests { use super::*; use crate::{ gas::GasMeter, tests::{ExtBuilder, Test, Event as MetaEvent}, - gas::Gas, storage::Storage, tests::{ ALICE, BOB, CHARLIE, test_utils::{place_contract, set_balance, get_balance}, }, - Error, + Error, Weight, }; use sp_runtime::DispatchError; use assert_matches::assert_matches; @@ -781,7 +840,7 @@ mod tests { type MockContext<'a> = ExecutionContext<'a, Test, MockExecutable>; - const GAS_LIMIT: Gas = 10_000_000_000; + const GAS_LIMIT: Weight = 10_000_000_000; thread_local! { static LOADER: RefCell = RefCell::new(MockLoader::default()); @@ -828,7 +887,8 @@ mod tests { impl Executable for MockExecutable { fn from_storage( code_hash: CodeHash, - _schedule: &Schedule + _schedule: &Schedule, + _gas_meter: &mut GasMeter, ) -> Result { Self::from_storage_noinstr(code_hash) } @@ -845,11 +905,11 @@ mod tests { fn drop_from_storage(self) {} - fn add_user(_code_hash: CodeHash) -> DispatchResult { - Ok(()) + fn add_user(_code_hash: CodeHash) -> Result { + Ok(0) } - fn remove_user(_code_hash: CodeHash) {} + fn remove_user(_code_hash: CodeHash) -> u32 { 0 } fn execute>( self, @@ -872,6 +932,10 @@ mod tests { fn occupied_storage(&self) -> u32 { 0 } + + fn code_len(&self) -> u32 { + 0 + } } fn exec_success() -> ExecResult { @@ -954,7 +1018,7 @@ mod tests { vec![], ).unwrap(); - assert!(!output.is_success()); + assert!(!output.0.is_success()); assert_eq!(get_balance(&origin), 100); // the rent is still charged @@ -1012,8 +1076,8 @@ mod tests { ); let output = result.unwrap(); - assert!(output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + 
assert!(output.0.is_success()); + assert_eq!(output.0.data, vec![1, 2, 3, 4]); }); } @@ -1040,8 +1104,8 @@ mod tests { ); let output = result.unwrap(); - assert!(!output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); + assert!(!output.0.is_success()); + assert_eq!(output.0.data, vec![1, 2, 3, 4]); }); } @@ -1080,13 +1144,17 @@ mod tests { let schedule = Contracts::current_schedule(); let subsistence = Contracts::::subsistence_threshold(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + input_data_ch, &schedule, &mut gas_meter + ).unwrap(); set_balance(&ALICE, subsistence * 10); let result = ctx.instantiate( subsistence * 3, - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(input_data_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![1, 2, 3, 4], &[], ); @@ -1113,7 +1181,7 @@ mod tests { // Verify that we've got proper error and set `reached_bottom`. 
assert_eq!( r, - Err(Error::::MaxCallDepthReached.into()) + Err((Error::::MaxCallDepthReached.into(), 0)) ); *reached_bottom = true; } else { @@ -1235,12 +1303,16 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = Contracts::current_schedule(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + dummy_ch, &schedule, &mut gas_meter + ).unwrap(); assert_matches!( ctx.instantiate( 0, // <- zero endowment - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![], &[], ), @@ -1258,13 +1330,17 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = Contracts::current_schedule(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + dummy_ch, &schedule, &mut gas_meter + ).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( 100, - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![], &[], ), @@ -1275,7 +1351,7 @@ mod tests { // there are instantiation event. 
assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ - RawEvent::Instantiated(ALICE, instantiated_contract_address) + Event::Instantiated(ALICE, instantiated_contract_address) ]); }); } @@ -1289,13 +1365,17 @@ mod tests { ExtBuilder::default().existential_deposit(15).build().execute_with(|| { let schedule = Contracts::current_schedule(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + dummy_ch, &schedule, &mut gas_meter + ).unwrap(); set_balance(&ALICE, 1000); let instantiated_contract_address = assert_matches!( ctx.instantiate( 100, - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(dummy_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![], &[], ), @@ -1317,7 +1397,7 @@ mod tests { let instantiated_contract_address = Rc::clone(&instantiated_contract_address); move |ctx| { // Instantiate a contract and save it's address in `instantiated_contract_address`. - let (address, output) = ctx.ext.instantiate( + let (address, output, _) = ctx.ext.instantiate( dummy_ch, Contracts::::subsistence_threshold() * 3, ctx.gas_meter, @@ -1347,7 +1427,7 @@ mod tests { // there are instantiation event. 
assert_eq!(Storage::::code_hash(&instantiated_contract_address).unwrap(), dummy_ch); assert_eq!(&events(), &[ - RawEvent::Instantiated(BOB, instantiated_contract_address) + Event::Instantiated(BOB, instantiated_contract_address) ]); }); } @@ -1369,10 +1449,10 @@ mod tests { vec![], &[], ), - Err(ExecError { + Err((ExecError { error: DispatchError::Other("It's a trap!"), origin: ErrorOrigin::Callee, - }) + }, 0)) ); exec_success() @@ -1410,13 +1490,17 @@ mod tests { .execute_with(|| { let schedule = Contracts::current_schedule(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + terminate_ch, &schedule, &mut gas_meter + ).unwrap(); set_balance(&ALICE, 1000); assert_eq!( ctx.instantiate( 100, - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(terminate_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![], &[], ), @@ -1445,12 +1529,16 @@ mod tests { let subsistence = Contracts::::subsistence_threshold(); let schedule = Contracts::current_schedule(); let mut ctx = MockContext::top_level(ALICE, &schedule); + let mut gas_meter = GasMeter::::new(GAS_LIMIT); + let executable = MockExecutable::from_storage( + rent_allowance_ch, &schedule, &mut gas_meter + ).unwrap(); set_balance(&ALICE, subsistence * 10); let result = ctx.instantiate( subsistence * 5, - &mut GasMeter::::new(GAS_LIMIT), - MockExecutable::from_storage(rent_allowance_ch, &schedule).unwrap(), + &mut gas_meter, + executable, vec![], &[], ); diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 4bdfcdd577..80e608b217 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -15,42 +15,26 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::Config; +use crate::{Config, Error}; use sp_std::marker::PhantomData; use sp_runtime::traits::Zero; use frame_support::{ - dispatch::{DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo}, + dispatch::{ + DispatchResultWithPostInfo, PostDispatchInfo, DispatchErrorWithPostInfo, DispatchError, + }, weights::Weight, }; use pallet_contracts_primitives::ExecError; +use sp_core::crypto::UncheckedFrom; #[cfg(test)] use std::{any::Any, fmt::Debug}; -// Gas is essentially the same as weight. It is a 1 to 1 correspondence. -pub type Gas = Weight; - -#[must_use] #[derive(Debug, PartialEq, Eq)] -pub enum GasMeterResult { - Proceed(ChargedAmount), - OutOfGas, -} - -impl GasMeterResult { - pub fn is_out_of_gas(&self) -> bool { - match *self { - GasMeterResult::OutOfGas => true, - GasMeterResult::Proceed(_) => false, - } - } -} - -#[derive(Debug, PartialEq, Eq)] -pub struct ChargedAmount(Gas); +pub struct ChargedAmount(Weight); impl ChargedAmount { - pub fn amount(&self) -> Gas { + pub fn amount(&self) -> Weight { self.0 } } @@ -85,7 +69,7 @@ pub trait Token: Copy + Clone + TestAuxiliaries { /// That said, implementors of this function still can run into overflows /// while calculating the amount. In this case it is ok to use saturating operations /// since on overflow they will return `max_value` which should consume all gas. - fn calculate_amount(&self, metadata: &Self::Metadata) -> Gas; + fn calculate_amount(&self, metadata: &Self::Metadata) -> Weight; } /// A wrapper around a type-erased trait object of what used to be a `Token`. @@ -96,15 +80,19 @@ pub struct ErasedToken { } pub struct GasMeter { - gas_limit: Gas, + gas_limit: Weight, /// Amount of gas left from initial gas limit. Can reach zero. 
- gas_left: Gas, + gas_left: Weight, _phantom: PhantomData, #[cfg(test)] tokens: Vec, } -impl GasMeter { - pub fn new(gas_limit: Gas) -> Self { + +impl GasMeter +where + T::AccountId: UncheckedFrom<::Hash> + AsRef<[u8]> +{ + pub fn new(gas_limit: Weight) -> Self { GasMeter { gas_limit, gas_left: gas_limit, @@ -128,7 +116,7 @@ impl GasMeter { &mut self, metadata: &Tok::Metadata, token: Tok, - ) -> GasMeterResult { + ) -> Result { #[cfg(test)] { // Unconditionally add the token to the storage. @@ -149,11 +137,25 @@ impl GasMeter { self.gas_left = new_value.unwrap_or_else(Zero::zero); match new_value { - Some(_) => GasMeterResult::Proceed(ChargedAmount(amount)), - None => GasMeterResult::OutOfGas, + Some(_) => Ok(ChargedAmount(amount)), + None => Err(Error::::OutOfGas.into()), } } + /// Adjust a previously charged amount down to its actual amount. + /// + /// This is when a maximum a priori amount was charged and then should be partially + /// refunded to match the actual amount. + pub fn adjust_gas>( + &mut self, + charged_amount: ChargedAmount, + metadata: &Tok::Metadata, + token: Tok, + ) { + let adjustment = charged_amount.0.saturating_sub(token.calculate_amount(metadata)); + self.gas_left = self.gas_left.saturating_add(adjustment).min(self.gas_limit); + } + /// Refund previously charged gas back to the gas meter. /// /// This can be used if a gas worst case estimation must be charged before @@ -172,7 +174,7 @@ impl GasMeter { /// All unused gas in the nested gas meter is returned to this gas meter. pub fn with_nested>) -> R>( &mut self, - amount: Gas, + amount: Weight, f: F, ) -> R { // NOTE that it is ok to allocate all available gas since it still ensured @@ -192,12 +194,12 @@ impl GasMeter { } /// Returns how much gas was used. - pub fn gas_spent(&self) -> Gas { + pub fn gas_spent(&self) -> Weight { self.gas_limit - self.gas_left } /// Returns how much gas left from the initial budget. 
- pub fn gas_left(&self) -> Gas { + pub fn gas_left(&self) -> Weight { self.gas_left } @@ -225,49 +227,48 @@ impl GasMeter { } } -/// A simple utility macro that helps to match against a -/// list of tokens. -#[macro_export] -macro_rules! match_tokens { - ($tokens_iter:ident,) => { - }; - ($tokens_iter:ident, $x:expr, $($rest:tt)*) => { - { - let next = ($tokens_iter).next().unwrap(); - let pattern = $x; - - // Note that we don't specify the type name directly in this macro, - // we only have some expression $x of some type. At the same time, we - // have an iterator of Box and to downcast we need to specify - // the type which we want downcast to. - // - // So what we do is we assign `_pattern_typed_next_ref` to a variable which has - // the required type. - // - // Then we make `_pattern_typed_next_ref = token.downcast_ref()`. This makes - // rustc infer the type `T` (in `downcast_ref`) to be the same as in $x. - - let mut _pattern_typed_next_ref = &pattern; - _pattern_typed_next_ref = match next.token.downcast_ref() { - Some(p) => { - assert_eq!(p, &pattern); - p - } - None => { - panic!("expected type {} got {}", stringify!($x), next.description); - } - }; - } - - match_tokens!($tokens_iter, $($rest)*); - }; -} - #[cfg(test)] mod tests { use super::{GasMeter, Token}; use crate::tests::Test; + /// A simple utility macro that helps to match against a + /// list of tokens. + macro_rules! match_tokens { + ($tokens_iter:ident,) => { + }; + ($tokens_iter:ident, $x:expr, $($rest:tt)*) => { + { + let next = ($tokens_iter).next().unwrap(); + let pattern = $x; + + // Note that we don't specify the type name directly in this macro, + // we only have some expression $x of some type. At the same time, we + // have an iterator of Box and to downcast we need to specify + // the type which we want downcast to. + // + // So what we do is we assign `_pattern_typed_next_ref` to a variable which has + // the required type. 
+ // + // Then we make `_pattern_typed_next_ref = token.downcast_ref()`. This makes + // rustc infer the type `T` (in `downcast_ref`) to be the same as in $x. + + let mut _pattern_typed_next_ref = &pattern; + _pattern_typed_next_ref = match next.token.downcast_ref() { + Some(p) => { + assert_eq!(p, &pattern); + p + } + None => { + panic!("expected type {} got {}", stringify!($x), next.description); + } + }; + } + + match_tokens!($tokens_iter, $($rest)*); + }; + } + /// A trivial token that charges the specified number of gas units. #[derive(Copy, Clone, PartialEq, Eq, Debug)] struct SimpleToken(u64); @@ -304,7 +305,7 @@ mod tests { let result = gas_meter .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)); - assert!(!result.is_out_of_gas()); + assert!(!result.is_err()); assert_eq!(gas_meter.gas_left(), 49_970); } @@ -312,10 +313,10 @@ mod tests { #[test] fn tracing() { let mut gas_meter = GasMeter::::new(50000); - assert!(!gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(!gas_meter.charge(&(), SimpleToken(1)).is_err()); assert!(!gas_meter .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)) - .is_out_of_gas()); + .is_err()); let mut tokens = gas_meter.tokens()[0..2].iter(); match_tokens!(tokens, SimpleToken(1), MultiplierToken(10),); @@ -325,7 +326,7 @@ mod tests { #[test] fn refuse_to_execute_anything_if_zero() { let mut gas_meter = GasMeter::::new(0); - assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(gas_meter.charge(&(), SimpleToken(1)).is_err()); } // Make sure that if the gas meter is charged by exceeding amount then not only an error @@ -338,10 +339,10 @@ mod tests { let mut gas_meter = GasMeter::::new(200); // The first charge is should lead to OOG. - assert!(gas_meter.charge(&(), SimpleToken(300)).is_out_of_gas()); + assert!(gas_meter.charge(&(), SimpleToken(300)).is_err()); // The gas meter is emptied at this moment, so this should also fail. 
- assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(gas_meter.charge(&(), SimpleToken(1)).is_err()); } @@ -350,6 +351,6 @@ mod tests { #[test] fn charge_exact_amount() { let mut gas_meter = GasMeter::::new(25); - assert!(!gas_meter.charge(&(), SimpleToken(25)).is_out_of_gas()); + assert!(!gas_meter.charge(&(), SimpleToken(25)).is_err()); } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index b20db8dd8c..2ce2014075 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -80,7 +80,7 @@ //! * [Balances](../pallet_balances/index.html) #![cfg_attr(not(feature = "std"), no_std)] -#![cfg_attr(feature = "runtime-benchmarks", recursion_limit="256")] +#![cfg_attr(feature = "runtime-benchmarks", recursion_limit="512")] #[macro_use] mod gas; @@ -98,363 +98,80 @@ pub mod weights; mod tests; pub use crate::{ - gas::{Gas, GasMeter}, - wasm::{ReturnCode as RuntimeReturnCode, PrefabWasmModule}, - weights::WeightInfo, + wasm::PrefabWasmModule, schedule::{Schedule, HostFnWeights, InstructionWeights, Limits}, + pallet::*, }; use crate::{ + gas::GasMeter, exec::{ExecutionContext, Executable}, rent::Rent, - storage::Storage, + storage::{Storage, DeletedContract}, + weights::WeightInfo, }; use sp_core::crypto::UncheckedFrom; use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; use sp_runtime::{ traits::{ - Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, Convert, Saturating, + Hash, StaticLookup, MaybeSerializeDeserialize, Member, Convert, Saturating, Zero, }, RuntimeDebug, Perbill, }; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, ensure, storage::child::ChildInfo, - dispatch::{DispatchResult, DispatchResultWithPostInfo}, traits::{OnUnbalanced, Currency, Get, Time, Randomness}, - weights::Pays, + weights::{Weight, PostDispatchInfo, WithPostDispatchInfo}, }; -use frame_system::{ensure_signed, ensure_root, Module as System}; +use 
frame_system::Module as System; use pallet_contracts_primitives::{ - RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, ExecResult, + RentProjectionResult, GetStorageResult, ContractAccessError, ContractExecResult, }; -use frame_support::weights::Weight; pub type CodeHash = ::Hash; pub type TrieId = Vec; - -/// Information for managing an account and its sub trie abstraction. -/// This is the required info to cache for an account -#[derive(Encode, Decode, RuntimeDebug)] -pub enum ContractInfo { - Alive(AliveContractInfo), - Tombstone(TombstoneContractInfo), -} - -impl ContractInfo { - /// If contract is alive then return some alive info - pub fn get_alive(self) -> Option> { - if let ContractInfo::Alive(alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some reference to alive info - pub fn as_alive(&self) -> Option<&AliveContractInfo> { - if let ContractInfo::Alive(ref alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some mutable reference to alive info - pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { - if let ContractInfo::Alive(ref mut alive) = self { - Some(alive) - } else { - None - } - } - - /// If contract is tombstone then return some tombstone info - pub fn get_tombstone(self) -> Option> { - if let ContractInfo::Tombstone(tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some reference to tombstone info - pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some mutable reference to tombstone info - pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref mut tombstone) = self { - Some(tombstone) - } else { - None - } - } -} - -pub type AliveContractInfo = - 
RawAliveContractInfo, BalanceOf, ::BlockNumber>; - -/// Information for managing an account and its sub trie abstraction. -/// This is the required info to cache for an account. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct RawAliveContractInfo { - /// Unique ID for the subtree encoded as a bytes vector. - pub trie_id: TrieId, - /// The total number of bytes used by this contract. - /// - /// It is a sum of each key-value pair stored by this contract. - pub storage_size: u32, - /// The total number of key-value pairs in storage of this contract. - pub pair_count: u32, - /// The code associated with a given account. - pub code_hash: CodeHash, - /// Pay rent at most up to this value. - pub rent_allowance: Balance, - /// The amount of rent that was payed by the contract over its whole lifetime. - /// - /// A restored contract starts with a value of zero just like a new contract. - pub rent_payed: Balance, - /// Last block rent has been payed. - pub deduct_block: BlockNumber, - /// Last block child storage has been written. - pub last_write: Option, -} - -impl RawAliveContractInfo { - /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_info(&self) -> ChildInfo { - child_trie_info(&self.trie_id[..]) - } -} - -/// Associated child trie unique id is built from the hash part of the trie id. 
-pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { - ChildInfo::new_default(trie_id) -} - -pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; - -#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] -pub struct RawTombstoneContractInfo(H, PhantomData); - -impl RawTombstoneContractInfo -where - H: Member + MaybeSerializeDeserialize+ Debug - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default - + sp_std::hash::Hash + Codec, - Hasher: Hash, -{ - fn new(storage_root: &[u8], code_hash: H) -> Self { - let mut buf = Vec::new(); - storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); - buf.extend_from_slice(code_hash.as_ref()); - RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) - } -} - -impl From> for ContractInfo { - fn from(alive_info: AliveContractInfo) -> Self { - Self::Alive(alive_info) - } -} - pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; pub type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +pub type AliveContractInfo = + RawAliveContractInfo, BalanceOf, ::BlockNumber>; +pub type TombstoneContractInfo = + RawTombstoneContractInfo<::Hash, ::Hashing>; -pub trait Config: frame_system::Config { - type Time: Time; - type Randomness: Randomness; - - /// The currency in which fees are paid and contract balances are held. - type Currency: Currency; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// Handler for rent payments. - type RentPayment: OnUnbalanced>; - - /// Number of block delay an extrinsic claim surcharge has. - /// - /// When claim surcharge is called by an extrinsic the rent is checked - /// for current_block - delay - type SignedClaimHandicap: Get; - - /// The minimum amount required to generate a tombstone. - type TombstoneDeposit: Get>; - - /// The balance every contract needs to deposit to stay alive indefinitely. 
- /// - /// This is different from the [`Self::TombstoneDeposit`] because this only needs to be - /// deposited while the contract is alive. Costs for additional storage are added to - /// this base cost. - /// - /// This is a simple way to ensure that contracts with empty storage eventually get deleted by - /// making them pay rent. This creates an incentive to remove them early in order to save rent. - type DepositPerContract: Get>; - - /// The balance a contract needs to deposit per storage byte to stay alive indefinitely. - /// - /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, - /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. - /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, - /// then it would pay 500 BU/day. - type DepositPerStorageByte: Get>; - - /// The balance a contract needs to deposit per storage item to stay alive indefinitely. - /// - /// It works the same as [`Self::DepositPerStorageByte`] but for storage items. - type DepositPerStorageItem: Get>; - - /// The fraction of the deposit that should be used as rent per block. - /// - /// When a contract hasn't enough balance deposited to stay alive indefinitely it needs - /// to pay per block for the storage it consumes that is not covered by the deposit. - /// This determines how high this rent payment is per block as a fraction of the deposit. - type RentFraction: Get; - - /// Reward that is received by the party whose touch has led - /// to removal of a contract. - type SurchargeReward: Get>; - - /// The maximum nesting level of a call/instantiate stack. - type MaxDepth: Get; - - /// The maximum size of a storage value and event payload in bytes. 
- type MaxValueSize: Get; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// Used to answer contracts's queries regarding the current weight price. This is **not** - /// used to calculate the actual fee and is only for informational purposes. - type WeightPrice: Convert>; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The time implementation used to supply timestamps to conntracts through `seal_now`. + type Time: Time; - /// Describes the weights of the dispatchables of this module and is also used to - /// construct a default cost schedule. - type WeightInfo: WeightInfo; + /// The generator used to supply randomness to contracts through `seal_random`. + type Randomness: Randomness; - /// Type that allows the runtime authors to add new host functions for a contract to call. - type ChainExtension: chain_extension::ChainExtension; + /// The currency in which fees are paid and contract balances are held. + type Currency: Currency; - /// The maximum number of tries that can be queued for deletion. - type DeletionQueueDepth: Get; + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// The maximum amount of weight that can be consumed per block for lazy trie removal. - type DeletionWeightLimit: Get; -} - -decl_error! { - /// Error for the contracts module. - pub enum Error for Module - where - T::AccountId: UncheckedFrom, - T::AccountId: AsRef<[u8]>, - { - /// A new schedule must have a greater version than the current one. - InvalidScheduleVersion, - /// An origin must be signed or inherent and auxiliary sender only provided on inherent. - InvalidSurchargeClaim, - /// Cannot restore from nonexisting or tombstone contract. - InvalidSourceContract, - /// Cannot restore to nonexisting or alive contract. - InvalidDestinationContract, - /// Tombstones don't match. - InvalidTombstone, - /// An origin TrieId written in the current block. 
- InvalidContractOrigin, - /// The executed contract exhausted its gas limit. - OutOfGas, - /// The output buffer supplied to a contract API call was too small. - OutputBufferTooSmall, - /// Performing the requested transfer would have brought the contract below - /// the subsistence threshold. No transfer is allowed to do this in order to allow - /// for a tombstone to be created. Use `seal_terminate` to remove a contract without - /// leaving a tombstone behind. - BelowSubsistenceThreshold, - /// The newly created contract is below the subsistence threshold after executing - /// its contructor. No contracts are allowed to exist below that threshold. - NewContractNotFunded, - /// Performing the requested transfer failed for a reason originating in the - /// chosen currency implementation of the runtime. Most probably the balance is - /// too low or locks are placed on it. - TransferFailed, - /// Performing a call was denied because the calling depth reached the limit - /// of what is specified in the schedule. - MaxCallDepthReached, - /// The contract that was called is either no contract at all (a plain account) - /// or is a tombstone. - NotCallable, - /// The code supplied to `instantiate_with_code` exceeds the limit specified in the - /// current schedule. - CodeTooLarge, - /// No code could be found at the supplied code hash. - CodeNotFound, - /// A buffer outside of sandbox memory was passed to a contract API function. - OutOfBounds, - /// Input passed to a contract API function failed to decode as expected type. - DecodingFailed, - /// Contract trapped during execution. - ContractTrapped, - /// The size defined in `T::MaxValueSize` was exceeded. - ValueTooLarge, - /// The action performed is not allowed while the contract performing it is already - /// on the call stack. Those actions are contract self destruction and restoration - /// of a tombstone. - ReentranceDenied, - /// `seal_input` was called twice from the same contract execution context. 
- InputAlreadyRead, - /// The subject passed to `seal_random` exceeds the limit. - RandomSubjectTooLong, - /// The amount of topics passed to `seal_deposit_events` exceeds the limit. - TooManyTopics, - /// The topics passed to `seal_deposit_events` contains at least one duplicate. - DuplicateTopics, - /// The chain does not provide a chain extension. Calling the chain extension results - /// in this error. Note that this usually shouldn't happen as deploying such contracts - /// is rejected. - NoChainExtension, - /// Removal of a contract failed because the deletion queue is full. - /// - /// This can happen when either calling [`Module::claim_surcharge`] or `seal_terminate`. - /// The queue is filled by deleting contracts and emptied by a fixed amount each block. - /// Trying again during another block is the only way to resolve this issue. - DeletionQueueFull, - /// A contract could not be evicted because it has enough balance to pay rent. - /// - /// This can be returned from [`Module::claim_surcharge`] because the target - /// contract has enough balance to pay for its rent. - ContractNotEvictable, - /// A storage modification exhausted the 32bit type that holds the storage size. - /// - /// This can either happen when the accumulated storage in bytes is too large or - /// when number of storage items is too large. - StorageExhausted, - /// A contract with the same AccountId already exists. - DuplicateContract, - } -} - -decl_module! { - /// Contracts module. - pub struct Module for enum Call - where - origin: T::Origin, - T::AccountId: UncheckedFrom, - T::AccountId: AsRef<[u8]>, - { - type Error = Error; + /// Handler for rent payments. + type RentPayment: OnUnbalanced>; /// Number of block delay an extrinsic claim surcharge has. 
/// /// When claim surcharge is called by an extrinsic the rent is checked /// for current_block - delay - const SignedClaimHandicap: T::BlockNumber = T::SignedClaimHandicap::get(); + #[pallet::constant] + type SignedClaimHandicap: Get; /// The minimum amount required to generate a tombstone. - const TombstoneDeposit: BalanceOf = T::TombstoneDeposit::get(); + #[pallet::constant] + type TombstoneDeposit: Get>; /// The balance every contract needs to deposit to stay alive indefinitely. /// @@ -464,7 +181,8 @@ decl_module! { /// /// This is a simple way to ensure that contracts with empty storage eventually get deleted by /// making them pay rent. This creates an incentive to remove them early in order to save rent. - const DepositPerContract: BalanceOf = T::DepositPerContract::get(); + #[pallet::constant] + type DepositPerContract: Get>; /// The balance a contract needs to deposit per storage byte to stay alive indefinitely. /// @@ -472,40 +190,73 @@ decl_module! { /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, /// then it would pay 500 BU/day. - const DepositPerStorageByte: BalanceOf = T::DepositPerStorageByte::get(); + #[pallet::constant] + type DepositPerStorageByte: Get>; /// The balance a contract needs to deposit per storage item to stay alive indefinitely. /// /// It works the same as [`Self::DepositPerStorageByte`] but for storage items. - const DepositPerStorageItem: BalanceOf = T::DepositPerStorageItem::get(); + #[pallet::constant] + type DepositPerStorageItem: Get>; /// The fraction of the deposit that should be used as rent per block. /// /// When a contract hasn't enough balance deposited to stay alive indefinitely it needs /// to pay per block for the storage it consumes that is not covered by the deposit. /// This determines how high this rent payment is per block as a fraction of the deposit. 
- const RentFraction: Perbill = T::RentFraction::get(); + #[pallet::constant] + type RentFraction: Get; /// Reward that is received by the party whose touch has led /// to removal of a contract. - const SurchargeReward: BalanceOf = T::SurchargeReward::get(); + #[pallet::constant] + type SurchargeReward: Get>; + + /// The maximum nesting level of a call/instantiate stack. + #[pallet::constant] + type MaxDepth: Get; + + /// The maximum size of a storage value and event payload in bytes. + #[pallet::constant] + type MaxValueSize: Get; - /// The maximum nesting level of a call/instantiate stack. A reasonable default - /// value is 100. - const MaxDepth: u32 = T::MaxDepth::get(); + /// Used to answer contracts's queries regarding the current weight price. This is **not** + /// used to calculate the actual fee and is only for informational purposes. + type WeightPrice: Convert>; - /// The maximum size of a storage value in bytes. A reasonable default is 16 KiB. - const MaxValueSize: u32 = T::MaxValueSize::get(); + /// Describes the weights of the dispatchables of this module and is also used to + /// construct a default cost schedule. + type WeightInfo: WeightInfo; + + /// Type that allows the runtime authors to add new host functions for a contract to call. + type ChainExtension: chain_extension::ChainExtension; /// The maximum number of tries that can be queued for deletion. - const DeletionQueueDepth: u32 = T::DeletionQueueDepth::get(); + #[pallet::constant] + type DeletionQueueDepth: Get; /// The maximum amount of weight that can be consumed per block for lazy trie removal. - const DeletionWeightLimit: Weight = T::DeletionWeightLimit::get(); + #[pallet::constant] + type DeletionWeightLimit: Get; + + /// The maximum length of a contract code in bytes. This limit applies to the instrumented + /// version of the code. Therefore `instantiate_with_code` can fail even when supplying + /// a wasm binary below this maximum size. 
+ #[pallet::constant] + type MaxCodeSize: Get; + } - fn deposit_event() = default; + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); - fn on_initialize() -> Weight { + #[pallet::hooks] + impl Hooks> for Pallet + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { + fn on_initialize(_block: T::BlockNumber) -> Weight { // We do not want to go above the block limit and rather avoid lazy deletion // in that case. This should only happen on runtime upgrades. let weight_limit = T::BlockWeights::get().max_block @@ -514,21 +265,29 @@ decl_module! { Storage::::process_deletion_queue_batch(weight_limit) .saturating_add(T::WeightInfo::on_initialize()) } + } + #[pallet::call] + impl Pallet + where + T::AccountId: UncheckedFrom, + T::AccountId: AsRef<[u8]>, + { /// Updates the schedule for metering contracts. /// /// The schedule must have a greater version than the stored schedule. - #[weight = T::WeightInfo::update_schedule()] - pub fn update_schedule(origin, schedule: Schedule) -> DispatchResult { + #[pallet::weight(T::WeightInfo::update_schedule())] + pub fn update_schedule( + origin: OriginFor, + schedule: Schedule + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; if >::current_schedule().version >= schedule.version { Err(Error::::InvalidScheduleVersion)? } - - Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); + Self::deposit_event(Event::ScheduleUpdated(schedule.version)); CurrentSchedule::put(schedule); - - Ok(()) + Ok(().into()) } /// Makes a call to an account, optionally transferring some balance. @@ -538,21 +297,24 @@ decl_module! { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. 
- #[weight = T::WeightInfo::call().saturating_add(*gas_limit)] + #[pallet::weight(T::WeightInfo::call(T::MaxCodeSize::get() / 1024).saturating_add(*gas_limit))] pub fn call( - origin, + origin: OriginFor, dest: ::Source, - #[compact] value: BalanceOf, - #[compact] gas_limit: Gas, + #[pallet::compact] value: BalanceOf, + #[pallet::compact] gas_limit: Weight, data: Vec ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; let dest = T::Lookup::lookup(dest)?; let mut gas_meter = GasMeter::new(gas_limit); - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.call(dest, value, gas_meter, data) - }); - gas_meter.into_dispatch_result(result, T::WeightInfo::call()) + let schedule = >::current_schedule(); + let mut ctx = ExecutionContext::>::top_level(origin, &schedule); + let (result, code_len) = match ctx.call(dest, value, &mut gas_meter, data) { + Ok((output, len)) => (Ok(output), len), + Err((err, len)) => (Err(err), len), + }; + gas_meter.into_dispatch_result(result, T::WeightInfo::call(code_len / 1024)) } /// Instantiates a new contract from the supplied `code` optionally transferring @@ -576,32 +338,32 @@ decl_module! { /// - The smart-contract account is created at the computed address. /// - The `endowment` is transferred to the new account. /// - The `deploy` function is executed in the context of the newly-created account. 
- #[weight = + #[pallet::weight( T::WeightInfo::instantiate_with_code( code.len() as u32 / 1024, salt.len() as u32 / 1024, ) .saturating_add(*gas_limit) - ] + )] pub fn instantiate_with_code( - origin, - #[compact] endowment: BalanceOf, - #[compact] gas_limit: Gas, + origin: OriginFor, + #[pallet::compact] endowment: BalanceOf, + #[pallet::compact] gas_limit: Weight, code: Vec, data: Vec, salt: Vec, ) -> DispatchResultWithPostInfo { let origin = ensure_signed(origin)?; - let schedule = >::current_schedule(); let code_len = code.len() as u32; - ensure!(code_len <= schedule.limits.code_size, Error::::CodeTooLarge); + ensure!(code_len <= T::MaxCodeSize::get(), Error::::CodeTooLarge); let mut gas_meter = GasMeter::new(gas_limit); - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - let executable = PrefabWasmModule::from_code(code, &schedule)?; - let result = ctx.instantiate(endowment, gas_meter, executable, data, &salt) - .map(|(_address, output)| output)?; - Ok(result) - }); + let schedule = >::current_schedule(); + let executable = PrefabWasmModule::from_code(code, &schedule)?; + let code_len = executable.code_len(); + ensure!(code_len <= T::MaxCodeSize::get(), Error::::CodeTooLarge); + let mut ctx = ExecutionContext::>::top_level(origin, &schedule); + let result = ctx.instantiate(endowment, &mut gas_meter, executable, data, &salt) + .map(|(_address, output)| output); gas_meter.into_dispatch_result( result, T::WeightInfo::instantiate_with_code(code_len / 1024, salt.len() as u32 / 1024) @@ -610,88 +372,291 @@ decl_module! { /// Instantiates a contract from a previously deployed wasm binary. /// - /// This function is identical to [`Self::instantiate_with_code`] but without the - /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary - /// must be supplied. 
- #[weight = - T::WeightInfo::instantiate(salt.len() as u32 / 1024) - .saturating_add(*gas_limit) - ] - pub fn instantiate( - origin, - #[compact] endowment: BalanceOf, - #[compact] gas_limit: Gas, - code_hash: CodeHash, - data: Vec, - salt: Vec, - ) -> DispatchResultWithPostInfo { - let origin = ensure_signed(origin)?; - let mut gas_meter = GasMeter::new(gas_limit); - let result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - let executable = PrefabWasmModule::from_storage(code_hash, &ctx.schedule)?; - let result = ctx.instantiate(endowment, gas_meter, executable, data, &salt) - .map(|(_address, output)| output)?; - Ok(result) - }); - gas_meter.into_dispatch_result( - result, - T::WeightInfo::instantiate(salt.len() as u32 / 1024) - ) - } - - /// Allows block producers to claim a small reward for evicting a contract. If a block - /// producer fails to do so, a regular users will be allowed to claim the reward. + /// This function is identical to [`Self::instantiate_with_code`] but without the + /// code deployment step. Instead, the `code_hash` of an on-chain deployed wasm binary + /// must be supplied. 
+ #[pallet::weight( + T::WeightInfo::instantiate(T::MaxCodeSize::get() / 1024, salt.len() as u32 / 1024) + .saturating_add(*gas_limit) + )] + pub fn instantiate( + origin: OriginFor, + #[pallet::compact] endowment: BalanceOf, + #[pallet::compact] gas_limit: Weight, + code_hash: CodeHash, + data: Vec, + salt: Vec, + ) -> DispatchResultWithPostInfo { + let origin = ensure_signed(origin)?; + let mut gas_meter = GasMeter::new(gas_limit); + let schedule = >::current_schedule(); + let executable = PrefabWasmModule::from_storage(code_hash, &schedule, &mut gas_meter)?; + let mut ctx = ExecutionContext::>::top_level(origin, &schedule); + let code_len = executable.code_len(); + let result = ctx.instantiate(endowment, &mut gas_meter, executable, data, &salt) + .map(|(_address, output)| output); + gas_meter.into_dispatch_result( + result, + T::WeightInfo::instantiate(code_len / 1024, salt.len() as u32 / 1024), + ) + } + + /// Allows block producers to claim a small reward for evicting a contract. If a block + /// producer fails to do so, a regular users will be allowed to claim the reward. + /// + /// In case of a successful eviction no fees are charged from the sender. However, the + /// reward is capped by the total amount of rent that was payed by the contract while + /// it was alive. + /// + /// If contract is not evicted as a result of this call, [`Error::ContractNotEvictable`] + /// is returned and the sender is not eligible for the reward. 
+ #[pallet::weight(T::WeightInfo::claim_surcharge(T::MaxCodeSize::get() / 1024))] + pub fn claim_surcharge( + origin: OriginFor, + dest: T::AccountId, + aux_sender: Option + ) -> DispatchResultWithPostInfo { + let origin = origin.into(); + let (signed, rewarded) = match (origin, aux_sender) { + (Ok(frame_system::RawOrigin::Signed(account)), None) => { + (true, account) + }, + (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => { + (false, aux_sender) + }, + _ => Err(Error::::InvalidSurchargeClaim)?, + }; + + // Add some advantage for block producers (who send unsigned extrinsics) by + // adding a handicap: for signed extrinsics we use a slightly older block number + // for the eviction check. This can be viewed as if we pushed regular users back in past. + let handicap = if signed { + T::SignedClaimHandicap::get() + } else { + Zero::zero() + }; + + // If poking the contract has lead to eviction of the contract, give out the rewards. + match Rent::>::try_eviction(&dest, handicap)? { + (Some(rent_payed), code_len) => { + T::Currency::deposit_into_existing( + &rewarded, + T::SurchargeReward::get().min(rent_payed), + ) + .map(|_| PostDispatchInfo { + actual_weight: Some(T::WeightInfo::claim_surcharge(code_len / 1024)), + pays_fee: Pays::No, + }) + .map_err(Into::into) + } + (None, code_len) => Err(Error::::ContractNotEvictable.with_weight( + T::WeightInfo::claim_surcharge(code_len / 1024) + )), + } + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + #[pallet::metadata(T::AccountId = "AccountId", T::Hash = "Hash", BalanceOf = "Balance")] + pub enum Event { + /// Contract deployed by address at the specified address. \[deployer, contract\] + Instantiated(T::AccountId, T::AccountId), + + /// Contract has been evicted and is now in tombstone state. \[contract\] + Evicted(T::AccountId), + + /// Contract has been terminated without leaving a tombstone. 
+ /// \[contract, beneficiary\] + /// + /// # Params + /// + /// - `contract`: The contract that was terminated. + /// - `beneficiary`: The account that received the contracts remaining balance. + /// + /// # Note + /// + /// The only way for a contract to be removed without a tombstone and emitting + /// this event is by calling `seal_terminate`. + Terminated(T::AccountId, T::AccountId), + + /// Restoration of a contract has been successful. + /// \[restorer, dest, code_hash, rent_allowance\] + /// + /// # Params + /// + /// - `restorer`: Account ID of the restoring contract. + /// - `dest`: Account ID of the restored contract. + /// - `code_hash`: Code hash of the restored contract. + /// - `rent_allowance`: Rent allowance of the restored contract. + Restored(T::AccountId, T::AccountId, T::Hash, BalanceOf), + + /// Code with the specified hash has been stored. \[code_hash\] + CodeStored(T::Hash), + + /// Triggered when the current schedule is updated. + /// \[version\] + /// + /// # Params + /// + /// - `version`: The version of the newly set schedule. + ScheduleUpdated(u32), + + /// A custom event emitted by the contract. + /// \[contract, data\] + /// + /// # Params + /// + /// - `contract`: The contract that emitted the event. + /// - `data`: Data supplied by the contract. Metadata generated during contract + /// compilation is needed to decode it. + ContractEmitted(T::AccountId, Vec), + + /// A code with the specified hash was removed. + /// \[code_hash\] + /// + /// This happens when the last contract that uses this code hash was removed or evicted. + CodeRemoved(T::Hash), + } + + #[pallet::error] + pub enum Error { + /// A new schedule must have a greater version than the current one. + InvalidScheduleVersion, + /// An origin must be signed or inherent and auxiliary sender only provided on inherent. + InvalidSurchargeClaim, + /// Cannot restore from nonexisting or tombstone contract. 
+ InvalidSourceContract, + /// Cannot restore to nonexisting or alive contract. + InvalidDestinationContract, + /// Tombstones don't match. + InvalidTombstone, + /// An origin TrieId written in the current block. + InvalidContractOrigin, + /// The executed contract exhausted its gas limit. + OutOfGas, + /// The output buffer supplied to a contract API call was too small. + OutputBufferTooSmall, + /// Performing the requested transfer would have brought the contract below + /// the subsistence threshold. No transfer is allowed to do this in order to allow + /// for a tombstone to be created. Use `seal_terminate` to remove a contract without + /// leaving a tombstone behind. + BelowSubsistenceThreshold, + /// The newly created contract is below the subsistence threshold after executing + /// its contructor. No contracts are allowed to exist below that threshold. + NewContractNotFunded, + /// Performing the requested transfer failed for a reason originating in the + /// chosen currency implementation of the runtime. Most probably the balance is + /// too low or locks are placed on it. + TransferFailed, + /// Performing a call was denied because the calling depth reached the limit + /// of what is specified in the schedule. + MaxCallDepthReached, + /// The contract that was called is either no contract at all (a plain account) + /// or is a tombstone. + NotCallable, + /// The code supplied to `instantiate_with_code` exceeds the limit specified in the + /// current schedule. + CodeTooLarge, + /// No code could be found at the supplied code hash. + CodeNotFound, + /// A buffer outside of sandbox memory was passed to a contract API function. + OutOfBounds, + /// Input passed to a contract API function failed to decode as expected type. + DecodingFailed, + /// Contract trapped during execution. + ContractTrapped, + /// The size defined in `T::MaxValueSize` was exceeded. 
+ ValueTooLarge, + /// The action performed is not allowed while the contract performing it is already + /// on the call stack. Those actions are contract self destruction and restoration + /// of a tombstone. + ReentranceDenied, + /// `seal_input` was called twice from the same contract execution context. + InputAlreadyRead, + /// The subject passed to `seal_random` exceeds the limit. + RandomSubjectTooLong, + /// The amount of topics passed to `seal_deposit_events` exceeds the limit. + TooManyTopics, + /// The topics passed to `seal_deposit_events` contains at least one duplicate. + DuplicateTopics, + /// The chain does not provide a chain extension. Calling the chain extension results + /// in this error. Note that this usually shouldn't happen as deploying such contracts + /// is rejected. + NoChainExtension, + /// Removal of a contract failed because the deletion queue is full. + /// + /// This can happen when either calling [`Pallet::claim_surcharge`] or `seal_terminate`. + /// The queue is filled by deleting contracts and emptied by a fixed amount each block. + /// Trying again during another block is the only way to resolve this issue. + DeletionQueueFull, + /// A contract could not be evicted because it has enough balance to pay rent. /// - /// In case of a successful eviction no fees are charged from the sender. However, the - /// reward is capped by the total amount of rent that was payed by the contract while - /// it was alive. + /// This can be returned from [`Pallet::claim_surcharge`] because the target + /// contract has enough balance to pay for its rent. + ContractNotEvictable, + /// A storage modification exhausted the 32bit type that holds the storage size. /// - /// If contract is not evicted as a result of this call, [`Error::ContractNotEvictable`] - /// is returned and the sender is not eligible for the reward. 
- #[weight = T::WeightInfo::claim_surcharge()] - pub fn claim_surcharge( - origin, - dest: T::AccountId, - aux_sender: Option - ) -> DispatchResultWithPostInfo { - let origin = origin.into(); - let (signed, rewarded) = match (origin, aux_sender) { - (Ok(frame_system::RawOrigin::Signed(account)), None) => { - (true, account) - }, - (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => { - (false, aux_sender) - }, - _ => Err(Error::::InvalidSurchargeClaim)?, - }; + /// This can either happen when the accumulated storage in bytes is too large or + /// when number of storage items is too large. + StorageExhausted, + /// A contract with the same AccountId already exists. + DuplicateContract, + } - // Add some advantage for block producers (who send unsigned extrinsics) by - // adding a handicap: for signed extrinsics we use a slightly older block number - // for the eviction check. This can be viewed as if we pushed regular users back in past. - let handicap = if signed { - T::SignedClaimHandicap::get() - } else { - Zero::zero() - }; + /// Current cost schedule for contracts. + #[pallet::storage] + #[pallet::getter(fn current_schedule)] + pub(super) type CurrentSchedule = StorageValue<_, Schedule, ValueQuery>; - // If poking the contract has lead to eviction of the contract, give out the rewards. - if let Some(rent_payed) = - Rent::>::try_eviction(&dest, handicap)? - { - T::Currency::deposit_into_existing( - &rewarded, - T::SurchargeReward::get().min(rent_payed), - ) - .map(|_| Pays::No.into()) - .map_err(Into::into) - } else { - Err(Error::::ContractNotEvictable.into()) + /// A mapping from an original code hash to the original code, untouched by instrumentation. + #[pallet::storage] + pub type PristineCode = StorageMap<_, Identity, CodeHash, Vec>; + + /// A mapping between an original code hash and instrumented wasm code, ready for execution. 
+ #[pallet::storage] + pub type CodeStorage = StorageMap<_, Identity, CodeHash, PrefabWasmModule>; + + /// The subtrie counter. + #[pallet::storage] + pub type AccountCounter = StorageValue<_, u64, ValueQuery>; + + /// The code associated with a given account. + /// + /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. + #[pallet::storage] + pub type ContractInfoOf = StorageMap<_, Twox64Concat, T::AccountId, ContractInfo>; + + /// Evicted contracts that await child trie deletion. + /// + /// Child trie deletion is a heavy operation depending on the amount of storage items + /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`. + #[pallet::storage] + pub type DeletionQueue = StorageValue<_, Vec, ValueQuery>; + + #[pallet::genesis_config] + pub struct GenesisConfig { + #[doc = "Current cost schedule for contracts."] + pub current_schedule: Schedule, + } + + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + current_schedule: Default::default(), } } } + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.current_schedule); + } + } } -/// Public APIs provided by the contracts module. 
impl Module where T::AccountId: UncheckedFrom + AsRef<[u8]>, @@ -706,16 +671,16 @@ where origin: T::AccountId, dest: T::AccountId, value: BalanceOf, - gas_limit: Gas, + gas_limit: Weight, input_data: Vec, ) -> ContractExecResult { let mut gas_meter = GasMeter::new(gas_limit); - let exec_result = Self::execute_wasm(origin, &mut gas_meter, |ctx, gas_meter| { - ctx.call(dest, value, gas_meter, input_data) - }); + let schedule = >::current_schedule(); + let mut ctx = ExecutionContext::>::top_level(origin, &schedule); + let result = ctx.call(dest, value, &mut gas_meter, input_data); let gas_consumed = gas_meter.gas_spent(); ContractExecResult { - exec_result, + exec_result: result.map(|r| r.0).map_err(|r| r.0), gas_consumed, } } @@ -731,18 +696,12 @@ where Ok(maybe_value) } + /// Query how many blocks the contract stays alive given that the amount endowment + /// and consumed storage does not change. pub fn rent_projection(address: T::AccountId) -> RentProjectionResult { Rent::>::compute_projection(&address) } - /// Store code for benchmarks which does not check nor instrument the code. - #[cfg(feature = "runtime-benchmarks")] - pub fn store_code_raw(code: Vec) -> DispatchResult { - let schedule = >::current_schedule(); - PrefabWasmModule::store_code_unchecked(code, &schedule)?; - Ok(()) - } - /// Determine the address of a contract, /// /// This is the address generation function used by contract instantiation. Its result @@ -775,114 +734,145 @@ where pub fn subsistence_threshold() -> BalanceOf { T::Currency::minimum_balance().saturating_add(T::TombstoneDeposit::get()) } -} -impl Module -where - T::AccountId: UncheckedFrom + AsRef<[u8]>, -{ - fn execute_wasm( - origin: T::AccountId, - gas_meter: &mut GasMeter, - func: impl FnOnce( - &mut ExecutionContext>, - &mut GasMeter, - ) -> ExecResult, - ) -> ExecResult { + /// Store code for benchmarks which does not check nor instrument the code. 
+ #[cfg(feature = "runtime-benchmarks")] + fn store_code_raw(code: Vec) -> frame_support::dispatch::DispatchResult { let schedule = >::current_schedule(); - let mut ctx = ExecutionContext::top_level(origin, &schedule); - func(&mut ctx, gas_meter) + PrefabWasmModule::store_code_unchecked(code, &schedule)?; + Ok(()) + } + + /// This exists so that benchmarks can determine the weight of running an instrumentation. + #[cfg(feature = "runtime-benchmarks")] + fn reinstrument_module( + module: &mut PrefabWasmModule, + schedule: &Schedule + ) -> frame_support::dispatch::DispatchResult { + self::wasm::reinstrument(module, schedule) } } -decl_event! { - pub enum Event - where - Balance = BalanceOf, - ::AccountId, - ::Hash - { - /// Contract deployed by address at the specified address. \[deployer, contract\] - Instantiated(AccountId, AccountId), +/// Information for managing an account and its sub trie abstraction. +/// This is the required info to cache for an account +#[derive(Encode, Decode, RuntimeDebug)] +pub enum ContractInfo { + Alive(AliveContractInfo), + Tombstone(TombstoneContractInfo), +} - /// Contract has been evicted and is now in tombstone state. \[contract\] - Evicted(AccountId), +impl ContractInfo { + /// If contract is alive then return some alive info + pub fn get_alive(self) -> Option> { + if let ContractInfo::Alive(alive) = self { + Some(alive) + } else { + None + } + } + /// If contract is alive then return some reference to alive info + pub fn as_alive(&self) -> Option<&AliveContractInfo> { + if let ContractInfo::Alive(ref alive) = self { + Some(alive) + } else { + None + } + } + /// If contract is alive then return some mutable reference to alive info + pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { + if let ContractInfo::Alive(ref mut alive) = self { + Some(alive) + } else { + None + } + } - /// Contract has been terminated without leaving a tombstone. 
- /// \[contract, beneficiary\] - /// - /// # Params - /// - /// - `contract`: The contract that was terminated. - /// - `beneficiary`: The account that received the contracts remaining balance. - /// - /// # Note - /// - /// The only way for a contract to be removed without a tombstone and emitting - /// this event is by calling `seal_terminate`. - Terminated(AccountId, AccountId), + /// If contract is tombstone then return some tombstone info + pub fn get_tombstone(self) -> Option> { + if let ContractInfo::Tombstone(tombstone) = self { + Some(tombstone) + } else { + None + } + } + /// If contract is tombstone then return some reference to tombstone info + pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { + if let ContractInfo::Tombstone(ref tombstone) = self { + Some(tombstone) + } else { + None + } + } + /// If contract is tombstone then return some mutable reference to tombstone info + pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { + if let ContractInfo::Tombstone(ref mut tombstone) = self { + Some(tombstone) + } else { + None + } + } +} - /// Restoration of a contract has been successful. - /// \[restorer, dest, code_hash, rent_allowance\] - /// - /// # Params - /// - /// - `restorer`: Account ID of the restoring contract. - /// - `dest`: Account ID of the restored contract. - /// - `code_hash`: Code hash of the restored contract. - /// - `rent_allowance`: Rent allowance of the restored contract. - Restored(AccountId, AccountId, Hash, Balance), +/// Information for managing an account and its sub trie abstraction. +/// This is the required info to cache for an account. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct RawAliveContractInfo { + /// Unique ID for the subtree encoded as a bytes vector. + pub trie_id: TrieId, + /// The total number of bytes used by this contract. + /// + /// It is a sum of each key-value pair stored by this contract. 
+ pub storage_size: u32, + /// The total number of key-value pairs in storage of this contract. + pub pair_count: u32, + /// The code associated with a given account. + pub code_hash: CodeHash, + /// Pay rent at most up to this value. + pub rent_allowance: Balance, + /// The amount of rent that was payed by the contract over its whole lifetime. + /// + /// A restored contract starts with a value of zero just like a new contract. + pub rent_payed: Balance, + /// Last block rent has been payed. + pub deduct_block: BlockNumber, + /// Last block child storage has been written. + pub last_write: Option, + /// This field is reserved for future evolution of format. + pub _reserved: Option<()>, +} - /// Code with the specified hash has been stored. \[code_hash\] - CodeStored(Hash), +impl RawAliveContractInfo { + /// Associated child trie unique id is built from the hash part of the trie id. + pub fn child_trie_info(&self) -> ChildInfo { + child_trie_info(&self.trie_id[..]) + } +} - /// Triggered when the current schedule is updated. - /// \[version\] - /// - /// # Params - /// - /// - `version`: The version of the newly set schedule. - ScheduleUpdated(u32), +/// Associated child trie unique id is built from the hash part of the trie id. +pub(crate) fn child_trie_info(trie_id: &[u8]) -> ChildInfo { + ChildInfo::new_default(trie_id) +} - /// A custom event emitted by the contract. - /// \[contract, data\] - /// - /// # Params - /// - /// - `contract`: The contract that emitted the event. - /// - `data`: Data supplied by the contract. Metadata generated during contract - /// compilation is needed to decode it. - ContractEmitted(AccountId, Vec), +#[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] +pub struct RawTombstoneContractInfo(H, PhantomData); - /// A code with the specified hash was removed. - /// \[code_hash\] - /// - /// This happens when the last contract that uses this code hash was removed or evicted. 
- CodeRemoved(Hash), +impl RawTombstoneContractInfo +where + H: Member + MaybeSerializeDeserialize+ Debug + + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + + sp_std::hash::Hash + Codec, + Hasher: Hash, +{ + fn new(storage_root: &[u8], code_hash: H) -> Self { + let mut buf = Vec::new(); + storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); + buf.extend_from_slice(code_hash.as_ref()); + RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) } } -decl_storage! { - trait Store for Module as Contracts - where - T::AccountId: UncheckedFrom + AsRef<[u8]> - { - /// Current cost schedule for contracts. - CurrentSchedule get(fn current_schedule) config(): Schedule = Default::default(); - /// A mapping from an original code hash to the original code, untouched by instrumentation. - pub PristineCode: map hasher(identity) CodeHash => Option>; - /// A mapping between an original code hash and instrumented wasm code, ready for execution. - pub CodeStorage: map hasher(identity) CodeHash => Option>; - /// The subtrie counter. - pub AccountCounter: u64 = 0; - /// The code associated with a given account. - /// - /// TWOX-NOTE: SAFE since `AccountId` is a secure hash. - pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option>; - /// Evicted contracts that await child trie deletion. - /// - /// Child trie deletion is a heavy operation depending on the amount of storage items - /// stored in said trie. Therefore this operation is performed lazily in `on_initialize`. - pub DeletionQueue: Vec; +impl From> for ContractInfo { + fn from(alive_info: AliveContractInfo) -> Self { + Self::Alive(alive_info) } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 38b1e8bd11..e9befeee2d 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -18,7 +18,7 @@ //! A module responsible for computing the right amount of weight and charging it. 
use crate::{ - AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, + AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, Event, TombstoneContractInfo, Config, CodeHash, Error, storage::Storage, wasm::PrefabWasmModule, exec::Executable, }; @@ -26,7 +26,6 @@ use sp_std::prelude::*; use sp_io::hashing::blake2_256; use sp_core::crypto::UncheckedFrom; use frame_support::{ - debug, StorageMap, storage::child, traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReasons}, }; @@ -183,9 +182,10 @@ where // accidental loss of a contract. Ony `seal_terminate` can remove a // contract without a tombstone. Therefore this case should be never // hit. - debug::error!( + log::error!( + target: "runtime::contracts", "Tombstoned a contract that is below the subsistence threshold: {:?}", - account + account, ); 0u32.into() } @@ -268,7 +268,7 @@ where let tombstone_info = ContractInfo::Tombstone(tombstone); >::insert(account, &tombstone_info); code.drop_from_storage(); - >::deposit_event(RawEvent::Evicted(account.clone())); + >::deposit_event(Event::Evicted(account.clone())); Ok(None) } (Verdict::Evict { amount: _ }, None) => { @@ -325,13 +325,14 @@ where pub fn try_eviction( account: &T::AccountId, handicap: T::BlockNumber, - ) -> Result>, DispatchError> { + ) -> Result<(Option>, u32), DispatchError> { let contract = >::get(account); let contract = match contract { - None | Some(ContractInfo::Tombstone(_)) => return Ok(None), + None | Some(ContractInfo::Tombstone(_)) => return Ok((None, 0)), Some(ContractInfo::Alive(contract)) => contract, }; let module = PrefabWasmModule::::from_storage_noinstr(contract.code_hash)?; + let code_len = module.code_len(); let current_block_number = >::block_number(); let verdict = Self::consider_case( account, @@ -353,9 +354,9 @@ where Self::enact_verdict( account, contract, current_block_number, verdict, Some(module), )?; - Ok(Some(rent_payed)) + Ok((Some(rent_payed), code_len)) } - _ => 
Ok(None), + _ => Ok((None, code_len)), } } @@ -447,28 +448,32 @@ where /// Upon succesful restoration, `origin` will be destroyed, all its funds are transferred to /// the restored account. The restored account will inherit the last write block and its last /// deduct block will be set to the current block. + /// + /// # Return Value + /// + /// Result<(CallerCodeSize, DestCodeSize), (DispatchError, CallerCodeSize, DestCodesize)> pub fn restore_to( origin: T::AccountId, dest: T::AccountId, code_hash: CodeHash, rent_allowance: BalanceOf, delta: Vec, - ) -> Result<(), DispatchError> { + ) -> Result<(u32, u32), (DispatchError, u32, u32)> { let mut origin_contract = >::get(&origin) .and_then(|c| c.get_alive()) - .ok_or(Error::::InvalidSourceContract)?; + .ok_or((Error::::InvalidSourceContract.into(), 0, 0))?; let child_trie_info = origin_contract.child_trie_info(); let current_block = >::block_number(); if origin_contract.last_write == Some(current_block) { - return Err(Error::::InvalidContractOrigin.into()); + return Err((Error::::InvalidContractOrigin.into(), 0, 0)); } let dest_tombstone = >::get(&dest) .and_then(|c| c.get_tombstone()) - .ok_or(Error::::InvalidDestinationContract)?; + .ok_or((Error::::InvalidDestinationContract.into(), 0, 0))?; let last_write = if !delta.is_empty() { Some(current_block) @@ -477,7 +482,7 @@ where }; // Fails if the code hash does not exist on chain - E::add_user(code_hash)?; + let caller_code_len = E::add_user(code_hash).map_err(|e| (e, 0, 0))?; // We are allowed to eagerly modify storage even though the function can // fail later due to tombstones not matching. 
This is because the restoration @@ -501,28 +506,26 @@ where ); if tombstone != dest_tombstone { - return Err(Error::::InvalidTombstone.into()); + return Err((Error::::InvalidTombstone.into(), caller_code_len, 0)); } origin_contract.storage_size -= bytes_taken; >::remove(&origin); - E::remove_user(origin_contract.code_hash); + let tombstone_code_len = E::remove_user(origin_contract.code_hash); >::insert(&dest, ContractInfo::Alive(AliveContractInfo:: { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - pair_count: origin_contract.pair_count, code_hash, rent_allowance, rent_payed: >::zero(), deduct_block: current_block, last_write, + .. origin_contract })); let origin_free_balance = T::Currency::free_balance(&origin); T::Currency::make_free_balance_be(&origin, >::zero()); T::Currency::deposit_creating(&dest, origin_free_balance); - Ok(()) + Ok((caller_code_len, tombstone_code_len)) } } diff --git a/frame/contracts/src/schedule.rs b/frame/contracts/src/schedule.rs index 3580fa2aae..c86134bc41 100644 --- a/frame/contracts/src/schedule.rs +++ b/frame/contracts/src/schedule.rs @@ -104,10 +104,6 @@ pub struct Limits { /// The maximum length of a subject in bytes used for PRNG generation. pub subject_len: u32, - - /// The maximum length of a contract code in bytes. This limit applies to the uninstrumented - /// and pristine form of the code as supplied to `instantiate_with_code`. - pub code_size: u32, } impl Limits { @@ -250,9 +246,18 @@ pub struct HostFnWeights { /// Weight of calling `seal_terminate`. pub terminate: Weight, + /// Weight per byte of the terminated contract. + pub terminate_per_code_byte: Weight, + /// Weight of calling `seal_restore_to`. pub restore_to: Weight, + /// Weight per byte of the restoring contract. + pub restore_to_per_caller_code_byte: Weight, + + /// Weight per byte of the restored contract. + pub restore_to_per_tombstone_code_byte: Weight, + /// Weight per delta key supplied to `seal_restore_to`. 
pub restore_to_per_delta: Weight, @@ -292,6 +297,9 @@ pub struct HostFnWeights { /// Weight of calling `seal_call`. pub call: Weight, + /// Weight per byte of the called contract. + pub call_per_code_byte: Weight, + /// Weight surcharge that is claimed if `seal_call` does a balance transfer. pub call_transfer_surcharge: Weight, @@ -304,6 +312,9 @@ pub struct HostFnWeights { /// Weight of calling `seal_instantiate`. pub instantiate: Weight, + /// Weight per byte of the instantiated contract. + pub instantiate_per_code_byte: Weight, + /// Weight per input byte supplied to `seal_instantiate`. pub instantiate_per_input_byte: Weight, @@ -443,7 +454,6 @@ impl Default for Limits { table_size: 4096, br_table_size: 256, subject_len: 32, - code_size: 512 * 1024, } } } @@ -528,8 +538,11 @@ impl Default for HostFnWeights { r#return: cost!(seal_return), return_per_byte: cost_byte!(seal_return_per_kb), terminate: cost!(seal_terminate), + terminate_per_code_byte: cost_byte!(seal_terminate_per_code_kb), restore_to: cost!(seal_restore_to), - restore_to_per_delta: cost_batched!(seal_restore_to_per_delta), + restore_to_per_caller_code_byte: cost_byte_args!(seal_restore_to_per_code_kb_delta, 1, 0, 0), + restore_to_per_tombstone_code_byte: cost_byte_args!(seal_restore_to_per_code_kb_delta, 0, 1, 0), + restore_to_per_delta: cost_batched_args!(seal_restore_to_per_code_kb_delta, 0, 0, 1), random: cost_batched!(seal_random), deposit_event: cost_batched!(seal_deposit_event), deposit_event_per_topic: cost_batched_args!(seal_deposit_event_per_topic_and_kb, 1, 0), @@ -542,13 +555,15 @@ impl Default for HostFnWeights { get_storage_per_byte: cost_byte_batched!(seal_get_storage_per_kb), transfer: cost_batched!(seal_transfer), call: cost_batched!(seal_call), - call_transfer_surcharge: cost_batched_args!(seal_call_per_transfer_input_output_kb, 1, 0, 0), - call_per_input_byte: cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 1, 0), - call_per_output_byte: 
cost_byte_batched_args!(seal_call_per_transfer_input_output_kb, 0, 0, 1), + call_per_code_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 1, 0, 0, 0), + call_transfer_surcharge: cost_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 1, 0, 0), + call_per_input_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 0, 1, 0), + call_per_output_byte: cost_byte_batched_args!(seal_call_per_code_transfer_input_output_kb, 0, 0, 0, 1), instantiate: cost_batched!(seal_instantiate), - instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 1, 0, 0), - instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 1, 0), - instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_input_output_salt_kb, 0, 0, 1), + instantiate_per_code_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 1, 0, 0, 0), + instantiate_per_input_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 1, 0, 0), + instantiate_per_output_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 0, 1, 0), + instantiate_per_salt_byte: cost_byte_batched_args!(seal_instantiate_per_code_input_output_salt_kb, 0, 0, 0, 1), hash_sha2_256: cost_batched!(seal_hash_sha2_256), hash_sha2_256_per_byte: cost_byte_batched!(seal_hash_sha2_256_per_kb), hash_keccak_256: cost_batched!(seal_hash_keccak_256), diff --git a/frame/contracts/src/storage.rs b/frame/contracts/src/storage.rs index 2a2d5da225..5b9e7c1f58 100644 --- a/frame/contracts/src/storage.rs +++ b/frame/contracts/src/storage.rs @@ -31,9 +31,7 @@ use sp_runtime::traits::{Bounded, Saturating, Zero}; use sp_core::crypto::UncheckedFrom; use frame_support::{ dispatch::DispatchResult, - StorageMap, - debug, - storage::{child::{self, KillOutcome}, StorageValue}, + storage::child::{self, KillChildStorageResult}, traits::Get, weights::Weight, }; @@ 
-183,6 +181,7 @@ where rent_payed: >::zero(), pair_count: 0, last_write: None, + _reserved: None, }; *existing = Some(contract.into()); @@ -196,10 +195,10 @@ where /// You must make sure that the contract is also removed or converted into a tombstone /// when queuing the trie for deletion. pub fn queue_trie_for_deletion(contract: &AliveContractInfo) -> DispatchResult { - if DeletionQueue::decode_len().unwrap_or(0) >= T::DeletionQueueDepth::get() as usize { + if >::decode_len().unwrap_or(0) >= T::DeletionQueueDepth::get() as usize { Err(Error::::DeletionQueueFull.into()) } else { - DeletionQueue::append(DeletedContract { + >::append(DeletedContract { pair_count: contract.pair_count, trie_id: contract.trie_id.clone(), }); @@ -234,7 +233,7 @@ where /// It returns the amount of weight used for that task or `None` when no weight was used /// apart from the base weight. pub fn process_deletion_queue_batch(weight_limit: Weight) -> Weight { - let queue_len = DeletionQueue::decode_len().unwrap_or(0); + let queue_len = >::decode_len().unwrap_or(0); if queue_len == 0 { return weight_limit; } @@ -251,7 +250,7 @@ where return weight_limit; } - let mut queue = DeletionQueue::get(); + let mut queue = >::get(); while !queue.is_empty() && remaining_key_budget > 0 { // Cannot panic due to loop condition @@ -270,20 +269,21 @@ where let removed = queue.swap_remove(0); match outcome { // This should not happen as our budget was large enough to remove all keys. 
- KillOutcome::SomeRemaining => { - debug::error!( + KillChildStorageResult::SomeRemaining(_) => { + log::error!( + target: "runtime::contracts", "After deletion keys are remaining in this child trie: {:?}", removed.trie_id, ); }, - KillOutcome::AllRemoved => (), + KillChildStorageResult::AllRemoved(_) => (), } } remaining_key_budget = remaining_key_budget .saturating_sub(remaining_key_budget.min(pair_count)); } - DeletionQueue::put(queue); + >::put(queue); weight_limit.saturating_sub(weight_per_key.saturating_mul(remaining_key_budget as Weight)) } @@ -293,7 +293,7 @@ where use sp_runtime::traits::Hash; // Note that skipping a value due to error is not an issue here. // We only need uniqueness, not sequence. - let new_seed = AccountCounter::mutate(|v| { + let new_seed = >::mutate(|v| { *v = v.wrapping_add(1); *v }); @@ -322,6 +322,6 @@ where trie_id: vec![], }) .collect(); - DeletionQueue::put(queue); + >::put(queue); } } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 62768641ac..c17434300d 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -17,13 +17,15 @@ use crate::{ BalanceOf, ContractInfo, ContractInfoOf, Module, - RawAliveContractInfo, RawEvent, Config, Schedule, gas::Gas, - Error, RuntimeReturnCode, storage::Storage, + RawAliveContractInfo, Config, Schedule, + Error, storage::Storage, chain_extension::{ Result as ExtensionResult, Environment, ChainExtension, Ext, SysConfig, RetVal, UncheckedFrom, InitState, ReturnFlags, }, exec::{AccountIdOf, Executable}, wasm::PrefabWasmModule, + weights::WeightInfo, + wasm::ReturnCode as RuntimeReturnCode, }; use assert_matches::assert_matches; use codec::Encode; @@ -35,8 +37,8 @@ use sp_runtime::{ use sp_io::hashing::blake2_256; use frame_support::{ assert_ok, assert_err, assert_err_ignore_postinfo, - parameter_types, StorageMap, assert_storage_noop, - traits::{Currency, ReservableCurrency, OnInitialize}, + parameter_types, assert_storage_noop, + 
traits::{Currency, ReservableCurrency, OnInitialize, GenesisBuild}, weights::{Weight, PostDispatchInfo, DispatchClass, constants::WEIGHT_PER_SECOND}, dispatch::DispatchErrorWithPostInfo, storage::child, @@ -72,7 +74,7 @@ pub mod test_utils { exec::{StorageKey, AccountIdOf}, Module as Contracts, }; - use frame_support::{StorageMap, traits::Currency}; + use frame_support::traits::Currency; pub fn set_storage(addr: &AccountIdOf, key: &StorageKey, value: Option>) { let contract_info = >::get(&addr).unwrap().get_alive().unwrap(); @@ -250,6 +252,7 @@ parameter_types! { pub const MaxValueSize: u32 = 16_384; pub const DeletionQueueDepth: u32 = 1024; pub const DeletionWeightLimit: Weight = 500_000_000_000; + pub const MaxCodeSize: u32 = 2 * 1024; } parameter_types! { @@ -282,6 +285,7 @@ impl Config for Test { type ChainExtension = TestExtension; type DeletionQueueDepth = DeletionQueueDepth; type DeletionWeightLimit = DeletionWeightLimit; + type MaxCodeSize = MaxCodeSize; } pub const ALICE: AccountId32 = AccountId32::new([1u8; 32]); @@ -289,7 +293,7 @@ pub const BOB: AccountId32 = AccountId32::new([2u8; 32]); pub const CHARLIE: AccountId32 = AccountId32::new([3u8; 32]); pub const DJANGO: AccountId32 = AccountId32::new([4u8; 32]); -const GAS_LIMIT: Gas = 10_000_000_000; +const GAS_LIMIT: Weight = 10_000_000_000; pub struct ExtBuilder { existential_deposit: u64, @@ -350,7 +354,7 @@ where fn calling_plain_account_fails() { ExtBuilder::default().build().execute_with(|| { let _ = Balances::deposit_creating(&ALICE, 100_000_000); - let base_cost = <::WeightInfo as crate::WeightInfo>::call(); + let base_cost = <::WeightInfo as WeightInfo>::call(0); assert_eq!( Contracts::call(Origin::signed(ALICE), BOB, 0, GAS_LIMIT, Vec::new()), @@ -388,6 +392,7 @@ fn account_removal_does_not_remove_storage() { rent_allowance: 40, rent_payed: 0, last_write: None, + _reserved: None, }); let _ = Balances::deposit_creating(&ALICE, 110); ContractInfoOf::::insert(ALICE, &alice_contract_info); @@ -403,6 
+408,7 @@ fn account_removal_does_not_remove_storage() { rent_allowance: 40, rent_payed: 0, last_write: None, + _reserved: None, }); let _ = Balances::deposit_creating(&BOB, 110); ContractInfoOf::::insert(BOB, &bob_contract_info); @@ -498,19 +504,19 @@ fn instantiate_and_call_and_deposit_event() { }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::CodeStored(code_hash.into())), + event: Event::pallet_contracts(crate::Event::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: Event::pallet_contracts( - RawEvent::ContractEmitted(addr.clone(), vec![1, 2, 3, 4]) + crate::Event::ContractEmitted(addr.clone(), vec![1, 2, 3, 4]) ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr.clone())), + event: Event::pallet_contracts(crate::Event::Instantiated(ALICE, addr.clone())), topics: vec![], }, ]); @@ -1227,12 +1233,16 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::CodeStored(set_rent_code_hash.into())), + event: Event::pallet_contracts( + crate::Event::CodeStored(set_rent_code_hash.into()) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr_bob.clone())), + event: Event::pallet_contracts( + crate::Event::Instantiated(ALICE, addr_bob.clone()) + ), topics: vec![], }, ]; @@ -1272,7 +1282,9 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::Instantiated(ALICE, addr_dummy.clone())), + event: Event::pallet_contracts( + crate::Event::Instantiated(ALICE, addr_dummy.clone()) + ), topics: vec![], }, ].iter().cloned()); @@ -1398,7 +1410,7 @@ fn restoration( assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::Evicted(addr_bob)), + event: 
Event::pallet_contracts(crate::Event::Evicted(addr_bob)), topics: vec![], }, EventRecord { @@ -1430,12 +1442,16 @@ fn restoration( }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::CodeStored(restoration_code_hash)), + event: Event::pallet_contracts( + crate::Event::CodeStored(restoration_code_hash) + ), topics: vec![], }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::Instantiated(CHARLIE, addr_django.clone())), + event: Event::pallet_contracts( + crate::Event::Instantiated(CHARLIE, addr_django.clone()) + ), topics: vec![], }, @@ -1467,7 +1483,7 @@ fn restoration( assert_eq!(System::events(), vec![ EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::CodeRemoved(restoration_code_hash)), + event: Event::pallet_contracts(crate::Event::CodeRemoved(restoration_code_hash)), topics: vec![], }, EventRecord { @@ -1478,7 +1494,9 @@ fn restoration( EventRecord { phase: Phase::Initialization, event: Event::pallet_contracts( - RawEvent::Restored(addr_django, addr_bob, bob_contract.code_hash, 50) + crate::Event::Restored( + addr_django, addr_bob, bob_contract.code_hash, 50 + ) ), topics: vec![], }, @@ -1717,13 +1735,13 @@ fn self_destruct_works() { }, EventRecord { phase: Phase::Initialization, - event: Event::pallet_contracts(RawEvent::CodeRemoved(code_hash)), + event: Event::pallet_contracts(crate::Event::CodeRemoved(code_hash)), topics: vec![], }, EventRecord { phase: Phase::Initialization, event: Event::pallet_contracts( - RawEvent::Terminated(addr.clone(), DJANGO) + crate::Event::Terminated(addr.clone(), DJANGO) ), topics: vec![], }, @@ -2432,7 +2450,7 @@ fn lazy_removal_does_no_run_on_full_block() { // Run the lazy removal without any limit so that all keys would be removed if there // had been some weight left in the block. 
let weight_used = Contracts::on_initialize(Weight::max_value()); - let base = <::WeightInfo as crate::WeightInfo>::on_initialize(); + let base = <::WeightInfo as WeightInfo>::on_initialize(); assert_eq!(weight_used, base); // All the keys are still in place @@ -2717,3 +2735,69 @@ fn refcounter() { assert_matches!(crate::CodeStorage::::get(code_hash), None); }); } + + +#[test] +fn reinstrument_does_charge() { + let (wasm, code_hash) = compile_module::("return_with_data").unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + let _ = Balances::deposit_creating(&ALICE, 1_000_000); + let subsistence = Module::::subsistence_threshold(); + let zero = 0u32.to_le_bytes().encode(); + let code_len = wasm.len() as u32; + + assert_ok!(Contracts::instantiate_with_code( + Origin::signed(ALICE), + subsistence * 100, + GAS_LIMIT, + wasm, + zero.clone(), + vec![], + )); + + let addr = Contracts::contract_address(&ALICE, &code_hash, &[]); + + // Call the contract two times without reinstrument + + let result0 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + zero.clone(), + ); + assert!(result0.exec_result.unwrap().is_success()); + + let result1 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + zero.clone(), + ); + assert!(result1.exec_result.unwrap().is_success()); + + // They should match because both where called with the same schedule. 
+ assert_eq!(result0.gas_consumed, result1.gas_consumed); + + // Update the schedule version but keep the rest the same + crate::CurrentSchedule::mutate(|old: &mut Schedule| { + old.version += 1; + }); + + // This call should trigger reinstrumentation + let result2 = Contracts::bare_call( + ALICE, + addr.clone(), + 0, + GAS_LIMIT, + zero.clone(), + ); + assert!(result2.exec_result.unwrap().is_success()); + assert!(result2.gas_consumed > result1.gas_consumed); + assert_eq!( + result2.gas_consumed, + result1.gas_consumed + ::WeightInfo::instrument(code_len / 1024), + ); + }); +} diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index 6166918c80..0b2512f17f 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -28,11 +28,15 @@ //! Thus, before executing a contract it should be reinstrument with new schedule. use crate::{ - CodeHash, CodeStorage, PristineCode, Schedule, Config, Error, - wasm::{prepare, PrefabWasmModule}, Module as Contracts, RawEvent, + CodeHash, CodeStorage, PristineCode, Schedule, Config, Error, Weight, + wasm::{prepare, PrefabWasmModule}, Module as Contracts, Event, + gas::{GasMeter, Token}, + weights::WeightInfo, }; use sp_core::crypto::UncheckedFrom; -use frame_support::{StorageMap, dispatch::{DispatchError, DispatchResult}}; +use frame_support::dispatch::DispatchError; +#[cfg(feature = "runtime-benchmarks")] +pub use self::private::reinstrument as reinstrument; /// Put the instrumented module in storage. /// @@ -54,7 +58,7 @@ where Some(module) => increment_64(&mut module.refcount), None => { *existing = Some(prefab_module); - Contracts::::deposit_event(RawEvent::CodeStored(code_hash)) + Contracts::::deposit_event(Event::CodeStored(code_hash)) } } }); @@ -77,14 +81,14 @@ where } /// Increment the refcount of a code in-storage by one. 
-pub fn increment_refcount(code_hash: CodeHash) -> DispatchResult +pub fn increment_refcount(code_hash: CodeHash) -> Result where T::AccountId: UncheckedFrom + AsRef<[u8]> { >::mutate(code_hash, |existing| { if let Some(module) = existing { increment_64(&mut module.refcount); - Ok(()) + Ok(module.original_code_len) } else { Err(Error::::CodeNotFound.into()) } @@ -92,19 +96,23 @@ where } /// Decrement the refcount of a code in-storage by one and remove the code when it drops to zero. -pub fn decrement_refcount(code_hash: CodeHash) +pub fn decrement_refcount(code_hash: CodeHash) -> u32 where T::AccountId: UncheckedFrom + AsRef<[u8]> { >::mutate_exists(code_hash, |existing| { if let Some(module) = existing { + let code_len = module.original_code_len; module.refcount = module.refcount.saturating_sub(1); if module.refcount == 0 { *existing = None; finish_removal::(code_hash); } + code_len + } else { + 0 } - }); + }) } /// Load code with the given code hash. @@ -114,38 +122,55 @@ where /// re-instrumentation and update the cache in the storage. pub fn load( code_hash: CodeHash, - schedule: Option<&Schedule>, + reinstrument: Option<(&Schedule, &mut GasMeter)>, ) -> Result, DispatchError> where T::AccountId: UncheckedFrom + AsRef<[u8]> { let mut prefab_module = >::get(code_hash) .ok_or_else(|| Error::::CodeNotFound)?; + prefab_module.code_hash = code_hash; - if let Some(schedule) = schedule { + if let Some((schedule, gas_meter)) = reinstrument { if prefab_module.schedule_version < schedule.version { // The current schedule version is greater than the version of the one cached // in the storage. // // We need to re-instrument the code with the latest schedule here. 
- let original_code = >::get(code_hash) - .ok_or_else(|| Error::::CodeNotFound)?; - prefab_module.code = prepare::reinstrument_contract::(original_code, schedule)?; - prefab_module.schedule_version = schedule.version; - >::insert(&code_hash, &prefab_module); + gas_meter.charge(&(), InstrumentToken(prefab_module.original_code_len))?; + private::reinstrument(&mut prefab_module, schedule)?; } } - prefab_module.code_hash = code_hash; Ok(prefab_module) } +mod private { + use super::*; + + /// Instruments the passed prefab wasm module with the supplied schedule. + pub fn reinstrument( + prefab_module: &mut PrefabWasmModule, + schedule: &Schedule, + ) -> Result<(), DispatchError> + where + T::AccountId: UncheckedFrom + AsRef<[u8]> + { + let original_code = >::get(&prefab_module.code_hash) + .ok_or_else(|| Error::::CodeNotFound)?; + prefab_module.code = prepare::reinstrument_contract::(original_code, schedule)?; + prefab_module.schedule_version = schedule.version; + >::insert(&prefab_module.code_hash, &*prefab_module); + Ok(()) + } +} + /// Finish removal of a code by deleting the pristine code and emitting an event. fn finish_removal(code_hash: CodeHash) where T::AccountId: UncheckedFrom + AsRef<[u8]> { >::remove(code_hash); - Contracts::::deposit_event(RawEvent::CodeRemoved(code_hash)) + Contracts::::deposit_event(Event::CodeRemoved(code_hash)) } /// Increment the refcount panicking if it should ever overflow (which will not happen). @@ -161,3 +186,17 @@ fn increment_64(refcount: &mut u64) { qed "); } + +/// Token to be supplied to the gas meter which charges the weight needed for reinstrumenting +/// a contract of the specified size in bytes. 
+#[cfg_attr(test, derive(Debug, PartialEq, Eq))] +#[derive(Clone, Copy)] +struct InstrumentToken(u32); + +impl Token for InstrumentToken { + type Metadata = (); + + fn calculate_amount(&self, _metadata: &Self::Metadata) -> Weight { + T::WeightInfo::instrument(self.0 / 1024) + } +} diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index dbb6705e97..3c10d3225e 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -20,13 +20,11 @@ //! //! Most likely you should use `define_env` macro. -#[macro_export] macro_rules! convert_args { () => (vec![]); ( $( $t:ty ),* ) => ( vec![ $( { use $crate::wasm::env_def::ConvertibleToWasm; <$t>::VALUE_TYPE }, )* ] ); } -#[macro_export] macro_rules! gen_signature { ( ( $( $params: ty ),* ) ) => ( { @@ -43,7 +41,6 @@ macro_rules! gen_signature { ); } -#[macro_export] macro_rules! gen_signature_dispatch { ( $needle_name:ident, @@ -102,7 +99,6 @@ where f } -#[macro_export] macro_rules! unmarshall_then_body_then_marshall { ( $args_iter:ident, $ctx:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ let body = $crate::wasm::env_def::macros::constrain_closure::< @@ -128,7 +124,6 @@ macro_rules! unmarshall_then_body_then_marshall { }) } -#[macro_export] macro_rules! define_func { ( < E: $seal_ty:tt > $name:ident ( $ctx: ident $(, $names:ident : $params:ty)*) $(-> $returns:ty)* => $body:tt ) => { fn $name< E: $seal_ty >( @@ -152,7 +147,6 @@ macro_rules! define_func { }; } -#[macro_export] macro_rules! 
register_func { ( $reg_cb:ident, < E: $seal_ty:tt > ; ) => {}; @@ -215,9 +209,9 @@ mod tests { use sp_runtime::traits::Zero; use sp_sandbox::{ReturnValue, Value}; use crate::{ + Weight, wasm::{Runtime, runtime::TrapReason, tests::MockExt}, exec::Ext, - gas::Gas, }; struct TestRuntime { @@ -282,7 +276,7 @@ mod tests { #[test] fn macro_define_func() { define_func!( seal_gas (_ctx, amount: u32) => { - let amount = Gas::from(amount); + let amount = Weight::from(amount); if !amount.is_zero() { Ok(()) } else { @@ -334,7 +328,7 @@ mod tests { define_env!(Env, , seal_gas( _ctx, amount: u32 ) => { - let amount = Gas::from(amount); + let amount = Weight::from(amount); if !amount.is_zero() { Ok(()) } else { diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index 0d9ceeee02..997ec29e02 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -22,7 +22,7 @@ use sp_sandbox::Value; use parity_wasm::elements::{FunctionType, ValueType}; #[macro_use] -pub(crate) mod macros; +pub mod macros; pub trait ConvertibleToWasm: Sized { const VALUE_TYPE: ValueType; @@ -67,13 +67,13 @@ impl ConvertibleToWasm for u64 { } } -pub(crate) type HostFunc = +pub type HostFunc = fn( &mut Runtime, &[sp_sandbox::Value] ) -> Result; -pub(crate) trait FunctionImplProvider { +pub trait FunctionImplProvider { fn impls)>(f: &mut F); } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 56be9f3531..9001e2b8e9 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -33,9 +33,13 @@ use crate::{ use sp_std::prelude::*; use sp_core::crypto::UncheckedFrom; use codec::{Encode, Decode}; -use frame_support::dispatch::{DispatchError, DispatchResult}; +use frame_support::dispatch::DispatchError; use pallet_contracts_primitives::ExecResult; pub use self::runtime::{ReturnCode, Runtime, RuntimeToken}; +#[cfg(feature = "runtime-benchmarks")] +pub use 
self::code_cache::reinstrument; +#[cfg(test)] +pub use tests::MockExt; /// A prepared wasm module ready for execution. /// @@ -125,7 +129,7 @@ where pub fn store_code_unchecked( original_code: Vec, schedule: &Schedule - ) -> DispatchResult { + ) -> Result<(), DispatchError> { let executable = prepare::benchmarking::prepare_contract(original_code, schedule) .map_err::(Into::into)?; code_cache::store(executable); @@ -145,9 +149,10 @@ where { fn from_storage( code_hash: CodeHash, - schedule: &Schedule + schedule: &Schedule, + gas_meter: &mut GasMeter, ) -> Result { - code_cache::load(code_hash, Some(schedule)) + code_cache::load(code_hash, Some((schedule, gas_meter))) } fn from_storage_noinstr(code_hash: CodeHash) -> Result { @@ -158,11 +163,11 @@ where code_cache::store_decremented(self); } - fn add_user(code_hash: CodeHash) -> DispatchResult { + fn add_user(code_hash: CodeHash) -> Result { code_cache::increment_refcount::(code_hash) } - fn remove_user(code_hash: CodeHash) { + fn remove_user(code_hash: CodeHash) -> u32 { code_cache::decrement_refcount::(code_hash) } @@ -222,6 +227,10 @@ where let len = self.original_code_len.saturating_add(self.code.len() as u32); len.checked_div(self.refcount as u32).unwrap_or(len) } + + fn code_len(&self) -> u32 { + self.code.len() as u32 + } } #[cfg(test)] @@ -230,7 +239,7 @@ mod tests { use crate::{ CodeHash, BalanceOf, Error, Module as Contracts, exec::{Ext, StorageKey, AccountIdOf, Executable}, - gas::{Gas, GasMeter}, + gas::GasMeter, tests::{Test, Call, ALICE, BOB}, }; use std::collections::HashMap; @@ -241,7 +250,7 @@ mod tests { use assert_matches::assert_matches; use pallet_contracts_primitives::{ExecReturnValue, ReturnFlags, ExecError, ErrorOrigin}; - const GAS_LIMIT: Gas = 10_000_000_000; + const GAS_LIMIT: Weight = 10_000_000_000; #[derive(Debug, PartialEq, Eq)] struct DispatchEntry(Call); @@ -305,7 +314,7 @@ mod tests { gas_meter: &mut GasMeter, data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue), 
ExecError> { + ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)> { self.instantiates.push(InstantiateEntry { code_hash: code_hash.clone(), endowment, @@ -319,6 +328,7 @@ mod tests { flags: ReturnFlags::empty(), data: Vec::new(), }, + 0, )) } fn transfer( @@ -339,7 +349,7 @@ mod tests { value: u64, _gas_meter: &mut GasMeter, data: Vec, - ) -> ExecResult { + ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { self.transfers.push(TransferEntry { to: to.clone(), value, @@ -347,16 +357,16 @@ mod tests { }); // Assume for now that it was just a plain transfer. // TODO: Add tests for different call outcomes. - Ok(ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }) + Ok((ExecReturnValue { flags: ReturnFlags::empty(), data: Vec::new() }, 0)) } fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError> { + ) -> Result { self.terminations.push(TerminationEntry { beneficiary: beneficiary.clone(), }); - Ok(()) + Ok(0) } fn restore_to( &mut self, @@ -364,14 +374,14 @@ mod tests { code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), DispatchError> { + ) -> Result<(u32, u32), (DispatchError, u32, u32)> { self.restores.push(RestoreEntry { dest, code_hash, rent_allowance, delta, }); - Ok(()) + Ok((0, 0)) } fn caller(&self) -> &AccountIdOf { &ALICE @@ -443,7 +453,7 @@ mod tests { gas_meter: &mut GasMeter, input_data: Vec, salt: &[u8], - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + ) -> Result<(AccountIdOf, ExecReturnValue, u32), (ExecError, u32)> { (**self).instantiate(code, value, gas_meter, input_data, salt) } fn transfer( @@ -456,7 +466,7 @@ mod tests { fn terminate( &mut self, beneficiary: &AccountIdOf, - ) -> Result<(), DispatchError> { + ) -> Result { (**self).terminate(beneficiary) } fn call( @@ -465,7 +475,7 @@ mod tests { value: u64, gas_meter: &mut GasMeter, input_data: Vec, - ) -> ExecResult { + ) -> Result<(ExecReturnValue, u32), (ExecError, u32)> { (**self).call(to, value, 
gas_meter, input_data) } fn restore_to( @@ -474,7 +484,7 @@ mod tests { code_hash: H256, rent_allowance: u64, delta: Vec, - ) -> Result<(), DispatchError> { + ) -> Result<(u32, u32), (DispatchError, u32, u32)> { (**self).restore_to( dest, code_hash, @@ -1194,7 +1204,7 @@ mod tests { &mut gas_meter, ).unwrap(); - let gas_left = Gas::decode(&mut output.data.as_slice()).unwrap(); + let gas_left = Weight::decode(&mut output.data.as_slice()).unwrap(); assert!(gas_left < GAS_LIMIT, "gas_left must be less than initial"); assert!(gas_left > gas_meter.gas_left(), "gas_left must be greater than final"); } diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 9dd098e852..c383fdcc2a 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -20,11 +20,11 @@ use crate::{ HostFnWeights, Config, CodeHash, BalanceOf, Error, exec::{Ext, StorageKey, TopicOf}, - gas::{Gas, GasMeter, Token, GasMeterResult, ChargedAmount}, + gas::{GasMeter, Token, ChargedAmount}, wasm::env_def::ConvertibleToWasm, }; use parity_wasm::elements::ValueType; -use frame_support::{dispatch::DispatchError, ensure}; +use frame_support::{dispatch::DispatchError, ensure, traits::Get, weights::Weight}; use sp_std::prelude::*; use codec::{Decode, DecodeAll, Encode}; use sp_runtime::traits::SaturatedConversion; @@ -165,11 +165,15 @@ pub enum RuntimeToken { Return(u32), /// Weight of calling `seal_terminate`. Terminate, + /// Weight that is added to `seal_terminate` for every byte of the terminated contract. + TerminateSurchargeCodeSize(u32), /// Weight of calling `seal_restore_to` per number of supplied delta entries. RestoreTo(u32), + /// Weight that is added to `seal_restore_to` for the involved code sizes. + RestoreToSurchargeCodeSize{caller_code: u32, tombstone_code: u32}, /// Weight of calling `seal_random`. It includes the weight for copying the subject. 
Random, - /// Weight of calling `seal_reposit_event` with the given number of topics and event size. + /// Weight of calling `seal_deposit_event` with the given number of topics and event size. DepositEvent{num_topic: u32, len: u32}, /// Weight of calling `seal_set_rent_allowance`. SetRentAllowance, @@ -185,6 +189,8 @@ pub enum RuntimeToken { Transfer, /// Weight of calling `seal_call` for the given input size. CallBase(u32), + /// Weight that is added to `seal_call` for every byte of the called contract. + CallSurchargeCodeSize(u32), /// Weight of the transfer performed during a call. CallSurchargeTransfer, /// Weight of output received through `seal_call` for the given size. @@ -193,6 +199,8 @@ pub enum RuntimeToken { /// This includes the transfer as an instantiate without a value will always be below /// the existential deposit and is disregarded as corner case. InstantiateBase{input_data_len: u32, salt_len: u32}, + /// Weight that is added to `seal_instantiate` for every byte of the instantiated contract. + InstantiateSurchargeCodeSize(u32), /// Weight of output received through `seal_instantiate` for the given size. InstantiateCopyOut(u32), /// Weight of calling `seal_hash_sha_256` for the given input size. 
@@ -215,7 +223,7 @@ where { type Metadata = HostFnWeights; - fn calculate_amount(&self, s: &Self::Metadata) -> Gas { + fn calculate_amount(&self, s: &Self::Metadata) -> Weight { use self::RuntimeToken::*; match *self { MeteringBlock(amount) => s.gas.saturating_add(amount.into()), @@ -235,8 +243,13 @@ where Return(len) => s.r#return .saturating_add(s.return_per_byte.saturating_mul(len.into())), Terminate => s.terminate, + TerminateSurchargeCodeSize(len) => s.terminate_per_code_byte.saturating_mul(len.into()), RestoreTo(delta) => s.restore_to .saturating_add(s.restore_to_per_delta.saturating_mul(delta.into())), + RestoreToSurchargeCodeSize{caller_code, tombstone_code} => + s.restore_to_per_caller_code_byte.saturating_mul(caller_code.into()).saturating_add( + s.restore_to_per_tombstone_code_byte.saturating_mul(tombstone_code.into()) + ), Random => s.random, DepositEvent{num_topic, len} => s.deposit_event .saturating_add(s.deposit_event_per_topic.saturating_mul(num_topic.into())) @@ -250,11 +263,14 @@ where Transfer => s.transfer, CallBase(len) => s.call .saturating_add(s.call_per_input_byte.saturating_mul(len.into())), + CallSurchargeCodeSize(len) => s.call_per_code_byte.saturating_mul(len.into()), CallSurchargeTransfer => s.call_transfer_surcharge, CallCopyOut(len) => s.call_per_output_byte.saturating_mul(len.into()), InstantiateBase{input_data_len, salt_len} => s.instantiate .saturating_add(s.instantiate_per_input_byte.saturating_mul(input_data_len.into())) .saturating_add(s.instantiate_per_salt_byte.saturating_mul(salt_len.into())), + InstantiateSurchargeCodeSize(len) => + s.instantiate_per_code_byte.saturating_mul(len.into()), InstantiateCopyOut(len) => s.instantiate_per_output_byte .saturating_mul(len.into()), HashSha256(len) => s.hash_sha2_256 @@ -408,10 +424,19 @@ where where Tok: Token>, { - match self.gas_meter.charge(&self.ext.schedule().host_fn_weights, token) { - GasMeterResult::Proceed(amount) => Ok(amount), - GasMeterResult::OutOfGas => 
Err(Error::::OutOfGas.into()) - } + self.gas_meter.charge(&self.ext.schedule().host_fn_weights, token) + } + + /// Correct previously charged gas amount. + pub fn adjust_gas(&mut self, charged_amount: ChargedAmount, adjusted_amount: Tok) + where + Tok: Token>, + { + self.gas_meter.adjust_gas( + charged_amount, + &self.ext.schedule().host_fn_weights, + adjusted_amount, + ); } /// Read designated chunk from the sandbox memory. @@ -774,11 +799,12 @@ define_env!(Env, , ctx.read_sandbox_memory_as(callee_ptr, callee_len)?; let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; - if value > 0u32.into() { ctx.charge_gas(RuntimeToken::CallSurchargeTransfer)?; } - + let charged = ctx.charge_gas( + RuntimeToken::CallSurchargeCodeSize(::MaxCodeSize::get()) + )?; let nested_gas_limit = if gas == 0 { ctx.gas_meter.gas_left() } else { @@ -796,16 +822,20 @@ define_env!(Env, , ) } // there is not enough gas to allocate for the nested call. - None => Err(Error::<::T>::OutOfGas.into()), + None => Err((Error::<::T>::OutOfGas.into(), 0)), } }); - - if let Ok(output) = &call_outcome { + let code_len = match &call_outcome { + Ok((_, len)) => len, + Err((_, len)) => len, + }; + ctx.adjust_gas(charged, RuntimeToken::CallSurchargeCodeSize(*code_len)); + if let Ok((output, _)) = &call_outcome { ctx.write_sandbox_output(output_ptr, output_len_ptr, &output.data, true, |len| { Some(RuntimeToken::CallCopyOut(len)) })?; } - Ok(Runtime::::exec_into_return_code(call_outcome)?) + Ok(Runtime::::exec_into_return_code(call_outcome.map(|r| r.0).map_err(|r| r.0))?) }, // Instantiate a contract with the specified code hash. @@ -816,8 +846,8 @@ define_env!(Env, , // length to `output_len_ptr`. The copy of the output buffer and address can be skipped by // supplying the sentinel value of `u32::max_value()` to `output_ptr` or `address_ptr`. 
// - // After running the constructor it is verfied that the contract account holds at - // least the subsistence threshold. If that is not the case the instantion fails and + // After running the constructor it is verified that the contract account holds at + // least the subsistence threshold. If that is not the case the instantiation fails and // the contract is not created. // // # Parameters @@ -836,7 +866,7 @@ define_env!(Env, , // - output_ptr: a pointer where the output buffer is copied to. // - output_len_ptr: in-out pointer to where the length of the buffer is read from // and the actual length is written to. - // - salt_ptr: Pointer to raw bytes used for address deriviation. See `fn contract_address`. + // - salt_ptr: Pointer to raw bytes used for address derivation. See `fn contract_address`. // - salt_len: length in bytes of the supplied salt. // // # Errors @@ -875,7 +905,9 @@ define_env!(Env, , let value: BalanceOf<::T> = ctx.read_sandbox_memory_as(value_ptr, value_len)?; let input_data = ctx.read_sandbox_memory(input_data_ptr, input_data_len)?; let salt = ctx.read_sandbox_memory(salt_ptr, salt_len)?; - + let charged = ctx.charge_gas( + RuntimeToken::InstantiateSurchargeCodeSize(::MaxCodeSize::get()) + )?; let nested_gas_limit = if gas == 0 { ctx.gas_meter.gas_left() } else { @@ -894,10 +926,15 @@ define_env!(Env, , ) } // there is not enough gas to allocate for the nested call. 
- None => Err(Error::<::T>::OutOfGas.into()), + None => Err((Error::<::T>::OutOfGas.into(), 0)), } }); - if let Ok((address, output)) = &instantiate_outcome { + let code_len = match &instantiate_outcome { + Ok((_, _, code_len)) => code_len, + Err((_, code_len)) => code_len, + }; + ctx.adjust_gas(charged, RuntimeToken::InstantiateSurchargeCodeSize(*code_len)); + if let Ok((address, output, _)) = &instantiate_outcome { if !output.flags.contains(ReturnFlags::REVERT) { ctx.write_sandbox_output( address_ptr, address_len_ptr, &address.encode(), true, already_charged, @@ -907,7 +944,9 @@ define_env!(Env, , Some(RuntimeToken::InstantiateCopyOut(len)) })?; } - Ok(Runtime::::exec_into_return_code(instantiate_outcome.map(|(_id, retval)| retval))?) + Ok(Runtime::::exec_into_return_code( + instantiate_outcome.map(|(_, retval, _)| retval).map_err(|(err, _)| err) + )?) }, // Remove the calling account and transfer remaining balance. @@ -917,7 +956,7 @@ define_env!(Env, , // which is considered fatal and results in a trap + rollback. // // - beneficiary_ptr: a pointer to the address of the beneficiary account where all - // where all remaining funds of the caller are transfered. + // where all remaining funds of the caller are transferred. // Should be decodable as an `T::AccountId`. Traps otherwise. // - beneficiary_len: length of the address buffer. 
// @@ -935,7 +974,15 @@ define_env!(Env, , let beneficiary: <::T as frame_system::Config>::AccountId = ctx.read_sandbox_memory_as(beneficiary_ptr, beneficiary_len)?; - ctx.ext.terminate(&beneficiary)?; + let charged = ctx.charge_gas( + RuntimeToken::TerminateSurchargeCodeSize(::MaxCodeSize::get()) + )?; + let (result, code_len) = match ctx.ext.terminate(&beneficiary) { + Ok(len) => (Ok(()), len), + Err((err, len)) => (Err(err), len), + }; + ctx.adjust_gas(charged, RuntimeToken::TerminateSurchargeCodeSize(code_len)); + result?; Err(TrapReason::Termination) }, @@ -963,7 +1010,7 @@ define_env!(Env, , // Cease contract execution and save a data buffer as a result of the execution. // - // This function never retuns as it stops execution of the caller. + // This function never returns as it stops execution of the caller. // This is the only way to return a data buffer to the caller. Returning from // execution without calling this function is equivalent to calling: // ``` @@ -1150,7 +1197,7 @@ define_env!(Env, , // This function will compute a tombstone hash from the caller's storage and the given code hash // and if the hash matches the hash found in the tombstone at the specified address - kill // the caller contract and restore the destination contract and set the specified `rent_allowance`. - // All caller's funds are transfered to the destination. + // All caller's funds are transferred to the destination. // // The tombstone hash is derived as `hash(code_hash, storage_root_hash)`. 
In order to match // this hash to its own hash the restorer must make its storage equal to the one of the @@ -1220,7 +1267,22 @@ define_env!(Env, , delta }; - ctx.ext.restore_to(dest, code_hash, rent_allowance, delta)?; + let max_len = ::MaxCodeSize::get(); + let charged = ctx.charge_gas(RuntimeToken::RestoreToSurchargeCodeSize { + caller_code: max_len, + tombstone_code: max_len, + })?; + let (result, caller_code, tombstone_code) = match ctx.ext.restore_to( + dest, code_hash, rent_allowance, delta + ) { + Ok((code, tomb)) => (Ok(()), code, tomb), + Err((err, code, tomb)) => (Err(err), code, tomb), + }; + ctx.adjust_gas(charged, RuntimeToken::RestoreToSurchargeCodeSize { + caller_code, + tombstone_code, + }); + result?; Err(TrapReason::Restoration) }, diff --git a/frame/contracts/src/weights.rs b/frame/contracts/src/weights.rs index 9c53611038..905ccf8cb5 100644 --- a/frame/contracts/src/weights.rs +++ b/frame/contracts/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_contracts //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 -//! DATE: 2021-02-04, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-02-18, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -47,11 +47,12 @@ pub trait WeightInfo { fn on_initialize() -> Weight; fn on_initialize_per_trie_key(k: u32, ) -> Weight; fn on_initialize_per_queue_item(q: u32, ) -> Weight; + fn instrument(c: u32, ) -> Weight; fn update_schedule() -> Weight; fn instantiate_with_code(c: u32, s: u32, ) -> Weight; - fn instantiate(s: u32, ) -> Weight; - fn call() -> Weight; - fn claim_surcharge() -> Weight; + fn instantiate(c: u32, s: u32, ) -> Weight; + fn call(c: u32, ) -> Weight; + fn claim_surcharge(c: u32, ) -> Weight; fn seal_caller(r: u32, ) -> Weight; fn seal_address(r: u32, ) -> Weight; fn seal_gas_left(r: u32, ) -> Weight; @@ -69,8 +70,9 @@ pub trait WeightInfo { fn seal_return(r: u32, ) -> Weight; fn seal_return_per_kb(n: u32, ) -> Weight; fn seal_terminate(r: u32, ) -> Weight; + fn seal_terminate_per_code_kb(c: u32, ) -> Weight; fn seal_restore_to(r: u32, ) -> Weight; - fn seal_restore_to_per_delta(d: u32, ) -> Weight; + fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight; fn seal_random(r: u32, ) -> Weight; fn seal_deposit_event(r: u32, ) -> Weight; fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight; @@ -82,9 +84,9 @@ pub trait WeightInfo { fn seal_get_storage_per_kb(n: u32, ) -> Weight; fn seal_transfer(r: u32, ) -> Weight; fn seal_call(r: u32, ) -> Weight; - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight; + fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight; fn seal_instantiate(r: u32, ) -> Weight; - fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight; + fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight; fn seal_hash_sha2_256(r: u32, ) -> Weight; fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight; fn seal_hash_keccak_256(r: u32, ) -> Weight; @@ -150,11 +152,11 @@ pub trait WeightInfo { 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn on_initialize() -> Weight { - (3_947_000 as Weight) + (3_733_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) } fn on_initialize_per_trie_key(k: u32, ) -> Weight { - (46_644_000 as Weight) + (49_569_000 as Weight) // Standard Error: 5_000 .saturating_add((2_295_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) @@ -162,235 +164,259 @@ impl WeightInfo for SubstrateWeight { .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 164_000 - .saturating_add((165_220_000 as Weight).saturating_mul(q as Weight)) + (358_064_000 as Weight) + // Standard Error: 143_000 + .saturating_add((140_992_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn instrument(c: u32, ) -> Weight { + (44_198_000 as Weight) + // Standard Error: 188_000 + .saturating_add((125_833_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn update_schedule() -> Weight { - (28_195_000 as Weight) + (29_190_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 126_000 - .saturating_add((154_196_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 63_000 - .saturating_add((2_764_000 as Weight).saturating_mul(s as Weight)) + (180_015_000 as Weight) + // Standard Error: 197_000 + .saturating_add((167_480_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 12_000 + .saturating_add((2_581_000 as Weight).saturating_mul(s as Weight)) 
.saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(5 as Weight)) } - fn instantiate(s: u32, ) -> Weight { - (201_407_000 as Weight) + fn instantiate(c: u32, s: u32, ) -> Weight { + (180_996_000 as Weight) + // Standard Error: 14_000 + .saturating_add((8_684_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 1_000 - .saturating_add((2_247_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_518_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } - fn call() -> Weight { - (180_337_000 as Weight) + fn call(c: u32, ) -> Weight { + (184_326_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_920_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } - fn claim_surcharge() -> Weight { - (322_371_000 as Weight) + fn claim_surcharge(c: u32, ) -> Weight { + (303_270_000 as Weight) + // Standard Error: 5_000 + .saturating_add((5_108_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn seal_caller(r: u32, ) -> Weight { - (135_499_000 as Weight) - // Standard Error: 296_000 - .saturating_add((275_938_000 as Weight).saturating_mul(r as Weight)) + (128_965_000 as Weight) + // Standard Error: 130_000 + .saturating_add((270_123_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (132_674_000 as Weight) - // Standard Error: 158_000 - .saturating_add((273_808_000 as Weight).saturating_mul(r as Weight)) + (137_748_000 as Weight) + // Standard Error: 184_000 + .saturating_add((270_103_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_gas_left(r: u32, ) 
-> Weight { - (126_819_000 as Weight) - // Standard Error: 145_000 - .saturating_add((269_173_000 as Weight).saturating_mul(r as Weight)) + (118_784_000 as Weight) + // Standard Error: 234_000 + .saturating_add((264_467_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (140_223_000 as Weight) - // Standard Error: 259_000 - .saturating_add((581_353_000 as Weight).saturating_mul(r as Weight)) + (146_072_000 as Weight) + // Standard Error: 207_000 + .saturating_add((573_282_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (129_490_000 as Weight) - // Standard Error: 132_000 - .saturating_add((269_433_000 as Weight).saturating_mul(r as Weight)) + (133_857_000 as Weight) + // Standard Error: 151_000 + .saturating_add((263_110_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (127_251_000 as Weight) - // Standard Error: 161_000 - .saturating_add((268_720_000 as Weight).saturating_mul(r as Weight)) + (130_447_000 as Weight) + // Standard Error: 125_000 + .saturating_add((265_565_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (129_546_000 as Weight) - // Standard Error: 130_000 - .saturating_add((268_280_000 as Weight).saturating_mul(r as Weight)) + (116_232_000 as Weight) + // Standard Error: 327_000 + .saturating_add((265_728_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_rent_allowance(r: u32, ) -> Weight { - (133_306_000 as Weight) - // Standard Error: 208_000 - .saturating_add((604_235_000 as Weight).saturating_mul(r as Weight)) + (175_561_000 as Weight) + // Standard Error: 292_000 + .saturating_add((604_373_000 as 
Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (133_689_000 as Weight) - // Standard Error: 115_000 - .saturating_add((267_107_000 as Weight).saturating_mul(r as Weight)) + (133_961_000 as Weight) + // Standard Error: 150_000 + .saturating_add((262_329_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (133_773_000 as Weight) - // Standard Error: 130_000 - .saturating_add((268_897_000 as Weight).saturating_mul(r as Weight)) + (128_662_000 as Weight) + // Standard Error: 150_000 + .saturating_add((263_234_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (133_222_000 as Weight) - // Standard Error: 476_000 - .saturating_add((514_400_000 as Weight).saturating_mul(r as Weight)) + (142_580_000 as Weight) + // Standard Error: 205_000 + .saturating_add((505_378_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (118_769_000 as Weight) - // Standard Error: 102_000 - .saturating_add((134_134_000 as Weight).saturating_mul(r as Weight)) + (116_346_000 as Weight) + // Standard Error: 86_000 + .saturating_add((124_599_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (124_719_000 as Weight) - // Standard Error: 93_000 - .saturating_add((7_486_000 as Weight).saturating_mul(r as Weight)) + (124_679_000 as Weight) + // Standard Error: 81_000 + .saturating_add((7_310_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (136_348_000 as Weight) + (136_069_000 as Weight) // Standard Error: 0 .saturating_add((274_000 as Weight).saturating_mul(n as 
Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (118_710_000 as Weight) - // Standard Error: 77_000 - .saturating_add((4_566_000 as Weight).saturating_mul(r as Weight)) + (118_807_000 as Weight) + // Standard Error: 66_000 + .saturating_add((4_740_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (127_609_000 as Weight) + (127_702_000 as Weight) // Standard Error: 0 - .saturating_add((786_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((784_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (125_463_000 as Weight) - // Standard Error: 154_000 - .saturating_add((106_188_000 as Weight).saturating_mul(r as Weight)) + (124_847_000 as Weight) + // Standard Error: 87_000 + .saturating_add((107_679_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } + fn seal_terminate_per_code_kb(c: u32, ) -> Weight { + (237_115_000 as Weight) + // Standard Error: 6_000 + .saturating_add((8_556_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } fn seal_restore_to(r: u32, ) -> Weight { - (219_195_000 as Weight) - // Standard Error: 361_000 - .saturating_add((131_326_000 as Weight).saturating_mul(r as Weight)) + (217_959_000 as Weight) + // Standard Error: 455_000 + .saturating_add((134_528_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) 
.saturating_add(T::DbWeight::get().writes((6 as Weight).saturating_mul(r as Weight))) } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (6_742_000 as Weight) - // Standard Error: 2_484_000 - .saturating_add((3_747_735_000 as Weight).saturating_mul(d as Weight)) + fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 151_000 + .saturating_add((9_061_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 151_000 + .saturating_add((4_807_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 1_331_000 + .saturating_add((3_736_196_000 as Weight).saturating_mul(d as Weight)) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(T::DbWeight::get().writes(7 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (137_248_000 as Weight) - // Standard Error: 662_000 - .saturating_add((661_121_000 as Weight).saturating_mul(r as Weight)) + (134_143_000 as Weight) + // Standard Error: 233_000 + .saturating_add((643_555_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (147_654_000 as Weight) - // Standard Error: 305_000 - .saturating_add((935_148_000 as Weight).saturating_mul(r as Weight)) + (142_838_000 as Weight) + // Standard Error: 367_000 + .saturating_add((937_126_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - (1_246_123_000 as Weight) - // Standard Error: 2_807_000 - .saturating_add((585_535_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 553_000 - .saturating_add((249_976_000 as Weight).saturating_mul(n as Weight)) + (1_210_711_000 as Weight) + // Standard Error: 
2_124_000 + .saturating_add((594_541_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 418_000 + .saturating_add((251_068_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (140_588_000 as Weight) - // Standard Error: 228_000 - .saturating_add((707_872_000 as Weight).saturating_mul(r as Weight)) + (144_533_000 as Weight) + // Standard Error: 220_000 + .saturating_add((714_590_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (2_767_124_000 as Weight) - // Standard Error: 18_504_000 - .saturating_add((17_507_873_000 as Weight).saturating_mul(r as Weight)) + (406_366_000 as Weight) + // Standard Error: 3_533_000 + .saturating_add((16_167_082_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (1_748_586_000 as Weight) - // Standard Error: 359_000 - .saturating_add((75_231_000 as Weight).saturating_mul(n as Weight)) + (1_739_590_000 as Weight) + // Standard Error: 390_000 + .saturating_add((74_815_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_209_000 - .saturating_add((2_261_355_000 as Weight).saturating_mul(r as Weight)) 
+ // Standard Error: 2_284_000 + .saturating_add((2_281_347_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) .saturating_add(T::DbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_get_storage(r: u32, ) -> Weight { - (83_780_000 as Weight) - // Standard Error: 965_000 - .saturating_add((973_164_000 as Weight).saturating_mul(r as Weight)) + (81_889_000 as Weight) + // Standard Error: 1_171_000 + .saturating_add((930_704_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (728_625_000 as Weight) - // Standard Error: 294_000 - .saturating_add((154_625_000 as Weight).saturating_mul(n as Weight)) + (709_323_000 as Weight) + // Standard Error: 391_000 + .saturating_add((155_689_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_543_000 - .saturating_add((5_467_966_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_846_000 + .saturating_add((5_566_275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(1 as Weight)) @@ -398,355 +424,359 @@ impl WeightInfo for SubstrateWeight { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 9_216_000 - .saturating_add((10_265_093_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 4_823_000 + .saturating_add((10_461_861_000 as Weight).saturating_mul(r as Weight)) 
.saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (10_426_869_000 as Weight) - // Standard Error: 114_622_000 - .saturating_add((4_366_037_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 40_000 - .saturating_add((59_741_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 43_000 - .saturating_add((82_331_000 as Weight).saturating_mul(o as Weight)) + fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { + (9_686_594_000 as Weight) + // Standard Error: 473_000 + .saturating_add((393_132_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 197_094_000 + .saturating_add((4_957_181_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 62_000 + .saturating_add((59_974_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 66_000 + .saturating_add((83_027_000 as Weight).saturating_mul(o as Weight)) .saturating_add(T::DbWeight::get().reads(206 as Weight)) .saturating_add(T::DbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 35_927_000 - .saturating_add((21_088_623_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 34_133_000 + .saturating_add((21_407_630_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (17_200_760_000 as Weight) - // Standard Error: 157_000 - .saturating_add((61_221_000 as Weight).saturating_mul(i as Weight)) - 
// Standard Error: 157_000 - .saturating_add((84_149_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 157_000 - .saturating_add((284_655_000 as Weight).saturating_mul(s as Weight)) + fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { + (9_705_322_000 as Weight) + // Standard Error: 674_000 + .saturating_add((879_118_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 95_000 + .saturating_add((63_025_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 95_000 + .saturating_add((87_633_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 95_000 + .saturating_add((311_987_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(207 as Weight)) .saturating_add(T::DbWeight::get().writes(203 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (126_005_000 as Weight) - // Standard Error: 133_000 - .saturating_add((252_338_000 as Weight).saturating_mul(r as Weight)) + (125_486_000 as Weight) + // Standard Error: 266_000 + .saturating_add((240_913_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (727_930_000 as Weight) - // Standard Error: 57_000 - .saturating_add((430_299_000 as Weight).saturating_mul(n as Weight)) + (636_153_000 as Weight) + // Standard Error: 47_000 + .saturating_add((429_541_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (129_778_000 as Weight) - // Standard Error: 146_000 - .saturating_add((266_097_000 as Weight).saturating_mul(r as Weight)) + (131_768_000 as Weight) + // Standard Error: 176_000 + .saturating_add((256_946_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (683_078_000 as Weight) - // Standard 
Error: 42_000 - .saturating_add((344_294_000 as Weight).saturating_mul(n as Weight)) + (647_777_000 as Weight) + // Standard Error: 29_000 + .saturating_add((344_145_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (141_731_000 as Weight) - // Standard Error: 251_000 - .saturating_add((239_931_000 as Weight).saturating_mul(r as Weight)) + (130_042_000 as Weight) + // Standard Error: 158_000 + .saturating_add((225_474_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (563_895_000 as Weight) - // Standard Error: 51_000 - .saturating_add((160_216_000 as Weight).saturating_mul(n as Weight)) + (638_275_000 as Weight) + // Standard Error: 30_000 + .saturating_add((159_832_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (132_587_000 as Weight) - // Standard Error: 159_000 - .saturating_add((239_287_000 as Weight).saturating_mul(r as Weight)) + (126_632_000 as Weight) + // Standard Error: 143_000 + .saturating_add((225_612_000 as Weight).saturating_mul(r as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (606_572_000 as Weight) - // Standard Error: 34_000 - .saturating_add((160_101_000 as Weight).saturating_mul(n as Weight)) + (656_936_000 as Weight) + // Standard Error: 35_000 + .saturating_add((159_763_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (24_366_000 as Weight) - // Standard Error: 21_000 - .saturating_add((3_114_000 as Weight).saturating_mul(r as Weight)) + (25_205_000 as Weight) + // Standard Error: 26_000 + .saturating_add((3_311_000 as Weight).saturating_mul(r as Weight)) } fn 
instr_i64load(r: u32, ) -> Weight { - (26_779_000 as Weight) + (27_394_000 as Weight) // Standard Error: 28_000 - .saturating_add((161_654_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((159_123_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (26_763_000 as Weight) - // Standard Error: 88_000 - .saturating_add((232_822_000 as Weight).saturating_mul(r as Weight)) + (27_398_000 as Weight) + // Standard Error: 57_000 + .saturating_add((229_775_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (24_342_000 as Weight) - // Standard Error: 36_000 - .saturating_add((12_530_000 as Weight).saturating_mul(r as Weight)) + (25_212_000 as Weight) + // Standard Error: 22_000 + .saturating_add((12_291_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (24_301_000 as Weight) - // Standard Error: 25_000 - .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + (25_116_000 as Weight) + // Standard Error: 16_000 + .saturating_add((12_146_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (24_253_000 as Weight) - // Standard Error: 21_000 - .saturating_add((6_464_000 as Weight).saturating_mul(r as Weight)) + (25_119_000 as Weight) + // Standard Error: 19_000 + .saturating_add((6_608_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (24_259_000 as Weight) - // Standard Error: 20_000 - .saturating_add((14_030_000 as Weight).saturating_mul(r as Weight)) + (25_146_000 as Weight) + // Standard Error: 23_000 + .saturating_add((14_017_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (24_313_000 as Weight) - // Standard Error: 37_000 - .saturating_add((15_788_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 21_000 + .saturating_add((15_460_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: 
u32, ) -> Weight { - (37_991_000 as Weight) - // Standard Error: 0 - .saturating_add((138_000 as Weight).saturating_mul(e as Weight)) + (37_079_000 as Weight) + // Standard Error: 1_000 + .saturating_add((160_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (24_739_000 as Weight) - // Standard Error: 31_000 - .saturating_add((97_567_000 as Weight).saturating_mul(r as Weight)) + (25_599_000 as Weight) + // Standard Error: 201_000 + .saturating_add((99_705_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (32_395_000 as Weight) - // Standard Error: 432_000 - .saturating_add((198_972_000 as Weight).saturating_mul(r as Weight)) + (33_236_000 as Weight) + // Standard Error: 368_000 + .saturating_add((199_753_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (238_857_000 as Weight) + (247_488_000 as Weight) // Standard Error: 6_000 - .saturating_add((3_491_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((3_374_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (42_196_000 as Weight) - // Standard Error: 22_000 - .saturating_add((3_161_000 as Weight).saturating_mul(r as Weight)) + (44_133_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_235_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (42_133_000 as Weight) - // Standard Error: 29_000 - .saturating_add((3_459_000 as Weight).saturating_mul(r as Weight)) + (44_107_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_486_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (42_164_000 as Weight) - // Standard Error: 25_000 - .saturating_add((4_653_000 as Weight).saturating_mul(r as Weight)) + (44_116_000 as Weight) + // Standard Error: 23_000 + .saturating_add((4_757_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) 
-> Weight { - (27_802_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_780_000 as Weight).saturating_mul(r as Weight)) + (28_712_000 as Weight) + // Standard Error: 29_000 + .saturating_add((7_659_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (27_826_000 as Weight) - // Standard Error: 21_000 - .saturating_add((11_978_000 as Weight).saturating_mul(r as Weight)) + (28_624_000 as Weight) + // Standard Error: 25_000 + .saturating_add((11_841_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (26_753_000 as Weight) - // Standard Error: 20_000 - .saturating_add((3_494_000 as Weight).saturating_mul(r as Weight)) + (27_445_000 as Weight) + // Standard Error: 18_000 + .saturating_add((3_487_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (25_078_000 as Weight) - // Standard Error: 4_213_000 - .saturating_add((2_324_209_000 as Weight).saturating_mul(r as Weight)) + (26_016_000 as Weight) + // Standard Error: 4_230_000 + .saturating_add((2_300_044_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (24_301_000 as Weight) - // Standard Error: 28_000 - .saturating_add((5_201_000 as Weight).saturating_mul(r as Weight)) + (25_227_000 as Weight) + // Standard Error: 29_000 + .saturating_add((5_341_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (24_237_000 as Weight) - // Standard Error: 14_000 - .saturating_add((5_251_000 as Weight).saturating_mul(r as Weight)) + (25_163_000 as Weight) + // Standard Error: 26_000 + .saturating_add((5_355_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (24_290_000 as Weight) - // Standard Error: 20_000 - .saturating_add((5_780_000 as Weight).saturating_mul(r as Weight)) + (25_204_000 as Weight) + // Standard Error: 29_000 + .saturating_add((5_930_000 as Weight).saturating_mul(r as Weight)) } 
fn instr_i64eqz(r: u32, ) -> Weight { - (24_278_000 as Weight) - // Standard Error: 17_000 - .saturating_add((5_145_000 as Weight).saturating_mul(r as Weight)) + (25_177_000 as Weight) + // Standard Error: 21_000 + .saturating_add((5_457_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (24_249_000 as Weight) - // Standard Error: 14_000 - .saturating_add((5_248_000 as Weight).saturating_mul(r as Weight)) + (25_206_000 as Weight) + // Standard Error: 19_000 + .saturating_add((5_229_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (24_266_000 as Weight) - // Standard Error: 13_000 - .saturating_add((5_236_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 17_000 + .saturating_add((5_301_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (24_236_000 as Weight) - // Standard Error: 12_000 - .saturating_add((5_304_000 as Weight).saturating_mul(r as Weight)) + (25_184_000 as Weight) + // Standard Error: 28_000 + .saturating_add((5_356_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (24_262_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_220_000 as Weight).saturating_mul(r as Weight)) + (25_195_000 as Weight) + // Standard Error: 48_000 + .saturating_add((7_406_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (24_287_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_072_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 19_000 + .saturating_add((7_303_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (24_211_000 as Weight) - // Standard Error: 12_000 - .saturating_add((7_196_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 34_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as 
Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (24_175_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + (25_152_000 as Weight) + // Standard Error: 46_000 + .saturating_add((7_464_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (24_209_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_131_000 as Weight).saturating_mul(r as Weight)) + (25_140_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_308_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (24_261_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_203_000 as Weight).saturating_mul(r as Weight)) + (25_723_000 as Weight) + // Standard Error: 29_000 + .saturating_add((6_846_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (24_258_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) + (25_201_000 as Weight) + // Standard Error: 20_000 + .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (24_236_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_143_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (24_262_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_261_000 as Weight).saturating_mul(r as Weight)) + (25_146_000 as Weight) + // Standard Error: 37_000 + .saturating_add((7_451_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (24_242_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_249_000 as Weight).saturating_mul(r as Weight)) + (25_193_000 as Weight) + // Standard Error: 30_000 + .saturating_add((7_391_000 as Weight).saturating_mul(r as Weight)) } 
fn instr_i64add(r: u32, ) -> Weight { - (24_248_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_149_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 30_000 + .saturating_add((7_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (24_243_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_128_000 as Weight).saturating_mul(r as Weight)) + (25_221_000 as Weight) + // Standard Error: 34_000 + .saturating_add((7_168_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (24_217_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_237_000 as Weight).saturating_mul(r as Weight)) + (25_221_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_200_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (24_191_000 as Weight) - // Standard Error: 28_000 - .saturating_add((12_970_000 as Weight).saturating_mul(r as Weight)) + (25_229_000 as Weight) + // Standard Error: 32_000 + .saturating_add((13_066_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (24_213_000 as Weight) - // Standard Error: 19_000 - .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + (25_210_000 as Weight) + // Standard Error: 28_000 + .saturating_add((12_314_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (24_238_000 as Weight) - // Standard Error: 15_000 - .saturating_add((12_944_000 as Weight).saturating_mul(r as Weight)) + (25_186_000 as Weight) + // Standard Error: 24_000 + .saturating_add((13_055_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (24_317_000 as Weight) - // Standard Error: 16_000 - .saturating_add((12_129_000 as Weight).saturating_mul(r as Weight)) + (25_162_000 as Weight) + // Standard Error: 25_000 + .saturating_add((12_327_000 as Weight).saturating_mul(r as Weight)) } 
fn instr_i64and(r: u32, ) -> Weight { - (24_282_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_123_000 as Weight).saturating_mul(r as Weight)) + (25_191_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_153_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (24_243_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_148_000 as Weight).saturating_mul(r as Weight)) + (25_184_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (24_239_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_157_000 as Weight).saturating_mul(r as Weight)) + (25_129_000 as Weight) + // Standard Error: 31_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (24_279_000 as Weight) + (25_156_000 as Weight) // Standard Error: 16_000 - .saturating_add((7_253_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (24_285_000 as Weight) - // Standard Error: 29_000 - .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) + (25_159_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_415_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (24_298_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + (25_181_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_265_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (24_226_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_269_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_443_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { 
- (24_235_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_299_000 as Weight).saturating_mul(r as Weight)) + (25_103_000 as Weight) + // Standard Error: 44_000 + .saturating_add((7_463_000 as Weight).saturating_mul(r as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { fn on_initialize() -> Weight { - (3_947_000 as Weight) + (3_733_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) } fn on_initialize_per_trie_key(k: u32, ) -> Weight { - (46_644_000 as Weight) + (49_569_000 as Weight) // Standard Error: 5_000 .saturating_add((2_295_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) @@ -754,235 +784,259 @@ impl WeightInfo for () { .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn on_initialize_per_queue_item(q: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 164_000 - .saturating_add((165_220_000 as Weight).saturating_mul(q as Weight)) + (358_064_000 as Weight) + // Standard Error: 143_000 + .saturating_add((140_992_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn instrument(c: u32, ) -> Weight { + (44_198_000 as Weight) + // Standard Error: 188_000 + .saturating_add((125_833_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn update_schedule() -> Weight { - (28_195_000 as Weight) + (29_190_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn instantiate_with_code(c: u32, s: u32, ) -> Weight { - (0 as Weight) - // Standard Error: 126_000 - .saturating_add((154_196_000 as Weight).saturating_mul(c as Weight)) - // Standard Error: 63_000 - .saturating_add((2_764_000 as Weight).saturating_mul(s as 
Weight)) + (180_015_000 as Weight) + // Standard Error: 197_000 + .saturating_add((167_480_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 12_000 + .saturating_add((2_581_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(5 as Weight)) } - fn instantiate(s: u32, ) -> Weight { - (201_407_000 as Weight) + fn instantiate(c: u32, s: u32, ) -> Weight { + (180_996_000 as Weight) + // Standard Error: 14_000 + .saturating_add((8_684_000 as Weight).saturating_mul(c as Weight)) // Standard Error: 1_000 - .saturating_add((2_247_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_518_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } - fn call() -> Weight { - (180_337_000 as Weight) + fn call(c: u32, ) -> Weight { + (184_326_000 as Weight) + // Standard Error: 2_000 + .saturating_add((3_920_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } - fn claim_surcharge() -> Weight { - (322_371_000 as Weight) + fn claim_surcharge(c: u32, ) -> Weight { + (303_270_000 as Weight) + // Standard Error: 5_000 + .saturating_add((5_108_000 as Weight).saturating_mul(c as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn seal_caller(r: u32, ) -> Weight { - (135_499_000 as Weight) - // Standard Error: 296_000 - .saturating_add((275_938_000 as Weight).saturating_mul(r as Weight)) + (128_965_000 as Weight) + // Standard Error: 130_000 + .saturating_add((270_123_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_address(r: u32, ) -> Weight { - (132_674_000 as Weight) - // Standard Error: 158_000 - .saturating_add((273_808_000 
as Weight).saturating_mul(r as Weight)) + (137_748_000 as Weight) + // Standard Error: 184_000 + .saturating_add((270_103_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_gas_left(r: u32, ) -> Weight { - (126_819_000 as Weight) - // Standard Error: 145_000 - .saturating_add((269_173_000 as Weight).saturating_mul(r as Weight)) + (118_784_000 as Weight) + // Standard Error: 234_000 + .saturating_add((264_467_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_balance(r: u32, ) -> Weight { - (140_223_000 as Weight) - // Standard Error: 259_000 - .saturating_add((581_353_000 as Weight).saturating_mul(r as Weight)) + (146_072_000 as Weight) + // Standard Error: 207_000 + .saturating_add((573_282_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_value_transferred(r: u32, ) -> Weight { - (129_490_000 as Weight) - // Standard Error: 132_000 - .saturating_add((269_433_000 as Weight).saturating_mul(r as Weight)) + (133_857_000 as Weight) + // Standard Error: 151_000 + .saturating_add((263_110_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_minimum_balance(r: u32, ) -> Weight { - (127_251_000 as Weight) - // Standard Error: 161_000 - .saturating_add((268_720_000 as Weight).saturating_mul(r as Weight)) + (130_447_000 as Weight) + // Standard Error: 125_000 + .saturating_add((265_565_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_tombstone_deposit(r: u32, ) -> Weight { - (129_546_000 as Weight) - // Standard Error: 130_000 - .saturating_add((268_280_000 as Weight).saturating_mul(r as Weight)) + (116_232_000 as Weight) + // Standard Error: 327_000 + .saturating_add((265_728_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } 
fn seal_rent_allowance(r: u32, ) -> Weight { - (133_306_000 as Weight) - // Standard Error: 208_000 - .saturating_add((604_235_000 as Weight).saturating_mul(r as Weight)) + (175_561_000 as Weight) + // Standard Error: 292_000 + .saturating_add((604_373_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_block_number(r: u32, ) -> Weight { - (133_689_000 as Weight) - // Standard Error: 115_000 - .saturating_add((267_107_000 as Weight).saturating_mul(r as Weight)) + (133_961_000 as Weight) + // Standard Error: 150_000 + .saturating_add((262_329_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_now(r: u32, ) -> Weight { - (133_773_000 as Weight) - // Standard Error: 130_000 - .saturating_add((268_897_000 as Weight).saturating_mul(r as Weight)) + (128_662_000 as Weight) + // Standard Error: 150_000 + .saturating_add((263_234_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_weight_to_fee(r: u32, ) -> Weight { - (133_222_000 as Weight) - // Standard Error: 476_000 - .saturating_add((514_400_000 as Weight).saturating_mul(r as Weight)) + (142_580_000 as Weight) + // Standard Error: 205_000 + .saturating_add((505_378_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) } fn seal_gas(r: u32, ) -> Weight { - (118_769_000 as Weight) - // Standard Error: 102_000 - .saturating_add((134_134_000 as Weight).saturating_mul(r as Weight)) + (116_346_000 as Weight) + // Standard Error: 86_000 + .saturating_add((124_599_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_input(r: u32, ) -> Weight { - (124_719_000 as Weight) - // Standard Error: 93_000 - .saturating_add((7_486_000 as Weight).saturating_mul(r as Weight)) + (124_679_000 as Weight) + // Standard Error: 81_000 + .saturating_add((7_310_000 as 
Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_input_per_kb(n: u32, ) -> Weight { - (136_348_000 as Weight) + (136_069_000 as Weight) // Standard Error: 0 .saturating_add((274_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_return(r: u32, ) -> Weight { - (118_710_000 as Weight) - // Standard Error: 77_000 - .saturating_add((4_566_000 as Weight).saturating_mul(r as Weight)) + (118_807_000 as Weight) + // Standard Error: 66_000 + .saturating_add((4_740_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_return_per_kb(n: u32, ) -> Weight { - (127_609_000 as Weight) + (127_702_000 as Weight) // Standard Error: 0 - .saturating_add((786_000 as Weight).saturating_mul(n as Weight)) + .saturating_add((784_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_terminate(r: u32, ) -> Weight { - (125_463_000 as Weight) - // Standard Error: 154_000 - .saturating_add((106_188_000 as Weight).saturating_mul(r as Weight)) + (124_847_000 as Weight) + // Standard Error: 87_000 + .saturating_add((107_679_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((2 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes((5 as Weight).saturating_mul(r as Weight))) } + fn seal_terminate_per_code_kb(c: u32, ) -> Weight { + (237_115_000 as Weight) + // Standard Error: 6_000 + .saturating_add((8_556_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } fn seal_restore_to(r: u32, ) -> Weight { - (219_195_000 as Weight) - // Standard Error: 361_000 - .saturating_add((131_326_000 as Weight).saturating_mul(r as Weight)) + (217_959_000 as 
Weight) + // Standard Error: 455_000 + .saturating_add((134_528_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes((6 as Weight).saturating_mul(r as Weight))) } - fn seal_restore_to_per_delta(d: u32, ) -> Weight { - (6_742_000 as Weight) - // Standard Error: 2_484_000 - .saturating_add((3_747_735_000 as Weight).saturating_mul(d as Weight)) + fn seal_restore_to_per_code_kb_delta(c: u32, t: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 151_000 + .saturating_add((9_061_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 151_000 + .saturating_add((4_807_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 1_331_000 + .saturating_add((3_736_196_000 as Weight).saturating_mul(d as Weight)) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(d as Weight))) .saturating_add(RocksDbWeight::get().writes(7 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(d as Weight))) } fn seal_random(r: u32, ) -> Weight { - (137_248_000 as Weight) - // Standard Error: 662_000 - .saturating_add((661_121_000 as Weight).saturating_mul(r as Weight)) + (134_143_000 as Weight) + // Standard Error: 233_000 + .saturating_add((643_555_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) } fn seal_deposit_event(r: u32, ) -> Weight { - (147_654_000 as Weight) - // Standard Error: 305_000 - .saturating_add((935_148_000 as Weight).saturating_mul(r as Weight)) + (142_838_000 as Weight) + // Standard Error: 367_000 + .saturating_add((937_126_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_deposit_event_per_topic_and_kb(t: u32, n: u32, ) -> Weight { - 
(1_246_123_000 as Weight) - // Standard Error: 2_807_000 - .saturating_add((585_535_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 553_000 - .saturating_add((249_976_000 as Weight).saturating_mul(n as Weight)) + (1_210_711_000 as Weight) + // Standard Error: 2_124_000 + .saturating_add((594_541_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 418_000 + .saturating_add((251_068_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(t as Weight))) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(t as Weight))) } fn seal_set_rent_allowance(r: u32, ) -> Weight { - (140_588_000 as Weight) - // Standard Error: 228_000 - .saturating_add((707_872_000 as Weight).saturating_mul(r as Weight)) + (144_533_000 as Weight) + // Standard Error: 220_000 + .saturating_add((714_590_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn seal_set_storage(r: u32, ) -> Weight { - (2_767_124_000 as Weight) - // Standard Error: 18_504_000 - .saturating_add((17_507_873_000 as Weight).saturating_mul(r as Weight)) + (406_366_000 as Weight) + // Standard Error: 3_533_000 + .saturating_add((16_167_082_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_set_storage_per_kb(n: u32, ) -> Weight { - (1_748_586_000 as Weight) - // Standard Error: 359_000 - .saturating_add((75_231_000 as Weight).saturating_mul(n as Weight)) + (1_739_590_000 as Weight) + // Standard Error: 390_000 + .saturating_add((74_815_000 as 
Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn seal_clear_storage(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 2_209_000 - .saturating_add((2_261_355_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 2_284_000 + .saturating_add((2_281_347_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) .saturating_add(RocksDbWeight::get().writes((100 as Weight).saturating_mul(r as Weight))) } fn seal_get_storage(r: u32, ) -> Weight { - (83_780_000 as Weight) - // Standard Error: 965_000 - .saturating_add((973_164_000 as Weight).saturating_mul(r as Weight)) + (81_889_000 as Weight) + // Standard Error: 1_171_000 + .saturating_add((930_704_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) } fn seal_get_storage_per_kb(n: u32, ) -> Weight { - (728_625_000 as Weight) - // Standard Error: 294_000 - .saturating_add((154_625_000 as Weight).saturating_mul(n as Weight)) + (709_323_000 as Weight) + // Standard Error: 391_000 + .saturating_add((155_689_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) } fn seal_transfer(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 1_543_000 - .saturating_add((5_467_966_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 1_846_000 + .saturating_add((5_566_275_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().reads((100 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) @@ -990,343 
+1044,347 @@ impl WeightInfo for () { } fn seal_call(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 9_216_000 - .saturating_add((10_265_093_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 4_823_000 + .saturating_add((10_461_861_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((200 as Weight).saturating_mul(r as Weight))) } - fn seal_call_per_transfer_input_output_kb(t: u32, i: u32, o: u32, ) -> Weight { - (10_426_869_000 as Weight) - // Standard Error: 114_622_000 - .saturating_add((4_366_037_000 as Weight).saturating_mul(t as Weight)) - // Standard Error: 40_000 - .saturating_add((59_741_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 43_000 - .saturating_add((82_331_000 as Weight).saturating_mul(o as Weight)) + fn seal_call_per_code_transfer_input_output_kb(c: u32, t: u32, i: u32, o: u32, ) -> Weight { + (9_686_594_000 as Weight) + // Standard Error: 473_000 + .saturating_add((393_132_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 197_094_000 + .saturating_add((4_957_181_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 62_000 + .saturating_add((59_974_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 66_000 + .saturating_add((83_027_000 as Weight).saturating_mul(o as Weight)) .saturating_add(RocksDbWeight::get().reads(206 as Weight)) .saturating_add(RocksDbWeight::get().writes((101 as Weight).saturating_mul(t as Weight))) } fn seal_instantiate(r: u32, ) -> Weight { (0 as Weight) - // Standard Error: 35_927_000 - .saturating_add((21_088_623_000 as Weight).saturating_mul(r as Weight)) + // Standard Error: 34_133_000 + .saturating_add((21_407_630_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((300 as Weight).saturating_mul(r as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as 
Weight)) .saturating_add(RocksDbWeight::get().writes((300 as Weight).saturating_mul(r as Weight))) } - fn seal_instantiate_per_input_output_salt_kb(i: u32, o: u32, s: u32, ) -> Weight { - (17_200_760_000 as Weight) - // Standard Error: 157_000 - .saturating_add((61_221_000 as Weight).saturating_mul(i as Weight)) - // Standard Error: 157_000 - .saturating_add((84_149_000 as Weight).saturating_mul(o as Weight)) - // Standard Error: 157_000 - .saturating_add((284_655_000 as Weight).saturating_mul(s as Weight)) + fn seal_instantiate_per_code_input_output_salt_kb(c: u32, i: u32, o: u32, s: u32, ) -> Weight { + (9_705_322_000 as Weight) + // Standard Error: 674_000 + .saturating_add((879_118_000 as Weight).saturating_mul(c as Weight)) + // Standard Error: 95_000 + .saturating_add((63_025_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 95_000 + .saturating_add((87_633_000 as Weight).saturating_mul(o as Weight)) + // Standard Error: 95_000 + .saturating_add((311_987_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(207 as Weight)) .saturating_add(RocksDbWeight::get().writes(203 as Weight)) } fn seal_hash_sha2_256(r: u32, ) -> Weight { - (126_005_000 as Weight) - // Standard Error: 133_000 - .saturating_add((252_338_000 as Weight).saturating_mul(r as Weight)) + (125_486_000 as Weight) + // Standard Error: 266_000 + .saturating_add((240_913_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_sha2_256_per_kb(n: u32, ) -> Weight { - (727_930_000 as Weight) - // Standard Error: 57_000 - .saturating_add((430_299_000 as Weight).saturating_mul(n as Weight)) + (636_153_000 as Weight) + // Standard Error: 47_000 + .saturating_add((429_541_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_keccak_256(r: u32, ) -> Weight { - (129_778_000 as Weight) - // Standard Error: 146_000 - 
.saturating_add((266_097_000 as Weight).saturating_mul(r as Weight)) + (131_768_000 as Weight) + // Standard Error: 176_000 + .saturating_add((256_946_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_keccak_256_per_kb(n: u32, ) -> Weight { - (683_078_000 as Weight) - // Standard Error: 42_000 - .saturating_add((344_294_000 as Weight).saturating_mul(n as Weight)) + (647_777_000 as Weight) + // Standard Error: 29_000 + .saturating_add((344_145_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_256(r: u32, ) -> Weight { - (141_731_000 as Weight) - // Standard Error: 251_000 - .saturating_add((239_931_000 as Weight).saturating_mul(r as Weight)) + (130_042_000 as Weight) + // Standard Error: 158_000 + .saturating_add((225_474_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_256_per_kb(n: u32, ) -> Weight { - (563_895_000 as Weight) - // Standard Error: 51_000 - .saturating_add((160_216_000 as Weight).saturating_mul(n as Weight)) + (638_275_000 as Weight) + // Standard Error: 30_000 + .saturating_add((159_832_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_128(r: u32, ) -> Weight { - (132_587_000 as Weight) - // Standard Error: 159_000 - .saturating_add((239_287_000 as Weight).saturating_mul(r as Weight)) + (126_632_000 as Weight) + // Standard Error: 143_000 + .saturating_add((225_612_000 as Weight).saturating_mul(r as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn seal_hash_blake2_128_per_kb(n: u32, ) -> Weight { - (606_572_000 as Weight) - // Standard Error: 34_000 - .saturating_add((160_101_000 as Weight).saturating_mul(n as Weight)) + (656_936_000 as Weight) + // Standard Error: 35_000 + .saturating_add((159_763_000 as Weight).saturating_mul(n as Weight)) 
.saturating_add(RocksDbWeight::get().reads(5 as Weight)) } fn instr_i64const(r: u32, ) -> Weight { - (24_366_000 as Weight) - // Standard Error: 21_000 - .saturating_add((3_114_000 as Weight).saturating_mul(r as Weight)) + (25_205_000 as Weight) + // Standard Error: 26_000 + .saturating_add((3_311_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64load(r: u32, ) -> Weight { - (26_779_000 as Weight) + (27_394_000 as Weight) // Standard Error: 28_000 - .saturating_add((161_654_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((159_123_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64store(r: u32, ) -> Weight { - (26_763_000 as Weight) - // Standard Error: 88_000 - .saturating_add((232_822_000 as Weight).saturating_mul(r as Weight)) + (27_398_000 as Weight) + // Standard Error: 57_000 + .saturating_add((229_775_000 as Weight).saturating_mul(r as Weight)) } fn instr_select(r: u32, ) -> Weight { - (24_342_000 as Weight) - // Standard Error: 36_000 - .saturating_add((12_530_000 as Weight).saturating_mul(r as Weight)) + (25_212_000 as Weight) + // Standard Error: 22_000 + .saturating_add((12_291_000 as Weight).saturating_mul(r as Weight)) } fn instr_if(r: u32, ) -> Weight { - (24_301_000 as Weight) - // Standard Error: 25_000 - .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + (25_116_000 as Weight) + // Standard Error: 16_000 + .saturating_add((12_146_000 as Weight).saturating_mul(r as Weight)) } fn instr_br(r: u32, ) -> Weight { - (24_253_000 as Weight) - // Standard Error: 21_000 - .saturating_add((6_464_000 as Weight).saturating_mul(r as Weight)) + (25_119_000 as Weight) + // Standard Error: 19_000 + .saturating_add((6_608_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_if(r: u32, ) -> Weight { - (24_259_000 as Weight) - // Standard Error: 20_000 - .saturating_add((14_030_000 as Weight).saturating_mul(r as Weight)) + (25_146_000 as Weight) + // Standard Error: 23_000 + .saturating_add((14_017_000 as 
Weight).saturating_mul(r as Weight)) } fn instr_br_table(r: u32, ) -> Weight { - (24_313_000 as Weight) - // Standard Error: 37_000 - .saturating_add((15_788_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 21_000 + .saturating_add((15_460_000 as Weight).saturating_mul(r as Weight)) } fn instr_br_table_per_entry(e: u32, ) -> Weight { - (37_991_000 as Weight) - // Standard Error: 0 - .saturating_add((138_000 as Weight).saturating_mul(e as Weight)) + (37_079_000 as Weight) + // Standard Error: 1_000 + .saturating_add((160_000 as Weight).saturating_mul(e as Weight)) } fn instr_call(r: u32, ) -> Weight { - (24_739_000 as Weight) - // Standard Error: 31_000 - .saturating_add((97_567_000 as Weight).saturating_mul(r as Weight)) + (25_599_000 as Weight) + // Standard Error: 201_000 + .saturating_add((99_705_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect(r: u32, ) -> Weight { - (32_395_000 as Weight) - // Standard Error: 432_000 - .saturating_add((198_972_000 as Weight).saturating_mul(r as Weight)) + (33_236_000 as Weight) + // Standard Error: 368_000 + .saturating_add((199_753_000 as Weight).saturating_mul(r as Weight)) } fn instr_call_indirect_per_param(p: u32, ) -> Weight { - (238_857_000 as Weight) + (247_488_000 as Weight) // Standard Error: 6_000 - .saturating_add((3_491_000 as Weight).saturating_mul(p as Weight)) + .saturating_add((3_374_000 as Weight).saturating_mul(p as Weight)) } fn instr_local_get(r: u32, ) -> Weight { - (42_196_000 as Weight) - // Standard Error: 22_000 - .saturating_add((3_161_000 as Weight).saturating_mul(r as Weight)) + (44_133_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_235_000 as Weight).saturating_mul(r as Weight)) } fn instr_local_set(r: u32, ) -> Weight { - (42_133_000 as Weight) - // Standard Error: 29_000 - .saturating_add((3_459_000 as Weight).saturating_mul(r as Weight)) + (44_107_000 as Weight) + // Standard Error: 20_000 + .saturating_add((3_486_000 
as Weight).saturating_mul(r as Weight)) } fn instr_local_tee(r: u32, ) -> Weight { - (42_164_000 as Weight) - // Standard Error: 25_000 - .saturating_add((4_653_000 as Weight).saturating_mul(r as Weight)) + (44_116_000 as Weight) + // Standard Error: 23_000 + .saturating_add((4_757_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_get(r: u32, ) -> Weight { - (27_802_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_780_000 as Weight).saturating_mul(r as Weight)) + (28_712_000 as Weight) + // Standard Error: 29_000 + .saturating_add((7_659_000 as Weight).saturating_mul(r as Weight)) } fn instr_global_set(r: u32, ) -> Weight { - (27_826_000 as Weight) - // Standard Error: 21_000 - .saturating_add((11_978_000 as Weight).saturating_mul(r as Weight)) + (28_624_000 as Weight) + // Standard Error: 25_000 + .saturating_add((11_841_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_current(r: u32, ) -> Weight { - (26_753_000 as Weight) - // Standard Error: 20_000 - .saturating_add((3_494_000 as Weight).saturating_mul(r as Weight)) + (27_445_000 as Weight) + // Standard Error: 18_000 + .saturating_add((3_487_000 as Weight).saturating_mul(r as Weight)) } fn instr_memory_grow(r: u32, ) -> Weight { - (25_078_000 as Weight) - // Standard Error: 4_213_000 - .saturating_add((2_324_209_000 as Weight).saturating_mul(r as Weight)) + (26_016_000 as Weight) + // Standard Error: 4_230_000 + .saturating_add((2_300_044_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64clz(r: u32, ) -> Weight { - (24_301_000 as Weight) - // Standard Error: 28_000 - .saturating_add((5_201_000 as Weight).saturating_mul(r as Weight)) + (25_227_000 as Weight) + // Standard Error: 29_000 + .saturating_add((5_341_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ctz(r: u32, ) -> Weight { - (24_237_000 as Weight) - // Standard Error: 14_000 - .saturating_add((5_251_000 as Weight).saturating_mul(r as Weight)) + (25_163_000 as Weight) + // Standard Error: 26_000 
+ .saturating_add((5_355_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64popcnt(r: u32, ) -> Weight { - (24_290_000 as Weight) - // Standard Error: 20_000 - .saturating_add((5_780_000 as Weight).saturating_mul(r as Weight)) + (25_204_000 as Weight) + // Standard Error: 29_000 + .saturating_add((5_930_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eqz(r: u32, ) -> Weight { - (24_278_000 as Weight) - // Standard Error: 17_000 - .saturating_add((5_145_000 as Weight).saturating_mul(r as Weight)) + (25_177_000 as Weight) + // Standard Error: 21_000 + .saturating_add((5_457_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendsi32(r: u32, ) -> Weight { - (24_249_000 as Weight) - // Standard Error: 14_000 - .saturating_add((5_248_000 as Weight).saturating_mul(r as Weight)) + (25_206_000 as Weight) + // Standard Error: 19_000 + .saturating_add((5_229_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64extendui32(r: u32, ) -> Weight { - (24_266_000 as Weight) - // Standard Error: 13_000 - .saturating_add((5_236_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 17_000 + .saturating_add((5_301_000 as Weight).saturating_mul(r as Weight)) } fn instr_i32wrapi64(r: u32, ) -> Weight { - (24_236_000 as Weight) - // Standard Error: 12_000 - .saturating_add((5_304_000 as Weight).saturating_mul(r as Weight)) + (25_184_000 as Weight) + // Standard Error: 28_000 + .saturating_add((5_356_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64eq(r: u32, ) -> Weight { - (24_262_000 as Weight) - // Standard Error: 22_000 - .saturating_add((7_220_000 as Weight).saturating_mul(r as Weight)) + (25_195_000 as Weight) + // Standard Error: 48_000 + .saturating_add((7_406_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ne(r: u32, ) -> Weight { - (24_287_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_072_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 
19_000 + .saturating_add((7_303_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64lts(r: u32, ) -> Weight { - (24_211_000 as Weight) - // Standard Error: 12_000 - .saturating_add((7_196_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 34_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ltu(r: u32, ) -> Weight { - (24_175_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_392_000 as Weight).saturating_mul(r as Weight)) + (25_152_000 as Weight) + // Standard Error: 46_000 + .saturating_add((7_464_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gts(r: u32, ) -> Weight { - (24_209_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_131_000 as Weight).saturating_mul(r as Weight)) + (25_140_000 as Weight) + // Standard Error: 27_000 + .saturating_add((7_308_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64gtu(r: u32, ) -> Weight { - (24_261_000 as Weight) - // Standard Error: 19_000 - .saturating_add((7_203_000 as Weight).saturating_mul(r as Weight)) + (25_723_000 as Weight) + // Standard Error: 29_000 + .saturating_add((6_846_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64les(r: u32, ) -> Weight { - (24_258_000 as Weight) - // Standard Error: 25_000 - .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) + (25_201_000 as Weight) + // Standard Error: 20_000 + .saturating_add((7_226_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64leu(r: u32, ) -> Weight { - (24_236_000 as Weight) - // Standard Error: 11_000 - .saturating_add((7_076_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_143_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64ges(r: u32, ) -> Weight { - (24_262_000 as Weight) - // Standard Error: 20_000 - .saturating_add((7_261_000 as Weight).saturating_mul(r as Weight)) + (25_146_000 as Weight) + // Standard Error: 37_000 + 
.saturating_add((7_451_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64geu(r: u32, ) -> Weight { - (24_242_000 as Weight) - // Standard Error: 23_000 - .saturating_add((7_249_000 as Weight).saturating_mul(r as Weight)) + (25_193_000 as Weight) + // Standard Error: 30_000 + .saturating_add((7_391_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64add(r: u32, ) -> Weight { - (24_248_000 as Weight) - // Standard Error: 28_000 - .saturating_add((7_149_000 as Weight).saturating_mul(r as Weight)) + (25_192_000 as Weight) + // Standard Error: 30_000 + .saturating_add((7_214_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64sub(r: u32, ) -> Weight { - (24_243_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_128_000 as Weight).saturating_mul(r as Weight)) + (25_221_000 as Weight) + // Standard Error: 34_000 + .saturating_add((7_168_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64mul(r: u32, ) -> Weight { - (24_217_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_237_000 as Weight).saturating_mul(r as Weight)) + (25_221_000 as Weight) + // Standard Error: 28_000 + .saturating_add((7_200_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divs(r: u32, ) -> Weight { - (24_191_000 as Weight) - // Standard Error: 28_000 - .saturating_add((12_970_000 as Weight).saturating_mul(r as Weight)) + (25_229_000 as Weight) + // Standard Error: 32_000 + .saturating_add((13_066_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64divu(r: u32, ) -> Weight { - (24_213_000 as Weight) - // Standard Error: 19_000 - .saturating_add((12_106_000 as Weight).saturating_mul(r as Weight)) + (25_210_000 as Weight) + // Standard Error: 28_000 + .saturating_add((12_314_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rems(r: u32, ) -> Weight { - (24_238_000 as Weight) - // Standard Error: 15_000 - .saturating_add((12_944_000 as Weight).saturating_mul(r as Weight)) + (25_186_000 as Weight) + // Standard Error: 24_000 + 
.saturating_add((13_055_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64remu(r: u32, ) -> Weight { - (24_317_000 as Weight) - // Standard Error: 16_000 - .saturating_add((12_129_000 as Weight).saturating_mul(r as Weight)) + (25_162_000 as Weight) + // Standard Error: 25_000 + .saturating_add((12_327_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64and(r: u32, ) -> Weight { - (24_282_000 as Weight) - // Standard Error: 14_000 - .saturating_add((7_123_000 as Weight).saturating_mul(r as Weight)) + (25_191_000 as Weight) + // Standard Error: 24_000 + .saturating_add((7_153_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64or(r: u32, ) -> Weight { - (24_243_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_148_000 as Weight).saturating_mul(r as Weight)) + (25_184_000 as Weight) + // Standard Error: 23_000 + .saturating_add((7_120_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64xor(r: u32, ) -> Weight { - (24_239_000 as Weight) - // Standard Error: 18_000 - .saturating_add((7_157_000 as Weight).saturating_mul(r as Weight)) + (25_129_000 as Weight) + // Standard Error: 31_000 + .saturating_add((7_247_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shl(r: u32, ) -> Weight { - (24_279_000 as Weight) + (25_156_000 as Weight) // Standard Error: 16_000 - .saturating_add((7_253_000 as Weight).saturating_mul(r as Weight)) + .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shrs(r: u32, ) -> Weight { - (24_285_000 as Weight) - // Standard Error: 29_000 - .saturating_add((7_333_000 as Weight).saturating_mul(r as Weight)) + (25_159_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_415_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64shru(r: u32, ) -> Weight { - (24_298_000 as Weight) - // Standard Error: 17_000 - .saturating_add((7_228_000 as Weight).saturating_mul(r as Weight)) + (25_181_000 as Weight) + // Standard Error: 25_000 + .saturating_add((7_265_000 as 
Weight).saturating_mul(r as Weight)) } fn instr_i64rotl(r: u32, ) -> Weight { - (24_226_000 as Weight) - // Standard Error: 16_000 - .saturating_add((7_269_000 as Weight).saturating_mul(r as Weight)) + (25_165_000 as Weight) + // Standard Error: 17_000 + .saturating_add((7_443_000 as Weight).saturating_mul(r as Weight)) } fn instr_i64rotr(r: u32, ) -> Weight { - (24_235_000 as Weight) - // Standard Error: 27_000 - .saturating_add((7_299_000 as Weight).saturating_mul(r as Weight)) + (25_103_000 as Weight) + // Standard Error: 44_000 + .saturating_add((7_463_000 as Weight).saturating_mul(r as Weight)) } } diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 2e675dd251..f9b0d035b0 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -18,7 +18,7 @@ codec = { package = "parity-scale-codec", version = "2.0.0", default-features = sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -48,3 +48,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "sp-runtime/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index c66ce20dab..57447944d2 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -19,7 +19,7 @@ use super::*; -use frame_benchmarking::{benchmarks, account, 
whitelist_account}; +use frame_benchmarking::{benchmarks, account, whitelist_account, impl_benchmark_test_suite}; use frame_support::{ IterableStorageMap, traits::{Currency, Get, EnsureOrigin, OnInitialize, UnfilteredDispatchable, schedule::DispatchTime}, @@ -781,44 +781,9 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose::()); - assert_ok!(test_benchmark_second::()); - assert_ok!(test_benchmark_vote_new::()); - assert_ok!(test_benchmark_vote_existing::()); - assert_ok!(test_benchmark_emergency_cancel::()); - assert_ok!(test_benchmark_external_propose::()); - assert_ok!(test_benchmark_external_propose_majority::()); - assert_ok!(test_benchmark_external_propose_default::()); - assert_ok!(test_benchmark_fast_track::()); - assert_ok!(test_benchmark_veto_external::()); - assert_ok!(test_benchmark_cancel_referendum::()); - assert_ok!(test_benchmark_cancel_queued::()); - assert_ok!(test_benchmark_on_initialize_external::()); - assert_ok!(test_benchmark_on_initialize_public::()); - assert_ok!(test_benchmark_on_initialize_base::()); - assert_ok!(test_benchmark_delegate::()); - assert_ok!(test_benchmark_undelegate::()); - assert_ok!(test_benchmark_clear_public_proposals::()); - assert_ok!(test_benchmark_note_preimage::()); - assert_ok!(test_benchmark_note_imminent_preimage::()); - assert_ok!(test_benchmark_reap_preimage::()); - assert_ok!(test_benchmark_unlock_remove::()); - assert_ok!(test_benchmark_unlock_set::()); - assert_ok!(test_benchmark_remove_vote::()); - assert_ok!(test_benchmark_remove_other_vote::()); - assert_ok!(test_benchmark_enact_proposal_execute::()); - assert_ok!(test_benchmark_enact_proposal_slash::()); - assert_ok!(test_benchmark_blacklist::()); - assert_ok!(test_benchmark_cancel_proposal::()); - }); - } -} + +impl_benchmark_test_suite!( + Democracy, + 
crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/election-provider-multi-phase/Cargo.toml b/frame/election-provider-multi-phase/Cargo.toml new file mode 100644 index 0000000000..1d63f9df40 --- /dev/null +++ b/frame/election-provider-multi-phase/Cargo.toml @@ -0,0 +1,70 @@ +[package] +name = "pallet-election-provider-multi-phase" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "PALLET two phase election providers" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +static_assertions = "1.1.0" +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +log = { version = "0.4.14", default-features = false } + +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } + +sp-io ={ version = "3.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../primitives/arithmetic" } +sp-election-providers = { version = "3.0.0", default-features = false, path = "../../primitives/election-providers" } + +# Optional imports for benchmarking +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } +rand = { version = "0.7.3", default-features = false, optional = true, features = ["alloc", "small_rng"] } + 
+[dev-dependencies] +paste = "1.0.3" +parking_lot = "0.11.0" +rand = { version = "0.7.3" } +hex-literal = "0.3.1" +substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sp-election-providers = { version = "3.0.0", features = ["runtime-benchmarks"], path = "../../primitives/election-providers" } +pallet-balances = { version = "3.0.0", path = "../balances" } +frame-benchmarking = { path = "../benchmarking" , version = "3.1.0"} + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "log/std", + + "frame-support/std", + "frame-system/std", + + "sp-io/std", + "sp-std/std", + "sp-runtime/std", + "sp-npos-elections/std", + "sp-arithmetic/std", + "sp-election-providers/std", + "log/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "rand", +] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/election-provider-multi-phase/src/benchmarking.rs b/frame/election-provider-multi-phase/src/benchmarking.rs new file mode 100644 index 0000000000..74db28c6e3 --- /dev/null +++ b/frame/election-provider-multi-phase/src/benchmarking.rs @@ -0,0 +1,282 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//! Two phase election pallet benchmarking. + +use super::*; +use crate::Module as MultiPhase; + +pub use frame_benchmarking::{account, benchmarks, whitelist_account, whitelisted_caller}; +use frame_support::{assert_ok, traits::OnInitialize}; +use frame_system::RawOrigin; +use rand::{prelude::SliceRandom, rngs::SmallRng, SeedableRng}; +use sp_election_providers::Assignment; +use sp_arithmetic::traits::One; +use sp_runtime::InnerOf; +use sp_std::convert::TryInto; + +const SEED: u32 = 0; + +/// Creates a **valid** solution with exactly the given size. +/// +/// The snapshot is also created internally. +fn solution_with_size( + size: SolutionOrSnapshotSize, + active_voters_count: u32, + desired_targets: u32, +) -> RawSolution> { + assert!(size.targets >= desired_targets, "must have enough targets"); + assert!( + size.targets >= (>::LIMIT * 2) as u32, + "must have enough targets for unique votes." + ); + assert!(size.voters >= active_voters_count, "must have enough voters"); + assert!( + (>::LIMIT as u32) < desired_targets, + "must have enough winners to give them votes." + ); + + let ed: VoteWeight = T::Currency::minimum_balance().saturated_into::(); + let stake: VoteWeight = ed.max(One::one()).saturating_mul(100); + + // first generates random targets. + let targets: Vec = + (0..size.targets).map(|i| account("Targets", i, SEED)).collect(); + + let mut rng = SmallRng::seed_from_u64(999u64); + + // decide who are the winners. + let winners = targets + .as_slice() + .choose_multiple(&mut rng, desired_targets as usize) + .cloned() + .collect::>(); + + // first generate active voters who must vote for a subset of winners. + let active_voters = (0..active_voters_count) + .map(|i| { + // chose a random subset of winners. + let winner_votes = winners + .as_slice() + .choose_multiple(&mut rng, >::LIMIT) + .cloned() + .collect::>(); + let voter = account::("Voter", i, SEED); + (voter, stake, winner_votes) + }) + .collect::>(); + + // rest of the voters. 
They can only vote for non-winners. + let non_winners = + targets.iter().filter(|t| !winners.contains(t)).cloned().collect::>(); + let rest_voters = (active_voters_count..size.voters) + .map(|i| { + let votes = (&non_winners) + .choose_multiple(&mut rng, >::LIMIT) + .cloned() + .collect::>(); + let voter = account::("Voter", i, SEED); + (voter, stake, votes) + }) + .collect::>(); + + let mut all_voters = active_voters.clone(); + all_voters.extend(rest_voters); + all_voters.shuffle(&mut rng); + + assert_eq!(active_voters.len() as u32, active_voters_count); + assert_eq!(all_voters.len() as u32, size.voters); + assert_eq!(winners.len() as u32, desired_targets); + + >::put(SolutionOrSnapshotSize { + voters: all_voters.len() as u32, + targets: targets.len() as u32, + }); + >::put(desired_targets); + >::put(RoundSnapshot { voters: all_voters.clone(), targets: targets.clone() }); + + // write the snapshot to staking or whoever is the data provider. + T::DataProvider::put_snapshot(all_voters.clone(), targets.clone()); + + let cache = helpers::generate_voter_cache::(&all_voters); + let stake_of = helpers::stake_of_fn::(&all_voters, &cache); + let voter_index = helpers::voter_index_fn::(&cache); + let target_index = helpers::target_index_fn_linear::(&targets); + let voter_at = helpers::voter_at_fn::(&all_voters); + let target_at = helpers::target_at_fn::(&targets); + + let assignments = active_voters + .iter() + .map(|(voter, _stake, votes)| { + let percent_per_edge: InnerOf> = + (100 / votes.len()).try_into().unwrap_or_else(|_| panic!("failed to convert")); + Assignment { + who: voter.clone(), + distribution: votes + .iter() + .map(|t| (t.clone(), >::from_percent(percent_per_edge))) + .collect::>(), + } + }) + .collect::>(); + + let compact = + >::from_assignment(assignments, &voter_index, &target_index).unwrap(); + let score = compact.clone().score(&winners, stake_of, voter_at, target_at).unwrap(); + let round = >::round(); + RawSolution { compact, score, round } +} + 
+benchmarks! { + on_initialize_nothing { + assert!(>::current_phase().is_off()); + }: { + >::on_initialize(1u32.into()); + } verify { + assert!(>::current_phase().is_off()); + } + + on_initialize_open_signed { + // NOTE: this benchmark currently doesn't have any components because the length of a db + // read/write is not captured. Otherwise, it is quite influenced by how much data + // `T::ElectionDataProvider` is reading and passing on. + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_off()); + }: { + >::on_initialize_open_signed(); + } verify { + assert!(>::snapshot().is_some()); + assert!(>::current_phase().is_signed()); + } + + on_initialize_open_unsigned_with_snapshot { + assert!(>::snapshot().is_none()); + assert!(>::current_phase().is_off()); + }: { + >::on_initialize_open_unsigned(true, true, 1u32.into()); + } verify { + assert!(>::snapshot().is_some()); + assert!(>::current_phase().is_unsigned()); + } + + on_initialize_open_unsigned_without_snapshot { + // need to assume signed phase was open before + >::on_initialize_open_signed(); + assert!(>::snapshot().is_some()); + assert!(>::current_phase().is_signed()); + }: { + >::on_initialize_open_unsigned(false, true, 1u32.into()); + } verify { + assert!(>::snapshot().is_some()); + assert!(>::current_phase().is_unsigned()); + } + + #[extra] + create_snapshot { + assert!(>::snapshot().is_none()); + }: { + >::create_snapshot() + } verify { + assert!(>::snapshot().is_some()); + } + + submit_unsigned { + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; + // number of assignments, i.e. compact.len(). This means the active nominators, thus must be + // a subset of `v` component. + let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. 
Must be a subset of `t` component. + let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + let witness = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(witness, a, d); + + assert!(>::queued_solution().is_none()); + >::put(Phase::Unsigned((true, 1u32.into()))); + + // encode the most significant storage item that needs to be decoded in the dispatch. + let encoded_snapshot = >::snapshot().unwrap().encode(); + let encoded_call = >::submit_unsigned(raw_solution.clone(), witness).encode(); + }: { + assert_ok!(>::submit_unsigned(RawOrigin::None.into(), raw_solution, witness)); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot).unwrap(); + let _decoded_call = as Decode>::decode(&mut &*encoded_call).unwrap(); + } verify { + assert!(>::queued_solution().is_some()); + } + + // This is checking a valid solution. The worse case is indeed a valid solution. + feasibility_check { + // number of votes in snapshot. + let v in (T::BenchmarkingConfig::VOTERS[0]) .. T::BenchmarkingConfig::VOTERS[1]; + // number of targets in snapshot. + let t in (T::BenchmarkingConfig::TARGETS[0]) .. T::BenchmarkingConfig::TARGETS[1]; + // number of assignments, i.e. compact.len(). This means the active nominators, thus must be + // a subset of `v` component. + let a in (T::BenchmarkingConfig::ACTIVE_VOTERS[0]) .. T::BenchmarkingConfig::ACTIVE_VOTERS[1]; + // number of desired targets. Must be a subset of `t` component. + let d in (T::BenchmarkingConfig::DESIRED_TARGETS[0]) .. T::BenchmarkingConfig::DESIRED_TARGETS[1]; + + let size = SolutionOrSnapshotSize { voters: v, targets: t }; + let raw_solution = solution_with_size::(size, a, d); + + assert_eq!(raw_solution.compact.voter_count() as u32, a); + assert_eq!(raw_solution.compact.unique_targets().len() as u32, d); + + // encode the most significant storage item that needs to be decoded in the dispatch. 
+ let encoded_snapshot = >::snapshot().unwrap().encode(); + }: { + assert_ok!(>::feasibility_check(raw_solution, ElectionCompute::Unsigned)); + let _decoded_snap = as Decode>::decode(&mut &*encoded_snapshot).unwrap(); + } +} + +#[cfg(test)] +mod test { + use super::*; + use crate::mock::*; + + #[test] + fn test_benchmarks() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_feasibility_check::()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_submit_unsigned::()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_on_initialize_open_unsigned_with_snapshot::()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_on_initialize_open_unsigned_without_snapshot::()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_on_initialize_nothing::()); + }); + + ExtBuilder::default().build_and_execute(|| { + assert_ok!(test_benchmark_create_snapshot::()); + }); + } +} diff --git a/frame/election-provider-multi-phase/src/helpers.rs b/frame/election-provider-multi-phase/src/helpers.rs new file mode 100644 index 0000000000..7375ce017f --- /dev/null +++ b/frame/election-provider-multi-phase/src/helpers.rs @@ -0,0 +1,159 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Some helper functions/macros for this crate. + +use super::{Config, VoteWeight, CompactVoterIndexOf, CompactTargetIndexOf}; +use sp_std::{collections::btree_map::BTreeMap, convert::TryInto, boxed::Box, prelude::*}; + +#[macro_export] +macro_rules! log { + ($level:tt, $pattern:expr $(, $values:expr)* $(,)?) => { + log::$level!( + target: $crate::LOG_TARGET, + concat!("🗳 ", $pattern) $(, $values)* + ) + }; +} + +/// Generate a btree-map cache of the voters and their indices. +/// +/// This can be used to efficiently build index getter closures. +pub fn generate_voter_cache( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> BTreeMap { + let mut cache: BTreeMap = BTreeMap::new(); + snapshot.iter().enumerate().for_each(|(i, (x, _, _))| { + let _existed = cache.insert(x.clone(), i); + // if a duplicate exists, we only consider the last one. Defensive only, should never + // happen. + debug_assert!(_existed.is_none()); + }); + + cache +} + +/// Create a function the returns the index a voter in the snapshot. +/// +/// The returning index type is the same as the one defined in [`T::CompactSolution::Voter`]. +/// +/// ## Warning +/// +/// The snapshot must be the same is the one used to create `cache`. +pub fn voter_index_fn( + cache: &BTreeMap, +) -> Box Option> + '_> { + Box::new(move |who| { + cache.get(who).and_then(|i| >>::try_into(*i).ok()) + }) +} + +/// Same as [`voter_index_fn`], but the returning index is converted into usize, if possible. +/// +/// ## Warning +/// +/// The snapshot must be the same is the one used to create `cache`. +pub fn voter_index_fn_usize( + cache: &BTreeMap, +) -> Box Option + '_> { + Box::new(move |who| cache.get(who).cloned()) +} + +/// A non-optimized, linear version of [`voter_index_fn`] that does not need a cache and does a +/// linear search. +/// +/// ## Warning +/// +/// Not meant to be used in production. 
+pub fn voter_index_fn_linear( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> Box Option> + '_> { + Box::new(move |who| { + snapshot + .iter() + .position(|(x, _, _)| x == who) + .and_then(|i| >>::try_into(i).ok()) + }) +} + +/// Create a function the returns the index a targets in the snapshot. +/// +/// The returning index type is the same as the one defined in [`T::CompactSolution::Target`]. +pub fn target_index_fn_linear( + snapshot: &Vec, +) -> Box Option> + '_> { + Box::new(move |who| { + snapshot + .iter() + .position(|x| x == who) + .and_then(|i| >>::try_into(i).ok()) + }) +} + +/// Create a function that can map a voter index ([`CompactVoterIndexOf`]) to the actual voter +/// account using a linearly indexible snapshot. +pub fn voter_at_fn( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> Box) -> Option + '_> { + Box::new(move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).map(|(x, _, _)| x).cloned()) + }) +} + +/// Create a function that can map a target index ([`CompactTargetIndexOf`]) to the actual target +/// account using a linearly indexible snapshot. +pub fn target_at_fn( + snapshot: &Vec, +) -> Box) -> Option + '_> { + Box::new(move |i| { + as TryInto>::try_into(i) + .ok() + .and_then(|i| snapshot.get(i).cloned()) + }) +} + +/// Create a function to get the stake of a voter. +/// +/// This is not optimized and uses a linear search. +pub fn stake_of_fn_linear( + snapshot: &Vec<(T::AccountId, VoteWeight, Vec)>, +) -> Box VoteWeight + '_> { + Box::new(move |who| { + snapshot.iter().find(|(x, _, _)| x == who).map(|(_, x, _)| *x).unwrap_or_default() + }) +} + +/// Create a function to get the stake of a voter. +/// +/// ## Warning +/// +/// The cache need must be derived from the same snapshot. Zero is returned if a voter is +/// non-existent. 
+pub fn stake_of_fn<'a, T: Config>( + snapshot: &'a Vec<(T::AccountId, VoteWeight, Vec)>, + cache: &'a BTreeMap, +) -> Box VoteWeight + 'a> { + Box::new(move |who| { + if let Some(index) = cache.get(who) { + snapshot.get(*index).map(|(_, x, _)| x).cloned().unwrap_or_default() + } else { + 0 + } + }) +} diff --git a/frame/election-provider-multi-phase/src/lib.rs b/frame/election-provider-multi-phase/src/lib.rs new file mode 100644 index 0000000000..c4a5e0fa69 --- /dev/null +++ b/frame/election-provider-multi-phase/src/lib.rs @@ -0,0 +1,1457 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Multi phase, offchain election provider pallet. +//! +//! Currently, this election-provider has two distinct phases (see [`Phase`]), **signed** and +//! **unsigned**. +//! +//! ## Phases +//! +//! The timeline of pallet is as follows. At each block, +//! [`sp_election_providers::ElectionDataProvider::next_election_prediction`] is used to estimate +//! the time remaining to the next call to [`sp_election_providers::ElectionProvider::elect`]. Based +//! on this, a phase is chosen. The timeline is as follows. +//! +//! ```ignore +//! elect() +//! + <--T::SignedPhase--> + <--T::UnsignedPhase--> + +//! +-------------------------------------------------------------------+ +//! Phase::Off + Phase::Signed + Phase::Unsigned + +//! ``` +//! +//! 
Note that the unsigned phase starts [`pallet::Config::UnsignedPhase`] blocks before the +//! `next_election_prediction`, but only ends when a call to [`ElectionProvider::elect`] happens. If +//! no `elect` happens, the signed phase is extended. +//! +//! > Given this, it is rather important for the user of this pallet to ensure it always terminates +//! election via `elect` before requesting a new one. +//! +//! Each of the phases can be disabled by essentially setting their length to zero. If both phases +//! have length zero, then the pallet essentially runs only the fallback strategy, denoted by +//! [`Config::FallbackStrategy`]. +//! ### Signed Phase +//! +//! In the signed phase, solutions (of type [`RawSolution`]) are submitted and queued on chain. A +//! deposit is reserved, based on the size of the solution, for the cost of keeping this solution +//! on-chain for a number of blocks, and the potential weight of the solution upon being checked. A +//! maximum of [`pallet::Config::MaxSignedSubmissions`] solutions are stored. The queue is always +//! sorted based on score (worse to best). +//! +//! Upon arrival of a new solution: +//! +//! 1. If the queue is not full, it is stored in the appropriate sorted index. +//! 2. If the queue is full but the submitted solution is better than one of the queued ones, the +//! worse solution is discarded, the bond of the outgoing solution is returned, and the new +//! solution is stored in the correct index. +//! 3. If the queue is full and the solution is not an improvement compared to any of the queued +//! ones, it is instantly rejected and no additional bond is reserved. +//! +//! A signed solution cannot be reversed, taken back, updated, or retracted. In other words, the +//! origin can not bail out in any way, if their solution is queued. +//! +//! Upon the end of the signed phase, the solutions are examined from best to worse (i.e. `pop()`ed +//! until drained). 
Each solution undergoes an expensive [`Pallet::feasibility_check`], which +//! ensures the score claimed by this score was correct, and it is valid based on the election data +//! (i.e. votes and candidates). At each step, if the current best solution passes the feasibility +//! check, it is considered to be the best one. The sender of the origin is rewarded, and the rest +//! of the queued solutions get their deposit back and are discarded, without being checked. +//! +//! The following example covers all of the cases at the end of the signed phase: +//! +//! ```ignore +//! Queue +//! +-------------------------------+ +//! |Solution(score=20, valid=false)| +--> Slashed +//! +-------------------------------+ +//! |Solution(score=15, valid=true )| +--> Rewarded, Saved +//! +-------------------------------+ +//! |Solution(score=10, valid=true )| +--> Discarded +//! +-------------------------------+ +//! |Solution(score=05, valid=false)| +--> Discarded +//! +-------------------------------+ +//! | None | +//! +-------------------------------+ +//! ``` +//! +//! Note that both of the bottom solutions end up being discarded and get their deposit back, +//! despite one of them being *invalid*. +//! +//! ## Unsigned Phase +//! +//! The unsigned phase will always follow the signed phase, with the specified duration. In this +//! phase, only validator nodes can submit solutions. A validator node who has offchain workers +//! enabled will start to mine a solution in this phase and submits it back to the chain as an +//! unsigned transaction, thus the name _unsigned_ phase. This unsigned transaction can never be +//! valid if propagated, and it acts similar to an inherent. +//! +//! Validators will only submit solutions if the one that they have computed is sufficiently better +//! than the best queued one (see [`pallet::Config::SolutionImprovementThreshold`]) and will limit +//! the weigh of the solution to [`pallet::Config::MinerMaxWeight`]. +//! +//! 
The unsigned phase can be made passive depending on how the previous signed phase went, by +//! setting the first inner value of [`Phase`] to `false`. For now, the signed phase is always +//! active. +//! +//! ### Fallback +//! +//! If we reach the end of both phases (i.e. call to [`ElectionProvider::elect`] happens) and no +//! good solution is queued, then the fallback strategy [`pallet::Config::Fallback`] is used to +//! determine what needs to be done. The on-chain election is slow, and contains no balancing or +//! reduction post-processing. See [`onchain::OnChainSequentialPhragmen`]. The +//! [`FallbackStrategy::Nothing`] should probably only be used for testing, and returns an error. +//! +//! ## Feasible Solution (correct solution) +//! +//! All submissions must undergo a feasibility check. Signed solutions are checked on by one at the +//! end of the signed phase, and the unsigned solutions are checked on the spot. A feasible solution +//! is as follows: +//! +//! 0. **all** of the used indices must be correct. +//! 1. present *exactly* correct number of winners. +//! 2. any assignment is checked to match with [`RoundSnapshot::voters`]. +//! 3. the claimed score is valid, based on the fixed point arithmetic accuracy. +//! +//! ## Accuracy +//! +//! The accuracy of the election is configured via two trait parameters. namely, +//! [`OnChainAccuracyOf`] dictates the accuracy used to compute the on-chain fallback election and +//! [`CompactAccuracyOf`] is the accuracy that the submitted solutions must adhere to. +//! +//! Note that both accuracies are of great importance. The offchain solution should be as small as +//! possible, reducing solutions size/weight. The on-chain solution can use more space for accuracy, +//! but should still be fast to prevent massively large blocks in case of a fallback. +//! +//! ## Error types +//! +//! This pallet provides a verbose error system to ease future debugging and debugging. The +//! 
overall hierarchy of errors is as follows: +//! +//! 1. [`pallet::Error`]: These are the errors that can be returned in the dispatchables of the +//! pallet, either signed or unsigned. Since decomposition with nested enums is not possible +//! here, they are prefixed with the logical sub-system to which they belong. +//! 2. [`ElectionError`]: These are the errors that can be generated while the pallet is doing +//! something in automatic scenarios, such as `offchain_worker` or `on_initialize`. These errors +//! are helpful for logging and are thus nested as: +//! - [`ElectionError::Miner`]: wraps a [`unsigned::MinerError`]. +//! - [`ElectionError::Feasibility`]: wraps a [`FeasibilityError`]. +//! - [`ElectionError::OnChainFallback`]: wraps a [`sp_election_providers::onchain::Error`]. +//! +//! Note that there could be an overlap between these sub-errors. For example, A +//! `SnapshotUnavailable` can happen in both miner and feasibility check phase. +//! +//! ## Future Plans +//! +//! **Challenge Phase**. We plan adding a third phase to the pallet, called the challenge phase. +//! This is phase in which no further solutions are processed, and the current best solution might +//! be challenged by anyone (signed or unsigned). The main plan here is to enforce the solution to +//! be PJR. Checking PJR on-chain is quite expensive, yet proving that a solution is **not** PJR is +//! rather cheap. If a queued solution is challenged: +//! +//! 1. We must surely slash whoever submitted that solution (might be a challenge for unsigned +//! solutions). +//! 2. It is probably fine to fallback to the on-chain election, as we expect this to happen rarely. +//! +//! **Bailing out**. The functionality of bailing out of a queued solution is nice. A miner can +//! submit a solution as soon as they _think_ it is high probability feasible, and do the checks +//! afterwards, and remove their solution (for a small cost of probably just transaction fees, or a +//! portion of the bond). 
+//! +//! **Conditionally open unsigned phase**: Currently, the unsigned phase is always opened. This is +//! useful because an honest validation will run our OCW code, which should be good enough to trump +//! a mediocre or malicious signed submission (assuming in the absence of honest signed bots). If an +//! when the signed submissions are checked against an absolute measure (e.g. PJR), then we can only +//! open the unsigned phase in extreme conditions (i.e. "not good signed solution received") to +//! spare some work in the validators +//! +//! **Allow smaller solutions and build up**: For now we only allow solutions that are exactly +//! [`DesiredTargets`], no more, no less. Over time, we can change this to a [min, max] where any +//! solution within this range is acceptable, where bigger solutions are prioritized. +//! +//! **Recursive Fallback**: Currently, the fallback is a separate enum. A different and fancier way +//! of doing this would be to have the fallback be another +//! [`sp_election_providers::ElectionProvider`]. In this case, this pallet can even have the +//! on-chain election provider as fallback, or special _noop_ fallback that simply returns an error, +//! thus replicating [`FallbackStrategy::Nothing`]. In this case, we won't need the additional +//! config OnChainAccuracy either. +//! +//! **Score based on (byte) size**: We should always prioritize small solutions over bigger ones, if +//! there is a tie. Even more harsh should be to enforce the bound of the `reduce` algorithm. +//! +//! **Offchain resubmit**: Essentially port https://github.com/paritytech/substrate/pull/7976 to +//! this pallet as well. The `OFFCHAIN_REPEAT` also needs to become an adjustable parameter of the +//! pallet. +//! +//! **Make the number of nominators configurable from the runtime**. Remove `sp_npos_elections` +//! dependency from staking and the compact solution type. It should be generated at runtime, there +//! 
it should be encoded how many votes each nominators have. Essentially translate +//! https://github.com/paritytech/substrate/pull/7929 to this pallet. + +#![cfg_attr(not(feature = "std"), no_std)] + +use codec::{Decode, Encode}; +use frame_support::{ + dispatch::DispatchResultWithPostInfo, + ensure, + traits::{Currency, Get, ReservableCurrency}, + weights::Weight, +}; +use frame_system::{ensure_none, offchain::SendTransactionTypes}; +use sp_election_providers::{ElectionDataProvider, ElectionProvider, onchain}; +use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, is_score_better, CompactSolution, ElectionScore, + EvaluateSupport, PerThing128, Supports, VoteWeight, +}; +use sp_runtime::{ + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + DispatchError, PerThing, Perbill, RuntimeDebug, SaturatedConversion, +}; +use sp_std::prelude::*; +use sp_arithmetic::{ + UpperOf, + traits::{Zero, CheckedAdd}, +}; + +#[cfg(any(feature = "runtime-benchmarks", test))] +mod benchmarking; +#[cfg(test)] +mod mock; +#[macro_use] +pub mod helpers; + +const LOG_TARGET: &'static str = "runtime::election-provider"; + +pub mod unsigned; +pub mod weights; + +/// The weight declaration of the pallet. +pub use weights::WeightInfo; + +/// The compact solution type used by this crate. +pub type CompactOf = ::CompactSolution; + +/// The voter index. Derived from [`CompactOf`]. +pub type CompactVoterIndexOf = as CompactSolution>::Voter; +/// The target index. Derived from [`CompactOf`]. +pub type CompactTargetIndexOf = as CompactSolution>::Target; +/// The accuracy of the election, when submitted from offchain. Derived from [`CompactOf`]. +pub type CompactAccuracyOf = as CompactSolution>::Accuracy; +/// The accuracy of the election, when computed on-chain. Equal to [`Config::OnChainAccuracy`]. 
+pub type OnChainAccuracyOf = ::OnChainAccuracy; + +/// Wrapper type that implements the configurations needed for the on-chain backup. +struct OnChainConfig(sp_std::marker::PhantomData); +impl onchain::Config for OnChainConfig { + type AccountId = T::AccountId; + type BlockNumber = T::BlockNumber; + type Accuracy = T::OnChainAccuracy; + type DataProvider = T::DataProvider; +} + +/// Configuration for the benchmarks of the pallet. +pub trait BenchmarkingConfig { + /// Range of voters. + const VOTERS: [u32; 2]; + /// Range of targets. + const TARGETS: [u32; 2]; + /// Range of active voters. + const ACTIVE_VOTERS: [u32; 2]; + /// Range of desired targets. + const DESIRED_TARGETS: [u32; 2]; +} + +impl BenchmarkingConfig for () { + const VOTERS: [u32; 2] = [4000, 6000]; + const TARGETS: [u32; 2] = [1000, 1600]; + const ACTIVE_VOTERS: [u32; 2] = [1000, 3000]; + const DESIRED_TARGETS: [u32; 2] = [400, 800]; +} + +/// Current phase of the pallet. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +pub enum Phase { + /// Nothing, the election is not happening. + Off, + /// Signed phase is open. + Signed, + /// Unsigned phase. First element is whether it is open or not, second the starting block + /// number. + Unsigned((bool, Bn)), +} + +impl Default for Phase { + fn default() -> Self { + Phase::Off + } +} + +impl Phase { + /// Weather the phase is signed or not. + pub fn is_signed(&self) -> bool { + matches!(self, Phase::Signed) + } + + /// Weather the phase is unsigned or not. + pub fn is_unsigned(&self) -> bool { + matches!(self, Phase::Unsigned(_)) + } + + /// Weather the phase is unsigned and open or not, with specific start. + pub fn is_unsigned_open_at(&self, at: Bn) -> bool { + matches!(self, Phase::Unsigned((true, real)) if *real == at) + } + + /// Weather the phase is unsigned and open or not. + pub fn is_unsigned_open(&self) -> bool { + matches!(self, Phase::Unsigned((true, _))) + } + + /// Weather the phase is off or not. 
+ pub fn is_off(&self) -> bool { + matches!(self, Phase::Off) + } +} + +/// A configuration for the pallet to indicate what should happen in the case of a fallback i.e. +/// reaching a call to `elect` with no good solution. +#[cfg_attr(test, derive(Clone))] +pub enum FallbackStrategy { + /// Run a on-chain sequential phragmen. + /// + /// This might burn the chain for a few minutes due to a stall, but is generally a safe + /// approach to maintain a sensible validator set. + OnChain, + /// Nothing. Return an error. + Nothing, +} + +/// The type of `Computation` that provided this election data. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +pub enum ElectionCompute { + /// Election was computed on-chain. + OnChain, + /// Election was computed with a signed submission. + Signed, + /// Election was computed with an unsigned submission. + Unsigned, +} + +impl Default for ElectionCompute { + fn default() -> Self { + ElectionCompute::OnChain + } +} + +/// A raw, unchecked solution. +/// +/// This is what will get submitted to the chain. +/// +/// Such a solution should never become effective in anyway before being checked by the +/// [`Pallet::feasibility_check`] +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +pub struct RawSolution { + /// Compact election edges. + compact: C, + /// The _claimed_ score of the solution. + score: ElectionScore, + /// The round at which this solution should be submitted. + round: u32, +} + +impl Default for RawSolution { + fn default() -> Self { + // Round 0 is always invalid, only set this to 1. + Self { round: 1, compact: Default::default(), score: Default::default() } + } +} + +/// A checked solution, ready to be enacted. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default)] +pub struct ReadySolution { + /// The final supports of the solution. + /// + /// This is target-major vector, storing each winners, total backing, and each individual + /// backer. 
+ supports: Supports, + /// The score of the solution. + /// + /// This is needed to potentially challenge the solution. + score: ElectionScore, + /// How this election was computed. + compute: ElectionCompute, +} + +/// A snapshot of all the data that is needed for en entire round. They are provided by +/// [`ElectionDataProvider`] and are kept around until the round is finished. +/// +/// These are stored together because they are often accessed together. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, Default)] +pub struct RoundSnapshot { + /// All of the voters. + pub voters: Vec<(A, VoteWeight, Vec)>, + /// All of the targets. + pub targets: Vec, +} + +/// Encodes the length of a solution or a snapshot. +/// +/// This is stored automatically on-chain, and it contains the **size of the entire snapshot**. +/// This is also used in dispatchables as weight witness data and should **only contain the size of +/// the presented solution**, not the entire snapshot. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug, Default)] +pub struct SolutionOrSnapshotSize { + /// The length of voters. + #[codec(compact)] + voters: u32, + /// The length of targets. + #[codec(compact)] + targets: u32, +} + +/// Internal errors of the pallet. +/// +/// Note that this is different from [`pallet::Error`]. +#[derive(Debug, Eq, PartialEq)] +pub enum ElectionError { + /// An error happened in the feasibility check sub-system. + Feasibility(FeasibilityError), + /// An error in the miner (offchain) sub-system. + Miner(unsigned::MinerError), + /// An error in the on-chain fallback. + OnChainFallback(onchain::Error), + /// No fallback is configured. This is a special case. 
+ NoFallbackConfigured, +} + +impl From for ElectionError { + fn from(e: onchain::Error) -> Self { + ElectionError::OnChainFallback(e) + } +} + +impl From for ElectionError { + fn from(e: FeasibilityError) -> Self { + ElectionError::Feasibility(e) + } +} + +impl From for ElectionError { + fn from(e: unsigned::MinerError) -> Self { + ElectionError::Miner(e) + } +} + +/// Errors that can happen in the feasibility check. +#[derive(Debug, Eq, PartialEq)] +pub enum FeasibilityError { + /// Wrong number of winners presented. + WrongWinnerCount, + /// The snapshot is not available. + /// + /// Kinda defensive: The pallet should technically never attempt to do a feasibility check when + /// no snapshot is present. + SnapshotUnavailable, + /// Internal error from the election crate. + NposElection(sp_npos_elections::Error), + /// A vote is invalid. + InvalidVote, + /// A voter is invalid. + InvalidVoter, + /// A winner is invalid. + InvalidWinner, + /// The given score was invalid. + InvalidScore, + /// The provided round is incorrect. + InvalidRound, +} + +impl From for FeasibilityError { + fn from(e: sp_npos_elections::Error) -> Self { + FeasibilityError::NposElection(e) + } +} + +pub use pallet::*; +#[frame_support::pallet] +pub mod pallet { + use super::*; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + + #[pallet::config] + pub trait Config: frame_system::Config + SendTransactionTypes> { + type Event: From> + IsType<::Event>; + + /// Currency type. + type Currency: ReservableCurrency + Currency; + + /// Duration of the unsigned phase. + #[pallet::constant] + type UnsignedPhase: Get; + /// Duration of the signed phase. + #[pallet::constant] + type SignedPhase: Get; + + /// The minimum amount of improvement to the solution score that defines a solution as + /// "better" (in any phase). 
+ #[pallet::constant] + type SolutionImprovementThreshold: Get; + + /// The priority of the unsigned transaction submitted in the unsigned-phase + type MinerTxPriority: Get; + /// Maximum number of iteration of balancing that will be executed in the embedded miner of + /// the pallet. + type MinerMaxIterations: Get; + /// Maximum weight that the miner should consume. + /// + /// The miner will ensure that the total weight of the unsigned solution will not exceed + /// this values, based on [`WeightInfo::submit_unsigned`]. + type MinerMaxWeight: Get; + + /// Something that will provide the election data. + type DataProvider: ElectionDataProvider; + + /// The compact solution type + type CompactSolution: codec::Codec + + Default + + PartialEq + + Eq + + Clone + + sp_std::fmt::Debug + + CompactSolution; + + /// Accuracy used for fallback on-chain election. + type OnChainAccuracy: PerThing128; + + /// Configuration for the fallback + type Fallback: Get; + + /// The configuration of benchmarking. + type BenchmarkingConfig: BenchmarkingConfig; + + /// The weight of the pallet. 
+ type WeightInfo: WeightInfo; + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(now: T::BlockNumber) -> Weight { + let next_election = T::DataProvider::next_election_prediction(now).max(now); + + let signed_deadline = T::SignedPhase::get() + T::UnsignedPhase::get(); + let unsigned_deadline = T::UnsignedPhase::get(); + + let remaining = next_election - now; + let current_phase = Self::current_phase(); + + match current_phase { + Phase::Off if remaining <= signed_deadline && remaining > unsigned_deadline => { + Self::on_initialize_open_signed(); + log!(info, "Starting signed phase at #{:?} , round {}.", now, Self::round()); + T::WeightInfo::on_initialize_open_signed() + } + Phase::Signed | Phase::Off + if remaining <= unsigned_deadline && remaining > 0u32.into() => + { + let (need_snapshot, enabled, additional) = if current_phase == Phase::Signed { + // followed by a signed phase: close the signed phase, no need for snapshot. + // TWO_PHASE_NOTE: later on once we have signed phase, this should return + // something else. + (false, true, Weight::zero()) + } else { + // no signed phase: create a new snapshot, definitely `enable` the unsigned + // phase. + (true, true, Weight::zero()) + }; + + Self::on_initialize_open_unsigned(need_snapshot, enabled, now); + log!(info, "Starting unsigned phase({}) at #{:?}.", enabled, now); + + let base_weight = if need_snapshot { + T::WeightInfo::on_initialize_open_unsigned_with_snapshot() + } else { + T::WeightInfo::on_initialize_open_unsigned_without_snapshot() + }; + base_weight.saturating_add(additional) + } + _ => T::WeightInfo::on_initialize_nothing(), + } + } + + fn offchain_worker(n: T::BlockNumber) { + // We only run the OCW in the first block of the unsigned phase. 
+ if Self::current_phase().is_unsigned_open_at(n) { + match Self::try_acquire_offchain_lock(n) { + Ok(_) => { + let outcome = Self::mine_check_and_submit().map_err(ElectionError::from); + log!(info, "miner exeuction done: {:?}", outcome); + } + Err(why) => log!(warn, "denied offchain worker: {:?}", why), + } + } + } + + fn integrity_test() { + use sp_std::mem::size_of; + // The index type of both voters and targets need to be smaller than that of usize (very + // unlikely to be the case, but anyhow). + assert!(size_of::>() <= size_of::()); + assert!(size_of::>() <= size_of::()); + + // ---------------------------- + // based on the requirements of [`sp_npos_elections::Assignment::try_normalize`]. + let max_vote: usize = as CompactSolution>::LIMIT; + + // 1. Maximum sum of [ChainAccuracy; 16] must fit into `UpperOf`.. + let maximum_chain_accuracy: Vec>> = (0..max_vote) + .map(|_| { + >>::from( + >::one().deconstruct(), + ) + }) + .collect(); + let _: UpperOf> = maximum_chain_accuracy + .iter() + .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); + + // 2. Maximum sum of [CompactAccuracy; 16] must fit into `UpperOf`. + let maximum_chain_accuracy: Vec>> = (0..max_vote) + .map(|_| { + >>::from( + >::one().deconstruct(), + ) + }) + .collect(); + let _: UpperOf> = maximum_chain_accuracy + .iter() + .fold(Zero::zero(), |acc, x| acc.checked_add(x).unwrap()); + } + } + + #[pallet::call] + impl Pallet { + /// Submit a solution for the unsigned phase. + /// + /// The dispatch origin fo this call must be __none__. + /// + /// This submission is checked on the fly. Moreover, this unsigned solution is only + /// validated when submitted to the pool from the **local** node. Effectively, this means + /// that only active validators can submit this transaction when authoring a block (similar + /// to an inherent). 
+ /// + /// To prevent any incorrect solution (and thus wasted time/weight), this transaction will + /// panic if the solution submitted by the validator is invalid in any way, effectively + /// putting their authoring reward at risk. + /// + /// No deposit or reward is associated with this submission. + #[pallet::weight(T::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32 + ))] + pub fn submit_unsigned( + origin: OriginFor, + solution: RawSolution>, + witness: SolutionOrSnapshotSize, + ) -> DispatchResultWithPostInfo { + ensure_none(origin)?; + let error_message = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward."; + + // Check score being an improvement, phase, and desired targets. + Self::unsigned_pre_dispatch_checks(&solution).expect(error_message); + + // ensure witness was correct. + let SolutionOrSnapshotSize { voters, targets } = + Self::snapshot_metadata().expect(error_message); + + // NOTE: we are asserting, not `ensure`ing -- we want to panic here. + assert!(voters as u32 == witness.voters, error_message); + assert!(targets as u32 == witness.targets, error_message); + + let ready = + Self::feasibility_check(solution, ElectionCompute::Unsigned).expect(error_message); + + // store the newly received solution. + log!(info, "queued unsigned solution with score {:?}", ready.score); + >::put(ready); + Self::deposit_event(Event::SolutionStored(ElectionCompute::Unsigned)); + + Ok(None.into()) + } + } + + #[pallet::event] + #[pallet::metadata(::AccountId = "AccountId")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A solution was stored with the given compute. + /// + /// If the solution is signed, this means that it hasn't yet been processed. If the + /// solution is unsigned, this means that it has also been processed. 
+ SolutionStored(ElectionCompute), + /// The election has been finalized, with `Some` of the given computation, or else if the + /// election failed, `None`. + ElectionFinalized(Option), + /// An account has been rewarded for their signed submission being finalized. + Rewarded(::AccountId), + /// An account has been slashed for submitting an invalid signed submission. + Slashed(::AccountId), + /// The signed phase of the given round has started. + SignedPhaseStarted(u32), + /// The unsigned phase of the given round has started. + UnsignedPhaseStarted(u32), + } + + /// Error of the pallet that can be returned in response to dispatches. + #[pallet::error] + pub enum Error { + /// Submission was too early. + PreDispatchEarlySubmission, + /// Wrong number of winners presented. + PreDispatchWrongWinnerCount, + /// Submission was too weak, score-wise. + PreDispatchWeakSubmission, + } + + #[pallet::origin] + pub struct Origin(PhantomData); + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_unsigned(solution, _) = call { + // discard solution not coming from the local OCW. + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + _ => { + return InvalidTransaction::Call.into(); + } + } + + let _ = Self::unsigned_pre_dispatch_checks(solution) + .map_err(|err| { + log!(error, "unsigned transaction validation failed due to {:?}", err); + err + }) + .map_err(dispatch_error_to_invalid)?; + + ValidTransaction::with_tag_prefix("OffchainElection") + // The higher the score[0], the better a solution is. + .priority( + T::MinerTxPriority::get().saturating_add( + solution.score[0].saturated_into() + ), + ) + // used to deduplicate unsigned solutions: each validator should produce one + // solution per round at most, and solutions are not propagate. 
+ .and_provides(solution.round) + // transaction should stay in the pool for the duration of the unsigned phase. + .longevity(T::UnsignedPhase::get().saturated_into::()) + // We don't propagate this. This can never be validated at a remote node. + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + if let Call::submit_unsigned(solution, _) = call { + Self::unsigned_pre_dispatch_checks(solution) + .map_err(dispatch_error_to_invalid) + .map_err(Into::into) + } else { + Err(InvalidTransaction::Call.into()) + } + } + } + + #[pallet::type_value] + pub fn DefaultForRound() -> u32 { + 1 + } + + /// Internal counter for the number of rounds. + /// + /// This is useful for de-duplication of transactions submitted to the pool, and general + /// diagnostics of the pallet. + /// + /// This is merely incremented once per every time that an upstream `elect` is called. + #[pallet::storage] + #[pallet::getter(fn round)] + pub type Round = StorageValue<_, u32, ValueQuery, DefaultForRound>; + + /// Current phase. + #[pallet::storage] + #[pallet::getter(fn current_phase)] + pub type CurrentPhase = StorageValue<_, Phase, ValueQuery>; + + /// Current best solution, signed or unsigned, queued to be returned upon `elect`. + #[pallet::storage] + #[pallet::getter(fn queued_solution)] + pub type QueuedSolution = StorageValue<_, ReadySolution>; + + /// Snapshot data of the round. + /// + /// This is created at the beginning of the signed phase and cleared upon calling `elect`. + #[pallet::storage] + #[pallet::getter(fn snapshot)] + pub type Snapshot = StorageValue<_, RoundSnapshot>; + + /// Desired number of targets to elect for this round. + /// + /// Only exists when [`Snapshot`] is present. 
+ #[pallet::storage] + #[pallet::getter(fn desired_targets)] + pub type DesiredTargets = StorageValue<_, u32>; + + /// The metadata of the [`RoundSnapshot`] + /// + /// Only exists when [`Snapshot`] is present. + #[pallet::storage] + #[pallet::getter(fn snapshot_metadata)] + pub type SnapshotMetadata = StorageValue<_, SolutionOrSnapshotSize>; + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(PhantomData); +} + +impl Pallet { + /// Logic for `::on_initialize` when signed phase is being opened. + /// + /// This is decoupled for easy weight calculation. + pub(crate) fn on_initialize_open_signed() { + >::put(Phase::Signed); + Self::create_snapshot(); + Self::deposit_event(Event::SignedPhaseStarted(Self::round())); + } + + /// Logic for `>::on_initialize` when unsigned phase is being opened. + /// + /// This is decoupled for easy weight calculation. Note that the default weight benchmark of + /// this function will assume an empty signed queue for `finalize_signed_phase`. + pub(crate) fn on_initialize_open_unsigned( + need_snapshot: bool, + enabled: bool, + now: T::BlockNumber, + ) { + if need_snapshot { + // if not being followed by a signed phase, then create the snapshots. + debug_assert!(Self::snapshot().is_none()); + Self::create_snapshot(); + } + + >::put(Phase::Unsigned((enabled, now))); + Self::deposit_event(Event::UnsignedPhaseStarted(Self::round())); + } + + /// Creates the snapshot. Writes new data to: + /// + /// 1. [`SnapshotMetadata`] + /// 2. [`RoundSnapshot`] + /// 3. [`DesiredTargets`] + pub(crate) fn create_snapshot() { + // if any of them don't exist, create all of them. This is a bit conservative. 
+ let targets = T::DataProvider::targets(); + let voters = T::DataProvider::voters(); + let desired_targets = T::DataProvider::desired_targets(); + + >::put(SolutionOrSnapshotSize { + voters: voters.len() as u32, + targets: targets.len() as u32, + }); + >::put(desired_targets); + >::put(RoundSnapshot { voters, targets }); + } + + /// Kill everything created by [`Pallet::create_snapshot`]. + pub(crate) fn kill_snapshot() { + >::kill(); + >::kill(); + >::kill(); + } + + /// Checks the feasibility of a solution. + fn feasibility_check( + solution: RawSolution>, + compute: ElectionCompute, + ) -> Result, FeasibilityError> { + let RawSolution { compact, score, round } = solution; + + // first, check round. + ensure!(Self::round() == round, FeasibilityError::InvalidRound); + + // winners are not directly encoded in the solution. + let winners = compact.unique_targets(); + + let desired_targets = + Self::desired_targets().ok_or(FeasibilityError::SnapshotUnavailable)?; + + // NOTE: this is a bit of duplicate, but we keep it around for veracity. The unsigned path + // already checked this in `unsigned_per_dispatch_checks`. The signed path *could* check it + // upon arrival, thus we would then remove it here. Given overlay it is cheap anyhow + ensure!(winners.len() as u32 == desired_targets, FeasibilityError::WrongWinnerCount); + + // read the entire snapshot. + let RoundSnapshot { voters: snapshot_voters, targets: snapshot_targets } = + Self::snapshot().ok_or(FeasibilityError::SnapshotUnavailable)?; + + // ----- Start building. First, we need some closures. + let cache = helpers::generate_voter_cache::(&snapshot_voters); + let voter_at = helpers::voter_at_fn::(&snapshot_voters); + let target_at = helpers::target_at_fn::(&snapshot_targets); + let voter_index = helpers::voter_index_fn_usize::(&cache); + + // first, make sure that all the winners are sane. 
+ // OPTIMIZATION: we could first build the assignments, and then extract the winners directly + // from that, as that would eliminate a little bit of duplicate work. For now, we keep them + // separate: First extract winners separately from compact, and then assignments. This is + // also better, because we can reject solutions that don't meet `desired_targets` early on. + let winners = winners + .into_iter() + .map(|i| target_at(i).ok_or(FeasibilityError::InvalidWinner)) + .collect::, FeasibilityError>>()?; + + // Then convert compact -> assignment. This will fail if any of the indices are gibberish. + let assignments = compact + .into_assignment(voter_at, target_at) + .map_err::(Into::into)?; + + // Ensure that assignments is correct. + let _ = assignments + .iter() + .map(|ref assignment| { + // check that assignment.who is actually a voter (defensive-only). + // NOTE: while using the index map from `voter_index` is better than a blind linear + // search, this *still* has room for optimization. Note that we had the index when + // we did `compact -> assignment` and we lost it. Ideal is to keep the index around. + + // defensive-only: must exist in the snapshot. + let snapshot_index = + voter_index(&assignment.who).ok_or(FeasibilityError::InvalidVoter)?; + // defensive-only: index comes from the snapshot, must exist. + let (_voter, _stake, targets) = + snapshot_voters.get(snapshot_index).ok_or(FeasibilityError::InvalidVoter)?; + + // check that all of the targets are valid based on the snapshot. + if assignment.distribution.iter().any(|(d, _)| !targets.contains(d)) { + return Err(FeasibilityError::InvalidVote); + } + Ok(()) + }) + .collect::>()?; + + // ----- Start building support. First, we need one more closure. + let stake_of = helpers::stake_of_fn::(&snapshot_voters, &cache); + + // This might fail if the normalization fails. Very unlikely. See `integrity_test`. 
+ let staked_assignments = assignment_ratio_to_staked_normalized(assignments, stake_of) + .map_err::(Into::into)?; + + // This might fail if one of the voter edges is pointing to a non-winner, which is not + // really possible anymore because all the winners come from the same `compact`. + let supports = sp_npos_elections::to_supports(&winners, &staked_assignments) + .map_err::(Into::into)?; + + // Finally, check that the claimed score was indeed correct. + let known_score = (&supports).evaluate(); + ensure!(known_score == score, FeasibilityError::InvalidScore); + + Ok(ReadySolution { supports, compute, score }) + } + + /// Perform the tasks to be done after a new `elect` has been triggered: + /// + /// 1. Increment round. + /// 2. Change phase to [`Phase::Off`] + /// 3. Clear all snapshot data. + fn post_elect() { + // inc round + >::mutate(|r| *r = *r + 1); + + // change phase + >::put(Phase::Off); + + // kill snapshots + Self::kill_snapshot(); + } + + /// On-chain fallback of election. + fn onchain_fallback() -> Result, ElectionError> { + > as ElectionProvider< + T::AccountId, + T::BlockNumber, + >>::elect() + .map_err(Into::into) + } + + fn do_elect() -> Result, ElectionError> { + >::take() + .map_or_else( + || match T::Fallback::get() { + FallbackStrategy::OnChain => Self::onchain_fallback() + .map(|r| (r, ElectionCompute::OnChain)) + .map_err(Into::into), + FallbackStrategy::Nothing => Err(ElectionError::NoFallbackConfigured), + }, + |ReadySolution { supports, compute, .. }| Ok((supports, compute)), + ) + .map(|(supports, compute)| { + Self::deposit_event(Event::ElectionFinalized(Some(compute))); + log!(info, "Finalized election round with compute {:?}.", compute); + supports + }) + .map_err(|err| { + Self::deposit_event(Event::ElectionFinalized(None)); + log!(warn, "Failed to finalize election round. 
reason {:?}", err); + err + }) + } +} + +impl ElectionProvider for Pallet { + type Error = ElectionError; + type DataProvider = T::DataProvider; + + fn elect() -> Result, Self::Error> { + let outcome = Self::do_elect(); + Self::post_elect(); + outcome + } +} + +/// convert a DispatchError to a custom InvalidTransaction with the inner code being the error +/// number. +pub fn dispatch_error_to_invalid(error: DispatchError) -> InvalidTransaction { + let error_number = match error { + DispatchError::Module { error, .. } => error, + _ => 0, + }; + InvalidTransaction::Custom(error_number) +} + +#[cfg(test)] +mod feasibility_check { + //! All of the tests here should be dedicated to only testing the feasibility check and nothing + //! more. The best way to audit and review these tests is to try and come up with a solution + //! that is invalid, but gets through the system as valid. + + use super::{mock::*, *}; + + const COMPUTE: ElectionCompute = ElectionCompute::OnChain; + + #[test] + fn snapshot_is_there() { + ExtBuilder::default().build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + let solution = raw_solution(); + + // for whatever reason it might be: + >::kill(); + + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::SnapshotUnavailable + ); + }) + } + + #[test] + fn round() { + ExtBuilder::default().build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + solution.round += 1; + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidRound + ); + }) + } + + #[test] + fn desired_targets() { + ExtBuilder::default().desired_targets(8).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let solution = raw_solution(); + + assert_eq!(solution.compact.unique_targets().len(), 
4); + assert_eq!(MultiPhase::desired_targets().unwrap(), 8); + + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::WrongWinnerCount, + ); + }) + } + + #[test] + fn winner_indices() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().targets.len(), 4); + // ----------------------------------------------------^^ valid range is [0..3]. + + // swap all votes from 3 to 4. This will ensure that the number of unique winners + // will still be 4, but one of the indices will be gibberish. Requirement is to make + // sure 3 a winner, which we don't do here. + solution + .compact + .votes1 + .iter_mut() + .filter(|(_, t)| *t == 3u16) + .for_each(|(_, t)| *t += 1); + solution.compact.votes2.iter_mut().for_each(|(_, (t0, _), t1)| { + if *t0 == 3u16 { + *t0 += 1 + }; + if *t1 == 3u16 { + *t1 += 1 + }; + }); + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidWinner + ); + }) + } + + #[test] + fn voter_indices() { + // should be caught in `compact.into_assignment`. + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + // ----------------------------------------------------^^ valid range is [0..7]. + + // check that there is a index 7 in votes1, and flip to 8. 
+ assert!( + solution + .compact + .votes1 + .iter_mut() + .filter(|(v, _)| *v == 7u32) + .map(|(v, _)| *v = 8) + .count() > 0 + ); + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::NposElection(sp_npos_elections::Error::CompactInvalidIndex), + ); + }) + } + + #[test] + fn voter_votes() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + // ----------------------------------------------------^^ valid range is [0..7]. + + // first, check that voter at index 7 (40) actually voted for 3 (40) -- this is self + // vote. Then, change the vote to 2 (30). + assert_eq!( + solution + .compact + .votes1 + .iter_mut() + .filter(|(v, t)| *v == 7 && *t == 3) + .map(|(_, t)| *t = 2) + .count(), + 1, + ); + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidVote, + ); + }) + } + + #[test] + fn score() { + ExtBuilder::default().desired_targets(2).build_and_execute(|| { + roll_to(::get() - ::get() - ::get()); + assert!(MultiPhase::current_phase().is_signed()); + + let mut solution = raw_solution(); + assert_eq!(MultiPhase::snapshot().unwrap().voters.len(), 8); + + // simply faff with the score. 
+ solution.score[0] += 1; + + assert_noop!( + MultiPhase::feasibility_check(solution, COMPUTE), + FeasibilityError::InvalidScore, + ); + }) + } +} + +#[cfg(test)] +mod tests { + use super::{mock::*, Event, *}; + use sp_election_providers::ElectionProvider; + use sp_npos_elections::Support; + + #[test] + fn phase_rotation_works() { + ExtBuilder::default().build_and_execute(|| { + // 0 ------- 15 ------- 25 ------- 30 ------- ------- 45 ------- 55 ------- 60 + // | | | | + // Signed Unsigned Signed Unsigned + + assert_eq!(System::block_number(), 0); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert_eq!(MultiPhase::round(), 1); + + roll_to(4); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert!(MultiPhase::snapshot().is_none()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(24); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::round(), 1); + + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert_eq!( + multi_phase_events(), + vec![Event::SignedPhaseStarted(1), Event::UnsignedPhaseStarted(1)], + ); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(29); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + // we close when upstream tells us to elect. 
+ roll_to(32); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + assert!(MultiPhase::snapshot().is_some()); + + MultiPhase::elect().unwrap(); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + assert_eq!(MultiPhase::round(), 2); + + roll_to(44); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(45); + assert!(MultiPhase::current_phase().is_signed()); + + roll_to(55); + assert!(MultiPhase::current_phase().is_unsigned_open_at(55)); + }) + } + + #[test] + fn signed_phase_void() { + ExtBuilder::default().phases(0, 10).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_unsigned_open_at(20)); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert!(MultiPhase::current_phase().is_unsigned_open_at(20)); + + MultiPhase::elect().unwrap(); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + }); + } + + #[test] + fn unsigned_phase_void() { + ExtBuilder::default().phases(10, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_signed()); + assert!(MultiPhase::snapshot().is_some()); + + roll_to(30); + assert!(MultiPhase::current_phase().is_signed()); + + let _ = MultiPhase::elect().unwrap(); + + assert!(MultiPhase::current_phase().is_off()); + assert!(MultiPhase::snapshot().is_none()); + }); + } + + #[test] + fn both_phases_void() { + ExtBuilder::default().phases(0, 0).build_and_execute(|| { + roll_to(15); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(19); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(20); + assert!(MultiPhase::current_phase().is_off()); + + roll_to(30); + 
assert!(MultiPhase::current_phase().is_off()); + + // this module is now only capable of doing on-chain backup. + let _ = MultiPhase::elect().unwrap(); + + assert!(MultiPhase::current_phase().is_off()); + }); + } + + #[test] + fn early_termination() { + // an early termination in the signed phase, with no queued solution. + ExtBuilder::default().build_and_execute(|| { + // signed phase started at block 15 and will end at 25. + roll_to(14); + assert_eq!(MultiPhase::current_phase(), Phase::Off); + + roll_to(15); + assert_eq!(multi_phase_events(), vec![Event::SignedPhaseStarted(1)]); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert_eq!(MultiPhase::round(), 1); + + // an unexpected call to elect. + roll_to(20); + MultiPhase::elect().unwrap(); + + // we surely can't have any feasible solutions. This will cause an on-chain election. + assert_eq!( + multi_phase_events(), + vec![ + Event::SignedPhaseStarted(1), + Event::ElectionFinalized(Some(ElectionCompute::OnChain)) + ], + ); + // all storage items must be cleared. + assert_eq!(MultiPhase::round(), 2); + assert!(MultiPhase::snapshot().is_none()); + assert!(MultiPhase::snapshot_metadata().is_none()); + assert!(MultiPhase::desired_targets().is_none()); + assert!(MultiPhase::queued_solution().is_none()); + }) + } + + #[test] + fn fallback_strategy_works() { + ExtBuilder::default().fallabck(FallbackStrategy::OnChain).build_and_execute(|| { + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + + // zilch solutions thus far. 
+ let supports = MultiPhase::elect().unwrap(); + + assert_eq!( + supports, + vec![ + (30, Support { total: 40, voters: vec![(2, 5), (4, 5), (30, 30)] }), + (40, Support { total: 60, voters: vec![(2, 5), (3, 10), (4, 5), (40, 40)] }) + ] + ) + }); + + ExtBuilder::default().fallabck(FallbackStrategy::Nothing).build_and_execute(|| { + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + + // zilch solutions thus far. + assert_eq!(MultiPhase::elect().unwrap_err(), ElectionError::NoFallbackConfigured); + }) + } + + #[test] + fn number_of_voters_allowed_2sec_block() { + // Just a rough estimate with the substrate weights. + assert!(!MockWeightInfo::get()); + + let all_voters: u32 = 10_000; + let all_targets: u32 = 5_000; + let desired: u32 = 1_000; + let weight_with = |active| { + ::WeightInfo::submit_unsigned( + all_voters, + all_targets, + active, + desired, + ) + }; + + let mut active = 1; + while weight_with(active) + <= ::BlockWeights::get().max_block + || active == all_voters + { + active += 1; + } + + println!("can support {} voters to yield a weight of {}", active, weight_with(active)); + } +} diff --git a/frame/election-provider-multi-phase/src/mock.rs b/frame/election-provider-multi-phase/src/mock.rs new file mode 100644 index 0000000000..eb38a4cd52 --- /dev/null +++ b/frame/election-provider-multi-phase/src/mock.rs @@ -0,0 +1,381 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::*; +use crate as multi_phase; +pub use frame_support::{assert_noop, assert_ok}; +use frame_support::{ + parameter_types, + traits::{Hooks}, + weights::Weight, +}; +use parking_lot::RwLock; +use sp_core::{ + offchain::{ + testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainExt, TransactionPoolExt, + }, + H256, +}; +use sp_election_providers::ElectionDataProvider; +use sp_npos_elections::{ + assignment_ratio_to_staked_normalized, seq_phragmen, to_supports, to_without_backing, + CompactSolution, ElectionResult, EvaluateSupport, +}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + PerU16, +}; +use std::sync::Arc; + +pub type Block = sp_runtime::generic::Block; +pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + +frame_support::construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: frame_system::{Module, Call, Event, Config}, + Balances: pallet_balances::{Module, Call, Event, Config}, + MultiPhase: multi_phase::{Module, Call, Event}, + } +); + +pub(crate) type Balance = u64; +pub(crate) type AccountId = u64; + +sp_npos_elections::generate_solution_type!( + #[compact] + pub struct TestCompact::(16) +); + +/// All events of this pallet. +pub(crate) fn multi_phase_events() -> Vec> { + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| if let Event::multi_phase(inner) = e { Some(inner) } else { None }) + .collect::>() +} + +/// To from `now` to block `n`. 
+pub fn roll_to(n: u64) { + let now = System::block_number(); + for i in now + 1..=n { + System::set_block_number(i); + MultiPhase::on_initialize(i); + } +} + +pub fn roll_to_with_ocw(n: u64) { + let now = System::block_number(); + for i in now + 1..=n { + System::set_block_number(i); + MultiPhase::on_initialize(i); + MultiPhase::offchain_worker(i); + } +} + +/// Spit out a verifiable raw solution. +/// +/// This is a good example of what an offchain miner would do. +pub fn raw_solution() -> RawSolution> { + let RoundSnapshot { voters, targets } = MultiPhase::snapshot().unwrap(); + let desired_targets = MultiPhase::desired_targets().unwrap(); + + // closures + let cache = helpers::generate_voter_cache::(&voters); + let voter_index = helpers::voter_index_fn_linear::(&voters); + let target_index = helpers::target_index_fn_linear::(&targets); + let stake_of = helpers::stake_of_fn::(&voters, &cache); + + let ElectionResult { winners, assignments } = seq_phragmen::<_, CompactAccuracyOf>( + desired_targets as usize, + targets.clone(), + voters.clone(), + None, + ) + .unwrap(); + + let winners = to_without_backing(winners); + + let score = { + let staked = assignment_ratio_to_staked_normalized(assignments.clone(), &stake_of).unwrap(); + to_supports(&winners, &staked).unwrap().evaluate() + }; + let compact = + >::from_assignment(assignments, &voter_index, &target_index).unwrap(); + + let round = MultiPhase::round(); + RawSolution { compact, score, round } +} + +pub fn witness() -> SolutionOrSnapshotSize { + MultiPhase::snapshot() + .map(|snap| SolutionOrSnapshotSize { + voters: snap.voters.len() as u32, + targets: snap.targets.len() as u32, + }) + .unwrap_or_default() +} + +impl frame_system::Config for Runtime { + type SS58Prefix = (); + type BaseCallFilter = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type 
Header = Header; + type Event = Event; + type BlockHashCount = (); + type DbWeight = (); + type BlockLength = (); + type BlockWeights = BlockWeights; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); +} + +const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); +parameter_types! { + pub const ExistentialDeposit: u64 = 1; + pub BlockWeights: frame_system::limits::BlockWeights = frame_system::limits::BlockWeights + ::with_sensible_defaults(2 * frame_support::weights::constants::WEIGHT_PER_SECOND, NORMAL_DISPATCH_RATIO); +} + +impl pallet_balances::Config for Runtime { + type Balance = Balance; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type MaxLocks = (); + type WeightInfo = (); +} + +parameter_types! { + pub static Targets: Vec = vec![10, 20, 30, 40]; + pub static Voters: Vec<(AccountId, VoteWeight, Vec)> = vec![ + (1, 10, vec![10, 20]), + (2, 10, vec![30, 40]), + (3, 10, vec![40]), + (4, 10, vec![10, 20, 30, 40]), + // self votes. + (10, 10, vec![10]), + (20, 20, vec![20]), + (30, 30, vec![30]), + (40, 40, vec![40]), + ]; + + pub static Fallback: FallbackStrategy = FallbackStrategy::OnChain; + pub static DesiredTargets: u32 = 2; + pub static SignedPhase: u64 = 10; + pub static UnsignedPhase: u64 = 5; + pub static MaxSignedSubmissions: u32 = 5; + + pub static MinerMaxIterations: u32 = 5; + pub static MinerTxPriority: u64 = 100; + pub static SolutionImprovementThreshold: Perbill = Perbill::zero(); + pub static MinerMaxWeight: Weight = BlockWeights::get().max_block; + pub static MockWeightInfo: bool = false; + + + pub static EpochLength: u64 = 30; +} + +// Hopefully this won't be too much of a hassle to maintain. 
+pub struct DualMockWeightInfo; +impl multi_phase::weights::WeightInfo for DualMockWeightInfo { + fn on_initialize_nothing() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_nothing() + } + } + fn on_initialize_open_signed() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_open_signed() + } + } + fn on_initialize_open_unsigned_with_snapshot() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_open_unsigned_with_snapshot() + } + } + fn on_initialize_open_unsigned_without_snapshot() -> Weight { + if MockWeightInfo::get() { + Zero::zero() + } else { + <() as multi_phase::weights::WeightInfo>::on_initialize_open_unsigned_without_snapshot() + } + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { + if MockWeightInfo::get() { + // 10 base + // 5 per edge. + (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)) + } else { + <() as multi_phase::weights::WeightInfo>::submit_unsigned(v, t, a, d) + } + } + fn feasibility_check(v: u32, t: u32, a: u32, d: u32) -> Weight { + if MockWeightInfo::get() { + // 10 base + // 5 per edge. 
+ (10 as Weight).saturating_add((5 as Weight).saturating_mul(a as Weight)) + } else { + <() as multi_phase::weights::WeightInfo>::feasibility_check(v, t, a, d) + } + } +} + +impl crate::Config for Runtime { + type Event = Event; + type Currency = Balances; + type SignedPhase = SignedPhase; + type UnsignedPhase = UnsignedPhase; + type SolutionImprovementThreshold = SolutionImprovementThreshold; + type MinerMaxIterations = MinerMaxIterations; + type MinerMaxWeight = MinerMaxWeight; + type MinerTxPriority = MinerTxPriority; + type DataProvider = StakingMock; + type WeightInfo = DualMockWeightInfo; + type BenchmarkingConfig = (); + type OnChainAccuracy = Perbill; + type Fallback = Fallback; + type CompactSolution = TestCompact; +} + +impl frame_system::offchain::SendTransactionTypes for Runtime +where + Call: From, +{ + type OverarchingCall = Call; + type Extrinsic = Extrinsic; +} + +pub type Extrinsic = sp_runtime::testing::TestXt; + +#[derive(Default)] +pub struct ExtBuilder {} + +pub struct StakingMock; +impl ElectionDataProvider for StakingMock { + fn targets() -> Vec { + Targets::get() + } + fn voters() -> Vec<(AccountId, VoteWeight, Vec)> { + Voters::get() + } + fn desired_targets() -> u32 { + DesiredTargets::get() + } + fn next_election_prediction(now: u64) -> u64 { + now + EpochLength::get() - now % EpochLength::get() + } +} + +impl ExtBuilder { + pub fn miner_tx_priority(self, p: u64) -> Self { + ::set(p); + self + } + pub fn solution_improvement_threshold(self, p: Perbill) -> Self { + ::set(p); + self + } + pub fn phases(self, signed: u64, unsigned: u64) -> Self { + ::set(signed); + ::set(unsigned); + self + } + pub fn fallabck(self, fallback: FallbackStrategy) -> Self { + ::set(fallback); + self + } + pub fn miner_weight(self, weight: Weight) -> Self { + ::set(weight); + self + } + pub fn mock_weight_info(self, mock: bool) -> Self { + ::set(mock); + self + } + pub fn desired_targets(self, t: u32) -> Self { + ::set(t); + self + } + pub fn add_voter(self, who: 
AccountId, stake: Balance, targets: Vec) -> Self { + VOTERS.with(|v| v.borrow_mut().push((who, stake, targets))); + self + } + pub fn build(self) -> sp_io::TestExternalities { + sp_tracing::try_init_simple(); + let mut storage = + frame_system::GenesisConfig::default().build_storage::().unwrap(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + // bunch of account for submitting stuff only. + (99, 100), + (999, 100), + (9999, 100), + ], + } + .assimilate_storage(&mut storage); + + sp_io::TestExternalities::from(storage) + } + + pub fn build_offchainify( + self, + iters: u32, + ) -> (sp_io::TestExternalities, Arc>) { + let mut ext = self.build(); + let (offchain, offchain_state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + + let mut seed = [0_u8; 32]; + seed[0..4].copy_from_slice(&iters.to_le_bytes()); + offchain_state.write().seed = seed; + + ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + (ext, pool_state) + } + + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + self.build().execute_with(test) + } +} diff --git a/frame/election-provider-multi-phase/src/unsigned.rs b/frame/election-provider-multi-phase/src/unsigned.rs new file mode 100644 index 0000000000..2039e5d9f0 --- /dev/null +++ b/frame/election-provider-multi-phase/src/unsigned.rs @@ -0,0 +1,873 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! The unsigned phase implementation. + +use crate::*; +use frame_support::dispatch::DispatchResult; +use frame_system::offchain::SubmitTransaction; +use sp_npos_elections::{ + seq_phragmen, CompactSolution, ElectionResult, assignment_ratio_to_staked_normalized, + assignment_staked_to_ratio_normalized, +}; +use sp_runtime::{offchain::storage::StorageValueRef, traits::TrailingZeroInput}; +use sp_std::cmp::Ordering; + +/// Storage key used to store the persistent offchain worker status. +pub(crate) const OFFCHAIN_HEAD_DB: &[u8] = b"parity/multi-phase-unsigned-election"; + +/// The repeat threshold of the offchain worker. This means we won't run the offchain worker twice +/// within a window of 5 blocks. +pub(crate) const OFFCHAIN_REPEAT: u32 = 5; + +#[derive(Debug, Eq, PartialEq)] +pub enum MinerError { + /// An internal error in the NPoS elections crate. + NposElections(sp_npos_elections::Error), + /// Snapshot data was unavailable unexpectedly. + SnapshotUnAvailable, + /// Submitting a transaction to the pool failed. + PoolSubmissionFailed, + /// The pre-dispatch checks failed for the mined solution. + PreDispatchChecksFailed, + /// The solution generated from the miner is not feasible. + Feasibility(FeasibilityError), +} + +impl From for MinerError { + fn from(e: sp_npos_elections::Error) -> Self { + MinerError::NposElections(e) + } +} + +impl From for MinerError { + fn from(e: FeasibilityError) -> Self { + MinerError::Feasibility(e) + } +} + +impl Pallet { + /// Mine a new solution, and submit it back to the chain as an unsigned transaction. 
+ pub fn mine_check_and_submit() -> Result<(), MinerError> { + let iters = Self::get_balancing_iters(); + // get the solution, with a load of checks to ensure if submitted, IT IS ABSOLUTELY VALID. + let (raw_solution, witness) = Self::mine_and_check(iters)?; + + let call = Call::submit_unsigned(raw_solution, witness).into(); + SubmitTransaction::>::submit_unsigned_transaction(call) + .map_err(|_| MinerError::PoolSubmissionFailed) + } + + /// Mine a new npos solution, with all the relevant checks to make sure that it will be accepted + /// to the chain. + /// + /// If you want an unchecked solution, use [`Pallet::mine_solution`]. + /// If you want a checked solution and submit it at the same time, use + /// [`Pallet::mine_check_and_submit`]. + pub fn mine_and_check( + iters: usize, + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + let (raw_solution, witness) = Self::mine_solution(iters)?; + + // ensure that this will pass the pre-dispatch checks + Self::unsigned_pre_dispatch_checks(&raw_solution).map_err(|e| { + log!(warn, "pre-dispatch-checks failed for mined solution: {:?}", e); + MinerError::PreDispatchChecksFailed + })?; + + // ensure that this is a feasible solution + let _ = Self::feasibility_check(raw_solution.clone(), ElectionCompute::Unsigned).map_err( + |e| { + log!(warn, "feasibility-check failed for mined solution: {:?}", e); + MinerError::from(e) + }, + )?; + + Ok((raw_solution, witness)) + } + + /// Mine a new npos solution. 
+ pub fn mine_solution( + iters: usize, + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + let RoundSnapshot { voters, targets } = + Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; + let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?; + + seq_phragmen::<_, CompactAccuracyOf>( + desired_targets as usize, + targets, + voters, + Some((iters, 0)), + ) + .map_err(Into::into) + .and_then(Self::prepare_election_result) + } + + /// Convert a raw solution from [`sp_npos_elections::ElectionResult`] to [`RawSolution`], which + /// is ready to be submitted to the chain. + /// + /// Will always reduce the solution as well. + pub fn prepare_election_result( + election_result: ElectionResult>, + ) -> Result<(RawSolution>, SolutionOrSnapshotSize), MinerError> { + // NOTE: This code path is generally not optimized as it is run offchain. Could use some at + // some point though. + + // storage items. Note: we have already read this from storage, they must be in cache. + let RoundSnapshot { voters, targets } = + Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; + let desired_targets = Self::desired_targets().ok_or(MinerError::SnapshotUnAvailable)?; + + // closures. + let cache = helpers::generate_voter_cache::(&voters); + let voter_index = helpers::voter_index_fn::(&cache); + let target_index = helpers::target_index_fn_linear::(&targets); + let voter_at = helpers::voter_at_fn::(&voters); + let target_at = helpers::target_at_fn::(&targets); + let stake_of = helpers::stake_of_fn::(&voters, &cache); + + let ElectionResult { assignments, winners } = election_result; + + // convert to staked and reduce. + let mut staked = assignment_ratio_to_staked_normalized(assignments, &stake_of) + .map_err::(Into::into)?; + sp_npos_elections::reduce(&mut staked); + + // convert back to ration and make compact. 
+ let ratio = assignment_staked_to_ratio_normalized(staked)?; + let compact = >::from_assignment(ratio, &voter_index, &target_index)?; + + let size = + SolutionOrSnapshotSize { voters: voters.len() as u32, targets: targets.len() as u32 }; + let maximum_allowed_voters = Self::maximum_voter_for_weight::( + desired_targets, + size, + T::MinerMaxWeight::get(), + ); + log!( + debug, + "miner: current compact solution voters = {}, maximum_allowed = {}", + compact.voter_count(), + maximum_allowed_voters, + ); + let compact = Self::trim_compact(maximum_allowed_voters, compact, &voter_index)?; + + // re-calc score. + let winners = sp_npos_elections::to_without_backing(winners); + let score = compact.clone().score(&winners, stake_of, voter_at, target_at)?; + + let round = Self::round(); + Ok((RawSolution { compact, score, round }, size)) + } + + /// Get a random number of iterations to run the balancing in the OCW. + /// + /// Uses the offchain seed to generate a random number, maxed with + /// [`Config::MinerMaxIterations`]. + pub fn get_balancing_iters() -> usize { + match T::MinerMaxIterations::get() { + 0 => 0, + max @ _ => { + let seed = sp_io::offchain::random_seed(); + let random = ::decode(&mut TrailingZeroInput::new(seed.as_ref())) + .expect("input is padded with zeroes; qed") + % max.saturating_add(1); + random as usize + } + } + } + + /// Greedily reduce the size of the a solution to fit into the block, w.r.t. weight. + /// + /// The weight of the solution is foremost a function of the number of voters (i.e. + /// `compact.len()`). Aside from this, the other components of the weight are invariant. The + /// number of winners shall not be changed (otherwise the solution is invalid) and the + /// `ElectionSize` is merely a representation of the total number of stakers. + /// + /// Thus, we reside to stripping away some voters. This means only changing the `compact` + /// struct. 
+ /// + /// Note that the solution is already computed, and the winners are elected based on the merit + /// of the entire stake in the system. Nonetheless, some of the voters will be removed further + /// down the line. + /// + /// Indeed, the score must be computed **after** this step. If this step reduces the score too + /// much or remove a winner, then the solution must be discarded **after** this step. + pub fn trim_compact( + maximum_allowed_voters: u32, + mut compact: CompactOf, + voter_index: FN, + ) -> Result, MinerError> + where + for<'r> FN: Fn(&'r T::AccountId) -> Option>, + { + match compact.voter_count().checked_sub(maximum_allowed_voters as usize) { + Some(to_remove) if to_remove > 0 => { + // grab all voters and sort them by least stake. + let RoundSnapshot { voters, .. } = + Self::snapshot().ok_or(MinerError::SnapshotUnAvailable)?; + let mut voters_sorted = voters + .into_iter() + .map(|(who, stake, _)| (who.clone(), stake)) + .collect::>(); + voters_sorted.sort_by_key(|(_, y)| *y); + + // start removing from the least stake. Iterate until we know enough have been + // removed. + let mut removed = 0; + for (maybe_index, _stake) in + voters_sorted.iter().map(|(who, stake)| (voter_index(&who), stake)) + { + let index = maybe_index.ok_or(MinerError::SnapshotUnAvailable)?; + if compact.remove_voter(index) { + removed += 1 + } + + if removed >= to_remove { + break; + } + } + + Ok(compact) + } + _ => { + // nada, return as-is + Ok(compact) + } + } + } + + /// Find the maximum `len` that a compact can have in order to fit into the block weight. + /// + /// This only returns a value between zero and `size.nominators`. + pub fn maximum_voter_for_weight( + desired_winners: u32, + size: SolutionOrSnapshotSize, + max_weight: Weight, + ) -> u32 { + if size.voters < 1 { + return size.voters; + } + + let max_voters = size.voters.max(1); + let mut voters = max_voters; + + // helper closures. 
+ let weight_with = |active_voters: u32| -> Weight { + W::submit_unsigned(size.voters, size.targets, active_voters, desired_winners) + }; + + let next_voters = |current_weight: Weight, voters: u32, step: u32| -> Result { + match current_weight.cmp(&max_weight) { + Ordering::Less => { + let next_voters = voters.checked_add(step); + match next_voters { + Some(voters) if voters < max_voters => Ok(voters), + _ => Err(()), + } + } + Ordering::Greater => voters.checked_sub(step).ok_or(()), + Ordering::Equal => Ok(voters), + } + }; + + // First binary-search the right amount of voters + let mut step = voters / 2; + let mut current_weight = weight_with(voters); + while step > 0 { + match next_voters(current_weight, voters, step) { + // proceed with the binary search + Ok(next) if next != voters => { + voters = next; + } + // we are out of bounds, break out of the loop. + Err(()) => { + break; + } + // we found the right value - early exit the function. + Ok(next) => return next, + } + step = step / 2; + current_weight = weight_with(voters); + } + + // Time to finish. We might have reduced less than expected due to rounding error. Increase + // one last time if we have any room left, the reduce until we are sure we are below limit. + while voters + 1 <= max_voters && weight_with(voters + 1) < max_weight { + voters += 1; + } + while voters.checked_sub(1).is_some() && weight_with(voters) > max_weight { + voters -= 1; + } + + debug_assert!( + weight_with(voters.min(size.voters)) <= max_weight, + "weight_with({}) <= {}", + voters.min(size.voters), + max_weight, + ); + voters.min(size.voters) + } + + /// Checks if an execution of the offchain worker is permitted at the given block number, or + /// not. + /// + /// This essentially makes sure that we don't run on previous blocks in case of a re-org, and we + /// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. + /// + /// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. 
+ pub(crate) fn try_acquire_offchain_lock(now: T::BlockNumber) -> Result<(), &'static str> { + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); + + let mutate_stat = + storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { + match maybe_head { + Some(Some(head)) if now < head => Err("fork."), + Some(Some(head)) if now >= head && now <= head + threshold => { + Err("recently executed.") + } + Some(Some(head)) if now > head + threshold => { + // we can run again now. Write the new head. + Ok(now) + } + _ => { + // value doesn't exists. Probably this node just booted up. Write, and run + Ok(now) + } + } + }); + + match mutate_stat { + // all good + Ok(Ok(_)) => Ok(()), + // failed to write. + Ok(Err(_)) => Err("failed to write to offchain db."), + // fork etc. + Err(why) => Err(why), + } + } + + /// Do the basics checks that MUST happen during the validation and pre-dispatch of an unsigned + /// transaction. + /// + /// Can optionally also be called during dispatch, if needed. + /// + /// NOTE: Ideally, these tests should move more and more outside of this and more to the miner's + /// code, so that we do less and less storage reads here. + pub(crate) fn unsigned_pre_dispatch_checks( + solution: &RawSolution>, + ) -> DispatchResult { + // ensure solution is timely. Don't panic yet. This is a cheap check. + ensure!(Self::current_phase().is_unsigned_open(), Error::::PreDispatchEarlySubmission); + + // ensure correct number of winners. + ensure!( + Self::desired_targets().unwrap_or_default() + == solution.compact.unique_targets().len() as u32, + Error::::PreDispatchWrongWinnerCount, + ); + + // ensure score is being improved. Panic henceforth. 
+ ensure!( + Self::queued_solution().map_or(true, |q: ReadySolution<_>| is_score_better::( + solution.score, + q.score, + T::SolutionImprovementThreshold::get() + )), + Error::::PreDispatchWeakSubmission, + ); + + Ok(()) + } +} + +#[cfg(test)] +mod max_weight { + #![allow(unused_variables)] + use super::{mock::*, *}; + + struct TestWeight; + impl crate::weights::WeightInfo for TestWeight { + fn on_initialize_nothing() -> Weight { + unreachable!() + } + fn on_initialize_open_signed() -> Weight { + unreachable!() + } + fn on_initialize_open_unsigned_with_snapshot() -> Weight { + unreachable!() + } + fn on_initialize_open_unsigned_without_snapshot() -> Weight { + unreachable!() + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32) -> Weight { + (0 * v + 0 * t + 1000 * a + 0 * d) as Weight + } + fn feasibility_check(v: u32, _t: u32, a: u32, d: u32) -> Weight { + unreachable!() + } + } + + #[test] + fn find_max_voter_binary_search_works() { + let w = SolutionOrSnapshotSize { voters: 10, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1990), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2990), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2999), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3000), 3); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 3); + 
assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 5500), 5); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 7777), 7); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 9999), 9); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 10_000), 10); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 10_999), 10); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 11_000), 10); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 22_000), 10); + + let w = SolutionOrSnapshotSize { voters: 1, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1990), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 1); + + let w = SolutionOrSnapshotSize { voters: 2, targets: 0 }; + + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 0), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 999), 0); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1000), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1001), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 1999), 1); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2000), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2001), 2); + assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 2010), 2); + 
assert_eq!(MultiPhase::maximum_voter_for_weight::(0, w, 3333), 2); + } +} + +#[cfg(test)] +mod tests { + use super::{ + mock::{Origin, *}, + Call, *, + }; + use frame_support::{dispatch::Dispatchable, traits::OffchainWorker}; + use mock::Call as OuterCall; + use sp_election_providers::Assignment; + use sp_runtime::{traits::ValidateUnsigned, PerU16}; + + #[test] + fn validate_unsigned_retracts_wrong_phase() { + ExtBuilder::default().desired_targets(0).build_and_execute(|| { + let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + + // initial + assert_eq!(MultiPhase::current_phase(), Phase::Off); + assert!(matches!( + ::validate_unsigned(TransactionSource::Local, &call) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // signed + roll_to(15); + assert_eq!(MultiPhase::current_phase(), Phase::Signed); + assert!(matches!( + ::validate_unsigned(TransactionSource::Local, &call) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + + // unsigned + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + assert!(::validate_unsigned( + TransactionSource::Local, + &call + ) + .is_ok()); + assert!(::pre_dispatch(&call).is_ok()); + + // unsigned -- but not enabled. 
+ >::put(Phase::Unsigned((false, 25))); + assert!(MultiPhase::current_phase().is_unsigned()); + assert!(matches!( + ::validate_unsigned(TransactionSource::Local, &call) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(0)) + )); + }) + } + + #[test] + fn validate_unsigned_retracts_low_score() { + ExtBuilder::default().desired_targets(0).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + + // initial + assert!(::validate_unsigned( + TransactionSource::Local, + &call + ) + .is_ok()); + assert!(::pre_dispatch(&call).is_ok()); + + // set a better score + let ready = ReadySolution { score: [10, 0, 0], ..Default::default() }; + >::put(ready); + + // won't work anymore. + assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)) + )); + assert!(matches!( + ::pre_dispatch(&call).unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(2)) + )); + }) + } + + #[test] + fn validate_unsigned_retracts_incorrect_winner_count() { + ExtBuilder::default().desired_targets(1).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + assert_eq!(solution.compact.unique_targets().len(), 0); + + // won't work anymore. 
+ assert!(matches!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap_err(), + TransactionValidityError::Invalid(InvalidTransaction::Custom(1)) + )); + }) + } + + #[test] + fn priority_is_set() { + ExtBuilder::default().miner_tx_priority(20).desired_targets(0).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &call + ) + .unwrap() + .priority, + 25 + ); + }) + } + + #[test] + #[should_panic(expected = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward.: \ + DispatchError::Module { index: 2, error: 1, message: \ + Some(\"PreDispatchWrongWinnerCount\") }")] + fn unfeasible_solution_panics() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // This is in itself an invalid BS solution. + let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + let call = Call::submit_unsigned(solution.clone(), witness()); + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); + }) + } + + #[test] + #[should_panic(expected = "Invalid unsigned submission must produce invalid block and \ + deprive validator from their authoring reward.")] + fn wrong_witness_panics() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // This solution is unfeasible as well, but we won't even get there. 
+ let solution = RawSolution:: { score: [5, 0, 0], ..Default::default() }; + + let mut correct_witness = witness(); + correct_witness.voters += 1; + correct_witness.targets -= 1; + let call = Call::submit_unsigned(solution.clone(), correct_witness); + let outer_call: OuterCall = call.into(); + let _ = outer_call.dispatch(Origin::none()); + }) + } + + #[test] + fn miner_works() { + ExtBuilder::default().build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // ensure we have snapshots in place. + assert!(MultiPhase::snapshot().is_some()); + assert_eq!(MultiPhase::desired_targets().unwrap(), 2); + + // mine seq_phragmen solution with 2 iters. + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + + // ensure this solution is valid. + assert!(MultiPhase::queued_solution().is_none()); + assert_ok!(MultiPhase::submit_unsigned(Origin::none(), solution, witness)); + assert!(MultiPhase::queued_solution().is_some()); + }) + } + + #[test] + fn miner_trims_weight() { + ExtBuilder::default().miner_weight(100).mock_weight_info(true).build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 35); + assert_eq!(solution.compact.voter_count(), 5); + + // now reduce the max weight + ::set(25); + + let (solution, witness) = MultiPhase::mine_solution(2).unwrap(); + let solution_weight = ::WeightInfo::submit_unsigned( + witness.voters, + witness.targets, + solution.compact.voter_count() as u32, + solution.compact.unique_targets().len() as u32, + ); + // default solution will have 5 edges (5 * 5 + 10) + assert_eq!(solution_weight, 25); + 
assert_eq!(solution.compact.voter_count(), 3); + }) + } + + #[test] + fn miner_will_not_submit_if_not_enough_winners() { + let (mut ext, _) = ExtBuilder::default().desired_targets(8).build_offchainify(0); + ext.execute_with(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // mine seq_phragmen solution with 2 iters. + assert_eq!( + MultiPhase::mine_check_and_submit().unwrap_err(), + MinerError::PreDispatchChecksFailed, + ); + }) + } + + #[test] + fn unsigned_per_dispatch_checks_can_only_submit_threshold_better() { + ExtBuilder::default() + .desired_targets(1) + .add_voter(7, 2, vec![10]) + .add_voter(8, 5, vec![10]) + .solution_improvement_threshold(Perbill::from_percent(50)) + .build_and_execute(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + assert_eq!(MultiPhase::desired_targets().unwrap(), 1); + + // an initial solution + let result = ElectionResult { + // note: This second element of backing stake is not important here. + winners: vec![(10, 10)], + assignments: vec![Assignment { + who: 10, + distribution: vec![(10, PerU16::one())], + }], + }; + let (solution, witness) = MultiPhase::prepare_election_result(result).unwrap(); + assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); + assert_ok!(MultiPhase::submit_unsigned(Origin::none(), solution, witness)); + assert_eq!(MultiPhase::queued_solution().unwrap().score[0], 10); + + // trial 1: a solution who's score is only 2, i.e. 20% better in the first element. + let result = ElectionResult { + winners: vec![(10, 12)], + assignments: vec![ + Assignment { who: 10, distribution: vec![(10, PerU16::one())] }, + Assignment { + who: 7, + // note: this percent doesn't even matter, in compact it is 100%. 
+ distribution: vec![(10, PerU16::one())], + }, + ], + }; + let (solution, _) = MultiPhase::prepare_election_result(result).unwrap(); + // 12 is not 50% more than 10 + assert_eq!(solution.score[0], 12); + assert_noop!( + MultiPhase::unsigned_pre_dispatch_checks(&solution), + Error::::PreDispatchWeakSubmission, + ); + // submitting this will actually panic. + + // trial 2: a solution who's score is only 7, i.e. 70% better in the first element. + let result = ElectionResult { + winners: vec![(10, 12)], + assignments: vec![ + Assignment { who: 10, distribution: vec![(10, PerU16::one())] }, + Assignment { who: 7, distribution: vec![(10, PerU16::one())] }, + Assignment { + who: 8, + // note: this percent doesn't even matter, in compact it is 100%. + distribution: vec![(10, PerU16::one())], + }, + ], + }; + let (solution, witness) = MultiPhase::prepare_election_result(result).unwrap(); + assert_eq!(solution.score[0], 17); + + // and it is fine + assert_ok!(MultiPhase::unsigned_pre_dispatch_checks(&solution)); + assert_ok!(MultiPhase::submit_unsigned(Origin::none(), solution, witness)); + }) + } + + #[test] + fn ocw_check_prevent_duplicate() { + let (mut ext, _) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + roll_to(25); + assert!(MultiPhase::current_phase().is_unsigned()); + + // first execution -- okay. + assert!(MultiPhase::try_acquire_offchain_lock(25).is_ok()); + + // next block: rejected. + assert!(MultiPhase::try_acquire_offchain_lock(26).is_err()); + + // allowed after `OFFCHAIN_REPEAT` + assert!(MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT).into()).is_ok()); + + // a fork like situation: re-execute last 3. 
+ assert!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 3).into()).is_err() + ); + assert!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 2).into()).is_err() + ); + assert!( + MultiPhase::try_acquire_offchain_lock((26 + OFFCHAIN_REPEAT - 1).into()).is_err() + ); + }) + } + + #[test] + fn ocw_only_runs_when_signed_open_now() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + roll_to(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + + // we must clear the offchain storage to ensure the offchain execution check doesn't get + // in the way. + let mut storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + + MultiPhase::offchain_worker(24); + assert!(pool.read().transactions.len().is_zero()); + storage.clear(); + + MultiPhase::offchain_worker(26); + assert!(pool.read().transactions.len().is_zero()); + storage.clear(); + + // submits! + MultiPhase::offchain_worker(25); + assert!(!pool.read().transactions.len().is_zero()); + }) + } + + #[test] + fn ocw_can_submit_to_pool() { + let (mut ext, pool) = ExtBuilder::default().build_offchainify(0); + ext.execute_with(|| { + roll_to_with_ocw(25); + assert_eq!(MultiPhase::current_phase(), Phase::Unsigned((true, 25))); + // OCW must have submitted now + + let encoded = pool.read().transactions[0].clone(); + let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); + let call = extrinsic.call; + assert!(matches!(call, OuterCall::MultiPhase(Call::submit_unsigned(_, _)))); + }) + } +} diff --git a/frame/election-provider-multi-phase/src/weights.rs b/frame/election-provider-multi-phase/src/weights.rs new file mode 100644 index 0000000000..cbdc5b39bf --- /dev/null +++ b/frame/election-provider-multi-phase/src/weights.rs @@ -0,0 +1,150 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_election_provider_multi_phase +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-02-12, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_election_provider_multi_phase +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/election-provider-multi-phase/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_election_provider_multi_phase. 
+pub trait WeightInfo { + fn on_initialize_nothing() -> Weight; + fn on_initialize_open_signed() -> Weight; + fn on_initialize_open_unsigned_with_snapshot() -> Weight; + fn on_initialize_open_unsigned_without_snapshot() -> Weight; + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight; + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight; +} + +/// Weights for pallet_election_provider_multi_phase using the Substrate node and recommended hardware. +pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn on_initialize_nothing() -> Weight { + (23_401_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + } + fn on_initialize_open_signed() -> Weight { + (79_260_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn on_initialize_open_unsigned_with_snapshot() -> Weight { + (77_745_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) + .saturating_add(T::DbWeight::get().writes(4 as Weight)) + } + fn on_initialize_open_unsigned_without_snapshot() -> Weight { + (21_764_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 23_000 + .saturating_add((4_171_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 78_000 + .saturating_add((229_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 23_000 + .saturating_add((13_661_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 117_000 + .saturating_add((4_499_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(6 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + 
.saturating_add((4_232_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 42_000 + .saturating_add((636_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((10_294_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 64_000 + .saturating_add((4_428_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn on_initialize_nothing() -> Weight { + (23_401_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + } + fn on_initialize_open_signed() -> Weight { + (79_260_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn on_initialize_open_unsigned_with_snapshot() -> Weight { + (77_745_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + .saturating_add(RocksDbWeight::get().writes(4 as Weight)) + } + fn on_initialize_open_unsigned_without_snapshot() -> Weight { + (21_764_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn submit_unsigned(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 23_000 + .saturating_add((4_171_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 78_000 + .saturating_add((229_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 23_000 + .saturating_add((13_661_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 117_000 + .saturating_add((4_499_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(6 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn feasibility_check(v: u32, t: u32, a: u32, d: u32, ) -> Weight { + (0 as Weight) + // Standard Error: 12_000 + .saturating_add((4_232_000 as Weight).saturating_mul(v 
as Weight)) + // Standard Error: 42_000 + .saturating_add((636_000 as Weight).saturating_mul(t as Weight)) + // Standard Error: 12_000 + .saturating_add((10_294_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 64_000 + .saturating_add((4_428_000 as Weight).saturating_mul(d as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + } +} diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index bdb301c73e..89723cb85f 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -20,7 +20,8 @@ sp-npos-elections = { version = "3.0.0", default-features = false, path = "../.. frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-io = { version = "3.0.0", path = "../../primitives/io" } @@ -39,9 +40,11 @@ std = [ "sp-npos-elections/std", "frame-system/std", "sp-std/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/elections-phragmen/src/benchmarking.rs b/frame/elections-phragmen/src/benchmarking.rs index 511d2751a5..cfdcd80207 100644 --- a/frame/elections-phragmen/src/benchmarking.rs +++ b/frame/elections-phragmen/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use 
frame_support::traits::OnInitialize; use crate::Module as Elections; @@ -536,84 +536,9 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks_elections_phragmen() { - ExtBuilder::default() - .desired_members(13) - .desired_runners_up(7) - .build_and_execute(|| { - assert_ok!(test_benchmark_vote_equal::()); - }); - - ExtBuilder::default() - .desired_members(13) - .desired_runners_up(7) - .build_and_execute(|| { - assert_ok!(test_benchmark_vote_more::()); - }); - - ExtBuilder::default() - .desired_members(13) - .desired_runners_up(7) - .build_and_execute(|| { - assert_ok!(test_benchmark_vote_less::()); - }); - - ExtBuilder::default() - .desired_members(13) - .desired_runners_up(7) - .build_and_execute(|| { - assert_ok!(test_benchmark_remove_voter::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_submit_candidacy::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_candidate::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_runners_up::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_renounce_candidacy_members::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_without_replacement::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_remove_member_with_replacement::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_clean_defunct_voters::()); - }); - - 
ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_election_phragmen::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_election_phragmen::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_election_phragmen_c_e::()); - }); - - ExtBuilder::default().desired_members(13).desired_runners_up(7).build_and_execute(|| { - assert_ok!(test_benchmark_election_phragmen_v::()); - }); - } -} +impl_benchmark_test_suite!( + Elections, + crate::tests::ExtBuilder::default().desired_members(13).desired_runners_up(7), + crate::tests::Test, + exec_name = build_and_execute, +); diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 057e9f181c..d4676e98b8 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -704,8 +704,9 @@ impl Module { } else { // overlap. This can never happen. If so, it seems like our intended replacement // is already a member, so not much more to do. - frame_support::debug::error!( - "pallet-elections-phragmen: a member seems to also be a runner-up." 
+ log::error!( + target: "runtime::elections-phragmen", + "A member seems to also be a runner-up.", ); } next_best @@ -998,7 +999,11 @@ impl Module { Self::deposit_event(RawEvent::NewTerm(new_members_sorted_by_id)); ElectionRounds::mutate(|v| *v += 1); }).map_err(|e| { - frame_support::debug::error!("elections-phragmen: failed to run election [{:?}].", e); + log::error!( + target: "runtime::elections-phragmen", + "Failed to run election [{:?}].", + e, + ); Self::deposit_event(RawEvent::ElectionError); }); diff --git a/frame/elections-phragmen/src/migrations_3_0_0.rs b/frame/elections-phragmen/src/migrations_3_0_0.rs index 0737a12207..8adc4c1a69 100644 --- a/frame/elections-phragmen/src/migrations_3_0_0.rs +++ b/frame/elections-phragmen/src/migrations_3_0_0.rs @@ -95,9 +95,10 @@ type Voting = StorageMap<__Voting, Twox64Concat, T::AccountId, Voter< /// with care and run at your own risk. pub fn apply(old_voter_bond: T::Balance, old_candidacy_bond: T::Balance) -> Weight { let maybe_storage_version = ::storage_version(); - frame_support::debug::info!( + log::info!( + target: "runtime::elections-phragmen", "Running migration for elections-phragmen with storage version {:?}", - maybe_storage_version + maybe_storage_version, ); match maybe_storage_version { Some(storage_version) if storage_version <= PalletVersion::new(2, 0, 0) => { @@ -108,9 +109,10 @@ pub fn apply(old_voter_bond: T::Balance, old_candidacy_bond: T::Balan Weight::max_value() } _ => { - frame_support::debug::warn!( + log::warn!( + target: "runtime::elections-phragmen", "Attempted to apply migration to V3 but failed because storage version is {:?}", - maybe_storage_version + maybe_storage_version, ); 0 }, @@ -129,7 +131,8 @@ pub fn migrate_voters_to_recorded_deposit(old_deposit: T::Balance) { }, ); - frame_support::debug::info!( + log::info!( + target: "runtime::elections-phragmen", "migrated {} voter accounts.", >::iter().count(), ); @@ -140,9 +143,10 @@ pub fn 
migrate_candidates_to_recorded_deposit(old_deposit: T::Balance let _ = >::translate::, _>( |maybe_old_candidates| { maybe_old_candidates.map(|old_candidates| { - frame_support::debug::info!( + log::info!( + target: "runtime::elections-phragmen", "migrated {} candidate accounts.", - old_candidates.len() + old_candidates.len(), ); old_candidates .into_iter() @@ -158,7 +162,11 @@ pub fn migrate_members_to_recorded_deposit(old_deposit: T::Balance) { let _ = >::translate::, _>( |maybe_old_members| { maybe_old_members.map(|old_members| { - frame_support::debug::info!("migrated {} member accounts.", old_members.len()); + log::info!( + target: "runtime::elections-phragmen", + "migrated {} member accounts.", + old_members.len(), + ); old_members .into_iter() .map(|(who, stake)| SeatHolder { @@ -177,9 +185,10 @@ pub fn migrate_runners_up_to_recorded_deposit(old_deposit: T::Balance let _ = >::translate::, _>( |maybe_old_runners_up| { maybe_old_runners_up.map(|old_runners_up| { - frame_support::debug::info!( + log::info!( + target: "runtime::elections-phragmen", "migrated {} runner-up accounts.", - old_runners_up.len() + old_runners_up.len(), ); old_runners_up .into_iter() diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index a13c6d7567..ac3c709300 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -38,3 +38,4 @@ std = [ "sp-runtime/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml index 5a2db258f8..3718da643d 100644 --- a/frame/example-offchain-worker/Cargo.toml +++ b/frame/example-offchain-worker/Cargo.toml @@ -23,6 +23,7 @@ sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } lite-json = { version = 
"0.1", default-features = false } +log = { version = "0.4.14", default-features = false } [features] default = ["std"] @@ -37,4 +38,6 @@ std = [ "sp-keystore", "sp-runtime/std", "sp-std/std", + "log/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example-offchain-worker/README.md b/frame/example-offchain-worker/README.md index 885801841b..5299027f39 100644 --- a/frame/example-offchain-worker/README.md +++ b/frame/example-offchain-worker/README.md @@ -7,7 +7,7 @@ concepts, APIs and structures common to most offchain workers. Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's documentation. -- [`pallet_example_offchain_worker::Config`](./trait.Trait.html) +- [`pallet_example_offchain_worker::Trait`](./trait.Trait.html) - [`Call`](./enum.Call.html) - [`Module`](./struct.Module.html) diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index dbcf7b10f4..a3c1441e13 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -16,7 +16,7 @@ // limitations under the License. //! -//! # Offchain Worker Example Module +//! # Offchain Worker Example Pallet //! //! The Offchain Worker Example: A simple pallet demonstrating //! concepts, APIs and structures common to most offchain workers. @@ -24,9 +24,9 @@ //! Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's //! documentation. //! -//! - [`pallet_example_offchain_worker::Config`](./trait.Config.html) -//! - [`Call`](./enum.Call.html) -//! - [`Module`](./struct.Module.html) +//! - [`Config`] +//! - [`Call`] +//! - [`Pallet`] //! //! //! 
## Overview @@ -44,27 +44,18 @@ use frame_system::{ self as system, - ensure_signed, - ensure_none, offchain::{ AppCrypto, CreateSignedTransaction, SendUnsignedTransaction, SendSignedTransaction, SignedPayload, SigningTypes, Signer, SubmitTransaction, } }; -use frame_support::{ - debug, - dispatch::DispatchResult, decl_module, decl_storage, decl_event, - traits::Get, -}; +use frame_support::traits::Get; use sp_core::crypto::KeyTypeId; use sp_runtime::{ RuntimeDebug, offchain::{http, Duration, storage::StorageValueRef}, traits::Zero, - transaction_validity::{ - InvalidTransaction, ValidTransaction, TransactionValidity, TransactionSource, - TransactionPriority, - }, + transaction_validity::{InvalidTransaction, ValidTransaction, TransactionValidity}, }; use codec::{Encode, Decode}; use sp_std::vec::Vec; @@ -102,81 +93,106 @@ pub mod crypto { } } -/// This pallet's configuration trait -pub trait Config: CreateSignedTransaction> { - /// The identifier type for an offchain worker. - type AuthorityId: AppCrypto; +pub use pallet::*; - /// The overarching event type. - type Event: From> + Into<::Event>; - /// The overarching dispatch call type. - type Call: From>; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - // Configuration parameters + /// This pallet's configuration trait + #[pallet::config] + pub trait Config: CreateSignedTransaction> + frame_system::Config { + /// The identifier type for an offchain worker. + type AuthorityId: AppCrypto; - /// A grace period after we send transaction. - /// - /// To avoid sending too many transactions, we only attempt to send one - /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate - /// sending between distinct runs of this offchain worker. - type GracePeriod: Get; + /// The overarching event type. + type Event: From> + IsType<::Event>; - /// Number of blocks of cooldown after unsigned transaction is included. 
- /// - /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. - type UnsignedInterval: Get; + /// The overarching dispatch call type. + type Call: From>; - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; -} + // Configuration parameters -/// Payload used by this example crate to hold price -/// data required to submit a transaction. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct PricePayload { - block_number: BlockNumber, - price: u32, - public: Public, -} - -impl SignedPayload for PricePayload { - fn public(&self) -> T::Public { - self.public.clone() - } -} + /// A grace period after we send transaction. + /// + /// To avoid sending too many transactions, we only attempt to send one + /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate + /// sending between distinct runs of this offchain worker. + #[pallet::constant] + type GracePeriod: Get; -decl_storage! { - trait Store for Module as ExampleOffchainWorker { - /// A vector of recently submitted prices. + /// Number of blocks of cooldown after unsigned transaction is included. /// - /// This is used to calculate average price, should have bounded size. - Prices get(fn prices): Vec; - /// Defines the block when next unsigned transaction will be accepted. + /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. + #[pallet::constant] + type UnsignedInterval: Get; + + /// A configuration for base priority of unsigned transactions. /// - /// To prevent spam of unsigned (and unpayed!) transactions on the network, - /// we only allow one transaction every `T::UnsignedInterval` blocks. - /// This storage entry defines when new transaction is going to be accepted. 
- NextUnsignedAt get(fn next_unsigned_at): T::BlockNumber; + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + #[pallet::constant] + type UnsignedPriority: Get; } -} -decl_event!( - /// Events generated by the module. - pub enum Event where AccountId = ::AccountId { - /// Event generated when new price is accepted to contribute to the average. - /// \[price, who\] - NewPrice(u32, AccountId), + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet { + /// Offchain Worker entry point. + /// + /// By implementing `fn offchain_worker` you declare a new offchain worker. + /// This function will be called when the node is fully synced and a new best block is + /// succesfuly imported. + /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might + /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), + /// so the code should be able to handle that. + /// You can use `Local Storage` API to coordinate runs of the worker. + fn offchain_worker(block_number: T::BlockNumber) { + // Note that having logs compiled to WASM may cause the size of the blob to increase + // significantly. You can use `RuntimeDebug` custom derive to hide details of the types + // in WASM. The `sp-api` crate also provides a feature `disable-logging` to disable + // all logging and thus, remove any logging from the WASM. + log::info!("Hello World from offchain workers!"); + + // Since off-chain workers are just part of the runtime code, they have direct access + // to the storage and other included pallets. + // + // We can easily import `frame_system` and retrieve a block hash of the parent block. 
+ let parent_hash = >::block_hash(block_number - 1u32.into()); + log::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + + // It's a good practice to keep `fn offchain_worker()` function minimal, and move most + // of the code to separate `impl` block. + // Here we call a helper function to calculate current average price. + // This function reads storage entries of the current state. + let average: Option = Self::average_price(); + log::debug!("Current price: {:?}", average); + + // For this example we are going to send both signed and unsigned transactions + // depending on the block number. + // Usually it's enough to choose one or the other. + let should_send = Self::choose_transaction_type(block_number); + let res = match should_send { + TransactionType::Signed => Self::fetch_price_and_send_signed(), + TransactionType::UnsignedForAny => Self::fetch_price_and_send_unsigned_for_any_account(block_number), + TransactionType::UnsignedForAll => Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), + TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), + TransactionType::None => Ok(()), + }; + if let Err(e) = res { + log::error!("Error: {}", e); + } + } } -); -decl_module! { /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - + #[pallet::call] + impl Pallet { /// Submit new price to the list. /// /// This method is a public function of the module and can be called from within @@ -191,13 +207,13 @@ decl_module! { /// working and receives (and provides) meaningful data. /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. 
- #[weight = 0] - pub fn submit_price(origin, price: u32) -> DispatchResult { + #[pallet::weight(0)] + pub fn submit_price(origin: OriginFor, price: u32) -> DispatchResultWithPostInfo { // Retrieve sender of the transaction. let who = ensure_signed(origin)?; // Add the price to the on-chain list. Self::add_price(who, price); - Ok(()) + Ok(().into()) } /// Submit new price to the list via unsigned transaction. @@ -216,86 +232,108 @@ decl_module! { /// /// This example is not focused on correctness of the oracle itself, but rather its /// purpose is to showcase offchain worker capabilities. - #[weight = 0] - pub fn submit_price_unsigned(origin, _block_number: T::BlockNumber, price: u32) - -> DispatchResult - { + #[pallet::weight(0)] + pub fn submit_price_unsigned( + origin: OriginFor, + _block_number: T::BlockNumber, + price: u32 + ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; // Add the price to the on-chain list, but mark it as coming from an empty address. Self::add_price(Default::default(), price); // now increment the block number at which we expect next unsigned transaction. - let current_block = >::block_number(); + let current_block = >::block_number(); >::put(current_block + T::UnsignedInterval::get()); - Ok(()) + Ok(().into()) } - #[weight = 0] + #[pallet::weight(0)] pub fn submit_price_unsigned_with_signed_payload( - origin, + origin: OriginFor, price_payload: PricePayload, _signature: T::Signature, - ) -> DispatchResult { + ) -> DispatchResultWithPostInfo { // This ensures that the function can only be called via unsigned transaction. ensure_none(origin)?; // Add the price to the on-chain list, but mark it as coming from an empty address. Self::add_price(Default::default(), price_payload.price); // now increment the block number at which we expect next unsigned transaction. 
- let current_block = >::block_number(); + let current_block = >::block_number(); >::put(current_block + T::UnsignedInterval::get()); - Ok(()) + Ok(().into()) } + } - /// Offchain Worker entry point. - /// - /// By implementing `fn offchain_worker` within `decl_module!` you declare a new offchain - /// worker. - /// This function will be called when the node is fully synced and a new best block is - /// succesfuly imported. - /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might - /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), - /// so the code should be able to handle that. - /// You can use `Local Storage` API to coordinate runs of the worker. - fn offchain_worker(block_number: T::BlockNumber) { - // It's a good idea to add logs to your offchain workers. - // Using the `frame_support::debug` module you have access to the same API exposed by - // the `log` crate. - // Note that having logs compiled to WASM may cause the size of the blob to increase - // significantly. You can use `RuntimeDebug` custom derive to hide details of the types - // in WASM or use `debug::native` namespace to produce logs only when the worker is - // running natively. - debug::native::info!("Hello World from offchain workers!"); - - // Since off-chain workers are just part of the runtime code, they have direct access - // to the storage and other included pallets. - // - // We can easily import `frame_system` and retrieve a block hash of the parent block. - let parent_hash = >::block_hash(block_number - 1u32.into()); - debug::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + /// Events for the pallet. + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// Event generated when new price is accepted to contribute to the average. 
+ /// \[price, who\] + NewPrice(u32, T::AccountId), + } - // It's a good practice to keep `fn offchain_worker()` function minimal, and move most - // of the code to separate `impl` block. - // Here we call a helper function to calculate current average price. - // This function reads storage entries of the current state. - let average: Option = Self::average_price(); - debug::debug!("Current price: {:?}", average); + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; - // For this example we are going to send both signed and unsigned transactions - // depending on the block number. - // Usually it's enough to choose one or the other. - let should_send = Self::choose_transaction_type(block_number); - let res = match should_send { - TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::UnsignedForAny => Self::fetch_price_and_send_unsigned_for_any_account(block_number), - TransactionType::UnsignedForAll => Self::fetch_price_and_send_unsigned_for_all_accounts(block_number), - TransactionType::Raw => Self::fetch_price_and_send_raw_unsigned(block_number), - TransactionType::None => Ok(()), - }; - if let Err(e) = res { - debug::error!("Error: {}", e); + /// Validate unsigned call to this module. + /// + /// By default unsigned transactions are disallowed, but implementing the validator + /// here we make sure that some particular calls (the ones produced by offchain worker) + /// are being whitelisted and marked as valid. + fn validate_unsigned( + _source: TransactionSource, + call: &Self::Call, + ) -> TransactionValidity { + // Firstly let's check that we call the right function. 
+ if let Call::submit_price_unsigned_with_signed_payload( + ref payload, ref signature + ) = call { + let signature_valid = SignedPayload::::verify::(payload, signature.clone()); + if !signature_valid { + return InvalidTransaction::BadProof.into(); + } + Self::validate_transaction_parameters(&payload.block_number, &payload.price) + } else if let Call::submit_price_unsigned(block_number, new_price) = call { + Self::validate_transaction_parameters(block_number, new_price) + } else { + InvalidTransaction::Call.into() } } } + + /// A vector of recently submitted prices. + /// + /// This is used to calculate average price, should have bounded size. + #[pallet::storage] + #[pallet::getter(fn prices)] + pub(super) type Prices = StorageValue<_, Vec, ValueQuery>; + + /// Defines the block when next unsigned transaction will be accepted. + /// + /// To prevent spam of unsigned (and unpayed!) transactions on the network, + /// we only allow one transaction every `T::UnsignedInterval` blocks. + /// This storage entry defines when new transaction is going to be accepted. + #[pallet::storage] + #[pallet::getter(fn next_unsigned_at)] + pub(super) type NextUnsignedAt = StorageValue<_, T::BlockNumber, ValueQuery>; +} + +/// Payload used by this example crate to hold price +/// data required to submit a transaction. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct PricePayload { + block_number: BlockNumber, + price: u32, + public: Public, +} + +impl SignedPayload for PricePayload { + fn public(&self) -> T::Public { + self.public.clone() + } } enum TransactionType { @@ -306,11 +344,7 @@ enum TransactionType { None, } -/// Most of the functions are moved outside of the `decl_module!` macro. -/// -/// This greatly helps with error messages, as the ones inside the macro -/// can sometimes be hard to debug. -impl Module { +impl Pallet { /// Chooses which transaction type to send. 
/// /// This function serves mostly to showcase `StorageValue` helper @@ -409,8 +443,8 @@ impl Module { for (acc, res) in &results { match res { - Ok(()) => debug::info!("[{:?}] Submitted price of {} cents", acc.id, price), - Err(e) => debug::error!("[{:?}] Failed to submit transaction: {:?}", acc.id, e), + Ok(()) => log::info!("[{:?}] Submitted price of {} cents", acc.id, price), + Err(e) => log::error!("[{:?}] Failed to submit transaction: {:?}", acc.id, e), } } @@ -545,7 +579,7 @@ impl Module { .map_err(|_| http::Error::DeadlineReached)??; // Let's check the status code before we proceed to reading the response. if response.code != 200 { - debug::warn!("Unexpected status code: {}", response.code); + log::warn!("Unexpected status code: {}", response.code); return Err(http::Error::Unknown); } @@ -556,19 +590,19 @@ impl Module { // Create a str slice from the body. let body_str = sp_std::str::from_utf8(&body).map_err(|_| { - debug::warn!("No UTF8 body"); + log::warn!("No UTF8 body"); http::Error::Unknown })?; let price = match Self::parse_price(body_str) { Some(price) => Ok(price), None => { - debug::warn!("Unable to extract price from the response: {:?}", body_str); + log::warn!("Unable to extract price from the response: {:?}", body_str); Err(http::Error::Unknown) } }?; - debug::warn!("Got price: {} cents", price); + log::warn!("Got price: {} cents", price); Ok(price) } @@ -597,8 +631,8 @@ impl Module { /// Add new price to the list. 
fn add_price(who: T::AccountId, price: u32) { - debug::info!("Adding to the average: {}", price); - Prices::mutate(|prices| { + log::info!("Adding to the average: {}", price); + >::mutate(|prices| { const MAX_LEN: usize = 64; if prices.len() < MAX_LEN { @@ -610,14 +644,14 @@ impl Module { let average = Self::average_price() .expect("The average is not empty, because it was just mutated; qed"); - debug::info!("Current average price is: {}", average); + log::info!("Current average price is: {}", average); // here we are raising the NewPrice event - Self::deposit_event(RawEvent::NewPrice(price, who)); + Self::deposit_event(Event::NewPrice(price, who)); } /// Calculate current average price. fn average_price() -> Option { - let prices = Prices::get(); + let prices = >::get(); if prices.is_empty() { None } else { @@ -635,7 +669,7 @@ impl Module { return InvalidTransaction::Stale.into(); } // Let's make sure to reject transactions from the future. - let current_block = >::block_number(); + let current_block = >::block_number(); if ¤t_block < block_number { return InvalidTransaction::Future.into(); } @@ -677,33 +711,3 @@ impl Module { .build() } } - -#[allow(deprecated)] // ValidateUnsigned -impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - /// Validate unsigned call to this module. - /// - /// By default unsigned transactions are disallowed, but implementing the validator - /// here we make sure that some particular calls (the ones produced by offchain worker) - /// are being whitelisted and marked as valid. - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - // Firstly let's check that we call the right function. 
- if let Call::submit_price_unsigned_with_signed_payload( - ref payload, ref signature - ) = call { - let signature_valid = SignedPayload::::verify::(payload, signature.clone()); - if !signature_valid { - return InvalidTransaction::BadProof.into(); - } - Self::validate_transaction_parameters(&payload.block_number, &payload.price) - } else if let Call::submit_price_unsigned(block_number, new_price) = call { - Self::validate_transaction_parameters(block_number, new_price) - } else { - InvalidTransaction::Call.into() - } - } -} diff --git a/frame/example-parallel/Cargo.toml b/frame/example-parallel/Cargo.toml index b2f28887ce..2c593db7ec 100644 --- a/frame/example-parallel/Cargo.toml +++ b/frame/example-parallel/Cargo.toml @@ -36,3 +36,4 @@ std = [ "sp-std/std", "sp-tasks/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/example-parallel/src/lib.rs b/frame/example-parallel/src/lib.rs index c83a722be1..e777100c6f 100644 --- a/frame/example-parallel/src/lib.rs +++ b/frame/example-parallel/src/lib.rs @@ -22,10 +22,6 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_system::ensure_signed; -use frame_support::{ - dispatch::DispatchResult, decl_module, decl_storage, decl_event, -}; use sp_runtime::RuntimeDebug; use codec::{Encode, Decode}; @@ -34,33 +30,71 @@ use sp_std::vec::Vec; #[cfg(test)] mod tests; -pub trait Config: frame_system::Config { - /// The overarching event type. - type Event: From + Into<::Event>; - /// The overarching dispatch call type. - type Call: From>; -} +pub use pallet::*; -decl_storage! { - trait Store for Module as ExampleOffchainWorker { - /// A vector of current participants - /// - /// To enlist someone to participate, signed payload should be - /// sent to `enlist`. - Participants get(fn participants): Vec>; +#[frame_support::pallet] +pub mod pallet { + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; - /// Current event id to enlist participants to. 
- CurrentEventId get(fn get_current_event_id): Vec; + #[pallet::config] + pub trait Config: frame_system::Config { + /// The overarching dispatch call type. + type Call: From>; } -} -decl_event!( - /// Events generated by the module. - pub enum Event { - /// When new event is drafted. - NewEventDrafted(Vec), + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + #[pallet::hooks] + impl Hooks> for Pallet {} + + /// A public part of the pallet. + #[pallet::call] + impl Pallet { + /// Get the new event running. + #[pallet::weight(0)] + pub fn run_event(origin: OriginFor, id: Vec) -> DispatchResultWithPostInfo { + let _ = ensure_signed(origin)?; + >::kill(); + >::mutate(move |event_id| *event_id = id); + Ok(().into()) + } + + /// Submit list of participants to the current event. + /// + /// The example utilizes parallel execution by checking half of the + /// signatures in spawned task. + #[pallet::weight(0)] + pub fn enlist_participants(origin: OriginFor, participants: Vec) + -> DispatchResultWithPostInfo + { + let _ = ensure_signed(origin)?; + + if validate_participants_parallel(&>::get(), &participants[..]) { + for participant in participants { + >::append(participant.account); + } + } + Ok(().into()) + } } -); + + /// A vector of current participants + /// + /// To enlist someone to participate, signed payload should be + /// sent to `enlist`. + #[pallet::storage] + #[pallet::getter(fn participants)] + pub(super) type Participants = StorageValue<_, Vec>, ValueQuery>; + + /// Current event id to enlist participants to. + #[pallet::storage] + #[pallet::getter(fn get_current_event_id)] + pub(super) type CurrentEventId = StorageValue<_, Vec, ValueQuery>; +} /// Request to enlist participant. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] @@ -85,40 +119,6 @@ impl EnlistedParticipant { } } -decl_module! { - /// A public part of the pallet. 
- pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Get the new event running. - #[weight = 0] - pub fn run_event(origin, id: Vec) -> DispatchResult { - let _ = ensure_signed(origin)?; - Participants::kill(); - CurrentEventId::mutate(move |event_id| *event_id = id); - Ok(()) - } - - /// Submit list of participants to the current event. - /// - /// The example utilizes parallel execution by checking half of the - /// signatures in spawned task. - #[weight = 0] - pub fn enlist_participants(origin, participants: Vec) - -> DispatchResult - { - let _ = ensure_signed(origin)?; - - if validate_participants_parallel(&CurrentEventId::get(), &participants[..]) { - for participant in participants { - Participants::append(participant.account); - } - } - Ok(()) - } - } -} - fn validate_participants_parallel(event_id: &[u8], participants: &[EnlistedParticipant]) -> bool { fn spawn_verify(data: Vec) -> Vec { diff --git a/frame/example-parallel/src/tests.rs b/frame/example-parallel/src/tests.rs index 9c921e0ddf..da2892c67d 100644 --- a/frame/example-parallel/src/tests.rs +++ b/frame/example-parallel/src/tests.rs @@ -34,7 +34,7 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Module, Call, Config, Storage, Event}, - Example: pallet_example_parallel::{Module, Call, Storage, Event}, + Example: pallet_example_parallel::{Module, Call, Storage}, } ); @@ -75,7 +75,6 @@ parameter_types! 
{ } impl Config for Test { - type Event = Event; type Call = Call; } diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index c6dfc018b3..6187306884 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 335c277b7c..b4ae35c550 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -63,9 +63,9 @@ //! // Include the following links that shows what trait needs to be implemented to use the pallet //! // and the supported dispatchables that are documented in the Call enum. //! -//! - \[`::Config`](./trait.Config.html) -//! - \[`Call`](./enum.Call.html) -//! - \[`Module`](./struct.Module.html) +//! - \[`Config`] +//! - \[`Call`] +//! - \[`Pallet`] //! //! \## Overview //! 
@@ -257,11 +257,11 @@ use sp_std::marker::PhantomData; use frame_support::{ - dispatch::DispatchResult, decl_module, decl_storage, decl_event, traits::IsSubType, + dispatch::DispatchResult, traits::IsSubType, weights::{DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, Pays}, }; use sp_std::prelude::*; -use frame_system::{ensure_signed, ensure_root}; +use frame_system::{ensure_signed}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{ @@ -278,7 +278,7 @@ use sp_runtime::{ // The `WeightData` trait has access to the arguments of the dispatch that it wants to assign a // weight to. Nonetheless, the trait itself can not make any assumptions about what the generic type // of the arguments (`T`) is. Based on our needs, we could replace `T` with a more concrete type -// while implementing the trait. The `decl_module!` expects whatever implements `WeighData` to +// while implementing the trait. The `pallet::weight` expects whatever implements `WeighData` to // replace `T` with a tuple of the dispatch arguments. This is exactly how we will craft the // implementation below. // @@ -315,111 +315,97 @@ impl PaysFee<(&BalanceOf,)> for WeightForSetDummy /// A type alias for the balance type from this pallet's point of view. type BalanceOf = ::Balance; -/// Our pallet's configuration trait. All our types and constants go in here. If the -/// pallet is dependent on specific other pallets, then their configuration traits -/// should be added to our implied traits list. -/// -/// `frame_system::Config` should always be included in our implied traits. -pub trait Config: pallet_balances::Config { - /// The overarching event type. - type Event: From> + Into<::Event>; -} +// Re-export pallet items so that they can be accessed from the crate namespace. +pub use pallet::*; -decl_storage! { - // A macro for the Storage trait, and its implementation, for this pallet. 
- // This allows for type-safe usage of the Substrate storage database, so you can - // keep things around between blocks. - // - // It is important to update your storage name so that your pallet's - // storage items are isolated from other pallets. - // ---------------------------------vvvvvvv - trait Store for Module as Example { - // Any storage declarations of the form: - // `pub? Name get(fn getter_name)? [config()|config(myname)] [build(|_| {...})] : (= )?;` - // where `` is either: - // - `Type` (a basic value item); or - // - `map hasher(HasherKind) KeyType => ValueType` (a map item). - // - // Note that there are two optional modifiers for the storage type declaration. - // - `Foo: Option`: - // - `Foo::put(1); Foo::get()` returns `Some(1)`; - // - `Foo::kill(); Foo::get()` returns `None`. - // - `Foo: u32`: - // - `Foo::put(1); Foo::get()` returns `1`; - // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). - // e.g. Foo: u32; - // e.g. pub Bar get(fn bar): map hasher(blake2_128_concat) T::AccountId => Vec<(T::Balance, u64)>; - // - // For basic value items, you'll get a type which implements - // `frame_support::StorageValue`. For map items, you'll get a type which - // implements `frame_support::StorageMap`. +// Definition of the pallet logic, to be aggregated at runtime definition through +// `construct_runtime`. +#[frame_support::pallet] +pub mod pallet { + // Import various types used to declare pallet in scope. + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + use super::*; + + /// Our pallet's configuration trait. All our types and constants go in here. If the + /// pallet is dependent on specific other pallets, then their configuration traits + /// should be added to our implied traits list. + /// + /// `frame_system::Config` should always be included. + #[pallet::config] + pub trait Config: pallet_balances::Config + frame_system::Config { + /// The overarching event type. 
+ type Event: From> + IsType<::Event>; + } + + // Simple declaration of the `Pallet` type. It is placeholder we use to implement traits and + // method. + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + // Pallet implements [`Hooks`] trait to define some logic to execute in some context. + #[pallet::hooks] + impl Hooks> for Pallet { + // `on_initialize` is executed at the beginning of the block before any extrinsic are + // dispatched. // - // If they have a getter (`get(getter_name)`), then your pallet will come - // equipped with `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - Dummy get(fn dummy) config(): Option; + // This function must return the weight consumed by `on_initialize` and `on_finalize`. + fn on_initialize(_n: T::BlockNumber) -> Weight { + // Anything that needs to be done at the start of the block. + // We don't do anything here. - // A map that has enumerable entries. - Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + 0 + } - // this one uses the default, we'll demonstrate the usage of 'mutate' API. - Foo get(fn foo) config(): T::Balance; - } -} + // `on_finalize` is executed at the end of block after all extrinsic are dispatched. + fn on_finalize(_n: T::BlockNumber) { + // We just kill our dummy storage item. + >::kill(); + } -decl_event!( - /// Events are a simple means of reporting specific conditions and - /// circumstances that have happened that users, Dapps and/or chain explorers would find - /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { - // Just a normal `enum`, here's a dummy event to ensure it compiles. - /// Dummy event, just here so there's a generic type that's used. - Dummy(B), + // A runtime code run after every block and have access to extended set of APIs. 
+ // + // For instance you can generate extrinsics for the upcoming produced block. + fn offchain_worker(_n: T::BlockNumber) { + // We don't do anything here. + // but we could dispatch extrinsic (transaction/unsigned/inherent) using + // sp_io::submit_extrinsic + } } -); -// The module declaration. This states the entry points that we handle. The -// macro takes care of the marshalling of arguments and dispatch. -// -// Anyone can have these functions execute by signing and submitting -// an extrinsic. Ensure that calls into each of these execute in a time, memory and -// using storage space proportional to any costs paid for by the caller or otherwise the -// difficulty of forcing the call to happen. -// -// Generally you'll want to split these into three groups: -// - Public calls that are signed by an external account. -// - Root calls that are allowed to be made only by the governance system. -// - Unsigned calls that can be of two kinds: -// * "Inherent extrinsics" that are opinions generally held by the block -// authors that build child blocks. -// * Unsigned Transactions that are of intrinsic recognizable utility to the -// network, and are validated by the runtime. -// -// Information about where this dispatch initiated from is provided as the first argument -// "origin". As such functions must always look like: -// -// `fn foo(origin, bar: Bar, baz: Baz) -> Result;` -// -// The `Result` is required as part of the syntax (and expands to the conventional dispatch -// result of `Result<(), &'static str>`). -// -// When you come to `impl` them later in the pallet, you must specify the full type for `origin`: -// -// `fn foo(origin: T::Origin, bar: Bar, baz: Baz) { ... }` -// -// There are three entries in the `frame_system::Origin` enum that correspond -// to the above bullets: `::Signed(AccountId)`, `::Root` and `::None`. You should always match -// against them as the first thing you do in your function. 
There are three convenience calls -// in system that do the matching for you and return a convenient result: `ensure_signed`, -// `ensure_root` and `ensure_none`. -decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this pallet's events by using the default implementation. - /// It is also possible to provide a custom implementation. - /// For non-generic events, the generic parameter just needs to be dropped, so that it - /// looks like: `fn deposit_event() = default;`. - fn deposit_event() = default; + // The call declaration. This states the entry points that we handle. The + // macro takes care of the marshalling of arguments and dispatch. + // + // Anyone can have these functions execute by signing and submitting + // an extrinsic. Ensure that calls into each of these execute in a time, memory and + // using storage space proportional to any costs paid for by the caller or otherwise the + // difficulty of forcing the call to happen. + // + // Generally you'll want to split these into three groups: + // - Public calls that are signed by an external account. + // - Root calls that are allowed to be made only by the governance system. + // - Unsigned calls that can be of two kinds: + // * "Inherent extrinsics" that are opinions generally held by the block + // authors that build child blocks. + // * Unsigned Transactions that are of intrinsic recognizable utility to the + // network, and are validated by the runtime. + // + // Information about where this dispatch initiated from is provided as the first argument + // "origin". As such functions must always look like: + // + // `fn foo(origin: OriginFor, bar: Bar, baz: Baz) -> DispatchResultWithPostInfo { ... }` + // + // The `DispatchResultWithPostInfo` is required as part of the syntax (and can be found at + // `pallet_prelude::DispatchResultWithPostInfo`). 
+ // + // There are three entries in the `frame_system::Origin` enum that correspond + // to the above bullets: `::Signed(AccountId)`, `::Root` and `::None`. You should always match + // against them as the first thing you do in your function. There are three convenience calls + // in system that do the matching for you and return a convenient result: `ensure_signed`, + // `ensure_root` and `ensure_none`. + #[pallet::call] + impl Pallet { /// This is your public interface. Be extremely careful. /// This is just a simple example of how to interact with the pallet from the external /// world. @@ -458,18 +444,22 @@ decl_module! { // // If you don't respect these rules, it is likely that your chain will be attackable. // - // Each transaction can define an optional `#[weight]` attribute to convey a set of static + // Each transaction must define a `#[pallet::weight(..)]` attribute to convey a set of static // information about its dispatch. FRAME System and FRAME Executive pallet then use this // information to properly execute the transaction, whilst keeping the total load of the // chain in a moderate rate. // - // The _right-hand-side_ value of the `#[weight]` attribute can be any type that implements - // a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. The former conveys the - // weight (a numeric representation of pure execution time and difficulty) of the - // transaction and the latter demonstrates the [`DispatchClass`] of the call. A higher - // weight means a larger transaction (less of which can be placed in a single block). - #[weight = 0] - fn accumulate_dummy(origin, increase_by: T::Balance) -> DispatchResult { + // The parenthesized value of the `#[pallet::weight(..)]` attribute can be any type that + // implements a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. 
+ // The former conveys the weight (a numeric representation of pure execution time and + // difficulty) of the transaction and the latter demonstrates the [`DispatchClass`] of the + // call. A higher weight means a larger transaction (less of which can be placed in a + // single block). + #[pallet::weight(0)] + pub(super) fn accumulate_dummy( + origin: OriginFor, + increase_by: T::Balance + ) -> DispatchResultWithPostInfo { // This is a public call, so we ensure that the origin is some signed account. let _sender = ensure_signed(origin)?; @@ -493,10 +483,10 @@ decl_module! { }); // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(RawEvent::Dummy(increase_by)); + Self::deposit_event(Event::Dummy(increase_by)); - // All good. - Ok(()) + // All good, no refund. + Ok(().into()) } /// A privileged call; in this case it resets our dummy value to something new. @@ -506,39 +496,92 @@ decl_module! { // calls to be executed - we don't need to care why. Because it's privileged, we can // assume it's a one-off operation and substantial processing/storage/memory can be used // without worrying about gameability or attack scenarios. - // If you do not specify `Result` explicitly as return value, it will be added automatically - // for you and `Ok(())` will be returned. - #[weight = WeightForSetDummy::(>::from(100u32))] - fn set_dummy(origin, #[compact] new_value: T::Balance) { + #[pallet::weight(WeightForSetDummy::(>::from(100u32)))] + fn set_dummy( + origin: OriginFor, + #[pallet::compact] new_value: T::Balance, + ) -> DispatchResultWithPostInfo { ensure_root(origin)?; // Put the new value into storage. >::put(new_value); + + // All good, no refund. + Ok(().into()) } + } - // The signature could also look like: `fn on_initialize()`. - // This function could also very well have a weight annotation, similar to any other. The - // only difference is that it mut be returned, not annotated. 
- fn on_initialize(_n: T::BlockNumber) -> Weight { - // Anything that needs to be done at the start of the block. - // We don't do anything here. + /// Events are a simple means of reporting specific conditions and + /// circumstances that have happened that users, Dapps and/or chain explorers would find + /// interesting and otherwise difficult to detect. + #[pallet::event] + /// This attribute generate the function `deposit_event` to deposit one of this pallet event, + /// it is optional, it is also possible to provide a custom implementation. + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + // Just a normal `enum`, here's a dummy event to ensure it compiles. + /// Dummy event, just here so there's a generic type that's used. + Dummy(BalanceOf), + } - 0 - } + // pallet::storage attributes allow for type-safe usage of the Substrate storage database, + // so you can keep things around between blocks. + // + // Any storage must be one of `StorageValue`, `StorageMap` or `StorageDoubleMap`. + // The first generic holds the prefix to use and is generated by the macro. + // The query kind is either `OptionQuery` (the default) or `ValueQuery`. + // - for `type Foo = StorageValue<_, u32, OptionQuery>`: + // - `Foo::put(1); Foo::get()` returns `Some(1)`; + // - `Foo::kill(); Foo::get()` returns `None`. + // - for `type Foo = StorageValue<_, u32, ValueQuery>`: + // - `Foo::put(1); Foo::get()` returns `1`; + // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). + #[pallet::storage] + // The getter attribute generate a function on `Pallet` placeholder: + // `fn getter_name() -> Type` for basic value items or + // `fn getter_name(key: KeyType) -> ValueType` for map items. + #[pallet::getter(fn dummy)] + pub(super) type Dummy = StorageValue<_, T::Balance>; + + // A map that has enumerable entries. 
+ #[pallet::storage] + #[pallet::getter(fn bar)] + pub(super) type Bar = StorageMap<_, Blake2_128Concat, T::AccountId, T::Balance, ValueQuery>; + + // this one uses the query kind: `ValueQuery`, we'll demonstrate the usage of 'mutate' API. + #[pallet::storage] + #[pallet::getter(fn foo)] + pub(super) type Foo = StorageValue<_, T::Balance, ValueQuery>; + + + // The genesis config type. + #[pallet::genesis_config] + pub struct GenesisConfig { + pub dummy: T::Balance, + pub bar: Vec<(T::AccountId, T::Balance)>, + pub foo: T::Balance, + } - // The signature could also look like: `fn on_finalize()` - fn on_finalize(_n: T::BlockNumber) { - // Anything that needs to be done at the end of the block. - // We just kill our dummy storage item. - >::kill(); + // The default value for the genesis config type. + #[cfg(feature = "std")] + impl Default for GenesisConfig { + fn default() -> Self { + Self { + dummy: Default::default(), + bar: Default::default(), + foo: Default::default(), + } } + } - // A runtime code run after every block and have access to extended set of APIs. - // - // For instance you can generate extrinsics for the upcoming produced block. - fn offchain_worker(_n: T::BlockNumber) { - // We don't do anything here. - // but we could dispatch extrinsic (transaction/unsigned/inherent) using - // sp_io::submit_extrinsic + // The build of genesis for the pallet. + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + >::put(&self.dummy); + for (a, b) in &self.bar { + >::insert(a, b); + } + >::put(&self.foo); } } } @@ -548,7 +591,7 @@ decl_module! { // - Public interface. These are functions that are `pub` and generally fall into inspector // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other pallets. -impl Module { +impl Pallet { // Add public immutables and private mutables. 
#[allow(dead_code)] fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { @@ -577,7 +620,7 @@ impl Module { // // Note that a signed extension can also indicate that a particular data must be present in the // _signing payload_ of a transaction by providing an implementation for the `additional_signed` -// method. This example will not cover this type of extension. See `CheckSpecVersion` in +// method. This example will not cover this type of extension. See `CheckSpecVersion` in // [FRAME System](https://github.com/paritytech/substrate/tree/master/frame/system#signed-extensions) // for an example. // @@ -652,7 +695,7 @@ where #[cfg(feature = "runtime-benchmarks")] mod benchmarking { use super::*; - use frame_benchmarking::{benchmarks, account}; + use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use frame_system::RawOrigin; benchmarks!{ @@ -684,22 +727,7 @@ mod benchmarking { } } - #[cfg(test)] - mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_accumulate_dummy::()); - assert_ok!(test_benchmark_set_dummy::()); - assert_ok!(test_benchmark_another_set_dummy::()); - assert_ok!(test_benchmark_sort_vector::()); - }); - } - } + impl_benchmark_test_suite!(Pallet, crate::tests::new_test_ext(), crate::tests::Test); } #[cfg(test)] diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 31f1f34174..7ef00e7ff7 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -47,3 +47,6 @@ std = [ "sp-tracing/std", "sp-std/std", ] +try-runtime = [ + "frame-support/try-runtime" +] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 143434ddb6..ed76911486 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -44,7 +44,8 @@ //! //! ## Usage //! -//! 
The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in its library. +//! The default Substrate node template declares the [`Executive`](./struct.Executive.html) type in +//! its library. //! //! ### Example //! @@ -185,26 +186,58 @@ where } impl< - System: frame_system::Config, - Block: traits::Block, - Context: Default, - UnsignedValidator, - AllModules: - OnRuntimeUpgrade + - OnInitialize + - OnFinalize + - OffchainWorker, - COnRuntimeUpgrade: OnRuntimeUpgrade, -> Executive + System: frame_system::Config, + Block: traits::Block

, + Context: Default, + UnsignedValidator, + AllModules: OnRuntimeUpgrade + + OnInitialize + + OnFinalize + + OffchainWorker, + COnRuntimeUpgrade: OnRuntimeUpgrade, + > Executive where Block::Extrinsic: Checkable + Codec, - CheckedOf: - Applyable + - GetDispatchInfo, - CallOf: Dispatchable, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: + Dispatchable, OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, + UnsignedValidator: ValidateUnsigned>, { + /// Execute all `OnRuntimeUpgrade` of this runtime, and return the aggregate weight. + pub fn execute_on_runtime_upgrade() -> frame_support::weights::Weight { + let mut weight = 0; + weight = weight.saturating_add( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + ); + weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); + weight = weight.saturating_add(::on_runtime_upgrade()); + + weight + } + + /// Execute all `OnRuntimeUpgrade` of this runtime, including the pre and post migration checks. + /// + /// This should only be used for testing. + #[cfg(feature = "try-runtime")] + pub fn try_runtime_upgrade() -> Result { + < + (frame_system::Module::, COnRuntimeUpgrade, AllModules) + as + OnRuntimeUpgrade + >::pre_upgrade()?; + + let weight = Self::execute_on_runtime_upgrade(); + + < + (frame_system::Module::, COnRuntimeUpgrade, AllModules) + as + OnRuntimeUpgrade + >::post_upgrade()?; + + Ok(weight) + } + /// Start the execution of a particular block. pub fn initialize_block(header: &System::Header) { sp_io::init_tracing(); @@ -234,10 +267,7 @@ where ) { let mut weight = 0; if Self::runtime_upgraded() { - // System is not part of `AllModules`, so we need to call this manually. 
- weight = weight.saturating_add( as OnRuntimeUpgrade>::on_runtime_upgrade()); - weight = weight.saturating_add(COnRuntimeUpgrade::on_runtime_upgrade()); - weight = weight.saturating_add(::on_runtime_upgrade()); + weight = weight.saturating_add(Self::execute_on_runtime_upgrade()); } >::initialize( block_number, @@ -320,7 +350,7 @@ where ) { extrinsics.into_iter().for_each(|e| if let Err(e) = Self::apply_extrinsic(e) { let err: &'static str = e.into(); - panic!(err) + panic!("{}", err) }); // post-extrinsics book-keeping @@ -461,10 +491,6 @@ where // as well. frame_system::BlockHash::::insert(header.number(), header.hash()); - // Initialize logger, so the log messages are visible - // also when running WASM. - frame_support::debug::RuntimeLogger::init(); - >::offchain_worker(*header.number()) } } diff --git a/frame/gilt/Cargo.toml b/frame/gilt/Cargo.toml new file mode 100644 index 0000000000..f1e0d61158 --- /dev/null +++ b/frame/gilt/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "pallet-gilt" +version = "3.0.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for rewarding account freezing." 
+readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } +sp-arithmetic = { version = "3.0.0", default-features = false, path = "../../primitives/arithmetic" } +frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "3.0.0", default-features = false, path = "../support" } +frame-system = { version = "3.0.0", default-features = false, path = "../system" } + +[dev-dependencies] +sp-io = { version = "3.0.0", path = "../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } +pallet-balances = { version = "3.0.0", path = "../balances" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "sp-arithmetic/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", +] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] diff --git a/frame/gilt/README.md b/frame/gilt/README.md new file mode 100644 index 0000000000..4eaddae178 --- /dev/null +++ b/frame/gilt/README.md @@ -0,0 +1,2 @@ + +License: Apache-2.0 diff --git a/frame/gilt/src/benchmarking.rs b/frame/gilt/src/benchmarking.rs new file mode 100644 index 0000000000..2ee7bffd94 --- /dev/null +++ b/frame/gilt/src/benchmarking.rs @@ -0,0 +1,136 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Benchmarks for Gilt Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use sp_std::prelude::*; +use super::*; +use sp_runtime::traits::{Zero, Bounded}; +use sp_arithmetic::Perquintill; +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; +use frame_support::{traits::{Currency, Get, EnsureOrigin}, dispatch::UnfilteredDispatchable}; + +use crate::Pallet as Gilt; + +type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + +benchmarks! 
{ + place_bid { + let l in 0..(T::MaxQueueLen::get() - 1); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..l { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: _(RawOrigin::Signed(caller.clone()), T::MinFreeze::get() * BalanceOf::::from(2u32), 1) + verify { + assert_eq!(QueueTotals::::get()[0], (l + 1, T::MinFreeze::get() * BalanceOf::::from(l + 2))); + } + + place_bid_max { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..T::MaxQueueLen::get() { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: { + Gilt::::place_bid( + RawOrigin::Signed(caller.clone()).into(), + T::MinFreeze::get() * BalanceOf::::from(2u32), + 1, + )? + } + verify { + assert_eq!(QueueTotals::::get()[0], ( + T::MaxQueueLen::get(), + T::MinFreeze::get() * BalanceOf::::from(T::MaxQueueLen::get() + 1), + )); + } + + retract_bid { + let l in 1..T::MaxQueueLen::get(); + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + for i in 0..l { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + }: _(RawOrigin::Signed(caller.clone()), T::MinFreeze::get(), 1) + verify { + assert_eq!(QueueTotals::::get()[0], (l - 1, T::MinFreeze::get() * BalanceOf::::from(l - 1))); + } + + set_target { + let call = Call::::set_target(Default::default()); + let origin = T::AdminOrigin::successful_origin(); + }: { call.dispatch_bypass_filter(origin)? 
} + + thaw { + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(3u32)); + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + Gilt::::enlarge(T::MinFreeze::get() * BalanceOf::::from(2u32), 2); + Active::::mutate(0, |m_g| if let Some(ref mut g) = m_g { g.expiry = Zero::zero() }); + }: _(RawOrigin::Signed(caller.clone()), 0) + verify { + assert!(Active::::get(0).is_none()); + } + + pursue_target_noop { + }: { Gilt::::pursue_target(0) } + + pursue_target_per_item { + // bids taken + let b in 1..T::MaxQueueLen::get(); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(b + 1)); + + for _ in 0..b { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), 1)?; + } + + Call::::set_target(Perquintill::from_percent(100)) + .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; + + }: { Gilt::::pursue_target(b) } + + pursue_target_per_queue { + // total queues hit + let q in 1..T::QueueCount::get(); + + let caller: T::AccountId = whitelisted_caller(); + T::Currency::make_free_balance_be(&caller, T::MinFreeze::get() * BalanceOf::::from(q + 1)); + + for i in 0..q { + Gilt::::place_bid(RawOrigin::Signed(caller.clone()).into(), T::MinFreeze::get(), i + 1)?; + } + + Call::::set_target(Perquintill::from_percent(100)) + .dispatch_bypass_filter(T::AdminOrigin::successful_origin())?; + + }: { Gilt::::pursue_target(q) } +} + +impl_benchmark_test_suite!( + Gilt, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/gilt/src/lib.rs b/frame/gilt/src/lib.rs new file mode 100644 index 0000000000..94d341f47f --- /dev/null +++ b/frame/gilt/src/lib.rs @@ -0,0 +1,582 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Gilt Pallet +//! A pallet allowing accounts to auction for being frozen and receive open-ended +//! inflation-protection in return. +//! +//! ## Overview +//! +//! Lock up tokens, for at least as long as you offer, and be free from both inflation and +//! intermediate reward or exchange until the tokens become unlocked. +//! +//! ## Design +//! +//! Queues for each of 1-`QueueCount` periods, given in blocks (`Period`). Queues are limited in +//! size to something sensible, `MaxQueueLen`. A secondary storage item with `QueueCount` x `u32` +//! elements with the number of items in each queue. +//! +//! Queues are split into two parts. The first part is a priority queue based on bid size. The +//! second part is just a FIFO (the size of the second part is set with `FifoQueueLen`). Items are +//! always prepended so that removal is always O(1) since removal often happens many times under a +//! single weighed function (`on_initialize`) yet placing bids only ever happens once per weighed +//! function (`place_bid`). If the queue has a priority portion, then it remains sorted in order of +//! bid size so that smaller bids fall off as it gets too large. +//! +//! Account may enqueue a balance with some number of `Period`s lock up, up to a maximum of +//! `QueueCount`. The balance gets reserved. 
There's a minimum of `MinFreeze` to avoid dust. +//! +//! Until your bid is turned into an issued gilt you can retract it instantly and the funds are +//! unreserved. +//! +//! There's a target proportion of effective total issuance (i.e. accounting for existing gilts) +//! which the we attempt to have frozen at any one time. It will likely be gradually increased over +//! time by governance. +//! +//! As the total funds frozen under gilts drops below `FrozenFraction` of the total effective +//! issuance, then bids are taken from queues, with the queue of the greatest period taking +//! priority. If the item in the queue's locked amount is greater than the amount left to be +//! frozen, then it is split up into multiple bids and becomes partially frozen under gilt. +//! +//! Once an account's balance is frozen, it remains frozen until the owner thaws the balance of the +//! account. This may happen no earlier than queue's period after the point at which the gilt is +//! issued. +//! +//! ## Suggested Values +//! +//! - `QueueCount`: 300 +//! - `Period`: 432,000 +//! - `MaxQueueLen`: 1000 +//! - `MinFreeze`: Around CHF 100 in value. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +pub use pallet::*; + +#[cfg(test)] +mod mock; +#[cfg(test)] +mod tests; +mod benchmarking; +pub mod weights; + +#[frame_support::pallet] +pub mod pallet { + use sp_std::prelude::*; + use sp_arithmetic::{Perquintill, PerThing}; + use sp_runtime::traits::{Zero, Saturating, SaturatedConversion}; + use frame_support::traits::{Currency, OnUnbalanced, ReservableCurrency}; + use frame_support::pallet_prelude::*; + use frame_system::pallet_prelude::*; + pub use crate::weights::WeightInfo; + + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + type PositiveImbalanceOf = + <::Currency as Currency<::AccountId>>::PositiveImbalance; + type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; + + #[pallet::config] + pub trait Config: frame_system::Config { + /// Overarching event type. + type Event: From> + IsType<::Event>; + + /// Currency type that this works on. + type Currency: ReservableCurrency; + + /// Origin required for setting the target proportion to be under gilt. + type AdminOrigin: EnsureOrigin; + + /// Unbalanced handler to account for funds created (in case of a higher total issuance over + /// freezing period). + type Deficit: OnUnbalanced>; + + /// Unbalanced handler to account for funds destroyed (in case of a lower total issuance + /// over freezing period). + type Surplus: OnUnbalanced>; + + /// Number of duration queues in total. This sets the maximum duration supported, which is + /// this value multiplied by `Period`. + #[pallet::constant] + type QueueCount: Get; + + /// Maximum number of items that may be in each duration queue. + #[pallet::constant] + type MaxQueueLen: Get; + + /// Portion of the queue which is free from ordering and just a FIFO. + /// + /// Must be no greater than `MaxQueueLen`. + #[pallet::constant] + type FifoQueueLen: Get; + + /// The base period for the duration queues. 
This is the common multiple across all + /// supported freezing durations that can be bid upon. + #[pallet::constant] + type Period: Get; + + /// The minimum amount of funds that may be offered to freeze for a gilt. Note that this + /// does not actually limit the amount which may be frozen in a gilt since gilts may be + /// split up in order to satisfy the desired amount of funds under gilts. + /// + /// It should be at least big enough to ensure that there is no possible storage spam attack + /// or queue-filling attack. + #[pallet::constant] + type MinFreeze: Get>; + + /// The number of blocks between consecutive attempts to issue more gilts in an effort to + /// get to the target amount to be frozen. + /// + /// A larger value results in fewer storage hits each block, but a slower period to get to + /// the target. + #[pallet::constant] + type IntakePeriod: Get; + + /// The maximum amount of bids that can be turned into issued gilts each block. A larger + /// value here means less of the block available for transactions should there be a glut of + /// bids to make into gilts to reach the target. + #[pallet::constant] + type MaxIntakeBids: Get; + + /// Information on runtime weights. + type WeightInfo: WeightInfo; + } + + #[pallet::pallet] + #[pallet::generate_store(pub(super) trait Store)] + pub struct Pallet(_); + + /// A single bid on a gilt, an item of a *queue* in `Queues`. + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug)] + pub struct GiltBid { + /// The amount bid. + pub amount: Balance, + /// The owner of the bid. + pub who: AccountId, + } + + /// Information representing an active gilt. + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug)] + pub struct ActiveGilt { + /// The proportion of the effective total issuance (i.e. accounting for any eventual gilt + /// expansion or contraction that may eventually be claimed). + pub proportion: Perquintill, + /// The amount reserved under this gilt. 
+ pub amount: Balance, + /// The account to whom this gilt belongs. + pub who: AccountId, + /// The time after which this gilt can be redeemed for the proportional amount of balance. + pub expiry: BlockNumber, + } + + /// An index for a gilt. + pub type ActiveIndex = u32; + + /// Overall information package on the active gilts. + /// + /// The way of determining the net issuance (i.e. after factoring in all maturing frozen funds) + /// is: + /// + /// `total_issuance - frozen + proportion * total_issuance` + #[derive(Clone, Eq, PartialEq, Default, Encode, Decode, RuntimeDebug)] + pub struct ActiveGiltsTotal { + /// The total amount of funds held in reserve for all active gilts. + pub frozen: Balance, + /// The proportion of funds that the `frozen` balance represents to total issuance. + pub proportion: Perquintill, + /// The total number of gilts issued so far. + pub index: ActiveIndex, + /// The target proportion of gilts within total issuance. + pub target: Perquintill, + } + + /// The totals of items and balances within each queue. Saves a lot of storage reads in the + /// case of sparsely packed queues. + /// + /// The vector is indexed by duration in `Period`s, offset by one, so information on the queue + /// whose duration is one `Period` would be storage `0`. + #[pallet::storage] + pub type QueueTotals = StorageValue<_, Vec<(u32, BalanceOf)>, ValueQuery>; + + /// The queues of bids ready to become gilts. Indexed by duration (in `Period`s). + #[pallet::storage] + pub type Queues = StorageMap< + _, + Blake2_128Concat, + u32, + Vec, T::AccountId>>, + ValueQuery, + >; + + /// Information relating to the gilts currently active. + #[pallet::storage] + pub type ActiveTotal = StorageValue<_, ActiveGiltsTotal>, ValueQuery>; + + /// The currently active gilts, indexed according to the order of creation. 
+ #[pallet::storage] + pub type Active = StorageMap< + _, + Blake2_128Concat, + ActiveIndex, + ActiveGilt, ::AccountId, ::BlockNumber>, + OptionQuery, + >; + + #[pallet::genesis_config] + #[derive(Default)] + pub struct GenesisConfig; + + #[pallet::genesis_build] + impl GenesisBuild for GenesisConfig { + fn build(&self) { + QueueTotals::::put(vec![(0, BalanceOf::::zero()); T::QueueCount::get() as usize]); + } + } + + #[pallet::event] + #[pallet::metadata(T::AccountId = "AccountId")] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + /// A bid was successfully placed. + /// \[ who, amount, duration \] + BidPlaced(T::AccountId, BalanceOf, u32), + /// A bid was successfully removed (before being accepted as a gilt). + /// \[ who, amount, duration \] + BidRetracted(T::AccountId, BalanceOf, u32), + /// A bid was accepted as a gilt. The balance may not be released until expiry. + /// \[ index, expiry, who, amount \] + GiltIssued(ActiveIndex, T::BlockNumber, T::AccountId, BalanceOf), + /// An expired gilt has been thawed. + /// \[ index, who, original_amount, additional_amount \] + GiltThawed(ActiveIndex, T::AccountId, BalanceOf, BalanceOf), + } + + #[pallet::error] + pub enum Error { + /// The duration of the bid is less than one. + DurationTooSmall, + /// The duration is the bid is greater than the number of queues. + DurationTooBig, + /// The amount of the bid is less than the minimum allowed. + AmountTooSmall, + /// The queue for the bid's duration is full and the amount bid is too low to get in through + /// replacing an existing bid. + BidTooLow, + /// Gilt index is unknown. + Unknown, + /// Not the owner of the gilt. + NotOwner, + /// Gilt not yet at expiry date. + NotExpired, + /// The given bid for retraction is not found. 
+ NotFound, + } + + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_initialize(n: T::BlockNumber) -> Weight { + if (n % T::IntakePeriod::get()).is_zero() { + Self::pursue_target(T::MaxIntakeBids::get()) + } else { + 0 + } + } + } + + #[pallet::call] + impl Pallet { + /// Place a bid for a gilt to be issued. + /// + /// Origin must be Signed, and account must have at least `amount` in free balance. + /// + /// - `amount`: The amount of the bid; these funds will be reserved. If the bid is + /// successfully elevated into an issued gilt, then these funds will continue to be + /// reserved until the gilt expires. Must be at least `MinFreeze`. + /// - `duration`: The number of periods for which the funds will be locked if the gilt is + /// issued. It will expire only after this period has elapsed after the point of issuance. + /// Must be greater than 1 and no more than `QueueCount`. + /// + /// Complexities: + /// - `Queues[duration].len()` (just take max). + #[pallet::weight(T::WeightInfo::place_bid_max())] + pub fn place_bid( + origin: OriginFor, + #[pallet::compact] amount: BalanceOf, + duration: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + ensure!(amount >= T::MinFreeze::get(), Error::::AmountTooSmall); + let queue_count = T::QueueCount::get() as usize; + let queue_index = duration.checked_sub(1) + .ok_or(Error::::DurationTooSmall)? 
as usize; + ensure!(queue_index < queue_count, Error::::DurationTooBig); + + let net = Queues::::try_mutate(duration, |q| + -> Result<(u32, BalanceOf::), DispatchError> + { + let queue_full = q.len() == T::MaxQueueLen::get() as usize; + ensure!(!queue_full || q[0].amount < amount, Error::::BidTooLow); + T::Currency::reserve(&who, amount)?; + + // queue is + let mut bid = GiltBid { amount, who: who.clone() }; + let net = if queue_full { + sp_std::mem::swap(&mut q[0], &mut bid); + T::Currency::unreserve(&bid.who, bid.amount); + (0, amount - bid.amount) + } else { + q.insert(0, bid); + (1, amount) + }; + + let sorted_item_count = q.len().saturating_sub(T::FifoQueueLen::get() as usize); + if sorted_item_count > 1 { + q[0..sorted_item_count].sort_by_key(|x| x.amount); + } + + Ok(net) + })?; + QueueTotals::::mutate(|qs| { + qs.resize(queue_count, (0, Zero::zero())); + qs[queue_index].0 += net.0; + qs[queue_index].1 = qs[queue_index].1.saturating_add(net.1); + }); + Self::deposit_event(Event::BidPlaced(who.clone(), amount, duration)); + + Ok(().into()) + } + + /// Retract a previously placed bid. + /// + /// Origin must be Signed, and the account should have previously issued a still-active bid + /// of `amount` for `duration`. + /// + /// - `amount`: The amount of the previous bid. + /// - `duration`: The duration of the previous bid. + #[pallet::weight(T::WeightInfo::place_bid(T::MaxQueueLen::get()))] + pub fn retract_bid( + origin: OriginFor, + #[pallet::compact] amount: BalanceOf, + duration: u32, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + let queue_count = T::QueueCount::get() as usize; + let queue_index = duration.checked_sub(1) + .ok_or(Error::::DurationTooSmall)? 
as usize; + ensure!(queue_index < queue_count, Error::::DurationTooBig); + + let bid = GiltBid { amount, who }; + let new_len = Queues::::try_mutate(duration, |q| -> Result { + let pos = q.iter().position(|i| i == &bid).ok_or(Error::::NotFound)?; + q.remove(pos); + Ok(q.len() as u32) + })?; + + QueueTotals::::mutate(|qs| { + qs.resize(queue_count, (0, Zero::zero())); + qs[queue_index].0 = new_len; + qs[queue_index].1 = qs[queue_index].1.saturating_sub(bid.amount); + }); + + T::Currency::unreserve(&bid.who, bid.amount); + Self::deposit_event(Event::BidRetracted(bid.who, bid.amount, duration)); + + Ok(().into()) + } + + /// Set target proportion of gilt-funds. + /// + /// Origin must be `AdminOrigin`. + /// + /// - `target`: The target proportion of effective issued funds that should be under gilts + /// at any one time. + #[pallet::weight(T::WeightInfo::set_target())] + pub fn set_target( + origin: OriginFor, + #[pallet::compact] target: Perquintill, + ) -> DispatchResultWithPostInfo { + T::AdminOrigin::ensure_origin(origin)?; + ActiveTotal::::mutate(|totals| totals.target = target); + Ok(().into()) + } + + /// Remove an active but expired gilt. Reserved funds under gilt are freed and balance is + /// adjusted to ensure that the funds grow or shrink to maintain the equivalent proportion + /// of effective total issued funds. + /// + /// Origin must be Signed and the account must be the owner of the gilt of the given index. + /// + /// - `index`: The index of the gilt to be thawed. + #[pallet::weight(T::WeightInfo::thaw())] + pub fn thaw( + origin: OriginFor, + #[pallet::compact] index: ActiveIndex, + ) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + + // Look for `index` + let gilt = Active::::get(index).ok_or(Error::::Unknown)?; + // If found, check the owner is `who`. 
+ ensure!(gilt.who == who, Error::::NotOwner); + let now = frame_system::Module::::block_number(); + ensure!(now >= gilt.expiry, Error::::NotExpired); + // Remove it + Active::::remove(index); + + // Multiply the proportion it is by the total issued. + let total_issuance = T::Currency::total_issuance(); + ActiveTotal::::mutate(|totals| { + let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) + .saturated_into(); + let effective_issuance = totals.proportion.left_from_one() + .saturating_reciprocal_mul(nongilt_issuance); + let gilt_value: BalanceOf = (gilt.proportion * effective_issuance).saturated_into(); + + totals.frozen = totals.frozen.saturating_sub(gilt.amount); + totals.proportion = totals.proportion.saturating_sub(gilt.proportion); + + // Remove or mint the additional to the amount using `Deficit`/`Surplus`. + if gilt_value > gilt.amount { + // Unreserve full amount. + T::Currency::unreserve(&gilt.who, gilt.amount); + let amount = gilt_value - gilt.amount; + let deficit = T::Currency::deposit_creating(&gilt.who, amount); + T::Deficit::on_unbalanced(deficit); + } else { + if gilt_value < gilt.amount { + // We take anything reserved beyond the gilt's final value. + let rest = gilt.amount - gilt_value; + // `slash` might seem a little aggressive, but it's the only way to do it + // in case it's locked into the staking system. + let surplus = T::Currency::slash_reserved(&gilt.who, rest).0; + T::Surplus::on_unbalanced(surplus); + } + // Unreserve only its new value (less than the amount reserved). Everything + // should add up, but (defensive) in case it doesn't, unreserve takes lower + // priority over the funds. 
+ let err_amt = T::Currency::unreserve(&gilt.who, gilt_value); + debug_assert!(err_amt.is_zero()); + } + + let e = Event::GiltThawed(index, gilt.who, gilt.amount, gilt_value); + Self::deposit_event(e); + }); + + Ok(().into()) + } + } + + impl Pallet { + /// Attempt to enlarge our gilt-set from bids in order to satisfy our desired target amount + /// of funds frozen into gilts. + pub fn pursue_target(max_bids: u32) -> Weight { + let totals = ActiveTotal::::get(); + if totals.proportion < totals.target { + let missing = totals.target.saturating_sub(totals.proportion); + + let total_issuance = T::Currency::total_issuance(); + let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) + .saturated_into(); + let effective_issuance = totals.proportion.left_from_one() + .saturating_reciprocal_mul(nongilt_issuance); + let intake: BalanceOf = (missing * effective_issuance).saturated_into(); + + let (bids_taken, queues_hit) = Self::enlarge(intake, max_bids); + let first_from_each_queue = T::WeightInfo::pursue_target_per_queue(queues_hit); + let rest_from_each_queue = T::WeightInfo::pursue_target_per_item(bids_taken) + .saturating_sub(T::WeightInfo::pursue_target_per_item(queues_hit)); + first_from_each_queue + rest_from_each_queue + } else { + T::WeightInfo::pursue_target_noop() + } + } + + /// Freeze additional funds from queue of bids up to `amount`. Use at most `max_bids` + /// from the queue. + /// + /// Return the number of bids taken and the number of distinct queues taken from. 
+ pub fn enlarge( + amount: BalanceOf, + max_bids: u32, + ) -> (u32, u32) { + let total_issuance = T::Currency::total_issuance(); + let mut remaining = amount; + let mut bids_taken = 0; + let mut queues_hit = 0; + let now = frame_system::Module::::block_number(); + + ActiveTotal::::mutate(|totals| { + QueueTotals::::mutate(|qs| { + for duration in (1..=T::QueueCount::get()).rev() { + if qs[duration as usize - 1].0 == 0 { + continue + } + let queue_index = duration as usize - 1; + let expiry = now.saturating_add(T::Period::get().saturating_mul(duration.into())); + Queues::::mutate(duration, |q| { + while let Some(mut bid) = q.pop() { + if remaining < bid.amount { + let overflow = bid.amount - remaining; + bid.amount = remaining; + q.push(GiltBid { amount: overflow, who: bid.who.clone() }); + } + let amount = bid.amount; + // Can never overflow due to block above. + remaining -= amount; + // Should never underflow since it should track the total of the bids + // exactly, but we'll be defensive. + qs[queue_index].1 = qs[queue_index].1.saturating_sub(bid.amount); + + // Now to activate the bid... 
+ let nongilt_issuance: u128 = total_issuance.saturating_sub(totals.frozen) + .saturated_into(); + let effective_issuance = totals.proportion.left_from_one() + .saturating_reciprocal_mul(nongilt_issuance); + let n: u128 = amount.saturated_into(); + let d = effective_issuance; + let proportion = Perquintill::from_rational_approximation(n, d); + let who = bid.who; + let index = totals.index; + totals.frozen += bid.amount; + totals.proportion = totals.proportion.saturating_add(proportion); + totals.index += 1; + let e = Event::GiltIssued(index, expiry, who.clone(), amount); + Self::deposit_event(e); + let gilt = ActiveGilt { amount, proportion, who, expiry }; + Active::::insert(index, gilt); + + bids_taken += 1; + + if remaining.is_zero() || bids_taken == max_bids { + break; + } + } + queues_hit += 1; + qs[queue_index].0 = q.len() as u32; + }); + if remaining.is_zero() || bids_taken == max_bids { + break + } + } + }); + }); + (bids_taken, queues_hit) + } + } +} diff --git a/frame/gilt/src/mock.rs b/frame/gilt/src/mock.rs new file mode 100644 index 0000000000..701c5c2f6d --- /dev/null +++ b/frame/gilt/src/mock.rs @@ -0,0 +1,138 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test environment for Gilt pallet. 
+ +use crate as pallet_gilt; + +use frame_support::{ + parameter_types, ord_parameter_types, traits::{OnInitialize, OnFinalize, GenesisBuild}, +}; +use sp_core::H256; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + +type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; + +// Configure a mock runtime to test the pallet. +frame_support::construct_runtime!( + pub enum Test where + Block = Block, + NodeBlock = Block, + UncheckedExtrinsic = UncheckedExtrinsic, + { + System: frame_system::{Module, Call, Config, Storage, Event}, + Balances: pallet_balances::{Module, Call, Config, Storage, Event}, + Gilt: pallet_gilt::{Module, Call, Config, Storage, Event}, + } +); + +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const SS58Prefix: u8 = 42; +} + +impl frame_system::Config for Test { + type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type Version = (); + type PalletInfo = PalletInfo; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + type SystemWeightInfo = (); + type SS58Prefix = SS58Prefix; +} + +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} + +impl pallet_balances::Config for Test { + type MaxLocks = (); + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + type WeightInfo = (); +} + +parameter_types! 
{ + pub const QueueCount: u32 = 3; + pub const MaxQueueLen: u32 = 3; + pub const FifoQueueLen: u32 = 1; + pub const Period: u64 = 3; + pub const MinFreeze: u64 = 2; + pub const IntakePeriod: u64 = 2; + pub const MaxIntakeBids: u32 = 2; +} +ord_parameter_types! { + pub const One: u64 = 1; +} + +impl pallet_gilt::Config for Test { + type Event = Event; + type Currency = Balances; + type AdminOrigin = frame_system::EnsureSignedBy; + type Deficit = (); + type Surplus = (); + type QueueCount = QueueCount; + type MaxQueueLen = MaxQueueLen; + type FifoQueueLen = FifoQueueLen; + type Period = Period; + type MinFreeze = MinFreeze; + type IntakePeriod = IntakePeriod; + type MaxIntakeBids = MaxIntakeBids; + type WeightInfo = (); +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + balances: vec![(1, 100), (2, 100), (3, 100), (4, 100)], + }.assimilate_storage(&mut t).unwrap(); + GenesisBuild::::assimilate_storage(&crate::GenesisConfig, &mut t).unwrap(); + t.into() +} + +pub fn run_to_block(n: u64) { + while System::block_number() < n { + Gilt::on_finalize(System::block_number()); + Balances::on_finalize(System::block_number()); + System::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Balances::on_initialize(System::block_number()); + Gilt::on_initialize(System::block_number()); + } +} diff --git a/frame/gilt/src/tests.rs b/frame/gilt/src/tests.rs new file mode 100644 index 0000000000..637a6a8705 --- /dev/null +++ b/frame/gilt/src/tests.rs @@ -0,0 +1,499 @@ +// This file is part of Substrate. + +// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Tests for Gilt pallet. + +use super::*; +use crate::{Error, mock::*}; +use frame_support::{assert_ok, assert_noop, dispatch::DispatchError, traits::Currency}; +use sp_arithmetic::Perquintill; +use pallet_balances::Error as BalancesError; + +#[test] +fn basic_setup_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + + for q in 0..3 { + assert!(Queues::::get(q).is_empty()); + } + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + index: 0, + target: Perquintill::zero(), + }); + assert_eq!(QueueTotals::::get(), vec![(0, 0); 3]); + }); +} + +#[test] +fn set_target_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + let e = DispatchError::BadOrigin; + assert_noop!(Gilt::set_target(Origin::signed(2), Perquintill::from_percent(50)), e); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(50))); + + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + index: 0, + target: Perquintill::from_percent(50), + }); + }); +} + +#[test] +fn place_bid_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_noop!(Gilt::place_bid(Origin::signed(1), 1, 2), Error::::AmountTooSmall); + assert_noop!(Gilt::place_bid(Origin::signed(1), 101, 2), BalancesError::::InsufficientBalance); + 
assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 4), Error::::DurationTooBig); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Queues::::get(2), vec![GiltBid { amount: 10, who: 1 }]); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); + }); +} + +#[test] +fn place_bid_queuing_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 20, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 5, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(1), 5, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(1), 15, 2)); + assert_eq!(Balances::reserved_balance(1), 45); + + assert_ok!(Gilt::place_bid(Origin::signed(1), 25, 2)); + assert_eq!(Balances::reserved_balance(1), 60); + assert_noop!(Gilt::place_bid(Origin::signed(1), 10, 2), Error::::BidTooLow); + assert_eq!(Queues::::get(2), vec![ + GiltBid { amount: 15, who: 1 }, + GiltBid { amount: 25, who: 1 }, + GiltBid { amount: 20, who: 1 }, + ]); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (3, 60), (0, 0)]); + }); +} + +#[test] +fn place_bid_fails_when_queue_full() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 10, 2)); + assert_noop!(Gilt::place_bid(Origin::signed(4), 10, 2), Error::::BidTooLow); + assert_ok!(Gilt::place_bid(Origin::signed(4), 10, 3)); + }); +} + +#[test] +fn multiple_place_bids_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + 
+ assert_eq!(Balances::reserved_balance(1), 40); + assert_eq!(Balances::reserved_balance(2), 10); + assert_eq!(Queues::::get(1), vec![ + GiltBid { amount: 10, who: 1 }, + ]); + assert_eq!(Queues::::get(2), vec![ + GiltBid { amount: 10, who: 2 }, + GiltBid { amount: 10, who: 1 }, + GiltBid { amount: 10, who: 1 }, + ]); + assert_eq!(Queues::::get(3), vec![ + GiltBid { amount: 10, who: 1 }, + ]); + assert_eq!(QueueTotals::::get(), vec![(1, 10), (3, 30), (1, 10)]); + }); +} + +#[test] +fn retract_single_item_queue_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 1)); + + assert_eq!(Balances::reserved_balance(1), 10); + assert_eq!(Queues::::get(1), vec![]); + assert_eq!(Queues::::get(2), vec![ GiltBid { amount: 10, who: 1 } ]); + assert_eq!(QueueTotals::::get(), vec![(0, 0), (1, 10), (0, 0)]); + }); +} + +#[test] +fn retract_with_other_and_duplicate_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 10, 2)); + + assert_ok!(Gilt::retract_bid(Origin::signed(1), 10, 2)); + assert_eq!(Balances::reserved_balance(1), 20); + assert_eq!(Balances::reserved_balance(2), 10); + assert_eq!(Queues::::get(1), vec![ + GiltBid { amount: 10, who: 1 }, + ]); + assert_eq!(Queues::::get(2), vec![ + GiltBid { amount: 10, who: 2 }, + GiltBid { amount: 10, who: 1 }, + ]); + assert_eq!(QueueTotals::::get(), vec![(1, 10), (2, 20), (0, 0)]); + }); +} + +#[test] +fn retract_non_existent_item_fails() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 1), Error::::NotFound); + assert_ok!(Gilt::place_bid(Origin::signed(1), 10, 1)); + 
assert_noop!(Gilt::retract_bid(Origin::signed(1), 20, 1), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(1), 10, 2), Error::::NotFound); + assert_noop!(Gilt::retract_bid(Origin::signed(2), 10, 1), Error::::NotFound); + }); +} + +#[test] +fn basic_enlarge_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + Gilt::enlarge(40, 2); + + // Takes 2/2, then stopped because it reaches its max amount + assert_eq!(Balances::reserved_balance(1), 40); + assert_eq!(Balances::reserved_balance(2), 40); + assert_eq!(Queues::::get(1), vec![ GiltBid { amount: 40, who: 1 } ]); + assert_eq!(Queues::::get(2), vec![]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (0, 0), (0, 0)]); + + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 40, + proportion: Perquintill::from_percent(10), + index: 1, + target: Perquintill::zero(), + }); + assert_eq!(Active::::get(0).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 2, + expiry: 7, + }); + }); +} + +#[test] +fn enlarge_respects_bids_limit() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(4), 40, 3)); + Gilt::enlarge(100, 2); + + // Should have taken 4/3 and 2/2, then stopped because it's only allowed 2. 
+ assert_eq!(Queues::::get(1), vec![ GiltBid { amount: 40, who: 1 } ]); + assert_eq!(Queues::::get(2), vec![ GiltBid { amount: 40, who: 3 } ]); + assert_eq!(Queues::::get(3), vec![]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (1, 40), (0, 0)]); + + assert_eq!(Active::::get(0).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 4, + expiry: 10, + }); + assert_eq!(Active::::get(1).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 2, + expiry: 7, + }); + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 80, + proportion: Perquintill::from_percent(20), + index: 2, + target: Perquintill::zero(), + }); + }); +} + +#[test] +fn enlarge_respects_amount_limit_and_will_split() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 80, 1)); + Gilt::enlarge(40, 2); + + // Takes 2/2, then stopped because it reaches its max amount + assert_eq!(Queues::::get(1), vec![ GiltBid { amount: 40, who: 1 } ]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (0, 0), (0, 0)]); + + assert_eq!(Active::::get(0).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 1, + expiry: 4, + }); + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 40, + proportion: Perquintill::from_percent(10), + index: 1, + target: Perquintill::zero(), + }); + }); +} + +#[test] +fn basic_thaw_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + Gilt::enlarge(40, 1); + run_to_block(3); + assert_noop!(Gilt::thaw(Origin::signed(1), 0), Error::::NotExpired); + run_to_block(4); + assert_noop!(Gilt::thaw(Origin::signed(1), 1), Error::::Unknown); + assert_noop!(Gilt::thaw(Origin::signed(2), 0), Error::::NotOwner); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 0, + proportion: Perquintill::zero(), + 
index: 1, + target: Perquintill::zero(), + }); + assert_eq!(Active::::get(0), None); + assert_eq!(Balances::free_balance(1), 100); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn thaw_when_issuance_higher_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + Gilt::enlarge(100, 1); + + // Everybody else's balances goes up by 50% + Balances::make_free_balance_be(&2, 150); + Balances::make_free_balance_be(&3, 150); + Balances::make_free_balance_be(&4, 150); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 150); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn thaw_when_issuance_lower_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 100, 1)); + Gilt::enlarge(100, 1); + + // Everybody else's balances goes down by 25% + Balances::make_free_balance_be(&2, 75); + Balances::make_free_balance_be(&3, 75); + Balances::make_free_balance_be(&4, 75); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 75); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn multiple_thaws_works() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + Gilt::enlarge(200, 3); + + // Double everyone's free balances. 
+ Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 200); + Balances::make_free_balance_be(&4, 200); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + assert_ok!(Gilt::thaw(Origin::signed(1), 1)); + assert_ok!(Gilt::thaw(Origin::signed(2), 2)); + + assert_eq!(Balances::free_balance(1), 200); + assert_eq!(Balances::free_balance(2), 200); + }); +} + +#[test] +fn multiple_thaws_works_in_alternative_thaw_order() { + new_test_ext().execute_with(|| { + run_to_block(1); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 60, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 50, 1)); + Gilt::enlarge(200, 3); + + // Double everyone's free balances. + Balances::make_free_balance_be(&2, 100); + Balances::make_free_balance_be(&3, 200); + Balances::make_free_balance_be(&4, 200); + + run_to_block(4); + assert_ok!(Gilt::thaw(Origin::signed(2), 2)); + assert_ok!(Gilt::thaw(Origin::signed(1), 1)); + assert_ok!(Gilt::thaw(Origin::signed(1), 0)); + + assert_eq!(Balances::free_balance(1), 200); + assert_eq!(Balances::free_balance(2), 200); + }); +} + +#[test] +fn enlargement_to_target_works() { + new_test_ext().execute_with(|| { + run_to_block(2); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 1)); + assert_ok!(Gilt::place_bid(Origin::signed(1), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 2)); + assert_ok!(Gilt::place_bid(Origin::signed(2), 40, 3)); + assert_ok!(Gilt::place_bid(Origin::signed(3), 40, 3)); + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(40))); + + run_to_block(3); + assert_eq!(Queues::::get(1), vec![ + GiltBid { amount: 40, who: 1 }, + ]); + assert_eq!(Queues::::get(2), vec![ + GiltBid { amount: 40, who: 2 }, + GiltBid { amount: 40, who: 1 }, + ]); + assert_eq!(Queues::::get(3), vec![ + GiltBid { amount: 40, who: 3 }, + GiltBid { amount: 40, who: 2 }, + ]); + assert_eq!(QueueTotals::::get(), vec![(1, 40), (2, 80), (2, 
80)]); + + run_to_block(4); + // Two new gilts should have been issued to 2 & 3 for 40 each & duration of 3. + assert_eq!(Active::::get(0).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 2, + expiry: 13, + }); + assert_eq!(Active::::get(1).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 3, + expiry: 13, + + }); + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 80, + proportion: Perquintill::from_percent(20), + index: 2, + target: Perquintill::from_percent(40), + }); + + run_to_block(5); + // No change + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 80, + proportion: Perquintill::from_percent(20), + index: 2, + target: Perquintill::from_percent(40), + }); + + run_to_block(6); + // Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2. + assert_eq!(Active::::get(2).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 1, + expiry: 12, + }); + assert_eq!(Active::::get(3).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 2, + expiry: 12, + + }); + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 160, + proportion: Perquintill::from_percent(40), + index: 4, + target: Perquintill::from_percent(40), + }); + + run_to_block(8); + // No change now. + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 160, + proportion: Perquintill::from_percent(40), + index: 4, + target: Perquintill::from_percent(40), + }); + + // Set target a bit higher to use up the remaining bid. + assert_ok!(Gilt::set_target(Origin::signed(1), Perquintill::from_percent(60))); + run_to_block(10); + + // Two new gilts should have been issued to 1 & 2 for 40 each & duration of 2. 
+ assert_eq!(Active::::get(4).unwrap(), ActiveGilt { + proportion: Perquintill::from_percent(10), + amount: 40, + who: 1, + expiry: 13, + }); + + assert_eq!(ActiveTotal::::get(), ActiveGiltsTotal { + frozen: 200, + proportion: Perquintill::from_percent(50), + index: 5, + target: Perquintill::from_percent(60), + }); + }); +} diff --git a/frame/gilt/src/weights.rs b/frame/gilt/src/weights.rs new file mode 100644 index 0000000000..f202ae47ff --- /dev/null +++ b/frame/gilt/src/weights.rs @@ -0,0 +1,164 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Autogenerated weights for pallet_gilt +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-02-23, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 + +// Executed Command: +// target/release/substrate +// benchmark +// --chain=dev +// --steps=50 +// --repeat=20 +// --pallet=pallet_gilt +// --extrinsic=* +// --execution=wasm +// --wasm-execution=compiled +// --heap-pages=4096 +// --output=./frame/gilt/src/weights.rs +// --template=./.maintain/frame-weight-template.hbs + + +#![allow(unused_parens)] +#![allow(unused_imports)] + +use frame_support::{traits::Get, weights::{Weight, constants::RocksDbWeight}}; +use sp_std::marker::PhantomData; + +/// Weight functions needed for pallet_gilt. +pub trait WeightInfo { + fn place_bid(l: u32, ) -> Weight; + fn place_bid_max() -> Weight; + fn retract_bid(l: u32, ) -> Weight; + fn set_target() -> Weight; + fn thaw() -> Weight; + fn pursue_target_noop() -> Weight; + fn pursue_target_per_item(b: u32, ) -> Weight; + fn pursue_target_per_queue(q: u32, ) -> Weight; +} + +/// Weights for pallet_gilt using the Substrate node and recommended hardware. 
+pub struct SubstrateWeight(PhantomData); +impl WeightInfo for SubstrateWeight { + fn place_bid(l: u32, ) -> Weight { + (79_274_000 as Weight) + // Standard Error: 0 + .saturating_add((289_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn place_bid_max() -> Weight { + (297_825_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn retract_bid(l: u32, ) -> Weight { + (79_731_000 as Weight) + // Standard Error: 0 + .saturating_add((231_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn set_target() -> Weight { + (6_113_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (74_792_000 as Weight) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(2 as Weight)) + } + fn pursue_target_noop() -> Weight { + (3_468_000 as Weight) + .saturating_add(T::DbWeight::get().reads(1 as Weight)) + } + fn pursue_target_per_item(b: u32, ) -> Weight { + (65_792_000 as Weight) + // Standard Error: 2_000 + .saturating_add((11_402_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(T::DbWeight::get().reads(3 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + } + fn pursue_target_per_queue(q: u32, ) -> Weight { + (32_391_000 as Weight) + // Standard Error: 7_000 + .saturating_add((18_500_000 as Weight).saturating_mul(q as Weight)) + .saturating_add(T::DbWeight::get().reads(2 as Weight)) + .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + 
.saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + } +} + +// For backwards compatibility and tests +impl WeightInfo for () { + fn place_bid(l: u32, ) -> Weight { + (79_274_000 as Weight) + // Standard Error: 0 + .saturating_add((289_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn place_bid_max() -> Weight { + (297_825_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn retract_bid(l: u32, ) -> Weight { + (79_731_000 as Weight) + // Standard Error: 0 + .saturating_add((231_000 as Weight).saturating_mul(l as Weight)) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn set_target() -> Weight { + (6_113_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + .saturating_add(RocksDbWeight::get().writes(1 as Weight)) + } + fn thaw() -> Weight { + (74_792_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + } + fn pursue_target_noop() -> Weight { + (3_468_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(1 as Weight)) + } + fn pursue_target_per_item(b: u32, ) -> Weight { + (65_792_000 as Weight) + // Standard Error: 2_000 + .saturating_add((11_402_000 as Weight).saturating_mul(b as Weight)) + .saturating_add(RocksDbWeight::get().reads(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes(3 as Weight)) + .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(b as Weight))) + } + fn pursue_target_per_queue(q: u32, ) -> Weight { + (32_391_000 as Weight) + // Standard Error: 7_000 + .saturating_add((18_500_000 as Weight).saturating_mul(q as Weight)) + 
.saturating_add(RocksDbWeight::get().reads(2 as Weight)) + .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(q as Weight))) + .saturating_add(RocksDbWeight::get().writes(2 as Weight)) + .saturating_add(RocksDbWeight::get().writes((2 as Weight).saturating_mul(q as Weight))) + } +} diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index a9ba0ccc56..2bf7306f58 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -22,15 +22,16 @@ sp-session = { version = "3.0.0", default-features = false, path = "../../primit sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } pallet-session = { version = "3.0.0", default-features = false, path = "../session" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] -frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } -grandpa = { package = "finality-grandpa", version = "0.13.0", features = ["derive-codec"] } +frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } +grandpa = { package = "finality-grandpa", version = "0.14.0", features = ["derive-codec"] } sp-io = { version = "3.0.0", path = "../../primitives/io" } sp-keyring = { version = "3.0.0", path = "../../primitives/keyring" } pallet-balances = { version = "3.0.0", path = 
"../balances" } @@ -38,6 +39,7 @@ pallet-offences = { version = "3.0.0", path = "../offences" } pallet-staking = { version = "3.0.0", path = "../staking" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } pallet-timestamp = { version = "3.0.0", path = "../timestamp" } +sp-election-providers = { version = "3.0.0", path = "../../primitives/election-providers" } [features] default = ["std"] @@ -56,5 +58,7 @@ std = [ "frame-system/std", "pallet-authorship/std", "pallet-session/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/grandpa/src/equivocation.rs b/frame/grandpa/src/equivocation.rs index b8bff59d39..37496fdeb8 100644 --- a/frame/grandpa/src/equivocation.rs +++ b/frame/grandpa/src/equivocation.rs @@ -40,10 +40,7 @@ use sp_std::prelude::*; use codec::{self as codec, Decode, Encode}; -use frame_support::{ - debug, - traits::{Get, KeyOwnerProofSystem}, -}; +use frame_support::traits::{Get, KeyOwnerProofSystem}; use sp_finality_grandpa::{EquivocationProof, RoundNumber, SetId}; use sp_runtime::{ transaction_validity::{ @@ -174,8 +171,15 @@ where let call = Call::report_equivocation_unsigned(equivocation_proof, key_owner_proof); match SubmitTransaction::>::submit_unsigned_transaction(call.into()) { - Ok(()) => debug::info!("Submitted GRANDPA equivocation report."), - Err(e) => debug::error!("Error submitting equivocation report: {:?}", e), + Ok(()) => log::info!( + target: "runtime::afg", + "Submitted GRANDPA equivocation report.", + ), + Err(e) => log::error!( + target: "runtime::afg", + "Error submitting equivocation report: {:?}", + e, + ), } Ok(()) @@ -207,8 +211,8 @@ impl frame_support::unsigned::ValidateUnsigned for Module { match source { TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } _ => { - debug::warn!( - target: "afg", + log::warn!( + target: "runtime::afg", "rejecting unsigned report equivocation transaction 
because it is not local/in-block." ); diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index e8703dba50..0a24a23445 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -30,7 +30,6 @@ use frame_support::{ use pallet_staking::EraIndex; use sp_core::{crypto::KeyTypeId, H256}; use sp_finality_grandpa::{RoundNumber, SetId, GRANDPA_ENGINE_ID}; -use sp_io; use sp_keyring::Ed25519Keyring; use sp_runtime::{ curve::PiecewiseLinear, @@ -41,6 +40,7 @@ use sp_runtime::{ }; use sp_staking::SessionIndex; use pallet_session::historical as pallet_session_historical; +use sp_election_providers::onchain; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; type Block = frame_system::mocking::MockBlock; @@ -190,6 +190,13 @@ parameter_types! { pub const StakingUnsignedPriority: u64 = u64::max_value() / 2; } +impl onchain::Config for Test { + type AccountId = ::AccountId; + type BlockNumber = ::BlockNumber; + type Accuracy = Perbill; + type DataProvider = Staking; +} + impl pallet_staking::Config for Test { type RewardRemainder = (); type CurrencyToVote = frame_support::traits::SaturatingCurrencyToVote; @@ -212,6 +219,7 @@ impl pallet_staking::Config for Test { type MaxIterations = (); type MinSolutionScoreBump = (); type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 3fd0c30a0f..08109fda25 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -19,7 +19,7 @@ enumflags2 = { version = "0.6.2" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = 
true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } @@ -40,3 +40,4 @@ std = [ "frame-system/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index e916bdfa50..645b3817d6 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::{EventRecord, RawOrigin}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Module as Identity; @@ -403,31 +403,8 @@ benchmarks! { } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_add_registrar::()); - assert_ok!(test_benchmark_set_identity::()); - assert_ok!(test_benchmark_set_subs_new::()); - assert_ok!(test_benchmark_set_subs_old::()); - assert_ok!(test_benchmark_clear_identity::()); - assert_ok!(test_benchmark_request_judgement::()); - assert_ok!(test_benchmark_cancel_request::()); - assert_ok!(test_benchmark_set_fee::()); - assert_ok!(test_benchmark_set_account_id::()); - assert_ok!(test_benchmark_set_fields::()); - assert_ok!(test_benchmark_provide_judgement::()); - assert_ok!(test_benchmark_kill_identity::()); - assert_ok!(test_benchmark_add_sub::()); - assert_ok!(test_benchmark_rename_sub::()); - assert_ok!(test_benchmark_remove_sub::()); - assert_ok!(test_benchmark_quit_sub::()); - }); - } -} +impl_benchmark_test_suite!( + Identity, + crate::tests::new_test_ext(), + 
crate::tests::Test, +); diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index bde041c437..4c5b4a8863 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -24,8 +24,9 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] pallet-session = { version = "3.0.0", path = "../session" } @@ -44,5 +45,7 @@ std = [ "sp-staking/std", "frame-support/std", "frame-system/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index ef7f66307a..287a2c6fd3 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use sp_core::OpaquePeerId; use sp_core::offchain::OpaqueMultiaddr; use sp_runtime::traits::{ValidateUnsigned, Zero}; @@ -91,18 +91,9 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Runtime}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_heartbeat::()); - assert_ok!(test_benchmark_validate_unsigned::()); - assert_ok!(test_benchmark_validate_unsigned_and_then_heartbeat::()); - }); - } -} + +impl_benchmark_test_suite!( + ImOnline, + crate::mock::new_test_ext(), + crate::mock::Runtime, +); diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index bd597acfb1..f0df19d6ab 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -93,7 +93,7 @@ use sp_staking::{ offence::{ReportOffence, Offence, Kind}, }; use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, decl_error, + decl_module, decl_event, decl_storage, Parameter, decl_error, traits::{Get, ValidatorSet, ValidatorSetWithIdentification, OneSessionHandler}, }; use frame_system::ensure_none; @@ -388,8 +388,8 @@ decl_module! { if sp_io::offchain::is_validator() { for res in Self::send_heartbeats(now).into_iter().flatten() { if let Err(e) = res { - debug::debug!( - target: "imonline", + log::debug!( + target: "runtime::im-online", "Skipping heartbeat at {:?}: {:?}", now, e, @@ -397,8 +397,8 @@ decl_module! { } } } else { - debug::trace!( - target: "imonline", + log::trace!( + target: "runtime::im-online", "Skipping heartbeat at {:?}. 
Not a validator.", now, ) @@ -529,8 +529,8 @@ impl Module { block_number, || { let call = prepare_heartbeat()?; - debug::info!( - target: "imonline", + log::info!( + target: "runtime::im-online", "[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}", authority_index, block_number, diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index cde3cdeeec..ce9b2053ff 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -23,7 +23,7 @@ sp-core = { version = "3.0.0", default-features = false, path = "../../primitive frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] pallet-balances = { version = "3.0.0", path = "../balances" } @@ -45,3 +45,4 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/indices/src/benchmarking.rs b/frame/indices/src/benchmarking.rs index f83e05ee9c..6ea39e9ccc 100644 --- a/frame/indices/src/benchmarking.rs +++ b/frame/indices/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Module as Indices; @@ -93,20 +93,9 @@ benchmarks! 
{ // TODO in another PR: lookup and unlookup trait weights (not critical) } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_claim::()); - assert_ok!(test_benchmark_transfer::()); - assert_ok!(test_benchmark_free::()); - assert_ok!(test_benchmark_force_transfer::()); - assert_ok!(test_benchmark_freeze::()); - }); - } -} +impl_benchmark_test_suite!( + Indices, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/lottery/Cargo.toml b/frame/lottery/Cargo.toml index 05bb7e385f..0d60b0aaca 100644 --- a/frame/lottery/Cargo.toml +++ b/frame/lottery/Cargo.toml @@ -19,7 +19,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] pallet-balances = { version = "3.0.0", path = "../balances" } @@ -41,3 +41,4 @@ runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/lottery/src/benchmarking.rs b/frame/lottery/src/benchmarking.rs index b9b0d7fd00..a2b8946ecc 100644 --- a/frame/lottery/src/benchmarking.rs +++ b/frame/lottery/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use frame_system::RawOrigin; use frame_support::traits::{OnInitialize, UnfilteredDispatchable}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use 
sp_runtime::traits::{Bounded, Zero}; use crate::Module as Lottery; @@ -170,21 +170,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_buy_ticket::()); - assert_ok!(test_benchmark_set_calls::()); - assert_ok!(test_benchmark_start_lottery::()); - assert_ok!(test_benchmark_stop_repeat::()); - assert_ok!(test_benchmark_on_initialize_end::()); - assert_ok!(test_benchmark_on_initialize_repeat::()); - }); - } -} +impl_benchmark_test_suite!( + Lottery, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 98987e6fe9..37e7aa2cb8 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -35,3 +35,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/merkle-mountain-range/Cargo.toml b/frame/merkle-mountain-range/Cargo.toml index eea3845ae1..8861ba5c0c 100644 --- a/frame/merkle-mountain-range/Cargo.toml +++ b/frame/merkle-mountain-range/Cargo.toml @@ -13,7 +13,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } mmr-lib = { package = "ckb-merkle-mountain-range", default-features = false, version = "0.3.1" } @@ -44,3 +44,4 @@ std = [ "sp-std/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/merkle-mountain-range/primitives/Cargo.toml b/frame/merkle-mountain-range/primitives/Cargo.toml index be0a8bdc3a..62a6f4ff1c 100644 --- a/frame/merkle-mountain-range/primitives/Cargo.toml +++ b/frame/merkle-mountain-range/primitives/Cargo.toml @@ -20,6 +20,7 @@ sp-api = { version = "3.0.0", default-features = false, path = "../../../primiti sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] hex-literal = "0.3" @@ -35,4 +36,5 @@ std = [ "sp-core/std", "sp-runtime/std", "sp-std/std", + "log/std", ] diff --git a/frame/merkle-mountain-range/primitives/src/lib.rs b/frame/merkle-mountain-range/primitives/src/lib.rs index d57f8565b6..f1ee15b48b 100644 --- a/frame/merkle-mountain-range/primitives/src/lib.rs +++ b/frame/merkle-mountain-range/primitives/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use frame_support::{RuntimeDebug, debug}; +use frame_support::RuntimeDebug; use sp_runtime::traits::{self, Saturating, One}; use sp_std::fmt; #[cfg(not(feature = "std"))] @@ -307,13 +307,23 @@ impl Error { #![allow(unused_variables)] /// Consume given error `e` with `self` and generate a native log entry with error details. pub fn log_error(self, e: impl fmt::Debug) -> Self { - debug::native::error!("[{:?}] MMR error: {:?}", self, e); + log::error!( + target: "runtime::mmr", + "[{:?}] MMR error: {:?}", + self, + e, + ); self } /// Consume given error `e` with `self` and generate a native log entry with error details. 
pub fn log_debug(self, e: impl fmt::Debug) -> Self { - debug::native::debug!("[{:?}] MMR error: {:?}", self, e); + log::debug!( + target: "runtime::mmr", + "[{:?}] MMR error: {:?}", + self, + e, + ); self } } diff --git a/frame/merkle-mountain-range/src/benchmarking.rs b/frame/merkle-mountain-range/src/benchmarking.rs index e6b3cf7f21..750a140382 100644 --- a/frame/merkle-mountain-range/src/benchmarking.rs +++ b/frame/merkle-mountain-range/src/benchmarking.rs @@ -21,7 +21,7 @@ use crate::*; use frame_support::traits::OnInitialize; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use sp_std::prelude::*; benchmarks! { @@ -38,17 +38,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::*; - use crate::tests::new_test_ext; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_on_initialize::()); - }) - } -} +impl_benchmark_test_suite!( + Module, + crate::tests::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/multisig/Cargo.toml b/frame/multisig/Cargo.toml index e8d6251383..e48f80567f 100644 --- a/frame/multisig/Cargo.toml +++ b/frame/multisig/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } @@ -43,3 +43,4 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/multisig/src/benchmarking.rs b/frame/multisig/src/benchmarking.rs index 748223072b..b530a96396 100644 --- a/frame/multisig/src/benchmarking.rs +++ b/frame/multisig/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use core::convert::TryInto; @@ -298,25 +298,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_as_multi_threshold_1::()); - assert_ok!(test_benchmark_as_multi_create::()); - assert_ok!(test_benchmark_as_multi_create_store::()); - assert_ok!(test_benchmark_as_multi_approve::()); - assert_ok!(test_benchmark_as_multi_approve_store::()); - assert_ok!(test_benchmark_as_multi_complete::()); - assert_ok!(test_benchmark_approve_as_multi_create::()); - assert_ok!(test_benchmark_approve_as_multi_approve::()); - assert_ok!(test_benchmark_approve_as_multi_complete::()); - assert_ok!(test_benchmark_cancel_as_multi::()); - }); - } -} +impl_benchmark_test_suite!( + Multisig, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/multisig/src/lib.rs b/frame/multisig/src/lib.rs index a015f291bc..aa72d2d1ad 100644 --- a/frame/multisig/src/lib.rs +++ b/frame/multisig/src/lib.rs @@ -160,7 +160,7 @@ decl_error! { /// A timepoint was given, yet no multisig operation is underway. UnexpectedTimepoint, /// The maximum weight information provided was too low. - WeightTooLow, + MaxWeightTooLow, /// The data to be stored is already stored. 
AlreadyStored, } @@ -503,7 +503,7 @@ impl Module { if let Some((call, call_len)) = maybe_approved_call { // verify weight - ensure!(call.get_dispatch_info().weight <= max_weight, Error::::WeightTooLow); + ensure!(call.get_dispatch_info().weight <= max_weight, Error::::MaxWeightTooLow); // Clean up storage before executing call to avoid an possibility of reentrancy // attack. diff --git a/frame/multisig/src/tests.rs b/frame/multisig/src/tests.rs index 78301b2b69..a3f47a26e6 100644 --- a/frame/multisig/src/tests.rs +++ b/frame/multisig/src/tests.rs @@ -531,7 +531,7 @@ fn weight_check_works() { assert_noop!( Multisig::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), data, false, 0), - Error::::WeightTooLow, + Error::::MaxWeightTooLow, ); }); } diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 611f492b81..6c8b609b40 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -36,3 +36,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/node-authorization/Cargo.toml b/frame/node-authorization/Cargo.toml index db77f25c18..245db9176f 100644 --- a/frame/node-authorization/Cargo.toml +++ b/frame/node-authorization/Cargo.toml @@ -20,6 +20,7 @@ sp-core = { version = "3.0.0", default-features = false, path = "../../primitive sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +log = { version = "0.4.14", default-features = false } [features] default = ["std"] @@ -32,4 +33,6 @@ std = [ "sp-io/std", "sp-runtime/std", "sp-std/std", + "log/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/node-authorization/src/lib.rs b/frame/node-authorization/src/lib.rs index f1f70e9eac..090be28492 100644 --- a/frame/node-authorization/src/lib.rs +++ 
b/frame/node-authorization/src/lib.rs @@ -46,9 +46,7 @@ use sp_std::{ use codec::Decode; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, - debug, ensure, - weights::{DispatchClass, Weight}, - traits::{Get, EnsureOrigin}, + ensure, weights::{DispatchClass, Weight}, traits::{Get, EnsureOrigin}, }; use frame_system::ensure_signed; @@ -387,11 +385,19 @@ decl_module! { fn offchain_worker(now: T::BlockNumber) { let network_state = sp_io::offchain::network_state(); match network_state { - Err(_) => debug::error!("Error: failed to get network state of node at {:?}", now), + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to get network state of node at {:?}", + now, + ), Ok(state) => { let encoded_peer = state.peer_id.0; match Decode::decode(&mut &encoded_peer[..]) { - Err(_) => debug::error!("Error: failed to decode PeerId at {:?}", now), + Err(_) => log::error!( + target: "runtime::node-authorization", + "Error: failed to decode PeerId at {:?}", + now, + ), Ok(node) => sp_io::offchain::set_authorized_nodes( Self::get_authorized_nodes(&PeerId(node)), true diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 3232d5f3ae..a34c5f6bc3 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -21,6 +21,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-staking = { version = "3.0.0", default-features = false, path = "../../primitives/staking" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-io = { version = "3.0.0", path = "../../primitives/io" } @@ -37,5 +38,7 @@ std = [ "sp-staking/std", "frame-support/std", "frame-system/std", + "log/std", ] runtime-benchmarks = [] +try-runtime = ["frame-support/try-runtime"] diff --git 
a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml index a27b6c3012..6be2787734 100644 --- a/frame/offences/benchmarking/Cargo.toml +++ b/frame/offences/benchmarking/Cargo.toml @@ -14,7 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../benchmarking" } frame-support = { version = "3.0.0", default-features = false, path = "../../support" } frame-system = { version = "3.0.0", default-features = false, path = "../../system" } pallet-babe = { version = "3.0.0", default-features = false, path = "../../babe" } @@ -27,6 +27,7 @@ pallet-staking = { version = "3.0.0", default-features = false, features = ["run sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } sp-staking = { version = "3.0.0", default-features = false, path = "../../../primitives/staking" } sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } +sp-election-providers = { version = "3.0.0", default-features = false, path = "../../../primitives/election-providers" } [dev-dependencies] pallet-staking-reward-curve = { version = "3.0.0", path = "../../staking/reward-curve" } @@ -50,6 +51,7 @@ std = [ "pallet-staking/std", "sp-runtime/std", "sp-staking/std", + "sp-election-providers/std", "sp-std/std", "codec/std", ] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index 57672f13ed..a14e4cf5d2 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -25,7 +25,7 @@ use sp_std::prelude::*; use sp_std::vec; use frame_system::{RawOrigin, Module as System, Config as SystemConfig}; -use frame_benchmarking::{benchmarks, account}; +use 
frame_benchmarking::{benchmarks, account, impl_benchmark_test_suite}; use frame_support::traits::{Currency, OnInitialize, ValidatorSet, ValidatorSetWithIdentification}; use sp_runtime::{Perbill, traits::{Convert, StaticLookup, Saturating, UniqueSaturatedInto}}; @@ -420,19 +420,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_report_offence_im_online::()); - assert_ok!(test_benchmark_report_offence_grandpa::()); - assert_ok!(test_benchmark_report_offence_babe::()); - assert_ok!(test_benchmark_on_initialize::()); - }); - } -} +impl_benchmark_test_suite!( + Module, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/offences/benchmarking/src/mock.rs b/frame/offences/benchmarking/src/mock.rs index e4ec32d0bc..124e6b13b7 100644 --- a/frame/offences/benchmarking/src/mock.rs +++ b/frame/offences/benchmarking/src/mock.rs @@ -29,6 +29,7 @@ use sp_runtime::{ traits::IdentityLookup, testing::{Header, UintAuthorityId}, }; +use sp_election_providers::onchain; use pallet_session::historical as pallet_session_historical; type AccountId = u64; @@ -148,6 +149,13 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; +impl onchain::Config for Test { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type Accuracy = Perbill; + type DataProvider = Staking; +} + impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; @@ -170,6 +178,7 @@ impl pallet_staking::Config for Test { type MaxIterations = (); type MinSolutionScoreBump = (); type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 5c1247853d..2765c0aaa0 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -27,9 +27,7 @@ mod tests; use sp_std::vec::Vec; use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, - traits::Get, - weights::Weight, + decl_module, decl_event, decl_storage, Parameter, traits::Get, weights::Weight, }; use sp_runtime::{traits::{Hash, Zero}, Perbill}; use sp_staking::{ @@ -141,9 +139,10 @@ decl_module! { false }, Err(_) => { - debug::native::error!( - target: "pallet-offences", - "re-submitting a deferred slash returned Err at {}. This should not happen with pallet-staking", + log::error!( + target: "runtime::offences", + "re-submitting a deferred slash returned Err at {:?}. 
\ + This should not happen with pallet-staking", now, ); true diff --git a/frame/proxy/Cargo.toml b/frame/proxy/Cargo.toml index 9490364abd..2934b9953b 100644 --- a/frame/proxy/Cargo.toml +++ b/frame/proxy/Cargo.toml @@ -22,7 +22,7 @@ sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } @@ -44,3 +44,4 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/proxy/src/benchmarking.rs b/frame/proxy/src/benchmarking.rs index 29c2e475c6..130c980011 100644 --- a/frame/proxy/src/benchmarking.rs +++ b/frame/proxy/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Module as Proxy; @@ -251,25 +251,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_proxy::()); - assert_ok!(test_benchmark_proxy_announced::()); - assert_ok!(test_benchmark_remove_announcement::()); - assert_ok!(test_benchmark_reject_announcement::()); - assert_ok!(test_benchmark_announce::()); - assert_ok!(test_benchmark_add_proxy::()); - assert_ok!(test_benchmark_remove_proxy::()); - assert_ok!(test_benchmark_remove_proxies::()); - assert_ok!(test_benchmark_anonymous::()); - assert_ok!(test_benchmark_kill_anonymous::()); - }); - } -} +impl_benchmark_test_suite!( + Proxy, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 285326ef1e..ad9bcb9783 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -35,3 +35,4 @@ std = [ "sp-runtime/std", "sp-std/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 80450db0bd..1f8003bd4d 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -37,3 +37,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml index eef287d867..4d82133b6a 100644 --- a/frame/scheduler/Cargo.toml +++ b/frame/scheduler/Cargo.toml @@ -17,8 +17,9 @@ frame-system = { version = "3.0.0", default-features = false, path = "../system" sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } +log = { version = "0.4.14", default-features = false } 
-frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core", default-features = false } @@ -34,10 +35,12 @@ std = [ "frame-support/std", "frame-system/std", "sp-io/std", - "sp-std/std" + "sp-std/std", + "log/std", ] runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/scheduler/src/benchmarking.rs b/frame/scheduler/src/benchmarking.rs index defc334ba7..37ccb900a8 100644 --- a/frame/scheduler/src/benchmarking.rs +++ b/frame/scheduler/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use sp_std::{vec, prelude::*}; use frame_system::RawOrigin; use frame_support::{ensure, traits::OnInitialize}; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use crate::Module as Scheduler; use frame_system::Module as System; @@ -141,20 +141,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_schedule::()); - assert_ok!(test_benchmark_cancel::()); - assert_ok!(test_benchmark_schedule_named::()); - assert_ok!(test_benchmark_cancel_named::()); - assert_ok!(test_benchmark_on_initialize::()); - }); - } -} +impl_benchmark_test_suite!( + Scheduler, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index a869fae27d..5cab10b0af 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -333,7 +333,8 @@ decl_module! 
{ .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) .collect::>(); if queued.len() as u32 > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::scheduler", "Warning: This block has more items queued in Scheduler than \ expected from the runtime configuration. An update might be needed." ); @@ -500,9 +501,10 @@ impl Module { Agenda::::append(when, s); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; if index > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::scheduler", "Warning: There are more items queued in the Scheduler than \ - expected from the runtime configuration. An update might be needed." + expected from the runtime configuration. An update might be needed.", ); } Self::deposit_event(RawEvent::Scheduled(when, index)); @@ -590,9 +592,10 @@ impl Module { Agenda::::append(when, Some(s)); let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; if index > T::MaxScheduledPerBlock::get() { - frame_support::debug::warn!( + log::warn!( + target: "runtime::scheduler", "Warning: There are more items queued in the Scheduler than \ - expected from the runtime configuration. An update might be needed." + expected from the runtime configuration. 
An update might be needed.", ); } let address = (when, index); diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index e5e71dba68..97e3a954d7 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -36,3 +36,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 5b8fe6e2d1..52b8ebbdf4 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -47,3 +47,4 @@ std = [ "pallet-timestamp/std", "sp-trie/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml index bf5a9a9617..47265ed5ef 100644 --- a/frame/session/benchmarking/Cargo.toml +++ b/frame/session/benchmarking/Cargo.toml @@ -13,11 +13,11 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } sp-session = { version = "3.0.0", default-features = false, path = "../../../primitives/session" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-std = { version = "3.0.0", default-features = false, path = "../../../primitives/std" } frame-system = { version = "3.0.0", default-features = false, path = "../../system" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../benchmarking" } frame-support = { version = "3.0.0", default-features = false, path = "../../support" } pallet-staking = { version = "3.0.0", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } pallet-session = { version = "3.0.0", default-features = false, path = "../../session" } @@ -31,12 +31,14 @@ pallet-staking-reward-curve = { version = "3.0.0", path = 
"../../staking/reward- sp-io ={ version = "3.0.0", path = "../../../primitives/io" } pallet-timestamp = { version = "3.0.0", path = "../../timestamp" } pallet-balances = { version = "3.0.0", path = "../../balances" } +sp-election-providers = { version = "3.0.0", path = "../../../primitives/election-providers" } [features] default = ["std"] std = [ "sp-std/std", "sp-session/std", + "sp-election-providers/std", "sp-runtime/std", "frame-system/std", "frame-benchmarking/std", diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 06dfa3da34..8546800ee4 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -25,7 +25,7 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_benchmarking::benchmarks; +use frame_benchmarking::{benchmarks, impl_benchmark_test_suite}; use frame_support::{ codec::Decode, storage::StorageValue, @@ -169,17 +169,9 @@ fn check_membership_proof_setup( (key, Historical::::prove(key).unwrap()) } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_keys::()); - assert_ok!(test_benchmark_purge_keys::()); - }); - } -} +impl_benchmark_test_suite!( + Module, + crate::mock::new_test_ext(), + crate::mock::Test, + extra = false, +); diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index b25b169c82..0eba5452b2 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -20,6 +20,7 @@ #![cfg(test)] use sp_runtime::traits::IdentityLookup; +use sp_election_providers::onchain; use frame_support::parameter_types; type AccountId = u64; @@ -145,13 +146,21 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl frame_system::offchain::SendTransactionTypes for Test where +impl frame_system::offchain::SendTransactionTypes for Test +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } +impl onchain::Config for Test { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type Accuracy = sp_runtime::Perbill; + type DataProvider = Staking; +} + impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; @@ -174,6 +183,7 @@ impl pallet_staking::Config for Test { type MaxIterations = (); type MinSolutionScoreBump = (); type OffchainSolutionWeightLimit = (); + type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 64ec31ad99..d95d99389f 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -169,11 +169,13 @@ impl< Some(if now > offset { let block_after_last_session = (now.clone() - offset) % period.clone(); if block_after_last_session > Zero::zero() { - now.saturating_add( - period.saturating_sub(block_after_last_session) - ) + now.saturating_add(period.saturating_sub(block_after_last_session)) } else { - now + // this branch happens when the session is already rotated or will rotate in this + // block (depending on being called before or after `session::on_initialize`). Here, + // we assume the latter, namely that this is called after `session::on_initialize`, + // and thus we add period to it as well. + now + period } } else { offset @@ -187,6 +189,10 @@ impl< // reasonable to come back here and properly calculate the weight of this function. 0 } + + fn average_session_length() -> BlockNumber { + Period::get() + } } /// A trait for managing creation of new validator set. 
@@ -833,6 +839,10 @@ impl EstimateNextNewSession for Module { T::NextSessionRotation::estimate_next_session_rotation(now) } + fn average_session_length() -> T::BlockNumber { + T::NextSessionRotation::average_session_length() + } + fn weight(now: T::BlockNumber) -> Weight { T::NextSessionRotation::weight(now) } diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index c876770c74..b2e086aed9 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -275,7 +275,7 @@ fn periodic_session_works() { } assert!(P::should_end_session(13u64)); - assert_eq!(P::estimate_next_session_rotation(13u64).unwrap(), 13); + assert_eq!(P::estimate_next_session_rotation(13u64).unwrap(), 23); assert!(!P::should_end_session(14u64)); assert_eq!(P::estimate_next_session_rotation(14u64).unwrap(), 23); diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index 5ddebeb9f5..913e40e030 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -41,3 +41,4 @@ runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index c5f7dba075..1f9f29570a 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -17,6 +17,7 @@ static_assertions = "1.1.0" serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } +# TWO_PHASE_NOTE:: ideally we should be able to get rid of this. 
sp-npos-elections = { version = "3.0.0", default-features = false, path = "../../primitives/npos-elections" } sp-io ={ version = "3.0.0", default-features = false, path = "../../primitives/io" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } @@ -26,20 +27,23 @@ frame-system = { version = "3.0.0", default-features = false, path = "../system" pallet-session = { version = "3.0.0", default-features = false, features = ["historical"], path = "../session" } pallet-authorship = { version = "3.0.0", default-features = false, path = "../authorship" } sp-application-crypto = { version = "3.0.0", default-features = false, path = "../../primitives/application-crypto" } +log = { version = "0.4.14", default-features = false } +sp-election-providers = { version = "3.0.0", default-features = false, path = "../../primitives/election-providers" } # Optional imports for benchmarking -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "3.0.0", path = "../../primitives/core" } sp-storage = { version = "3.0.0", path = "../../primitives/storage" } sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } +sp-core = { version = "3.0.0", path = "../../primitives/core" } pallet-balances = { version = "3.0.0", path = "../balances" } pallet-timestamp = { version = "3.0.0", path = "../timestamp" } pallet-staking-reward-curve = { version = "3.0.0", path = "../staking/reward-curve" } substrate-test-utils = { version = "3.0.0", path = "../../test-utils" } -frame-benchmarking = { version = "3.0.0", path = "../benchmarking" } +frame-benchmarking = { version = "3.1.0", path = "../benchmarking" } +sp-election-providers = { version = "3.0.0", features = 
["runtime-benchmarks"], path = "../../primitives/election-providers" } rand_chacha = { version = "0.2" } parking_lot = "0.11.1" hex = "0.4" @@ -59,8 +63,12 @@ std = [ "frame-system/std", "pallet-authorship/std", "sp-application-crypto/std", + "log/std", + "sp-election-providers/std", ] runtime-benchmarks = [ "frame-benchmarking", + "sp-election-providers/runtime-benchmarks", "rand_chacha", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/staking/fuzzer/Cargo.toml b/frame/staking/fuzzer/Cargo.toml index a88e961917..84758c6bf6 100644 --- a/frame/staking/fuzzer/Cargo.toml +++ b/frame/staking/fuzzer/Cargo.toml @@ -28,6 +28,7 @@ sp-io ={ version = "3.0.0", path = "../../../primitives/io" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sp-npos-elections = { version = "3.0.0", path = "../../../primitives/npos-elections" } sp-runtime = { version = "3.0.0", path = "../../../primitives/runtime" } +sp-election-providers = { version = "3.0.0", path = "../../../primitives/election-providers" } serde = "1.0.101" [features] diff --git a/frame/staking/fuzzer/src/mock.rs b/frame/staking/fuzzer/src/mock.rs index 88b001c7e6..05d001d238 100644 --- a/frame/staking/fuzzer/src/mock.rs +++ b/frame/staking/fuzzer/src/mock.rs @@ -149,13 +149,24 @@ parameter_types! 
{ pub type Extrinsic = sp_runtime::testing::TestXt; -impl frame_system::offchain::SendTransactionTypes for Test where +impl frame_system::offchain::SendTransactionTypes for Test +where Call: From, { type OverarchingCall = Call; type Extrinsic = Extrinsic; } +pub struct MockElectionProvider; +impl sp_election_providers::ElectionProvider for MockElectionProvider { + type Error = (); + type DataProvider = pallet_staking::Module; + + fn elect() -> Result, Self::Error> { + Err(()) + } +} + impl pallet_staking::Config for Test { type Currency = Balances; type UnixTime = pallet_timestamp::Module; @@ -179,4 +190,5 @@ impl pallet_staking::Config for Test { type UnsignedPriority = (); type OffchainSolutionWeightLimit = (); type WeightInfo = (); + type ElectionProvider = MockElectionProvider; } diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs index d94ee49b96..b661a83a1b 100644 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ b/frame/staking/fuzzer/src/submit_solution.rs @@ -164,7 +164,7 @@ fn main() { assert_eq!( call.dispatch_bypass_filter(origin.into()).unwrap_err().error, DispatchError::Module { - index: 0, + index: 2, error: 16, message: Some("OffchainElectionWeakSubmission"), }, diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 51926e1ccc..8713f5e100 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -16,7 +16,7 @@ proc-macro = true [dependencies] syn = { version = "1.0.58", features = ["full", "visit"] } -quote = "1.0.9" +quote = "1.0.3" proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index 3a8d625e83..2e1bc1f185 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -353,13 +353,13 @@ fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { .unwrap_or(1_000_000_000); for 
(x, y) in points { - let error = || panic!(format!( + let error = || panic!( "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ because of point: x = {:07} per million y = {:07} per million", x, y - )); + ); let x_perbill = x.checked_mul(1_000).unwrap_or_else(error); let y_perbill = y.checked_mul(1_000).unwrap_or_else(error); @@ -420,14 +420,14 @@ fn generate_test_module(input: &INposInput) -> TokenStream2 { / float_res as u64 ) as u32; if err > #precision { - panic!(format!("\n\ + panic!("\n\ Generated reward curve approximation differ from real one:\n\t\ for i = {} and base = {}, f(i/base) * base = {},\n\t\ but approximation = {},\n\t\ err = {:07} millionth,\n\t\ try increase the number of segment: {} or the test_error: {}.\n", i, base, float_res, int_res, err, #max_piece_count, #precision - )); + ); } } } diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index beddc326b5..ecaa9889b5 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -24,7 +24,13 @@ use testing_utils::*; use sp_npos_elections::CompactSolution; use sp_runtime::traits::One; use frame_system::RawOrigin; -pub use frame_benchmarking::{benchmarks, account, whitelisted_caller, whitelist_account}; +pub use frame_benchmarking::{ + benchmarks, + account, + whitelisted_caller, + whitelist_account, + impl_benchmark_test_suite, +}; const SEED: u32 = 0; const MAX_SPANS: u32 = 100; const MAX_VALIDATORS: u32 = 1000; @@ -861,40 +867,6 @@ mod tests { }); } - #[test] - fn test_benchmarks() { - ExtBuilder::default().has_stakers(true).build().execute_with(|| { - assert_ok!(test_benchmark_bond::()); - assert_ok!(test_benchmark_bond_extra::()); - assert_ok!(test_benchmark_unbond::()); - assert_ok!(test_benchmark_withdraw_unbonded_update::()); - assert_ok!(test_benchmark_withdraw_unbonded_kill::()); - assert_ok!(test_benchmark_validate::()); - assert_ok!(test_benchmark_kick::()); - assert_ok!(test_benchmark_nominate::()); - 
assert_ok!(test_benchmark_chill::()); - assert_ok!(test_benchmark_set_payee::()); - assert_ok!(test_benchmark_set_controller::()); - assert_ok!(test_benchmark_set_validator_count::()); - assert_ok!(test_benchmark_force_no_eras::()); - assert_ok!(test_benchmark_force_new_era::()); - assert_ok!(test_benchmark_force_new_era_always::()); - assert_ok!(test_benchmark_set_invulnerables::()); - assert_ok!(test_benchmark_force_unstake::()); - assert_ok!(test_benchmark_cancel_deferred_slash::()); - assert_ok!(test_benchmark_payout_stakers_dead_controller::()); - assert_ok!(test_benchmark_payout_stakers_alive_staked::()); - assert_ok!(test_benchmark_rebond::()); - assert_ok!(test_benchmark_set_history_depth::()); - assert_ok!(test_benchmark_reap_stash::()); - assert_ok!(test_benchmark_new_era::()); - assert_ok!(test_benchmark_do_slash::()); - assert_ok!(test_benchmark_payout_all::()); - // only run one of them to same time on the CI. ignore the other two. - assert_ok!(test_benchmark_submit_solution_initial::()); - }); - } - #[test] #[ignore] fn test_benchmarks_offchain() { @@ -905,3 +877,9 @@ mod tests { } } + +impl_benchmark_test_suite!( + Staking, + crate::mock::ExtBuilder::default().has_stakers(true).build(), + crate::mock::Test, +); diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 3ea66e937e..6a1b0c8d80 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -328,24 +328,22 @@ use frame_system::{ }; use sp_npos_elections::{ ExtendedBalance, Assignment, ElectionScore, ElectionResult as PrimitiveElectionResult, - to_support_map, EvaluateSupport, seq_phragmen, generate_solution_type, is_score_better, - SupportMap, VoteWeight, CompactSolution, PerThing128, + to_supports, EvaluateSupport, seq_phragmen, generate_solution_type, is_score_better, Supports, + VoteWeight, CompactSolution, PerThing128, }; +use sp_election_providers::ElectionProvider; pub use weights::WeightInfo; const STAKING_ID: LockIdentifier = *b"staking "; -pub const 
MAX_UNLOCKING_CHUNKS: usize = 32; -pub const MAX_NOMINATIONS: usize = ::LIMIT; - -pub(crate) const LOG_TARGET: &'static str = "staking"; +pub(crate) const LOG_TARGET: &'static str = "runtime::staking"; // syntactic sugar for logging. #[macro_export] macro_rules! log { ($level:tt, $patter:expr $(, $values:expr)* $(,)?) => { - frame_support::debug::$level!( + log::$level!( target: crate::LOG_TARGET, - $patter $(, $values)* + concat!("💸 ", $patter) $(, $values)* ) }; } @@ -365,6 +363,10 @@ static_assertions::const_assert!(size_of::() <= size_of::() /// Maximum number of stakers that can be stored in a snapshot. pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize; pub(crate) const MAX_NOMINATORS: usize = NominatorIndex::max_value() as usize; +pub const MAX_NOMINATIONS: usize = + ::LIMIT; + +pub const MAX_UNLOCKING_CHUNKS: usize = 32; /// Counter for the number of eras that have passed. pub type EraIndex = u32; @@ -388,10 +390,12 @@ pub type OffchainAccuracy = PerU16; pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; +type PositiveImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::PositiveImbalance; +type NegativeImbalanceOf = <::Currency as Currency< + ::AccountId, +>>::NegativeImbalance; /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug)] @@ -778,7 +782,7 @@ impl SessionInterface<::AccountId> for T w pub trait Config: frame_system::Config + SendTransactionTypes> { /// The staking balance. - type Currency: LockableCurrency; + type Currency: LockableCurrency; /// Time used for computing era duration. /// @@ -793,6 +797,14 @@ pub trait Config: frame_system::Config + SendTransactionTypes> { /// [`BalanceOf`]. 
type CurrencyToVote: CurrencyToVote>; + /// Something that provides the election functionality. + type ElectionProvider: sp_election_providers::ElectionProvider< + Self::AccountId, + Self::BlockNumber, + // we only accept an election provider that has staking as data provider. + DataProvider = Module, + >; + /// Tokens have been minted and are unused for validator-reward. /// See [Era payout](./index.html#era-payout). type RewardRemainder: OnUnbalanced>; @@ -889,7 +901,9 @@ pub enum Forcing { } impl Default for Forcing { - fn default() -> Self { Forcing::NotForcing } + fn default() -> Self { + Forcing::NotForcing + } } // A value placed in storage that represents the current version of the Staking storage. This value @@ -1066,28 +1080,45 @@ decl_storage! { /// The earliest era for which we have a pending, unapplied slash. EarliestUnappliedSlash: Option; + /// The last planned session scheduled by the session pallet. + /// + /// This is basically in sync with the call to [`SessionManager::new_session`]. + pub CurrentPlannedSession get(fn current_planned_session): SessionIndex; + /// Snapshot of validators at the beginning of the current election window. This should only /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. pub SnapshotValidators get(fn snapshot_validators): Option>; /// Snapshot of nominators at the beginning of the current election window. This should only /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. pub SnapshotNominators get(fn snapshot_nominators): Option>; /// The next validator set. At the end of an era, if this is available (potentially from the /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election /// is executed. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. 
pub QueuedElected get(fn queued_elected): Option>>; /// The score of the current [`QueuedElected`]. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. pub QueuedScore get(fn queued_score): Option; /// Flag to control the execution of the offchain election. When `Open(_)`, we accept /// solutions to be submitted. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. pub EraElectionStatus get(fn era_election_status): ElectionStatus; /// True if the current **planned** session is final. Note that this does not take era /// forcing into account. + /// + /// TWO_PHASE_NOTE: should be removed once we switch to multi-phase. pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; /// True if network has been upgraded to this version. @@ -1345,14 +1376,14 @@ decl_module! { ElectionStatus::::Open(now) ); add_weight(0, 1, 0); - log!(info, "💸 Election window is Open({:?}). Snapshot created", now); + log!(info, "Election window is Open({:?}). Snapshot created", now); } else { - log!(warn, "💸 Failed to create snapshot at {:?}.", now); + log!(warn, "Failed to create snapshot at {:?}.", now); } } } } else { - log!(warn, "💸 Estimating next session change failed."); + log!(warn, "Estimating next session change failed."); } add_weight(0, 0, T::NextNewSession::weight(now)) } @@ -1367,14 +1398,13 @@ decl_module! { /// to open. If so, it runs the offchain worker code. 
fn offchain_worker(now: T::BlockNumber) { use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; - if Self::era_election_status().is_open_at(now) { let offchain_status = set_check_offchain_execution_status::(now); if let Err(why) = offchain_status { - log!(warn, "💸 skipping offchain worker in open election window due to [{}]", why); + log!(warn, "skipping offchain worker in open election window due to [{}]", why); } else { if let Err(e) = compute_offchain_election::() { - log!(error, "💸 Error in election offchain worker: {:?}", e); + log!(error, "Error in election offchain worker: {:?}", e); } else { log!(debug, "💸 Executed offchain worker thread without errors."); } @@ -1638,7 +1668,7 @@ decl_module! { ledger = ledger.consolidate_unlocked(current_era) } - let post_info_weight = if ledger.unlocking.is_empty() && ledger.active <= T::Currency::minimum_balance() { + let post_info_weight = if ledger.unlocking.is_empty() && ledger.active < T::Currency::minimum_balance() { // This account must have called `unbond()` with some value that caused the active // portion to fall below existential deposit + will have no more unlocking chunks // left. We can now safely remove all staking-related information. @@ -2267,7 +2297,10 @@ impl Module { } /// Internal impl of [`Self::slashable_balance_of`] that returns [`VoteWeight`]. 
- pub fn slashable_balance_of_vote_weight(stash: &T::AccountId, issuance: BalanceOf) -> VoteWeight { + pub fn slashable_balance_of_vote_weight( + stash: &T::AccountId, + issuance: BalanceOf, + ) -> VoteWeight { T::CurrencyToVote::to_vote(Self::slashable_balance_of(stash), issuance) } @@ -2306,7 +2339,7 @@ impl Module { { log!( warn, - "💸 Snapshot size too big [{} <> {}][{} <> {}].", + "Snapshot size too big [{} <> {}][{} <> {}].", num_validators, MAX_VALIDATORS, num_nominators, @@ -2330,10 +2363,7 @@ impl Module { >::kill(); } - fn do_payout_stakers( - validator_stash: T::AccountId, - era: EraIndex, - ) -> DispatchResult { + fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { // Validate input data let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; ensure!(era <= current_era, Error::::InvalidEraToReward); @@ -2626,7 +2656,7 @@ impl Module { validator_at, ).map_err(|e| { // log the error since it is not propagated into the runtime error. - log!(warn, "💸 un-compacting solution failed due to {:?}", e); + log!(warn, "un-compacting solution failed due to {:?}", e); Error::::OffchainElectionBogusCompact })?; @@ -2641,7 +2671,7 @@ impl Module { // all of the indices must map to either a validator or a nominator. If this is ever // not the case, then the locking system of staking is most likely faulty, or we // have bigger problems. - log!(error, "💸 detected an error in the staking locking and snapshot."); + log!(error, "detected an error in the staking locking and snapshot."); // abort. return Err(Error::::OffchainElectionBogusNominator.into()); } @@ -2690,7 +2720,7 @@ impl Module { ); // build the support map thereof in order to evaluate. - let supports = to_support_map::(&winners, &staked_assignments) + let supports = to_supports(&winners, &staked_assignments) .map_err(|_| Error::::OffchainElectionBogusEdge)?; // Check if the score is the same as the claimed one. 
@@ -2698,10 +2728,11 @@ impl Module { ensure!(submitted_score == claimed_score, Error::::OffchainElectionBogusScore); // At last, alles Ok. Exposures and store the result. - let exposures = Self::collect_exposure(supports); + let exposures = Self::collect_exposures(supports); log!( info, - "💸 A better solution (with compute {:?} and score {:?}) has been validated and stored on chain.", + "A better solution (with compute {:?} and score {:?}) has been validated and stored \ + on chain.", compute, submitted_score, ); @@ -2834,6 +2865,8 @@ impl Module { // Set staking information for new era. let maybe_new_validators = Self::select_and_update_validators(current_era); + // TWO_PHASE_NOTE: use this later on. + let _unused_new_validators = Self::enact_election(current_era); maybe_new_validators } @@ -2901,7 +2934,7 @@ impl Module { log!( info, - "💸 new validator set of size {:?} has been elected via {:?} for era {:?}", + "new validator set of size {:?} has been elected via {:?} for staring era {:?}", elected_stashes.len(), compute, current_era, @@ -2950,20 +2983,20 @@ impl Module { Self::slashable_balance_of_fn(), ); - let supports = to_support_map::( + let supports = to_supports( &elected_stashes, &staked_assignments, ) .map_err(|_| log!( error, - "💸 on-chain phragmen is failing due to a problem in the result. This must be a bug." + "on-chain phragmen is failing due to a problem in the result. This must be a bug." ) ) .ok()?; // collect exposures - let exposures = Self::collect_exposure(supports); + let exposures = Self::collect_exposures(supports); // In order to keep the property required by `on_session_ending` that we must return the // new validator set even if it's the same as the old, as long as any underlying @@ -3025,7 +3058,7 @@ impl Module { // If we don't have enough candidates, nothing to do. log!( warn, - "💸 Chain does not have enough staking candidates to operate. Era {:?}.", + "chain does not have enough staking candidates to operate. 
Era {:?}.", Self::current_era() ); None @@ -3041,9 +3074,10 @@ impl Module { } } - /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a [`Exposure`] - fn collect_exposure( - supports: SupportMap, + /// Consume a set of [`Supports`] from [`sp_npos_elections`] and collect them into a + /// [`Exposure`]. + fn collect_exposures( + supports: Supports, ) -> Vec<(T::AccountId, Exposure>)> { let total_issuance = T::Currency::total_issuance(); let to_currency = |e: ExtendedBalance| T::CurrencyToVote::to_currency(e, total_issuance); @@ -3075,12 +3109,86 @@ impl Module { }).collect::)>>() } + /// Process the output of the election. + /// + /// This ensures enough validators have been elected, converts all supports to exposures and + /// writes them to the associated storage. + /// + /// Returns `Err(())` if less than [`MinimumValidatorCount`] validators have been elected, `Ok` + /// otherwise. + // TWO_PHASE_NOTE: remove the dead code. + #[allow(dead_code)] + pub fn process_election( + flat_supports: sp_npos_elections::Supports, + current_era: EraIndex, + ) -> Result, ()> { + let exposures = Self::collect_exposures(flat_supports); + let elected_stashes = exposures.iter().cloned().map(|(x, _)| x).collect::>(); + + if (elected_stashes.len() as u32) <= Self::minimum_validator_count() { + log!( + warn, + "chain does not have enough staking candidates to operate for era {:?}", + current_era, + ); + return Err(()); + } + + // Populate Stakers and write slot stake. 
+ let mut total_stake: BalanceOf = Zero::zero(); + exposures.into_iter().for_each(|(stash, exposure)| { + total_stake = total_stake.saturating_add(exposure.total); + >::insert(current_era, &stash, &exposure); + + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped.others.sort_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); + } + >::insert(¤t_era, &stash, exposure_clipped); + }); + + // Insert current era staking information + >::insert(¤t_era, total_stake); + + // collect the pref of all winners + for stash in &elected_stashes { + let pref = Self::validators(stash); + >::insert(¤t_era, stash, pref); + } + + // emit event + // TWO_PHASE_NOTE: remove the inner value. + Self::deposit_event(RawEvent::StakingElection(ElectionCompute::Signed)); + + log!( + info, + "new validator set of size {:?} has been processed for era {:?}", + elected_stashes.len(), + current_era, + ); + + Ok(elected_stashes) + } + + /// Enact and process the election using the `ElectionProvider` type. + /// + /// This will also process the election, as noted in [`process_election`]. + fn enact_election(_current_era: EraIndex) -> Option> { + let _outcome = T::ElectionProvider::elect().map(|_| ()); + log!(debug, "Experimental election provider outputted {:?}", _outcome); + // TWO_PHASE_NOTE: This code path shall not return anything for now. Later on, redirect the + // results to `process_election`. + None + } + /// Remove all associated data of a stash account from the staking system. /// /// Assumes storage is upgraded before calling. /// /// This is called: - /// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance. + /// - after a `withdraw_unbonded()` call that frees all of a stash's bonded balance. /// - through `reap_stash()` if the balance has fallen to zero (through slashing). 
fn kill_stash(stash: &T::AccountId, num_slashing_spans: u32) -> DispatchResult { let controller = >::get(stash).ok_or(Error::::NotStash)?; @@ -3167,7 +3275,11 @@ impl Module { } #[cfg(feature = "runtime-benchmarks")] - pub fn add_era_stakers(current_era: EraIndex, controller: T::AccountId, exposure: Exposure>) { + pub fn add_era_stakers( + current_era: EraIndex, + controller: T::AccountId, + exposure: Exposure>, + ) { >::insert(¤t_era, &controller, &exposure); } @@ -3180,6 +3292,109 @@ impl Module { pub fn set_slash_reward_fraction(fraction: Perbill) { SlashRewardFraction::put(fraction); } + + /// Get all of the voters that are eligible for the npos election. + /// + /// This will use all on-chain nominators, and all the validators will inject a self vote. + /// + /// ### Slashing + /// + /// All nominations that have been submitted before the last non-zero slash of the validator are + /// auto-chilled. + /// + /// Note that this is VERY expensive. Use with care. + pub fn get_npos_voters() -> Vec<(T::AccountId, VoteWeight, Vec)> { + let weight_of = Self::slashable_balance_of_fn(); + let mut all_voters = Vec::new(); + + for (validator, _) in >::iter() { + // append self vote + let self_vote = (validator.clone(), weight_of(&validator), vec![validator.clone()]); + all_voters.push(self_vote); + } + + for (nominator, nominations) in >::iter() { + let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; + + // Filter out nomination targets which were nominated before the most recent + // slashing span. 
+ targets.retain(|stash| { + Self::slashing_spans(&stash) + .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) + }); + + let vote_weight = weight_of(&nominator); + all_voters.push((nominator, vote_weight, targets)) + } + + all_voters + } + + pub fn get_npos_targets() -> Vec { + >::iter().map(|(v, _)| v).collect::>() + } +} + +impl sp_election_providers::ElectionDataProvider + for Module +{ + fn desired_targets() -> u32 { + Self::validator_count() + } + + fn voters() -> Vec<(T::AccountId, VoteWeight, Vec)> { + Self::get_npos_voters() + } + + fn targets() -> Vec { + Self::get_npos_targets() + } + + fn next_election_prediction(now: T::BlockNumber) -> T::BlockNumber { + let current_era = Self::current_era().unwrap_or(0); + let current_session = Self::current_planned_session(); + let current_era_start_session_index = + Self::eras_start_session_index(current_era).unwrap_or(0); + let era_length = current_session + .saturating_sub(current_era_start_session_index) + .min(T::SessionsPerEra::get()); + + let session_length = T::NextNewSession::average_session_length(); + + let until_this_session_end = T::NextNewSession::estimate_next_new_session(now) + .unwrap_or_default() + .saturating_sub(now); + + let sessions_left: T::BlockNumber = T::SessionsPerEra::get() + .saturating_sub(era_length) + // one session is computed in this_session_end. 
+ .saturating_sub(1) + .into(); + + now.saturating_add( + until_this_session_end.saturating_add(sessions_left.saturating_mul(session_length)), + ) + } + + #[cfg(any(feature = "runtime-benchmarks", test))] + fn put_snapshot( + voters: Vec<(T::AccountId, VoteWeight, Vec)>, + targets: Vec, + ) { + targets.into_iter().for_each(|v| { + >::insert( + v, + ValidatorPrefs { commission: Perbill::zero(), blocked: false }, + ); + }); + + voters.into_iter().for_each(|(v, _s, t)| { + >::insert( + v, + Nominations { targets: t, submitted_in: 0, suppressed: false }, + ); + }); + } } /// In this implementation `new_session(session)` must be called before `end_session(session-1)` @@ -3189,38 +3404,41 @@ impl Module { /// some session can lag in between the newest session planned and the latest session started. impl pallet_session::SessionManager for Module { fn new_session(new_index: SessionIndex) -> Option> { - frame_support::debug::native::trace!( - target: LOG_TARGET, - "[{}] planning new_session({})", + log!( + trace, + "[{:?}] planning new_session({})", >::block_number(), - new_index + new_index, ); + CurrentPlannedSession::put(new_index); Self::new_session(new_index) } fn start_session(start_index: SessionIndex) { - frame_support::debug::native::trace!( - target: LOG_TARGET, - "[{}] starting start_session({})", + log!( + trace, + "[{:?}] starting start_session({})", >::block_number(), - start_index + start_index, ); Self::start_session(start_index) } fn end_session(end_index: SessionIndex) { - frame_support::debug::native::trace!( - target: LOG_TARGET, - "[{}] ending end_session({})", + log!( + trace, + "[{:?}] ending end_session({})", >::block_number(), - end_index + end_index, ); Self::end_session(end_index) } } -impl historical::SessionManager>> for Module { - fn new_session(new_index: SessionIndex) - -> Option>)>> - { +impl historical::SessionManager>> + for Module +{ + fn new_session( + new_index: SessionIndex, + ) -> Option>)>> { 
>::new_session(new_index).map(|validators| { let current_era = Self::current_era() // Must be some as a new era has been created. @@ -3245,8 +3463,8 @@ impl historical::SessionManager pallet_authorship::EventHandler for Module - where - T: Config + pallet_authorship::Config + pallet_session::Config +where + T: Config + pallet_authorship::Config + pallet_session::Config, { fn note_author(author: T::AccountId) { Self::reward_by_ids(vec![(author, 20)]) @@ -3289,9 +3507,10 @@ impl Convert } /// This is intended to be used with `FilterHistoricalOffences`. -impl +impl OnOffenceHandler, Weight> -for Module where + for Module +where T: pallet_session::Config::AccountId>, T: pallet_session::historical::Config< FullIdentification = Exposure<::AccountId, BalanceOf>, @@ -3305,12 +3524,15 @@ for Module where >, { fn on_offence( - offenders: &[OffenceDetails>], + offenders: &[OffenceDetails< + T::AccountId, + pallet_session::historical::IdentificationTuple, + >], slash_fraction: &[Perbill], slash_session: SessionIndex, ) -> Result { if !Self::can_report() { - return Err(()) + return Err(()); } let reward_proportion = SlashRewardFraction::get(); @@ -3421,6 +3643,7 @@ for Module where } fn can_report() -> bool { + // TWO_PHASE_NOTE: we can get rid of this API Self::era_election_status().is_closed() } } @@ -3431,7 +3654,8 @@ pub struct FilterHistoricalOffences { } impl ReportOffence - for FilterHistoricalOffences, R> where + for FilterHistoricalOffences, R> +where T: Config, R: ReportOffence, O: Offence, diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index 0eb77e7c14..0d6701c48b 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -28,7 +28,7 @@ use frame_support::{ use sp_core::H256; use sp_io; use sp_npos_elections::{ - to_support_map, EvaluateSupport, reduce, ExtendedBalance, StakedAssignment, ElectionScore, + to_supports, reduce, ExtendedBalance, StakedAssignment, ElectionScore, EvaluateSupport, }; use sp_runtime::{ 
curve::PiecewiseLinear, @@ -37,6 +37,7 @@ use sp_runtime::{ }; use sp_staking::offence::{OffenceDetails, OnOffenceHandler}; use std::{cell::RefCell, collections::HashSet}; +use sp_election_providers::onchain; pub const INIT_TIMESTAMP: u64 = 30_000; pub const BLOCK_TIME: u64 = 1000; @@ -239,6 +240,12 @@ impl OnUnbalanced> for RewardRemainderMock { } } +impl onchain::Config for Test { + type AccountId = AccountId; + type BlockNumber = BlockNumber; + type Accuracy = Perbill; + type DataProvider = Staking; +} impl Config for Test { type Currency = Balances; type UnixTime = Timestamp; @@ -261,6 +268,7 @@ impl Config for Test { type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; type UnsignedPriority = UnsignedPriority; type OffchainSolutionWeightLimit = OffchainSolutionWeightLimit; + type ElectionProvider = onchain::OnChainSequentialPhragmen; type WeightInfo = (); } @@ -760,7 +768,7 @@ pub(crate) fn add_slash(who: &AccountId) { on_offence_now( &[ OffenceDetails { - offender: (who.clone(), Staking::eras_stakers(Staking::active_era().unwrap().index, who.clone())), + offender: (who.clone(), Staking::eras_stakers(active_era(), who.clone())), reporters: vec![], }, ], @@ -841,7 +849,7 @@ pub(crate) fn horrible_npos_solution( let score = { let (_, _, better_score) = prepare_submission_with(true, true, 0, |_| {}); - let support = to_support_map::(&winners, &staked_assignment).unwrap(); + let support = to_supports::(&winners, &staked_assignment).unwrap(); let score = support.evaluate(); assert!(sp_npos_elections::is_score_better::( @@ -941,7 +949,7 @@ pub(crate) fn prepare_submission_with( Staking::slashable_balance_of_fn(), ); - let support_map = to_support_map::( + let support_map = to_supports( winners.as_slice(), staked.as_slice(), ).unwrap(); @@ -962,9 +970,8 @@ pub(crate) fn prepare_submission_with( /// Make all validator and nominator request their payment pub(crate) fn make_all_reward_payment(era: EraIndex) { - let validators_with_reward = 
ErasRewardPoints::::get(era).individual.keys() - .cloned() - .collect::>(); + let validators_with_reward = + ErasRewardPoints::::get(era).individual.keys().cloned().collect::>(); // reward validators for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { @@ -988,10 +995,10 @@ macro_rules! assert_session_era { $session, ); assert_eq!( - Staking::active_era().unwrap().index, + Staking::current_era().unwrap(), $era, - "wrong active era {} != {}", - Staking::active_era().unwrap().index, + "wrong current era {} != {}", + Staking::current_era().unwrap(), $era, ); }; diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index 4f80d75086..8398c2022f 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -25,7 +25,7 @@ use codec::Decode; use frame_support::{traits::Get, weights::Weight, IterableStorageMap}; use frame_system::offchain::SubmitTransaction; use sp_npos_elections::{ - to_support_map, EvaluateSupport, reduce, Assignment, ElectionResult, ElectionScore, + to_supports, EvaluateSupport, reduce, Assignment, ElectionResult, ElectionScore, ExtendedBalance, CompactSolution, }; use sp_runtime::{ @@ -127,7 +127,7 @@ pub(crate) fn compute_offchain_election() -> Result<(), OffchainElect crate::log!( info, - "💸 prepared a seq-phragmen solution with {} balancing iterations and score {:?}", + "prepared a seq-phragmen solution with {} balancing iterations and score {:?}", iters, score, ); @@ -284,7 +284,7 @@ where if compact.remove_voter(index) { crate::log!( trace, - "💸 removed a voter at index {} with stake {:?} from compact to reduce the size", + "removed a voter at index {} with stake {:?} from compact to reduce the size", index, _stake, ); @@ -297,19 +297,17 @@ where } crate::log!( - warn, - "💸 {} nominators out of {} had to be removed from compact solution due to size limits.", - removed, - compact.voter_count() + removed, - ); + warn, + "{} nominators out 
of {} had to be removed from compact solution due to size \ + limits.", + removed, + compact.voter_count() + removed, + ); Ok(compact) } _ => { // nada, return as-is - crate::log!( - info, - "💸 Compact solution did not get trimmed due to block weight limits.", - ); + crate::log!(info, "Compact solution did not get trimmed due to block weight limits.",); Ok(compact) } } @@ -390,13 +388,16 @@ pub fn prepare_submission( let maximum_allowed_voters = maximum_compact_len::(winners.len() as u32, size, maximum_weight); - crate::log!(debug, "💸 Maximum weight = {:?} // current weight = {:?} // maximum voters = {:?} // current votes = {:?}", + crate::log!( + debug, + "Maximum weight = {:?} // current weight = {:?} // maximum voters = {:?} // current votes \ + = {:?}", maximum_weight, T::WeightInfo::submit_solution_better( - size.validators.into(), - size.nominators.into(), - compact.voter_count() as u32, - winners.len() as u32, + size.validators.into(), + size.nominators.into(), + compact.voter_count() as u32, + winners.len() as u32, ), maximum_allowed_voters, compact.voter_count(), @@ -415,7 +416,7 @@ pub fn prepare_submission( >::slashable_balance_of_fn(), ); - let support_map = to_support_map::(&winners, &staked) + let support_map = to_supports::(&winners, &staked) .map_err(|_| OffchainElectionError::ElectionFailed)?; support_map.evaluate() }; diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index a30c013655..f6ee89704d 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -247,7 +247,7 @@ pub fn get_weak_solution( ); let support_map = - to_support_map::(winners.as_slice(), staked.as_slice()).unwrap(); + to_supports::(winners.as_slice(), staked.as_slice()).unwrap(); support_map.evaluate() }; diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 1f5e2a4888..529cd7b87c 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -1833,6 +1833,7 @@ fn 
bond_with_duplicate_vote_should_be_ignored_by_npos_election() { } assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); + // 11 should not be elected. All of these count as ONE vote. assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 11, 11, 21, 31,])); assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); @@ -1886,7 +1887,6 @@ fn bond_with_duplicate_vote_should_be_ignored_by_npos_election_elected() { assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 31])); // winners should be 21 and 31. Otherwise this election is taking duplicates into account. - let sp_npos_elections::ElectionResult { winners, assignments, @@ -2029,7 +2029,7 @@ fn reward_from_authorship_event_handler_works() { fn add_reward_points_fns_works() { ExtBuilder::default().build_and_execute(|| { // Not mandatory but must be coherent with rewards - assert_eq!(Session::validators(), vec![21, 11]); + assert_eq_uvec!(Session::validators(), vec![21, 11]); >::reward_by_ids(vec![ (21, 1), @@ -3048,7 +3048,7 @@ mod offchain_election { assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); run_to_block(40); - assert_session_era!(4, 0); + assert_session_era!(4, 1); assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); assert!(Staking::snapshot_nominators().is_none()); assert!(Staking::snapshot_validators().is_none()); @@ -3066,7 +3066,7 @@ mod offchain_election { assert!(Staking::snapshot_validators().is_some()); run_to_block(90); - assert_session_era!(9, 1); + assert_session_era!(9, 2); assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); assert!(Staking::snapshot_nominators().is_none()); assert!(Staking::snapshot_validators().is_none()); @@ -4978,3 +4978,129 @@ fn cannot_bond_extra_to_lower_than_ed() { ); }) } + +#[test] +fn do_not_die_when_active_is_ed() { + let ed = 10; + ExtBuilder::default() + .existential_deposit(ed) + .build_and_execute(|| { + // initial stuff. 
+ assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + + // unbond all of it except ed. + assert_ok!(Staking::unbond(Origin::signed(20), 1000 - ed)); + start_active_era(3); + assert_ok!(Staking::withdraw_unbonded(Origin::signed(20), 100)); + + // initial stuff. + assert_eq!( + Staking::ledger(&20).unwrap(), + StakingLedger { + stash: 21, + total: ed, + active: ed, + unlocking: vec![], + claimed_rewards: vec![] + } + ); + }) +} + +mod election_data_provider { + use super::*; + use sp_election_providers::ElectionDataProvider; + + #[test] + fn voters_include_self_vote() { + ExtBuilder::default().nominate(false).build().execute_with(|| { + assert!(>::iter().map(|(x, _)| x).all(|v| Staking::voters() + .into_iter() + .find(|(w, _, t)| { v == *w && t[0] == *w }) + .is_some())) + }) + } + + #[test] + fn voters_exclude_slashed() { + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + assert_eq!( + >::voters() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![11, 21] + ); + + start_active_era(1); + add_slash(&11); + + // 11 is gone. + start_active_era(2); + assert_eq!( + >::voters() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![21] + ); + + // resubmit and it is back + assert_ok!(Staking::nominate(Origin::signed(100), vec![11, 21])); + assert_eq!( + >::voters() + .iter() + .find(|x| x.0 == 101) + .unwrap() + .2, + vec![11, 21] + ); + }) + } + + #[test] + fn estimate_next_election_works() { + ExtBuilder::default().session_per_era(5).period(5).build().execute_with(|| { + // first session is always length 0. 
+ for b in 1..20 { + run_to_block(b); + assert_eq!(Staking::next_election_prediction(System::block_number()), 20); + } + + // election + run_to_block(20); + assert_eq!(Staking::next_election_prediction(System::block_number()), 45); + assert_eq!(staking_events().len(), 1); + assert_eq!( + *staking_events().last().unwrap(), + RawEvent::StakingElection(ElectionCompute::OnChain) + ); + + for b in 21..45 { + run_to_block(b); + assert_eq!(Staking::next_election_prediction(System::block_number()), 45); + } + + // election + run_to_block(45); + assert_eq!(Staking::next_election_prediction(System::block_number()), 70); + assert_eq!(staking_events().len(), 3); + assert_eq!( + *staking_events().last().unwrap(), + RawEvent::StakingElection(ElectionCompute::OnChain) + ); + }) + } +} diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index b70563ccf4..c7b7edad55 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_staking //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.1 -//! DATE: 2021-01-19, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-02-13, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -75,171 +75,171 @@ pub trait WeightInfo { pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn bond() -> Weight { - (76_281_000 as Weight) + (81_642_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - (62_062_000 as Weight) + (66_025_000 as Weight) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (57_195_000 as Weight) + (60_810_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (58_043_000 as Weight) + (61_537_000 as Weight) // Standard Error: 1_000 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (89_920_000 as Weight) - // Standard Error: 3_000 - .saturating_add((2_526_000 as Weight).saturating_mul(s as Weight)) + (95_741_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_754_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (20_228_000 as Weight) + (21_009_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (31_066_000 as Weight) - // Standard Error: 11_000 - .saturating_add((17_754_000 as Weight).saturating_mul(k as Weight)) + 
(31_832_000 as Weight) + // Standard Error: 15_000 + .saturating_add((19_418_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (33_494_000 as Weight) - // Standard Error: 23_000 - .saturating_add((5_253_000 as Weight).saturating_mul(n as Weight)) + (34_304_000 as Weight) + // Standard Error: 20_000 + .saturating_add((5_643_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (19_396_000 as Weight) + (20_103_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_449_000 as Weight) + (13_858_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (29_184_000 as Weight) + (30_269_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_266_000 as Weight) + (2_444_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_462_000 as Weight) + (2_766_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_483_000 as Weight) + (2_724_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_495_000 as Weight) + (2_702_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { 
- (2_712_000 as Weight) + (2_914_000 as Weight) // Standard Error: 0 - .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (60_508_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_525_000 as Weight).saturating_mul(s as Weight)) + (64_032_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_886_772_000 as Weight) - // Standard Error: 393_000 - .saturating_add((34_849_000 as Weight).saturating_mul(s as Weight)) + (5_903_394_000 as Weight) + // Standard Error: 391_000 + .saturating_add((34_834_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (127_627_000 as Weight) - // Standard Error: 27_000 - .saturating_add((49_354_000 as Weight).saturating_mul(n as Weight)) + (141_724_000 as Weight) + // Standard Error: 24_000 + .saturating_add((53_018_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (156_838_000 as Weight) - // Standard Error: 24_000 - .saturating_add((62_653_000 as Weight).saturating_mul(n as Weight)) + (159_994_000 as Weight) + // Standard Error: 28_000 + 
.saturating_add((67_746_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(12 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> Weight { - (40_110_000 as Weight) + (42_177_000 as Weight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 70_000 - .saturating_add((32_883_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 65_000 + .saturating_add((34_151_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (64_605_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_506_000 as Weight).saturating_mul(s as Weight)) + (68_377_000 as Weight) + // Standard Error: 0 + .saturating_add((2_757_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 926_000 - .saturating_add((548_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 46_000 - .saturating_add((78_343_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(T::DbWeight::get().reads(7 as Weight)) + // Standard Error: 908_000 + .saturating_add((588_562_000 
as Weight).saturating_mul(v as Weight)) + // Standard Error: 45_000 + .saturating_add((83_485_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(T::DbWeight::get().reads(9 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) - .saturating_add(T::DbWeight::get().writes(8 as Weight)) + .saturating_add(T::DbWeight::get().writes(13 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - // Standard Error: 48_000 - .saturating_add((937_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 19_000 - .saturating_add((657_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 48_000 - .saturating_add((70_669_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 101_000 - .saturating_add((7_658_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 52_000 + .saturating_add((750_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 20_000 + .saturating_add((556_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 52_000 + .saturating_add((76_201_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 108_000 + .saturating_add((7_271_000 as Weight).saturating_mul(w as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) @@ -250,171 +250,171 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { fn bond() -> Weight { - (76_281_000 as Weight) + (81_642_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } fn bond_extra() -> Weight { - 
(62_062_000 as Weight) + (66_025_000 as Weight) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn unbond() -> Weight { - (57_195_000 as Weight) + (60_810_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_update(s: u32, ) -> Weight { - (58_043_000 as Weight) + (61_537_000 as Weight) // Standard Error: 1_000 - .saturating_add((52_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((60_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (89_920_000 as Weight) - // Standard Error: 3_000 - .saturating_add((2_526_000 as Weight).saturating_mul(s as Weight)) + (95_741_000 as Weight) + // Standard Error: 1_000 + .saturating_add((2_754_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn validate() -> Weight { - (20_228_000 as Weight) + (21_009_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn kick(k: u32, ) -> Weight { - (31_066_000 as Weight) - // Standard Error: 11_000 - .saturating_add((17_754_000 as Weight).saturating_mul(k as Weight)) + (31_832_000 as Weight) + // Standard Error: 15_000 + .saturating_add((19_418_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } fn nominate(n: u32, ) -> Weight { - (33_494_000 as Weight) - // 
Standard Error: 23_000 - .saturating_add((5_253_000 as Weight).saturating_mul(n as Weight)) + (34_304_000 as Weight) + // Standard Error: 20_000 + .saturating_add((5_643_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn chill() -> Weight { - (19_396_000 as Weight) + (20_103_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_payee() -> Weight { - (13_449_000 as Weight) + (13_858_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_controller() -> Weight { - (29_184_000 as Weight) + (30_269_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_validator_count() -> Weight { - (2_266_000 as Weight) + (2_444_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_no_eras() -> Weight { - (2_462_000 as Weight) + (2_766_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era() -> Weight { - (2_483_000 as Weight) + (2_724_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_new_era_always() -> Weight { - (2_495_000 as Weight) + (2_702_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_invulnerables(v: u32, ) -> Weight { - (2_712_000 as Weight) + (2_914_000 as Weight) // Standard Error: 0 - .saturating_add((9_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((35_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn force_unstake(s: u32, ) -> Weight { - (60_508_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_525_000 as 
Weight).saturating_mul(s as Weight)) + (64_032_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_787_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn cancel_deferred_slash(s: u32, ) -> Weight { - (5_886_772_000 as Weight) - // Standard Error: 393_000 - .saturating_add((34_849_000 as Weight).saturating_mul(s as Weight)) + (5_903_394_000 as Weight) + // Standard Error: 391_000 + .saturating_add((34_834_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (127_627_000 as Weight) - // Standard Error: 27_000 - .saturating_add((49_354_000 as Weight).saturating_mul(n as Weight)) + (141_724_000 as Weight) + // Standard Error: 24_000 + .saturating_add((53_018_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (156_838_000 as Weight) - // Standard Error: 24_000 - .saturating_add((62_653_000 as Weight).saturating_mul(n as Weight)) + (159_994_000 as Weight) + // Standard Error: 28_000 + .saturating_add((67_746_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(12 as Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } fn rebond(l: u32, ) -> 
Weight { - (40_110_000 as Weight) + (42_177_000 as Weight) // Standard Error: 1_000 - .saturating_add((78_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((82_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 70_000 - .saturating_add((32_883_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 65_000 + .saturating_add((34_151_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } fn reap_stash(s: u32, ) -> Weight { - (64_605_000 as Weight) - // Standard Error: 1_000 - .saturating_add((2_506_000 as Weight).saturating_mul(s as Weight)) + (68_377_000 as Weight) + // Standard Error: 0 + .saturating_add((2_757_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 926_000 - .saturating_add((548_212_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 46_000 - .saturating_add((78_343_000 as Weight).saturating_mul(n as Weight)) - .saturating_add(RocksDbWeight::get().reads(7 as Weight)) + // Standard Error: 908_000 + .saturating_add((588_562_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 45_000 + .saturating_add((83_485_000 as Weight).saturating_mul(n as Weight)) + .saturating_add(RocksDbWeight::get().reads(9 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n 
as Weight))) - .saturating_add(RocksDbWeight::get().writes(8 as Weight)) + .saturating_add(RocksDbWeight::get().writes(13 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } fn submit_solution_better(v: u32, n: u32, a: u32, w: u32, ) -> Weight { (0 as Weight) - // Standard Error: 48_000 - .saturating_add((937_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 19_000 - .saturating_add((657_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 48_000 - .saturating_add((70_669_000 as Weight).saturating_mul(a as Weight)) - // Standard Error: 101_000 - .saturating_add((7_658_000 as Weight).saturating_mul(w as Weight)) + // Standard Error: 52_000 + .saturating_add((750_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 20_000 + .saturating_add((556_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 52_000 + .saturating_add((76_201_000 as Weight).saturating_mul(a as Weight)) + // Standard Error: 108_000 + .saturating_add((7_271_000 as Weight).saturating_mul(w as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().reads((4 as Weight).saturating_mul(a as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(w as Weight))) diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index ed19d2e165..c1b841c30c 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -35,3 +35,4 @@ std = [ "frame-support/std", "frame-system/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 8edf1ff6dd..7b1179122b 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -13,7 +13,6 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = 
false, features = ["derive"] } frame-metadata = { version = "13.0.0", default-features = false, path = "../metadata" } @@ -32,13 +31,12 @@ sp-state-machine = { version = "0.9.0", optional = true, path = "../../primitive bitflags = "1.2" impl-trait-for-tuples = "0.2.1" smallvec = "1.4.1" +log = { version = "0.4.14", default-features = false } [dev-dependencies] pretty_assertions = "0.6.1" frame-system = { version = "3.0.0", path = "../system" } parity-util-mem = { version = "0.9.0", default-features = false, features = ["primitive-types"] } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-api = { version = "3.0.0", default-features = false, path = "../../primitives/api" } [features] default = ["std"] @@ -56,7 +54,7 @@ std = [ "sp-staking/std", "sp-state-machine", "frame-support-procedural/std", + "log/std", ] -nightly = [] -strict = [] runtime-benchmarks = [] +try-runtime = [] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index b2c5e2887f..4a00a24e38 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] frame-support-procedural-tools = { version = "3.0.0", path = "./tools" } proc-macro2 = "1.0.6" -quote = "1.0.9" +quote = "1.0.3" Inflector = "0.11.4" syn = { version = "1.0.58", features = ["full"] } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index 2c2cdf00a0..e64a364d29 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -17,7 +17,7 @@ //! Proc macro of Support code for the runtime. 
-#![recursion_limit="512"] +#![recursion_limit = "512"] mod storage; mod construct_runtime; diff --git a/frame/support/procedural/src/pallet/expand/hooks.rs b/frame/support/procedural/src/pallet/expand/hooks.rs index 2e4fddebb7..b1eee507fd 100644 --- a/frame/support/procedural/src/pallet/expand/hooks.rs +++ b/frame/support/procedural/src/pallet/expand/hooks.rs @@ -75,6 +75,24 @@ pub fn expand_hooks(def: &mut Def) -> proc_macro2::TokenStream { result.saturating_add(additional_write) } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + < + Self + as + #frame_support::traits::Hooks<::BlockNumber> + >::pre_upgrade() + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + < + Self + as + #frame_support::traits::Hooks<::BlockNumber> + >::post_upgrade() + } } impl<#type_impl_gen> diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 7b5dfee4b8..4165cb32c3 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -14,6 +14,6 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] frame-support-procedural-tools-derive = { version = "3.0.0", path = "./derive" } proc-macro2 = "1.0.6" -quote = "1.0.9" +quote = "1.0.3" syn = { version = "1.0.58", features = ["full", "visit"] } proc-macro-crate = "0.1.5" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index bfeec1c2b2..c377680af1 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -16,5 +16,5 @@ proc-macro = true [dependencies] proc-macro2 = "1.0.6" -quote = { version = "1.0.9", features = ["proc-macro"] } +quote = { version = "1.0.3", features = ["proc-macro"] } syn = { version = "1.0.58", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs deleted 
file mode 100644 index 43efd3d916..0000000000 --- a/frame/support/src/debug.rs +++ /dev/null @@ -1,247 +0,0 @@ -// This file is part of Substrate. - -// Copyright (C) 2019-2021 Parity Technologies (UK) Ltd. -// SPDX-License-Identifier: Apache-2.0 - -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//! Runtime debugging and logging utilities. -//! -//! This module contains macros and functions that will allow -//! you to print logs out of the runtime code. -//! -//! First and foremost be aware that adding regular logging code to -//! your runtime will have a negative effect on the performance -//! and size of the blob. Luckily there are some ways to mitigate -//! this that are described below. -//! -//! First component to utilize debug-printing and logging is actually -//! located in `primitives` crate: `sp_core::RuntimeDebug`. -//! This custom-derive generates `core::fmt::Debug` implementation, -//! just like regular `derive(Debug)`, however it does not generate -//! any code when the code is compiled to WASM. This means that -//! you can safely sprinkle `RuntimeDebug` in your runtime codebase, -//! without affecting the size. This also allows you to print/log -//! both when the code is running natively or in WASM, but note -//! that WASM debug formatting of structs will be empty. -//! -//! ```rust,no_run -//! use frame_support::debug; -//! -//! #[derive(sp_core::RuntimeDebug)] -//! struct MyStruct { -//! a: u64, -//! } -//! -//! 
// First initialize the logger. -//! // -//! // This is only required when you want the logs to be printed -//! // also during non-native run. -//! // Note that enabling the logger has performance impact on -//! // WASM runtime execution and should be used sparingly. -//! debug::RuntimeLogger::init(); -//! -//! let x = MyStruct { a: 5 }; -//! // will log an info line `"My struct: MyStruct{a:5}"` when running -//! // natively, but will only print `"My struct: "` when running WASM. -//! debug::info!("My struct: {:?}", x); -//! -//! // same output here, although this will print to stdout -//! // (and without log format) -//! debug::print!("My struct: {:?}", x); -//! ``` -//! -//! If you want to avoid extra overhead in WASM, but still be able -//! to print / log when the code is executed natively you can use -//! macros coming from `native` sub-module. This module enables -//! logs conditionally and strips out logs in WASM. -//! -//! ```rust,no_run -//! use frame_support::debug::native; -//! -//! #[derive(sp_core::RuntimeDebug)] -//! struct MyStruct { -//! a: u64, -//! } -//! -//! // We don't initialize the logger, since -//! // we are not printing anything out in WASM. -//! // debug::RuntimeLogger::init(); -//! -//! let x = MyStruct { a: 5 }; -//! -//! // Displays an info log when running natively, nothing when WASM. -//! native::info!("My struct: {:?}", x); -//! -//! // same output to stdout, no overhead on WASM. -//! native::print!("My struct: {:?}", x); -//! ``` - -use sp_std::fmt::{self, Debug}; - -pub use log::{info, debug, error, trace, warn}; -pub use crate::runtime_print as print; -pub use sp_std::Writer; - -/// Native-only logging. -/// -/// Using any functions from this module will have any effect -/// only if the runtime is running natively (i.e. not via WASM) -#[cfg(feature = "std")] -pub mod native { - pub use super::{info, debug, error, trace, warn, print}; -} - -/// Native-only logging. 
-/// -/// Using any functions from this module will have any effect -/// only if the runtime is running natively (i.e. not via WASM) -#[cfg(not(feature = "std"))] -pub mod native { - #[macro_export] - macro_rules! noop { - ($($arg:tt)+) => {} - } - pub use noop as info; - pub use noop as debug; - pub use noop as error; - pub use noop as trace; - pub use noop as warn; - pub use noop as print; -} - -/// Print out a formatted message. -/// -/// # Example -/// -/// ``` -/// frame_support::runtime_print!("my value is {}", 3); -/// ``` -#[macro_export] -macro_rules! runtime_print { - ($($arg:tt)+) => { - { - use core::fmt::Write; - let mut w = $crate::sp_std::Writer::default(); - let _ = core::write!(&mut w, $($arg)+); - $crate::sp_io::misc::print_utf8(&w.inner()) - } - } -} - -/// Print out the debuggable type. -pub fn debug(data: &impl Debug) { - runtime_print!("{:?}", data); -} - -/// Runtime logger implementation - `log` crate backend. -/// -/// The logger should be initialized if you want to display -/// logs inside the runtime that is not necessarily running natively. -/// -/// When runtime is executed natively any log statements are displayed -/// even if this logger is NOT initialized. -/// -/// Note that even though the logs are not displayed in WASM, they -/// may still affect the size and performance of the generated runtime. -/// To lower the footprint make sure to only use macros from `native` -/// sub-module. -pub struct RuntimeLogger; - -impl RuntimeLogger { - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). - #[cfg(feature = "std")] - pub fn init() {} - - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). 
- #[cfg(not(feature = "std"))] - pub fn init() { - static LOGGER: RuntimeLogger = RuntimeLogger; - let _ = log::set_logger(&LOGGER); - - // Set max level to `TRACE` to ensure we propagate - // all log entries to the native side that will do the - // final filtering on what should be printed. - // - // If we don't set any level, logging is disabled - // completly. - log::set_max_level(log::LevelFilter::Trace); - } -} - -impl log::Log for RuntimeLogger { - fn enabled(&self, _metadata: &log::Metadata) -> bool { - // to avoid calling to host twice, we pass everything - // and let the host decide what to print. - // If someone is initializing the logger they should - // know what they are doing. - true - } - - fn log(&self, record: &log::Record) { - use fmt::Write; - let mut w = sp_std::Writer::default(); - let _ = core::write!(&mut w, "{}", record.args()); - - sp_io::logging::log( - record.level().into(), - record.target(), - w.inner(), - ); - } - - fn flush(&self) {} -} - -#[cfg(test)] -mod tests { - use substrate_test_runtime_client::{ - ExecutionStrategy, TestClientBuilderExt, DefaultTestClientBuilderExt, - TestClientBuilder, runtime::TestAPI, - }; - use sp_api::ProvideRuntimeApi; - use sp_runtime::generic::BlockId; - - #[test] - fn ensure_runtime_logger_works() { - let executable = std::env::current_exe().unwrap(); - let output = std::process::Command::new(executable) - .env("RUN_TEST", "1") - .env("RUST_LOG", "trace") - .args(&["--nocapture", "ensure_runtime_logger_works_implementation"]) - .output() - .unwrap(); - - let output = dbg!(String::from_utf8(output.stderr).unwrap()); - assert!(output.contains("Hey I'm runtime")); - } - - /// This is no actual test. It will be called by `ensure_runtime_logger_works` - /// to check that the runtime can print from the wasm side using the - /// `RuntimeLogger`. 
- #[test] - fn ensure_runtime_logger_works_implementation() { - if std::env::var("RUN_TEST").is_ok() { - sp_tracing::try_init_simple(); - - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(0); - runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); - } - } -} diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 7927ccd014..ab9feae3c2 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -80,18 +80,18 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// // FRAME pallets. /// #[weight = 0] /// fn my_function(origin, var: u64) -> dispatch::DispatchResult { -/// // Your implementation -/// Ok(()) +/// // Your implementation +/// Ok(()) /// } /// -/// // Public functions are both dispatchable and available to other +/// // Public functions are both dispatchable and available to other /// // FRAME pallets. /// #[weight = 0] -/// pub fn my_public_function(origin) -> dispatch::DispatchResult { +/// pub fn my_public_function(origin) -> dispatch::DispatchResult { /// // Your implementation -/// Ok(()) +/// Ok(()) +/// } /// } -/// } /// } /// # fn main() {} /// ``` @@ -99,8 +99,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// The declaration is set with the header where: /// /// * `Module`: The struct generated by the macro, with type `Config`. -/// * `Call`: The enum generated for every pallet, which implements [`Callable`](./dispatch/trait.Callable.html). -/// * `origin`: Alias of `T::Origin`, declared by the [`impl_outer_origin!`](./macro.impl_outer_origin.html) macro. +/// * `Call`: The enum generated for every pallet, which implements +/// [`Callable`](./dispatch/trait.Callable.html). +/// * `origin`: Alias of `T::Origin`, declared by the +/// [`impl_outer_origin!`](./macro.impl_outer_origin.html) macro. 
/// * `Result`: The expected return type from pallet functions. /// /// The first parameter of dispatchable functions must always be `origin`. @@ -119,15 +121,15 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] /// fn my_long_function(origin) -> dispatch::DispatchResult { -/// // Your implementation +/// // Your implementation /// Ok(()) /// } /// /// #[weight = 0] /// fn my_short_function(origin) { -/// // Your implementation +/// // Your implementation +/// } /// } -/// } /// } /// # fn main() {} /// ``` @@ -184,7 +186,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// #[weight = 0] /// #[transactional] /// fn my_short_function(origin) { -/// // Your implementation +/// // Your implementation /// } /// } /// } @@ -203,12 +205,12 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// #[weight = 0] -/// fn my_privileged_function(origin) -> dispatch::DispatchResult { +/// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; -/// // Your implementation +/// // Your implementation /// Ok(()) /// } -/// } +/// } /// } /// # fn main() {} /// ``` @@ -218,15 +220,17 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// Attributes on functions are supported, but must be in the order of: /// 1. Optional #\[doc\] attribute. /// 2. #\[weight\] attribute. -/// 3. Optional function attributes, for instance #\[transactional\]. Those function attributes will be written -/// only on the dispatchable functions implemented on `Module`, not on the `Call` enum variant. +/// 3. Optional function attributes, for instance #\[transactional\]. 
Those function attributes will +/// be written only on the dispatchable functions implemented on `Module`, not on the `Call` enum +/// variant. /// /// ## Multiple Module Instances Example /// -/// A Substrate module can be built such that multiple instances of the same module can be used within a single -/// runtime. For example, the [Balances module](../pallet_balances/index.html) can be added multiple times to your -/// runtime in order to support multiple, independent currencies for your blockchain. Here is an example of how -/// you would declare such a module using the `decl_module!` macro: +/// A Substrate module can be built such that multiple instances of the same module can be used +/// within a single runtime. For example, the [Balances module](../pallet_balances/index.html) can +/// be added multiple times to your runtime in order to support multiple, independent currencies for +/// your blockchain. Here is an example of how you would declare such a module using the +/// `decl_module!` macro: /// /// ``` /// # #[macro_use] @@ -251,10 +255,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// ## Where clause /// -/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module declaration. -/// This where bound will be replicated to all types generated by this macro. The chaining of multiple -/// trait bounds with `+` is not supported. If multiple bounds for one type are required, it needs to -/// be split up into multiple bounds. +/// Besides the default `origin: T::Origin`, you can also pass other bounds to the module +/// declaration. This where bound will be replicated to all types generated by this macro. The +/// chaining of multiple trait bounds with `+` is not supported. If multiple bounds for one type are +/// required, it needs to be split up into multiple bounds. 
/// /// ``` /// # #[macro_use] @@ -276,16 +280,18 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// The following are reserved function signatures: /// /// * `deposit_event`: Helper function for depositing an [event](https://docs.substrate.dev/docs/event-enum). -/// The default behavior is to call `deposit_event` from the [System module](../frame_system/index.html). -/// However, you can write your own implementation for events in your runtime. To use the default behavior, -/// add `fn deposit_event() = default;` to your `Module`. +/// The default behavior is to call `deposit_event` from the [System +/// module](../frame_system/index.html). However, you can write your own implementation for events +/// in your runtime. To use the default behavior, add `fn deposit_event() = default;` to your +/// `Module`. /// -/// The following reserved functions also take the block number (with type `T::BlockNumber`) as an optional input: +/// The following reserved functions also take the block number (with type `T::BlockNumber`) as an +/// optional input: /// /// * `on_runtime_upgrade`: Executes at the beginning of a block prior to on_initialize when there -/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items are used. -/// As such, **calling other modules must be avoided**!! Using this function will implement the -/// [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. +/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items +/// are used. As such, **calling other modules must be avoided**!! Using this function will +/// implement the [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. /// Function signature must be `fn on_runtime_upgrade() -> frame_support::weights::Weight`. /// /// * `on_initialize`: Executes at the beginning of a block. 
Using this function will @@ -300,11 +306,11 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// * `fn on_finalize(n: BlockNumber) -> frame_support::weights::Weight` or /// * `fn on_finalize() -> frame_support::weights::Weight` /// -/// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future block -/// upon completion. Using this function will implement the +/// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future +/// block upon completion. Using this function will implement the /// [`OffchainWorker`](./traits/trait.OffchainWorker.html) trait. -/// * `integrity_test`: Executes in a test generated by `construct_runtime`, note it doesn't -/// execute in an externalities-provided environment. Implement +/// * `integrity_test`: Executes in a test generated by `construct_runtime`, note it doesn't execute +/// in an externalities-provided environment. Implement /// [`IntegrityTest`](./trait.IntegrityTest.html) trait. #[macro_export] macro_rules! decl_module { @@ -1325,15 +1331,39 @@ macro_rules! 
decl_module { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); let result: $return = (|| { $( $impl )* })(); - $crate::crate_to_pallet_version!() + let new_storage_version = $crate::crate_to_pallet_version!(); + new_storage_version .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); let additional_write = < <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1); + let pallet_name = << + $trait_instance + as + $system::Config + >::PalletInfo as $crate::traits::PalletInfo>::name::().expect("pallet will have name in the runtime; qed"); + + $crate::log::info!( + target: $crate::LOG_TARGET, + "⚠️ running migration for {} and setting new storage version to {:?}", + pallet_name, + new_storage_version, + ); + result.saturating_add(additional_write) } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } } }; @@ -1349,13 +1379,37 @@ macro_rules! 
decl_module { fn on_runtime_upgrade() -> $crate::dispatch::Weight { $crate::sp_tracing::enter_span!($crate::sp_tracing::trace_span!("on_runtime_upgrade")); - $crate::crate_to_pallet_version!() + let new_storage_version = $crate::crate_to_pallet_version!(); + new_storage_version .put_into_storage::<<$trait_instance as $system::Config>::PalletInfo, Self>(); + let pallet_name = << + $trait_instance + as + $system::Config + >::PalletInfo as $crate::traits::PalletInfo>::name::().expect("pallet will have name in the runtime; qed"); + + $crate::log::info!( + target: $crate::LOG_TARGET, + "✅ no migration for '{}' and setting new storage version to {:?}", + pallet_name, + new_storage_version, + ); + < <$trait_instance as $system::Config>::DbWeight as $crate::traits::Get<_> >::get().writes(1) } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } } }; diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index 0a8be8aec0..22ccbeb6ce 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -102,7 +102,7 @@ impl StorageHasher for Twox64Concat { impl ReversibleStorageHasher for Twox64Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 8 { - crate::debug::error!("Invalid reverse: hash length too short"); + log::error!("Invalid reverse: hash length too short"); return &[] } &x[8..] @@ -125,7 +125,7 @@ impl StorageHasher for Blake2_128Concat { impl ReversibleStorageHasher for Blake2_128Concat { fn reverse(x: &[u8]) -> &[u8] { if x.len() < 16 { - crate::debug::error!("Invalid reverse: hash length too short"); + log::error!("Invalid reverse: hash length too short"); return &[] } &x[16..] 
diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index fc7939fe30..4dbb6bff5a 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -44,9 +44,9 @@ pub use sp_state_machine::BasicExternalities; pub use sp_io::{storage::root as storage_root, self}; #[doc(hidden)] pub use sp_runtime::RuntimeDebug; +#[doc(hidden)] +pub use log; -#[macro_use] -pub mod debug; #[macro_use] mod origin; #[macro_use] @@ -80,6 +80,9 @@ pub use self::storage::{ pub use self::dispatch::{Parameter, Callable}; pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; +/// A unified log target for support operations. +pub const LOG_TARGET: &'static str = "runtime::frame-support"; + /// A type that cannot be instantiated. #[derive(Debug, PartialEq, Eq, Clone)] pub enum Never {} @@ -337,6 +340,30 @@ macro_rules! ord_parameter_types { } } +/// Print out a formatted message. +/// +/// # Example +/// +/// ``` +/// frame_support::runtime_print!("my value is {}", 3); +/// ``` +#[macro_export] +macro_rules! runtime_print { + ($($arg:tt)+) => { + { + use core::fmt::Write; + let mut w = $crate::sp_std::Writer::default(); + let _ = core::write!(&mut w, $($arg)+); + $crate::sp_io::misc::print_utf8(&w.inner()) + } + } +} + +/// Print out the debuggable type. 
+pub fn debug(data: &impl sp_std::fmt::Debug) { + runtime_print!("{:?}", data); +} + #[doc(inline)] pub use frame_support_procedural::{ decl_storage, construct_runtime, transactional, RuntimeDebugNoBound @@ -1048,7 +1075,7 @@ pub mod pallet_prelude { pub use frame_support::traits::GenesisBuild; pub use frame_support::{ EqNoBound, PartialEqNoBound, RuntimeDebugNoBound, DebugNoBound, CloneNoBound, Twox256, - Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, debug, ensure, + Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, ensure, RuntimeDebug, storage, traits::{Get, Hooks, IsType, GetPalletVersion, EnsureOrigin}, dispatch::{DispatchResultWithPostInfo, Parameter, DispatchError}, @@ -2034,9 +2061,9 @@ pub mod pallet_prelude { /// * `add_extra_genesis` fields are converted to `GenesisConfig` field with their correct /// default if specified /// * `add_extra_genesis` build is written into `GenesisBuild::build` -/// * storage items defined with [`pallet`] use the name of the pallet provided by [`PalletInfo::name`] -/// as `pallet_prefix` (in `decl_storage`, storage items used the `pallet_prefix` given as input of -/// `decl_storage` with the syntax `as Example`). +/// * storage items defined with [`pallet`] use the name of the pallet provided by +/// [`traits::PalletInfo::name`] as `pallet_prefix` (in `decl_storage`, storage items used the +/// `pallet_prefix` given as input of `decl_storage` with the syntax `as Example`). /// Thus a runtime using the pallet must be careful with this change. 
/// To handle this change: /// * either ensure that the name of the pallet given to `construct_runtime!` is the same diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index c1885fc074..6f99874743 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -24,14 +24,7 @@ use crate::sp_std::prelude::*; use codec::{Codec, Encode, Decode}; pub use sp_core::storage::{ChildInfo, ChildType}; - -/// The outcome of calling [`kill_storage`]. -pub enum KillOutcome { - /// No key remains in the child trie. - AllRemoved, - /// At least one key still resides in the child trie due to the supplied limit. - SomeRemaining, -} +pub use crate::sp_io::KillChildStorageResult; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get( @@ -47,7 +40,11 @@ pub fn get( ).and_then(|v| { Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); + crate::runtime_print!( + "ERROR: Corrupted state in child trie at {:?}/{:?}", + storage_key, + key, + ); None }) }) @@ -177,16 +174,12 @@ pub fn exists( pub fn kill_storage( child_info: &ChildInfo, limit: Option, -) -> KillOutcome { - let all_removed = match child_info.child_type() { +) -> KillChildStorageResult { + match child_info.child_type() { ChildType::ParentKeyId => sp_io::default_child_storage::storage_kill( child_info.storage_key(), limit ), - }; - match all_removed { - true => KillOutcome::AllRemoved, - false => KillOutcome::SomeRemaining, } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index 7e1a2456e4..c02ebe4829 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -393,7 +393,7 @@ impl< let value = match unhashed::get::(&previous_key) { Some(value) => 
value, None => { - crate::debug::error!("Invalid translate: fail to decode old value"); + log::error!("Invalid translate: fail to decode old value"); continue }, }; @@ -401,7 +401,7 @@ impl< let key1 = match K1::decode(&mut key_material) { Ok(key1) => key1, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key1"); + log::error!("Invalid translate: fail to decode key1"); continue }, }; @@ -410,7 +410,7 @@ impl< let key2 = match K2::decode(&mut key2_material) { Ok(key2) => key2, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key2"); + log::error!("Invalid translate: fail to decode key2"); continue }, }; diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 7f6eb2a518..9abc788393 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -172,7 +172,7 @@ impl< let value = match unhashed::get::(&previous_key) { Some(value) => value, None => { - crate::debug::error!("Invalid translate: fail to decode old value"); + log::error!("Invalid translate: fail to decode old value"); continue }, }; @@ -181,7 +181,7 @@ impl< let key = match K::decode(&mut key_material) { Ok(key) => key, Err(_) => { - crate::debug::error!("Invalid translate: fail to decode key"); + log::error!("Invalid translate: fail to decode key"); continue }, }; diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 93cf7c6639..d9820475a7 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -62,7 +62,7 @@ mod debug_helper { let mut val = v.borrow_mut(); *val += 1; if *val > 10 { - crate::debug::warn!( + log::warn!( "Detected with_transaction with nest level {}. 
Nested usage of with_transaction is not recommended.", *val ); @@ -532,9 +532,9 @@ impl Iterator for PrefixIterator { let raw_value = match unhashed::get_raw(&self.previous_key) { Some(raw_value) => raw_value, None => { - crate::debug::error!( + log::error!( "next_key returned a key with no value at {:?}", - self.previous_key + self.previous_key, ); continue } @@ -546,9 +546,10 @@ impl Iterator for PrefixIterator { let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { Ok(item) => item, Err(e) => { - crate::debug::error!( + log::error!( "(key, value) failed to decode at {:?}: {:?}", - self.previous_key, e + self.previous_key, + e, ); continue } @@ -628,9 +629,9 @@ pub trait StoragePrefixedMap { None => unhashed::kill(&previous_key), }, None => { - crate::debug::error!( + log::error!( "old key failed to decode at {:?}", - previous_key + previous_key, ); continue }, diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index 8ac4240a9f..d3d54f3de5 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -25,7 +25,7 @@ pub fn get(key: &[u8]) -> Option { sp_io::storage::get(key).and_then(|val| { Decode::decode(&mut &val[..]).map(Some).unwrap_or_else(|_| { // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state at {:?}", key); + crate::runtime_print!("ERROR: Corrupted state at {:?}", key); None }) }) diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 106ec10c6c..395a23d581 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -490,10 +490,16 @@ impl< } } -/// Something that can estimate at which block the next session rotation will happen. This should -/// be the same logical unit that dictates `ShouldEndSession` to the session module. No Assumptions -/// are made about the scheduling of the sessions. +/// Something that can estimate at which block the next session rotation will happen. 
+/// +/// This should be the same logical unit that dictates `ShouldEndSession` to the session module. No +/// Assumptions are made about the scheduling of the sessions. pub trait EstimateNextSessionRotation { + /// Return the average length of a session. + /// + /// This may or may not be accurate. + fn average_session_length() -> BlockNumber; + /// Return the block number at which the next session rotation is estimated to happen. /// /// None should be returned if the estimation fails to come to an answer @@ -503,7 +509,11 @@ pub trait EstimateNextSessionRotation { fn weight(now: BlockNumber) -> Weight; } -impl EstimateNextSessionRotation for () { +impl EstimateNextSessionRotation for () { + fn average_session_length() -> BlockNumber { + Default::default() + } + fn estimate_next_session_rotation(_: BlockNumber) -> Option { Default::default() } @@ -513,9 +523,15 @@ impl EstimateNextSessionRotation for () { } } -/// Something that can estimate at which block the next `new_session` will be triggered. This must -/// always be implemented by the session module. +/// Something that can estimate at which block the next `new_session` will be triggered. +/// +/// This must always be implemented by the session module. pub trait EstimateNextNewSession { + /// Return the average length of a session. + /// + /// This may or may not be accurate. + fn average_session_length() -> BlockNumber; + /// Return the block number at which the next new session is estimated to happen. 
fn estimate_next_new_session(now: BlockNumber) -> Option; @@ -523,7 +539,11 @@ pub trait EstimateNextNewSession { fn weight(now: BlockNumber) -> Weight; } -impl EstimateNextNewSession for () { +impl EstimateNextNewSession for () { + fn average_session_length() -> BlockNumber { + Default::default() + } + fn estimate_next_new_session(_: BlockNumber) -> Option { Default::default() } @@ -1533,6 +1553,54 @@ pub trait OnGenesis { fn on_genesis() {} } +/// Prefix to be used (optionally) for implementing [`OnRuntimeUpgrade::storage_key`]. +#[cfg(feature = "try-runtime")] +pub const ON_RUNTIME_UPGRADE_PREFIX: &[u8] = b"__ON_RUNTIME_UPGRADE__"; + +/// Some helper functions for [`OnRuntimeUpgrade`] during `try-runtime` testing. +#[cfg(feature = "try-runtime")] +pub trait OnRuntimeUpgradeHelpersExt { + /// Generate a storage key unique to this runtime upgrade. + /// + /// This can be used to communicate data from pre-upgrade to post-upgrade state and check + /// them. See [`set_temp_storage`] and [`get_temp_storage`]. + #[cfg(feature = "try-runtime")] + fn storage_key(ident: &str) -> [u8; 32] { + let prefix = sp_io::hashing::twox_128(ON_RUNTIME_UPGRADE_PREFIX); + let ident = sp_io::hashing::twox_128(ident.as_bytes()); + + let mut final_key = [0u8; 32]; + final_key[..16].copy_from_slice(&prefix); + final_key[16..].copy_from_slice(&ident); + + final_key + } + + /// Get temporary storage data written by [`set_temp_storage`]. + /// + /// Returns `None` if either the data is unavailable or un-decodable. + /// + /// A `at` storage identifier must be provided to indicate where the storage is being read from. + #[cfg(feature = "try-runtime")] + fn get_temp_storage(at: &str) -> Option { + sp_io::storage::get(&Self::storage_key(at)) + .and_then(|bytes| Decode::decode(&mut &*bytes).ok()) + } + + /// Write some temporary data to a specific storage that can be read (potentially in + /// post-upgrade hook) via [`get_temp_storage`]. 
+ /// + /// A `at` storage identifier must be provided to indicate where the storage is being written + /// to. + #[cfg(feature = "try-runtime")] + fn set_temp_storage(data: T, at: &str) { + sp_io::storage::set(&Self::storage_key(at), &data.encode()); + } +} + +#[cfg(feature = "try-runtime")] +impl OnRuntimeUpgradeHelpersExt for U {} + /// The runtime upgrade trait. /// /// Implementing this lets you express what should happen when the runtime upgrades, @@ -1547,7 +1615,21 @@ pub trait OnRuntimeUpgrade { /// block local data are not accessible. /// /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 + } + + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str>; + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str>; } #[impl_for_tuples(30)] @@ -1557,6 +1639,20 @@ impl OnRuntimeUpgrade for Tuple { for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); weight } + + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::pre_upgrade()); )* ); + result + } + + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + let mut result = Ok(()); + for_tuples!( #( result = result.and(Tuple::post_upgrade()); )* ); + result + } } /// Off-chain computation trait. @@ -1960,6 +2056,22 @@ pub trait Hooks { /// Return the non-negotiable weight consumed for runtime upgrade. 
fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + /// Execute some pre-checks prior to a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn pre_upgrade() -> Result<(), &'static str> { + Ok(()) + } + + /// Execute some post-checks after a runtime upgrade. + /// + /// This hook is never meant to be executed on-chain but is meant to be used by testing tools. + #[cfg(feature = "try-runtime")] + fn post_upgrade() -> Result<(), &'static str> { + Ok(()) + } + /// Implementing this function on a module allows you to perform long-running tasks /// that make (by default) validators generate transactions that feed results /// of those long-running computations back on chain. diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index c4530e9dfd..4306dbd644 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -22,6 +22,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-version = { version = "3.0.0", default-features = false, path = "../../primitives/version" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } impl-trait-for-tuples = "0.2.1" +log = { version = "0.4.14", default-features = false } [dev-dependencies] criterion = "0.3.3" @@ -39,11 +40,13 @@ std = [ "frame-support/std", "sp-runtime/std", "sp-version/std", + "log/std", ] runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] [[bench]] name = "bench" diff --git a/frame/system/benchmarking/Cargo.toml b/frame/system/benchmarking/Cargo.toml index ddf52c96ef..1a9317c69b 100644 --- a/frame/system/benchmarking/Cargo.toml +++ b/frame/system/benchmarking/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false } sp-std = { version = 
"3.0.0", default-features = false, path = "../../../primitives/std" } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../primitives/runtime" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../../benchmarking" } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../benchmarking" } frame-system = { version = "3.0.0", default-features = false, path = "../../system" } frame-support = { version = "3.0.0", default-features = false, path = "../../support" } sp-core = { version = "3.0.0", default-features = false, path = "../../../primitives/core" } diff --git a/frame/system/benchmarking/src/lib.rs b/frame/system/benchmarking/src/lib.rs index 9ff749950a..bdb34e7944 100644 --- a/frame/system/benchmarking/src/lib.rs +++ b/frame/system/benchmarking/src/lib.rs @@ -24,7 +24,7 @@ use sp_std::vec; use sp_std::prelude::*; use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use sp_runtime::traits::Hash; -use frame_benchmarking::{benchmarks, whitelisted_caller}; +use frame_benchmarking::{benchmarks, whitelisted_caller, impl_benchmark_test_suite}; use frame_support::{ storage, traits::Get, @@ -44,6 +44,12 @@ benchmarks! { let caller = whitelisted_caller(); }: _(RawOrigin::Signed(caller), remark_message) + remark_with_event { + let b in 0 .. *T::BlockLength::get().max.get(DispatchClass::Normal) as u32; + let remark_message = vec![1; b as usize]; + let caller = whitelisted_caller(); + }: _(RawOrigin::Signed(caller), remark_message) + set_heap_pages { }: _(RawOrigin::Root, Default::default()) @@ -138,22 +144,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_remark::()); - assert_ok!(test_benchmark_set_heap_pages::()); - assert_ok!(test_benchmark_set_code_without_checks::()); - assert_ok!(test_benchmark_set_changes_trie_config::()); - assert_ok!(test_benchmark_set_storage::()); - assert_ok!(test_benchmark_kill_storage::()); - assert_ok!(test_benchmark_kill_prefix::()); - }); - } -} +impl_benchmark_test_suite!( + Module, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index e521a082a9..a99184650c 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -85,7 +85,7 @@ use sp_runtime::{ use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use frame_support::{ - Parameter, debug, storage, + Parameter, storage, traits::{ Contains, Get, PalletInfo, OnNewAccount, OnKilledAccount, HandleLifetime, StoredMap, EnsureOrigin, OriginTrait, Filter, @@ -292,8 +292,6 @@ pub mod pallet { /// /// # /// - `O(1)` - /// - Base Weight: 0.665 µs, independent of remark length. - /// - No DB operations. /// # #[pallet::weight(T::SystemWeightInfo::remark(_remark.len() as u32))] pub(crate) fn remark(origin: OriginFor, _remark: Vec) -> DispatchResultWithPostInfo { @@ -450,11 +448,25 @@ pub mod pallet { storage::unhashed::kill_prefix(&prefix); Ok(().into()) } + + /// Make some on-chain remark and emit event. + /// + /// # + /// - `O(b)` where b is the length of the remark. + /// - 1 event. 
+ /// # + #[pallet::weight(T::SystemWeightInfo::remark_with_event(remark.len() as u32))] + pub(crate) fn remark_with_event(origin: OriginFor, remark: Vec) -> DispatchResultWithPostInfo { + let who = ensure_signed(origin)?; + let hash = T::Hashing::hash(&remark[..]); + Self::deposit_event(Event::Remarked(who, hash)); + Ok(().into()) + } } /// Event for the System pallet. #[pallet::event] - #[pallet::metadata(T::AccountId = "AccountId")] + #[pallet::metadata(T::AccountId = "AccountId", T::Hash = "Hash")] pub enum Event { /// An extrinsic completed successfully. \[info\] ExtrinsicSuccess(DispatchInfo), @@ -466,6 +478,8 @@ pub mod pallet { NewAccount(T::AccountId), /// An \[account\] was reaped. KilledAccount(T::AccountId), + /// An on-chain remark happened. \[origin, remark_hash\] + Remarked(T::AccountId, T::Hash), } /// Old name generated by `decl_event`. @@ -1046,7 +1060,10 @@ impl Module { (0, _) => { // Logic error - cannot decrement beyond zero and no item should // exist with zero providers.
- debug::print!("Logic error: Unexpected underflow in reducing provider"); + log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing provider", + ); Ok(DecRefStatus::Reaped) }, (1, 0) => { @@ -1064,7 +1081,10 @@ impl Module { } } } else { - debug::print!("Logic error: Account already dead when reducing provider"); + log::error!( + target: "runtime::system", + "Logic error: Account already dead when reducing provider", + ); Ok(DecRefStatus::Reaped) } }) @@ -1093,7 +1113,10 @@ impl Module { Account::::mutate(who, |a| if a.consumers > 0 { a.consumers -= 1; } else { - debug::print!("Logic error: Unexpected underflow in reducing consumer"); + log::error!( + target: "runtime::system", + "Logic error: Unexpected underflow in reducing consumer", + ); }) } diff --git a/frame/system/src/limits.rs b/frame/system/src/limits.rs index c24d671cdd..49a4582240 100644 --- a/frame/system/src/limits.rs +++ b/frame/system/src/limits.rs @@ -129,6 +129,7 @@ pub struct WeightsPerClass { /// `on_initialize` pallet callbacks are invoked and their cost is added before any extrinsic /// is executed. This cost is tracked as `Mandatory` dispatch class. /// +/// ```text,ignore /// | | `max_block` | | /// | | | | /// | | | | @@ -139,12 +140,15 @@ pub struct WeightsPerClass { /// ||\_ Mandatory /// |\__ Operational /// \___ Normal +/// ``` /// /// The remaining capacity can be used to dispatch extrinsics. Note that each dispatch class /// is being tracked separately, but the sum can't exceed `max_block` (except for `reserved`). /// Below you can see a picture representing full block with 3 extrinsics (two `Operational` and /// one `Normal`). Each class has it's own limit `max_total`, but also the sum cannot exceed /// `max_block` value. 
+/// +/// ```text,ignore /// -- `Mandatory` limit (unlimited) /// | # | | | /// | # | `Ext3` | - - `Operational` limit @@ -153,6 +157,7 @@ pub struct WeightsPerClass { /// | #| `on_initialize` | ##| /// | #| `base_block` |###| /// |NOM| |NOM| +/// ``` /// /// It should be obvious now that it's possible for one class to reach it's limit (say `Normal`), /// while the block has still capacity to process more transactions (`max_block` not reached, @@ -164,6 +169,8 @@ pub struct WeightsPerClass { /// full. For instance one might want to prevent high-priority `Normal` transactions from pushing /// out lower-priority `Operational` transactions. In such cases you might add a `reserved` capacity /// for given class. +/// +/// ```text,ignore /// _ /// # \ /// # `Ext8` - `reserved` /// # _/ /// | #| `on_initialize` |###| /// | #| `base_block` |###| /// |NOM| |NOM| +/// ``` /// /// In the above example, `Ext4-6` fill up the block almost up to `max_block`. `Ext7` would not fit /// if there wasn't the extra `reserved` space for `Operational` transactions. Note that `max_total` diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index f2f446913c..aa8bce9661 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -63,7 +63,7 @@ use sp_std::convert::{TryInto, TryFrom}; use sp_std::prelude::{Box, Vec}; use sp_runtime::app_crypto::RuntimeAppPublic; use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}; -use frame_support::{debug, RuntimeDebug}; +use frame_support::RuntimeDebug; /// Marker struct used to flag using all supported keys to sign a payload.
pub struct ForAll {} @@ -550,8 +550,8 @@ pub trait SendSignedTransaction< call: LocalCall, ) -> Option> { let mut account_data = crate::Account::::get(&account.id); - debug::native::debug!( - target: "offchain", + log::debug!( + target: "runtime::offchain", "Creating signed transaction from account: {:?} (nonce: {:?})", account.id, account_data.nonce, diff --git a/frame/system/src/weights.rs b/frame/system/src/weights.rs index f28e90b34c..c961b47e53 100644 --- a/frame/system/src/weights.rs +++ b/frame/system/src/weights.rs @@ -1,6 +1,6 @@ // This file is part of Substrate. -// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// Copyright (C) 2021 Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 // Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,9 +15,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Weights for frame_system -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 2.0.0 -//! DATE: 2020-10-28, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] +//! Autogenerated weights for frame_system +//! +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 +//! DATE: 2021-02-27, STEPS: [50, ], REPEAT: 20, LOW RANGE: [], HIGH RANGE: [] //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -44,79 +45,89 @@ use sp_std::marker::PhantomData; /// Weight functions needed for frame_system. pub trait WeightInfo { fn remark(b: u32, ) -> Weight; + fn remark_with_event(b: u32, ) -> Weight; fn set_heap_pages() -> Weight; fn set_changes_trie_config() -> Weight; fn set_storage(i: u32, ) -> Weight; fn kill_storage(i: u32, ) -> Weight; fn kill_prefix(p: u32, ) -> Weight; - fn suicide() -> Weight; } /// Weights for frame_system using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { fn remark(_b: u32, ) -> Weight { - (1_973_000 as Weight) + (1_296_000 as Weight) + } + fn remark_with_event(b: u32, ) -> Weight { + (13_474_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } fn set_heap_pages() -> Weight { - (2_816_000 as Weight) + (2_024_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (11_539_000 as Weight) + (10_551_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((833_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 0 + .saturating_add((612_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { - (2_131_000 as Weight) - .saturating_add((597_000 as Weight).saturating_mul(i as Weight)) + (562_000 as Weight) + // Standard Error: 0 + .saturating_add((442_000 as Weight).saturating_mul(i as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { - (11_844_000 as Weight) - .saturating_add((857_000 as Weight).saturating_mul(p as Weight)) + (10_499_000 as Weight) + // Standard Error: 1_000 + .saturating_add((840_000 as Weight).saturating_mul(p as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } - fn suicide() -> Weight { - (37_209_000 as Weight) - } } // For backwards compatibility and tests impl WeightInfo for () { fn remark(_b: u32, ) -> Weight { - (1_973_000 as Weight) + (1_296_000 as Weight) + } + fn remark_with_event(b: u32, ) -> Weight { + (13_474_000 as Weight) + // Standard Error: 0 + .saturating_add((1_000 as Weight).saturating_mul(b as Weight)) } fn 
set_heap_pages() -> Weight { - (2_816_000 as Weight) + (2_024_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } fn set_changes_trie_config() -> Weight { - (11_539_000 as Weight) + (10_551_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } fn set_storage(i: u32, ) -> Weight { (0 as Weight) - .saturating_add((833_000 as Weight).saturating_mul(i as Weight)) + // Standard Error: 0 + .saturating_add((612_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_storage(i: u32, ) -> Weight { - (2_131_000 as Weight) - .saturating_add((597_000 as Weight).saturating_mul(i as Weight)) + (562_000 as Weight) + // Standard Error: 0 + .saturating_add((442_000 as Weight).saturating_mul(i as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(i as Weight))) } fn kill_prefix(p: u32, ) -> Weight { - (11_844_000 as Weight) - .saturating_add((857_000 as Weight).saturating_mul(p as Weight)) + (10_499_000 as Weight) + // Standard Error: 1_000 + .saturating_add((840_000 as Weight).saturating_mul(p as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(p as Weight))) } - fn suicide() -> Weight { - (37_209_000 as Weight) - } } diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index f4f7bbda0f..01aa6ff3cf 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -21,11 +21,12 @@ sp-std = { version = "3.0.0", default-features = false, path = "../../primitives sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io", optional = true } sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } sp-inherents = { version = "3.0.0", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "3.0.0", 
default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } sp-timestamp = { version = "3.0.0", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.2.1" +log = { version = "0.4.14", default-features = false } [dev-dependencies] sp-io ={ version = "3.0.0", path = "../../primitives/io" } @@ -42,6 +43,8 @@ std = [ "frame-support/std", "serde", "frame-system/std", - "sp-timestamp/std" + "sp-timestamp/std", + "log/std", ] runtime-benchmarks = ["frame-benchmarking", "sp-io"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/timestamp/README.md b/frame/timestamp/README.md index d406b039bc..de1fb74392 100644 --- a/frame/timestamp/README.md +++ b/frame/timestamp/README.md @@ -2,7 +2,7 @@ The Timestamp module provides functionality to get and set the on-chain time. 
-- [`timestamp::Config`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/trait.Trait.html) +- [`timestamp::Trait`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/trait.Trait.html) - [`Call`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/enum.Call.html) - [`Module`](https://docs.rs/pallet-timestamp/latest/pallet_timestamp/struct.Module.html) diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index ad249cbae6..57b8ce2d1b 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -23,7 +23,7 @@ use super::*; use sp_std::prelude::*; use frame_system::RawOrigin; use frame_support::{ensure, traits::OnFinalize}; -use frame_benchmarking::{benchmarks, TrackedStorageKey}; +use frame_benchmarking::{benchmarks, TrackedStorageKey, impl_benchmark_test_suite}; use crate::Module as Timestamp; @@ -57,17 +57,8 @@ benchmarks! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set::()); - assert_ok!(test_benchmark_on_finalize::()); - }); - } -} +impl_benchmark_test_suite!( + Timestamp, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 86ca0c11a7..0deef258ed 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -97,8 +97,6 @@ pub mod weights; use sp_std::{result, cmp}; use sp_inherents::InherentData; -#[cfg(feature = "std")] -use frame_support::debug; use frame_support::traits::{Time, UnixTime}; use sp_runtime::{ RuntimeString, @@ -287,8 +285,9 @@ impl UnixTime for Pallet { let now = Self::now(); sp_std::if_std! 
{ if now == T::Moment::zero() { - debug::error!( - "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" + log::error!( + target: "runtime::timestamp", + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0", ); } } diff --git a/frame/tips/Cargo.toml b/frame/tips/Cargo.toml index 92af65ce07..a16c9b9132 100644 --- a/frame/tips/Cargo.toml +++ b/frame/tips/Cargo.toml @@ -21,7 +21,7 @@ frame-support = { version = "3.0.0", default-features = false, path = "../suppor frame-system = { version = "3.0.0", default-features = false, path = "../system" } pallet-treasury = { version = "3.0.0", default-features = false, path = "../treasury" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-io ={ version = "3.0.0", path = "../../primitives/io" } @@ -45,3 +45,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/tips/src/benchmarking.rs b/frame/tips/src/benchmarking.rs index e05afc0b2a..e6a0284d82 100644 --- a/frame/tips/src/benchmarking.rs +++ b/frame/tips/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::{traits::{Saturating}}; use crate::Module as TipsMod; @@ -193,21 +193,8 @@ benchmarks! 
{ }: _(RawOrigin::Root, hash) } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_report_awesome::()); - assert_ok!(test_benchmark_retract_tip::()); - assert_ok!(test_benchmark_tip_new::()); - assert_ok!(test_benchmark_tip::()); - assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_slash_tip::()); - }); - } -} +impl_benchmark_test_suite!( + TipsMod, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 7a713ab1cf..2a7fbe503e 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -40,3 +40,4 @@ std = [ "sp-io/std", "sp-core/std", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index fede9f9dd0..12ce23806d 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-api = { version = "3.0.0", default-features = false, path = "../../../../primitives/api" } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } sp-runtime = { version = "3.0.0", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "3.0.0", default-features = false, path = "../../../../frame/support" } pallet-transaction-payment = { version = "3.0.0", default-features = false, path = "../../../transaction-payment" } [features] @@ -24,5 +25,6 @@ std = [ "sp-api/std", "codec/std", "sp-runtime/std", + "frame-support/std", "pallet-transaction-payment/std", ] diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs 
b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index bd05aec303..315670669e 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -24,6 +24,42 @@ use sp_runtime::traits::MaybeDisplay; pub use pallet_transaction_payment::{FeeDetails, InclusionFee, RuntimeDispatchInfo}; +// TODO Fix conflicting implementations of trait `parity_scale_codec::Decode` for type `pallet_transaction_payment::RuntimeDispatchInfo<_>`: +// use codec::Decode; +// use frame_support::weights::{Weight, DispatchClass}; +// // The weight type used by legacy runtimes +// type LegacyWeight = u32; +// // Encoding of `RuntimeDispatchInfo` is approximately (assuming `u128` balance) +// // old byte length (u32, u8, u128) = 168 / 8 = 21 +// // new byte length (u64, u8, u128) = 200 / 8 = 25 +// /// Byte length of an encoded legacy `RuntimeDispatchInfo` i.e. Weight = u32 +// const LEGACY_RUNTIME_DISPATCH_INFO_BYTE_LENGTH: usize = 21; + +// impl Decode for RuntimeDispatchInfo { +// // Custom decode implementation to handle the differences between the `RuntimeDispatchInfo` type +// // between client version vs. runtime version +// // Concretely, `Weight` type changed from `u32` in some legacy runtimes to now `u64` +// fn decode(value: &mut I) -> Result { +// // Check `value` len to see whether we should decode legacy or new Weight type +// let input_len = value.remaining_len()?.ok_or("empty buffer while decoding")?; +// let weight: Weight = if input_len == LEGACY_RUNTIME_DISPATCH_INFO_BYTE_LENGTH { +// LegacyWeight::decode(value)?.into() +// } else { +// Weight::decode(value)? +// }; +// +// let class = DispatchClass::decode(value)?; +// let partial_fee = Balance::decode(value)?; +// +// return Ok(Self { +// weight, +// class, +// partial_fee, +// }) +// } +// } + + sp_api::decl_runtime_apis! { pub trait TransactionPaymentApi where Balance: Codec + MaybeDisplay, @@ -32,3 +68,27 @@ sp_api::decl_runtime_apis! 
{ fn query_fee_details(uxt: Block::Extrinsic, len: u32) -> FeeDetails; } } + +// TODO Fix conflicting implementations of trait `parity_scale_codec::Decode` for type `pallet_transaction_payment::RuntimeDispatchInfo<_>`: +// #[cfg(test)] +// mod tests { +// use super::*; +// +// #[test] +// fn it_decodes_legacy_runtime_dispatch_info() { +// // older runtimes pre-2.0.0 use `type Weight = u32` +// let legacy_dispatch_info = (1_u32, DispatchClass::Normal, 1_u128); +// let decoded = RuntimeDispatchInfo::::decode(&mut &legacy_dispatch_info.encode()[..]).expect("it decodes"); +// assert_eq!(decoded.weight, legacy_dispatch_info.0 as u64); +// assert_eq!(decoded.class, legacy_dispatch_info.1); +// assert_eq!(decoded.partial_fee, legacy_dispatch_info.2); +// } +// +// #[test] +// fn it_decodes_new_runtime_dispatch_info() { +// // newer runtimes post-2.0.0 use `type Weight = u64` +// let runtime_dispatch_info = RuntimeDispatchInfo { weight: 1, class: DispatchClass::Normal, partial_fee: 1_u128 }; +// let decoded = RuntimeDispatchInfo::::decode(&mut &runtime_dispatch_info.encode()[..]).expect("it decodes"); +// assert_eq!(decoded, runtime_dispatch_info); +// } +// } diff --git a/frame/transaction-payment/src/types.rs b/frame/transaction-payment/src/types.rs index 50f00110f9..ab771eb8ba 100644 --- a/frame/transaction-payment/src/types.rs +++ b/frame/transaction-payment/src/types.rs @@ -24,7 +24,6 @@ use codec::{Encode, Decode}; use serde::{Serialize, Deserialize}; use sp_runtime::traits::{AtLeast32BitUnsigned, Zero}; - /// The base fee and adjusted weight and length fees constitute the _inclusion fee_. #[derive(Encode, Decode, Clone, Eq, PartialEq)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] @@ -84,7 +83,7 @@ impl FeeDetails { } /// Information related to a dispatchable's class, weight, and fee that can be queried from the runtime. 
-#[derive(Eq, PartialEq, Encode, Default)] +#[derive(Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] @@ -102,38 +101,6 @@ pub struct RuntimeDispatchInfo { pub partial_fee: Balance, } -// The weight type used by legacy runtimes (pre-frame 2.0.0 versions) -type LegacyWeight = u32; -// Encoding of `RuntimeDispatchInfo` is approximately (assuming `u128` balance) -// old byte length (u32, u8, u128) = 168 / 8 = 21 -// new byte length (u64, u8, u128) = 200 / 8 = 25 -/// Byte length of an encoded legacy `RuntimeDispatchInfo` i.e. Weight = u32 -const LEGACY_RUNTIME_DISPATCH_INFO_BYTE_LENGTH: usize = 21; - -impl Decode for RuntimeDispatchInfo { - // Custom decode implementation to handle the differences between the `RuntimeDispatchInfo` type - // between client version vs. runtime version - // Concretely, `Weight` type changed from `u32` in some legacy runtimes to now `u64` - fn decode(value: &mut I) -> Result { - // Check `value` len to see whether we should decode legacy or new Weight type - let input_len = value.remaining_len()?.ok_or("empty buffer while decoding")?; - let weight: Weight = if input_len == LEGACY_RUNTIME_DISPATCH_INFO_BYTE_LENGTH { - LegacyWeight::decode(value)?.into() - } else { - Weight::decode(value)? 
- }; - - let class = DispatchClass::decode(value)?; - let partial_fee = Balance::decode(value)?; - - return Ok(Self { - weight, - class, - partial_fee, - }) - } -} - #[cfg(feature = "std")] mod serde_balance { use serde::{Deserialize, Serializer, Deserializer}; @@ -152,24 +119,6 @@ mod serde_balance { mod tests { use super::*; - #[test] - fn it_decodes_legacy_runtime_dispatch_info() { - // older runtimes pre-frame 2.0.0 use `type Weight = u32` - let legacy_dispatch_info = (1_u32, DispatchClass::Normal, 1_u128); - let decoded = RuntimeDispatchInfo::::decode(&mut &legacy_dispatch_info.encode()[..]).expect("it decodes"); - assert_eq!(decoded.weight, legacy_dispatch_info.0 as u64); - assert_eq!(decoded.class, legacy_dispatch_info.1); - assert_eq!(decoded.partial_fee, legacy_dispatch_info.2); - } - - #[test] - fn it_decodes_new_runtime_dispatch_info() { - // newer runtimes post frame 2.0.0 use `type Weight = u64` - let runtime_dispatch_info = RuntimeDispatchInfo { weight: 1, class: DispatchClass::Normal, partial_fee: 1_u128 }; - let decoded = RuntimeDispatchInfo::::decode(&mut &runtime_dispatch_info.encode()[..]).expect("it decodes"); - assert_eq!(decoded, runtime_dispatch_info); - } - #[test] fn should_serialize_and_deserialize_properly_with_string() { let info = RuntimeDispatchInfo { diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 461dc91223..da0ffcb725 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -22,7 +22,7 @@ frame-system = { version = "3.0.0", default-features = false, path = "../system" pallet-balances = { version = "3.0.0", default-features = false, path = "../balances" } impl-trait-for-tuples = "0.2.1" -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-io ={ version = "3.0.0", path = "../../primitives/io" } @@ 
-45,3 +45,4 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", "frame-system/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index 9cb214420c..119516fe27 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks_instance, account}; +use frame_benchmarking::{benchmarks_instance, account, impl_benchmark_test_suite}; use frame_support::traits::OnInitialize; use crate::Module as Treasury; @@ -66,7 +66,7 @@ fn setup_pot_account, I: Instance>() { } benchmarks_instance! { - + propose_spend { let (caller, value, beneficiary_lookup) = setup_proposal::(SEED); // Whitelist caller account from further DB operations. @@ -103,19 +103,8 @@ benchmarks_instance! { } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_spend::()); - assert_ok!(test_benchmark_reject_proposal::()); - assert_ok!(test_benchmark_approve_proposal::()); - assert_ok!(test_benchmark_on_initialize_proposals::()); - }); - } -} +impl_benchmark_test_suite!( + Treasury, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/try-runtime/Cargo.toml b/frame/try-runtime/Cargo.toml new file mode 100644 index 0000000000..9c1919d380 --- /dev/null +++ b/frame/try-runtime/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "frame-try-runtime" +version = "0.9.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for democracy" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +codec = { 
package = "parity-scale-codec", version = "2.0.0", default-features = false } + +sp-api = { version = "3.0.0", path = "../../primitives/api", default-features = false } +sp-std = { version = "3.0.0", path = "../../primitives/std" , default-features = false } +sp-runtime = { version = "3.0.0", path = "../../primitives/runtime" , default-features = false } + +frame-support = { version = "3.0.0", path = "../support", default-features = false } + +[features] +default = [ "std" ] +std = [ + "sp-api/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", +] diff --git a/frame/try-runtime/src/lib.rs b/frame/try-runtime/src/lib.rs new file mode 100644 index 0000000000..dcd3a47878 --- /dev/null +++ b/frame/try-runtime/src/lib.rs @@ -0,0 +1,37 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Supporting types for try-runtime, testing and dry-running commands. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::prelude::*; +use frame_support::weights::Weight; + +sp_api::decl_runtime_apis! { + /// Runtime api for testing the execution of a runtime upgrade. + pub trait TryRuntime { + /// dry-run runtime upgrades, returning the total weight consumed. + /// + /// This should do EXACTLY the same operations as the runtime would have done in the case of + /// a runtime upgrade (e.g. 
pallet ordering must be the same) + /// + /// Returns the consumed weight of the migration in case of a successful one, combined with + /// the total allowed block weight of the runtime. + fn on_runtime_upgrade() -> Result<(Weight, Weight), sp_runtime::RuntimeString>; + } +} diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index edb930231e..f55cff4d65 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -22,7 +22,7 @@ sp-runtime = { version = "3.0.0", default-features = false, path = "../../primit sp-std = { version = "3.0.0", default-features = false, path = "../../primitives/std" } sp-io = { version = "3.0.0", default-features = false, path = "../../primitives/io" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-core = { version = "3.0.0", path = "../../primitives/core" } @@ -43,3 +43,4 @@ runtime-benchmarks = [ "frame-benchmarking", "frame-support/runtime-benchmarks", ] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index 24de602157..79fb569c77 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -21,7 +21,7 @@ use super::*; use frame_system::{RawOrigin, EventRecord}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; const SEED: u32 = 0; @@ -69,18 +69,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_batch::()); - assert_ok!(test_benchmark_as_derivative::()); - assert_ok!(test_benchmark_batch_all::()); - }); - } -} +impl_benchmark_test_suite!( + Module, + crate::tests::new_test_ext(), + crate::tests::Test, +); diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index dc42fbcbab..e1335237eb 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -20,7 +20,7 @@ sp-std = { version = "3.0.0", default-features = false, path = "../../primitives sp-runtime = { version = "3.0.0", default-features = false, path = "../../primitives/runtime" } frame-support = { version = "3.0.0", default-features = false, path = "../support" } frame-system = { version = "3.0.0", default-features = false, path = "../system" } -frame-benchmarking = { version = "3.0.0", default-features = false, path = "../benchmarking", optional = true } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] sp-io = { version = "3.0.0", path = "../../primitives/io" } @@ -40,3 +40,4 @@ std = [ "frame-system/std", ] runtime-benchmarks = ["frame-benchmarking"] +try-runtime = ["frame-support/try-runtime"] diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index f650110504..937f2b033d 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -22,7 +22,7 @@ use super::*; use frame_system::{RawOrigin, Module as System}; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; +use frame_benchmarking::{benchmarks, account, whitelisted_caller, impl_benchmark_test_suite}; use sp_runtime::traits::Bounded; use crate::Module as Vesting; @@ -224,21 +224,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { - assert_ok!(test_benchmark_vest_locked::()); - assert_ok!(test_benchmark_vest_unlocked::()); - assert_ok!(test_benchmark_vest_other_locked::()); - assert_ok!(test_benchmark_vest_other_unlocked::()); - assert_ok!(test_benchmark_vested_transfer::()); - assert_ok!(test_benchmark_force_vested_transfer::()); - }); - } -} +impl_benchmark_test_suite!( + Vesting, + crate::tests::ExtBuilder::default().existential_deposit(256).build(), + crate::tests::Test, +); diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index 20987035ef..c284d1f479 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -23,6 +23,8 @@ sp-state-machine = { version = "0.9.0", optional = true, path = "../state-machin hash-db = { version = "0.15.2", optional = true } thiserror = { version = "1.0.21", optional = true } +log = { version = "0.4.14", default-features = false } + [dev-dependencies] sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } @@ -37,4 +39,15 @@ std = [ "sp-version/std", "hash-db", "thiserror", + "log/std", +] +# Special feature to disable logging completly. +# +# By default `sp-api` initializes the `RuntimeLogger` for each runtime api function. However, +# logging functionality increases the code size. It is recommended to enable this feature when +# building a runtime for registering it on chain. +# +# This sets the max logging level to `off` for `log`. 
+disable-logging = [ + "log/max_level_off", ] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 67d8081f5f..450ce64b2b 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.9" +quote = "1.0.3" syn = { version = "1.0.58", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.6" blake2-rfc = { version = "0.2.18", default-features = false } diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index ed5f33ef60..9fd5baba87 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -187,14 +187,15 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { result.push(quote!( #[cfg(any(feature = "std", test))] fn convert_between_block_types - ( - input: &I, error_desc: &'static str, - ) -> std::result::Result + #crate_::ApiError>( + input: &I, + map_error: F, + ) -> std::result::Result { ::decode_with_depth_limit( #crate_::MAX_EXTRINSIC_DEPTH, &mut &#crate_::Encode::encode(input)[..], - ).map_err(|e| format!("{} {}", error_desc, e)) + ).map_err(map_error) } )); @@ -202,19 +203,26 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { for fn_ in fns { let params = extract_parameter_names_types_and_borrows(&fn_, AllowSelfRefInParameters::No)?; let trait_fn_name = &fn_.ident; + let function_name_str = fn_.ident.to_string(); let fn_name = generate_native_call_generator_fn_name(&fn_.ident); let output = return_type_replace_block_with_node_block(fn_.output.clone()); let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, String> ); + let output = quote!( std::result::Result<#output_ty, #crate_::ApiError> ); // Every type that is using the `Block` generic parameter, we need to 
encode/decode, // to make it compatible between the runtime/node. let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let name_str = format!( - "Could not convert parameter `{}` between node and runtime:", quote!(#n) - ); + let param_name = quote!(#n).to_string(); + quote!( - let #n: #t = convert_between_block_types(&#n, #name_str)?; + let #n: #t = convert_between_block_types( + &#n, + |e| #crate_::ApiError::FailedToConvertParameter { + function: #function_name_str, + parameter: #param_name, + error: e, + }, + )?; ) }); // Same as for the input types, we need to check if we also need to convert the output, @@ -223,7 +231,10 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { quote!( convert_between_block_types( &res, - "Could not convert return value from runtime to node!" + |e| #crate_::ApiError::FailedToConvertReturnValue { + function: #function_name_str, + error: e, + }, ) ) } else { @@ -399,10 +410,10 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { #[cfg(any(feature = "std", test))] pub fn #fn_name< R: #crate_::Encode + #crate_::Decode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, + NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, Block: #crate_::BlockT, T: #crate_::CallApiAt, - C: #crate_::Core, + C: #crate_::Core, >( call_runtime_at: &T, core_api: &C, @@ -416,7 +427,7 @@ fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { native_call: Option, context: #crate_::ExecutionContext, recorder: &Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { + ) -> std::result::Result<#crate_::NativeOrEncoded, #crate_::ApiError> { let version = call_runtime_at.runtime_version_at(at)?; use #crate_::InitializeBlock; let initialize_block = if #skip_initialize_block { @@ -621,7 +632,7 @@ impl<'a> ToClientSideDecl<'a> { context: #crate_::ExecutionContext, params: Option<( #( #param_types ),* )>, params_encoded: Vec, - ) 
-> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error>; + ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError>; } ) } @@ -647,7 +658,7 @@ impl<'a> ToClientSideDecl<'a> { let params2 = params.clone(); let ret_type = return_type_extract_type(&method.sig.output); - fold_fn_decl_for_client_side(&mut method.sig, &self.block_id); + fold_fn_decl_for_client_side(&mut method.sig, &self.block_id, &self.crate_); let name_impl = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); let crate_ = self.crate_; @@ -705,7 +716,12 @@ impl<'a> ToClientSideDecl<'a> { }, #crate_::NativeOrEncoded::Encoded(r) => { <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| { #crate_::ApiError::new(#function_name, err).into() }) + .map_err(|err| + #crate_::ApiError::FailedToDecodeReturnValue { + function: #function_name, + error: err, + } + ) } } ) @@ -728,12 +744,10 @@ impl<'a> Fold for ToClientSideDecl<'a> { if is_core_trait { // Add all the supertraits we want to have for `Core`. - let crate_ = &self.crate_; input.supertraits = parse_quote!( 'static + Send + Sync - + #crate_::ApiErrorExt ); } else { // Add the `Core` runtime api as super trait. @@ -803,12 +817,12 @@ fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { let bounds = &t.bounds; quote! { #ident #colon_token #bounds } - }).chain(std::iter::once(quote! { __Sr_Api_Error__ })); + }); let ty_generics = trait_.generics.type_params().map(|t| { let ident = &t.ident; quote! { #ident } - }).chain(std::iter::once(quote! 
{ Error = __Sr_Api_Error__ })); + }); quote!( #[cfg(any(feature = "std", test))] diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index f8d7c74b97..2be8545a81 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -162,6 +162,7 @@ fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { let input = Ident::new("input", Span::call_site()); let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let impl_calls = generate_impl_calls(impls, &input)? .into_iter() .map(|(trait_, fn_name, impl_, attrs)| { @@ -183,6 +184,8 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { } }; + #c::init_runtime_logger(); + let output = { #impl_ }; #c::to_substrate_wasm_fn_return_value(&output) } @@ -233,16 +236,6 @@ fn generate_runtime_api_base_structures() -> Result { C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, {} - #[cfg(any(feature = "std", test))] - impl> #crate_::ApiErrorExt - for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - type Error = C::Error; - } - #[cfg(any(feature = "std", test))] impl> #crate_::ApiExt for RuntimeApiImpl @@ -269,16 +262,20 @@ fn generate_runtime_api_base_structures() -> Result { fn has_api( &self, at: &#crate_::BlockId, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) + ) -> std::result::Result where Self: Sized { + self.call + .runtime_version_at(at) + .map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) } fn has_api_with bool>( &self, at: &#crate_::BlockId, pred: P, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) + ) -> std::result::Result where Self: Sized { + self.call + 
.runtime_version_at(at) + .map(|v| v.has_api_with(&A::ID, pred)) } fn record_proof(&mut self) { @@ -306,7 +303,7 @@ fn generate_runtime_api_base_structures() -> Result { >>, parent_hash: Block::Hash, ) -> std::result::Result< - #crate_::StorageChanges, + #crate_::StorageChanges, String > where Self: Sized { self.initialized_block.borrow_mut().take(); @@ -513,7 +510,7 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { // Generate the correct return type. input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, RuntimeApiImplCall::Error> + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> ); // Generate the new method implementation that calls into the runtime. @@ -554,7 +551,7 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { ) }; - let mut input = fold::fold_impl_item_method(self, input); + let mut input = fold::fold_impl_item_method(self, input); // We need to set the block, after we modified the rest of the ast, otherwise we would // modify our generated block as well. input.block = block; diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index c6ff98c0f1..62a03a59ba 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -27,7 +27,7 @@ use proc_macro2::{Span, TokenStream}; use quote::{quote, quote_spanned}; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, ImplItem, TypePath, parse_quote, + spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, TypePath, parse_quote, parse::{Parse, ParseStream, Result, Error}, fold::{self, Fold}, Attribute, Pat, }; @@ -61,29 +61,14 @@ impl Parse for RuntimeApiImpls { } } -/// Implement the `ApiExt` trait, `ApiErrorExt` trait and the `Core` runtime api. +/// Implement the `ApiExt` trait and the `Core` runtime api. 
fn implement_common_api_traits( - error_type: Option, block_type: TypePath, self_ty: Type, ) -> Result { let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let error_type = error_type - .map(|e| quote!(#e)) - .unwrap_or_else(|| quote!( #crate_::ApiError ) ); - - // Quote using the span from `error_type` to generate nice error messages when the type is - // not implementing a trait or similar. - let api_error_ext = quote_spanned! { error_type.span() => - impl #crate_::ApiErrorExt for #self_ty { - type Error = #error_type; - } - }; - Ok(quote!( - #api_error_ext - impl #crate_::ApiExt<#block_type> for #self_ty { type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; @@ -97,7 +82,7 @@ fn implement_common_api_traits( fn has_api( &self, _: &#crate_::BlockId<#block_type>, - ) -> std::result::Result where Self: Sized { + ) -> std::result::Result where Self: Sized { Ok(true) } @@ -105,7 +90,7 @@ fn implement_common_api_traits( &self, _: &#crate_::BlockId<#block_type>, pred: P, - ) -> std::result::Result where Self: Sized { + ) -> std::result::Result where Self: Sized { Ok(pred(A::VERSION)) } @@ -140,7 +125,7 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<()>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #error_type> { + ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #crate_::ApiError> { unimplemented!("Not required for testing!") } @@ -150,7 +135,7 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<#block_type>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { unimplemented!("Not required for testing!") } @@ -160,7 +145,7 @@ fn implement_common_api_traits( _: #crate_::ExecutionContext, _: Option<&<#block_type as #crate_::BlockT>::Header>, _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + 
) -> std::result::Result<#crate_::NativeOrEncoded<()>, #crate_::ApiError> { unimplemented!("Not required for testing!") } } @@ -230,9 +215,6 @@ struct FoldRuntimeApiImpl<'a> { block_type: &'a TypePath, /// The identifier of the trait being implemented. impl_trait: &'a Ident, - /// Stores the error type that is being found in the trait implementation as associated type - /// with the name `Error`. - error_type: &'a mut Option, } impl<'a> Fold for FoldRuntimeApiImpl<'a> { @@ -300,7 +282,7 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { // Generate the correct return type. input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error> + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, #crate_::ApiError> ); } @@ -336,51 +318,12 @@ impl<'a> Fold for FoldRuntimeApiImpl<'a> { input.block = block; input } - - fn fold_impl_item(&mut self, input: ImplItem) -> ImplItem { - match input { - ImplItem::Type(ty) => { - if ty.ident == "Error" { - if let Some(error_type) = self.error_type { - if *error_type != ty.ty { - let mut error = Error::new( - ty.span(), - "Error type can not change between runtime apis", - ); - let error_first = Error::new( - error_type.span(), - "First error type was declared here." - ); - - error.combine(error_first); - - ImplItem::Verbatim(error.to_compile_error()) - } else { - ImplItem::Verbatim(Default::default()) - } - } else { - *self.error_type = Some(ty.ty); - ImplItem::Verbatim(Default::default()) - } - } else { - let error = Error::new( - ty.span(), - "Only associated type with name `Error` is allowed", - ); - ImplItem::Verbatim(error.to_compile_error()) - } - }, - o => fold::fold_impl_item(self, o), - } - } } /// Result of [`generate_runtime_api_impls`]. struct GeneratedRuntimeApiImpls { /// All the runtime api implementations. impls: TokenStream, - /// The error type that should be used by the runtime apis. - error_type: Option, /// The block type that is being used by the runtime apis. 
block_type: TypePath, /// The type the traits are implemented for. @@ -393,7 +336,6 @@ struct GeneratedRuntimeApiImpls { /// extracts the error type, self type and the block type. fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { let mut result = Vec::with_capacity(impls.len()); - let mut error_type = None; let mut global_block_type: Option = None; let mut self_ty: Option> = None; @@ -451,7 +393,6 @@ fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result Result proc_macro fn mock_impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let GeneratedRuntimeApiImpls { impls, error_type, block_type, self_ty } = + let GeneratedRuntimeApiImpls { impls, block_type, self_ty } = generate_runtime_api_impls(api_impls)?; - let api_traits = implement_common_api_traits(error_type, block_type, self_ty)?; + let api_traits = implement_common_api_traits(block_type, self_ty)?; Ok(quote!( #hidden_includes diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 741993c224..cf8cc1355d 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -99,6 +99,7 @@ pub fn replace_wild_card_parameter_names(input: &mut Signature) { pub fn fold_fn_decl_for_client_side( input: &mut Signature, block_id: &TokenStream, + crate_: &TokenStream, ) { replace_wild_card_parameter_names(input); @@ -109,7 +110,7 @@ pub fn fold_fn_decl_for_client_side( // Wrap the output in a `Result` input.output = { let ty = return_type_extract_type(&input.output); - parse_quote!( -> std::result::Result<#ty, Self::Error> ) + parse_quote!( -> std::result::Result<#ty, #crate_::ApiError> ) }; } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 8ce447c0d3..afb9af343b 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -30,6 +30,19 @@ //! 
api, the [`ApiExt`] trait, the [`CallApiAt`] trait and the [`ConstructRuntimeApi`] trait. //! //! On a meta level this implies, the client calls the generated API from the client perspective. +//! +//! +//! # Logging +//! +//! Substrate supports logging from the runtime in native and in wasm. For that purpose it provides +//! the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). This runtime logger is +//! automatically enabled for each call into the runtime through the runtime api. As logging +//! introduces extra code that isn't actually required for the logic of your runtime and also +//! increases the final wasm blob size, it is recommended to disable the logging for on-chain +//! wasm blobs. This can be done by enabling the `disable-logging` feature of this crate. Be aware +//! that this feature instructs `log` and `tracing` to disable logging at compile time by setting +//! the `max_level_off` feature for these crates. So, you should not enable this feature for a +//! native build as otherwise the node will not output any log messages. #![cfg_attr(not(feature = "std"), no_std)] @@ -67,7 +80,7 @@ pub use sp_std::{slice, mem}; #[cfg(feature = "std")] use sp_std::result; #[doc(hidden)] -pub use codec::{Encode, Decode, DecodeLimit}; +pub use codec::{Encode, Decode, DecodeLimit, self}; use sp_core::OpaqueMetadata; #[cfg(feature = "std")] use std::{panic::UnwindSafe, cell::RefCell}; @@ -246,8 +259,8 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// and the error type can be specified as associated type. If no error type is specified [`String`] /// is used as error type. /// -/// Besides implementing the given traits, the [`Core`](sp_api::Core), [`ApiExt`](sp_api::ApiExt) -/// and [`ApiErrorExt`](sp_api::ApiErrorExt) are implemented automatically. +/// Besides implementing the given traits, the [`Core`](sp_api::Core) and [`ApiExt`](sp_api::ApiExt) +/// are implemented automatically. 
/// /// # Example /// @@ -284,11 +297,6 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// } /// /// impl BlockBuilder for MockApi { -/// /// Sets the error type that is being used by the mock implementation. -/// /// The error type is used by all runtime apis. It is only required to -/// /// be specified in one trait implementation. -/// type Error = sp_api::ApiError; -/// /// fn build_block() -> Block { /// unimplemented!("Not Required in tests") /// } @@ -331,15 +339,14 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// /// sp_api::mock_impl_runtime_apis! { /// impl Balance for MockApi { -/// type Error = sp_api::ApiError; /// #[advanced] -/// fn get_balance(&self, at: &BlockId) -> Result, Self::Error> { +/// fn get_balance(&self, at: &BlockId) -> Result, sp_api::ApiError> { /// println!("Being called at: {}", at); /// /// Ok(self.balance.into()) /// } /// #[advanced] -/// fn set_balance(at: &BlockId, val: u64) -> Result, Self::Error> { +/// fn set_balance(at: &BlockId, val: u64) -> Result, sp_api::ApiError> { /// if let BlockId::Number(1) = at { /// println!("Being called to set balance to: {}", val); /// } @@ -392,47 +399,42 @@ pub trait ConstructRuntimeApi> { fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } -/// An error describing which API call failed. -#[cfg_attr(feature = "std", derive(Debug, thiserror::Error, Eq, PartialEq))] -#[cfg_attr(feature = "std", error("Failed to execute API call {tag}"))] -#[cfg(feature = "std")] -pub struct ApiError { - tag: &'static str, - #[source] - error: codec::Error, -} - -#[cfg(feature = "std")] -impl From<(&'static str, codec::Error)> for ApiError { - fn from((tag, error): (&'static str, codec::Error)) -> Self { - Self { - tag, - error, - } - } -} - -#[cfg(feature = "std")] -impl ApiError { - pub fn new(tag: &'static str, error: codec::Error) -> Self { - Self { - tag, - error, - } - } +/// Init the [`RuntimeLogger`](sp_runtime::runtime_logger::RuntimeLogger). 
+pub fn init_runtime_logger() { + #[cfg(not(feature = "disable-logging"))] + sp_runtime::runtime_logger::RuntimeLogger::init(); } -/// Extends the runtime api traits with an associated error type. This trait is given as super -/// trait to every runtime api trait. +/// An error describing which API call failed. #[cfg(feature = "std")] -pub trait ApiErrorExt { - /// Error type used by the runtime apis. - type Error: std::fmt::Debug + From; +#[derive(Debug, thiserror::Error)] +pub enum ApiError { + #[error("Failed to decode return value of {function}")] + FailedToDecodeReturnValue { + function: &'static str, + #[source] + error: codec::Error, + }, + #[error("Failed to convert return value from runtime to node of {function}")] + FailedToConvertReturnValue { + function: &'static str, + #[source] + error: codec::Error, + }, + #[error("Failed to convert parameter `{parameter}` from node to runtime of {function}")] + FailedToConvertParameter { + function: &'static str, + parameter: &'static str, + #[source] + error: codec::Error, + }, + #[error(transparent)] + Application(#[from] Box), } /// Extends the runtime api implementation with some common functionality. #[cfg(feature = "std")] -pub trait ApiExt: ApiErrorExt { +pub trait ApiExt { /// The state backend that is used to store the block states. type StateBackend: StateBackend>; @@ -450,14 +452,14 @@ pub trait ApiExt: ApiErrorExt { fn has_api( &self, at: &BlockId, - ) -> Result where Self: Sized; + ) -> Result where Self: Sized; /// Check if the given api is implemented and the version passes a predicate. fn has_api_with bool>( &self, at: &BlockId, pred: P, - ) -> Result where Self: Sized; + ) -> Result where Self: Sized; /// Start recording all accessed trie nodes for generating proofs. 
fn record_proof(&mut self); @@ -478,7 +480,10 @@ pub trait ApiExt: ApiErrorExt { backend: &Self::StateBackend, changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, - ) -> Result, String> where Self: Sized; + ) -> Result< + StorageChanges, + String + > where Self: Sized; } /// Before calling any runtime api function, the runtime need to be initialized @@ -533,9 +538,6 @@ pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend { - /// Error type used by the implementation. - type Error: std::fmt::Debug + From; - /// The state backend that is used to store the block states. type StateBackend: StateBackend>; @@ -544,15 +546,18 @@ pub trait CallApiAt { fn call_api_at< 'a, R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: Core, + NC: FnOnce() -> result::Result + UnwindSafe, + C: Core, >( &self, params: CallApiAtParams<'a, Block, C, NC, Self::StateBackend>, - ) -> Result, Self::Error>; + ) -> Result, ApiError>; /// Returns the runtime version at the given block. - fn runtime_version_at(&self, at: &BlockId) -> Result; + fn runtime_version_at( + &self, + at: &BlockId, + ) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. @@ -605,10 +610,6 @@ pub trait RuntimeApiInfo { const VERSION: u32; } -/// Extracts the `Api::Error` for a type that provides a runtime api. 
-#[cfg(feature = "std")] -pub type ApiErrorFor = <>::Api as ApiErrorExt>::Error; - #[derive(codec::Encode, codec::Decode)] pub struct OldRuntimeVersion { pub spec_name: RuntimeString, diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index e8f06aaf20..2a6325fd09 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -15,6 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] sp-api = { version = "3.0.0", path = "../" } substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } sp-version = { version = "3.0.0", path = "../../version" } +sp-tracing = { version = "3.0.0", path = "../../tracing" } sp-runtime = { version = "3.0.0", path = "../../runtime" } sp-blockchain = { version = "3.0.0", path = "../../blockchain" } sp-consensus = { version = "0.9.0", path = "../../consensus/common" } @@ -28,6 +29,7 @@ rustversion = "1.0.0" criterion = "0.3.0" substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } sp-core = { version = "3.0.0", path = "../../core" } +log = "0.4.14" [[bench]] name = "bench" diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index 134ee50856..1f7ccf2712 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -23,7 +23,6 @@ use sp_api::{ use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; use sp_core::NativeOrEncoded; use substrate_test_runtime_client::runtime::Block; -use sp_blockchain::Result; /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` /// trait are done by the `construct_runtime!` macro in a real runtime. @@ -105,7 +104,7 @@ mock_impl_runtime_apis! { #[advanced] fn same_name(_: &BlockId) -> - std::result::Result< + Result< NativeOrEncoded<()>, ApiError > @@ -115,7 +114,7 @@ mock_impl_runtime_apis! 
{ #[advanced] fn wild_card(at: &BlockId, _: u32) -> - std::result::Result< + Result< NativeOrEncoded<()>, ApiError > @@ -124,7 +123,7 @@ mock_impl_runtime_apis! { // yeah Ok(().into()) } else { - Err(ApiError::new("MockApi", codec::Error::from("Ohh noooo"))) + Err((Box::from("Test error") as Box).into()) } } } @@ -143,33 +142,33 @@ type TestClient = substrate_test_runtime_client::client::Client< #[test] fn test_client_side_function_signature() { - let _test: fn(&RuntimeApiImpl, &BlockId, u64) -> Result<()> = + let _test: fn(&RuntimeApiImpl, &BlockId, u64) -> Result<(), ApiError> = RuntimeApiImpl::::test; let _something_with_block: - fn(&RuntimeApiImpl, &BlockId, Block) -> Result = + fn(&RuntimeApiImpl, &BlockId, Block) -> Result = RuntimeApiImpl::::something_with_block; #[allow(deprecated)] let _same_name_before_version_2: - fn(&RuntimeApiImpl, &BlockId) -> Result = + fn(&RuntimeApiImpl, &BlockId) -> Result = RuntimeApiImpl::::same_name_before_version_2; } #[test] fn check_runtime_api_info() { - assert_eq!(&Api::::ID, &runtime_decl_for_Api::ID); - assert_eq!(Api::::VERSION, runtime_decl_for_Api::VERSION); - assert_eq!(Api::::VERSION, 1); + assert_eq!(&Api::::ID, &runtime_decl_for_Api::ID); + assert_eq!(Api::::VERSION, runtime_decl_for_Api::VERSION); + assert_eq!(Api::::VERSION, 1); assert_eq!( - ApiWithCustomVersion::::VERSION, + ApiWithCustomVersion::::VERSION, runtime_decl_for_ApiWithCustomVersion::VERSION, ); assert_eq!( - &ApiWithCustomVersion::::ID, + &ApiWithCustomVersion::::ID, &runtime_decl_for_ApiWithCustomVersion::ID, ); - assert_eq!(ApiWithCustomVersion::::VERSION, 2); + assert_eq!(ApiWithCustomVersion::::VERSION, 2); } fn check_runtime_api_versions_contains() { @@ -178,9 +177,9 @@ fn check_runtime_api_versions_contains() { #[test] fn check_runtime_api_versions() { - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); + 
check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); } #[test] @@ -188,9 +187,9 @@ fn mock_runtime_api_has_api() { let mock = MockApi { block: None }; assert!( - mock.has_api::>(&BlockId::Number(0)).unwrap(), + mock.has_api::>(&BlockId::Number(0)).unwrap(), ); - assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); + assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); } #[test] @@ -209,7 +208,7 @@ fn mock_runtime_api_works_with_advanced() { Api::::same_name(&mock, &BlockId::Number(0)).unwrap(); mock.wild_card(&BlockId::Number(1337), 1).unwrap(); assert_eq!( - ApiError::new("MockApi", ::codec::Error::from("Ohh noooo")), - mock.wild_card(&BlockId::Number(1336), 1).unwrap_err() + "Test error".to_string(), + mock.wild_card(&BlockId::Number(1336), 1).unwrap_err().to_string(), ); } diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index ec1a86d837..e10e1b3401 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -50,10 +50,7 @@ fn calling_wasm_runtime_function() { } #[test] -#[should_panic( - expected = - "Could not convert parameter `param` between node and runtime: DecodeFails always fails" -)] +#[should_panic(expected = "FailedToConvertParameter { function: \"fail_convert_parameter\"")] fn calling_native_runtime_function_with_non_decodable_parameter() { let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); let runtime_api = client.runtime_api(); @@ -62,7 +59,7 @@ fn calling_native_runtime_function_with_non_decodable_parameter() { } #[test] -#[should_panic(expected = "Could not convert return value from runtime to node!")] +#[should_panic(expected = "FailedToConvertReturnValue { function: \"fail_convert_return_value\"")] fn calling_native_runtime_function_with_non_decodable_return_value() { let client = 
TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); let runtime_api = client.runtime_api(); @@ -218,3 +215,34 @@ fn call_runtime_api_with_multiple_arguments() { .test_multiple_arguments(&block_id, data.clone(), data.clone(), data.len() as u32) .unwrap(); } + +#[test] +fn disable_logging_works() { + if std::env::var("RUN_TEST").is_ok() { + sp_tracing::try_init_simple(); + + let mut builder = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm); + builder.genesis_init_mut().set_wasm_code( + substrate_test_runtime_client::runtime::wasm_binary_logging_disabled_unwrap().to_vec(), + ); + + let client = builder.build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(0); + runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); + log::error!("Logging from native works"); + } else { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_TEST", "1") + .env("RUST_LOG", "info") + .args(&["--nocapture", "disable_logging_works"]) + .output() + .unwrap(); + + let output = dbg!(String::from_utf8(output.stderr).unwrap()); + assert!(!output.contains("Hey I'm runtime")); + assert!(output.contains("Logging from native works")); + } +} diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.rs b/primitives/api/test/tests/ui/mock_only_error_associated_type.rs deleted file mode 100644 index bbd3c71c94..0000000000 --- a/primitives/api/test/tests/ui/mock_only_error_associated_type.rs +++ /dev/null @@ -1,19 +0,0 @@ -use substrate_test_runtime_client::runtime::Block; - -sp_api::decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } -} - -struct MockApi; - -sp_api::mock_impl_runtime_apis! 
{ - impl Api for MockApi { - type OtherData = u32; - - fn test(data: u64) {} - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr b/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr deleted file mode 100644 index beced70413..0000000000 --- a/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr +++ /dev/null @@ -1,5 +0,0 @@ -error: Only associated type with name `Error` is allowed - --> $DIR/mock_only_error_associated_type.rs:13:3 - | -13 | type OtherData = u32; - | ^^^^ diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.rs b/primitives/api/test/tests/ui/mock_only_one_error_type.rs deleted file mode 100644 index 1c3f13dbb9..0000000000 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.rs +++ /dev/null @@ -1,29 +0,0 @@ -use substrate_test_runtime_client::runtime::Block; - -sp_api::decl_runtime_apis! { - pub trait Api { - fn test(data: u64); - } - - pub trait Api2 { - fn test(data: u64); - } -} - -struct MockApi; - -sp_api::mock_impl_runtime_apis! { - impl Api for MockApi { - type Error = u32; - - fn test(data: u64) {} - } - - impl Api2 for MockApi { - type Error = u64; - - fn test(data: u64) {} - } -} - -fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr deleted file mode 100644 index ab5b90af3a..0000000000 --- a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr +++ /dev/null @@ -1,29 +0,0 @@ -error: Error type can not change between runtime apis - --> $DIR/mock_only_one_error_type.rs:23:3 - | -23 | type Error = u64; - | ^^^^ - -error: First error type was declared here. 
- --> $DIR/mock_only_one_error_type.rs:17:16 - | -17 | type Error = u32; - | ^^^ - -error[E0277]: the trait bound `u32: From` is not satisfied - --> $DIR/mock_only_one_error_type.rs:17:16 - | -17 | type Error = u32; - | ^^^ the trait `From` is not implemented for `u32` - | - ::: $WORKSPACE/primitives/api/src/lib.rs - | - | type Error: std::fmt::Debug + From; - | -------------- required by this bound in `sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ApiErrorExt::Error` - | - = help: the following implementations were found: - > - > - > - > - and 18 others diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index caaa4c33cd..319666747b 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -80,6 +80,11 @@ pub trait PerThing: Self::from_rational_approximation::(p * p, q * q) } + /// Return the part left when `self` is saturating-subtracted from `Self::one()`. + fn left_from_one(self) -> Self { + Self::one().saturating_sub(self) + } + /// Multiplication that always rounds down to a whole number. The standard `Mul` rounds to the /// nearest whole number. 
/// diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 092d961162..c37686c0df 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -15,7 +15,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] log = "0.4.11" -lru = "0.6.1" +lru = "0.6.5" parking_lot = "0.11.1" thiserror = "1.0.21" futures = "0.3.9" diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index 6ed5fe1b33..58d08d06f0 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -114,8 +114,8 @@ pub enum Error { #[error("Error decoding call result of {0}")] CallResultDecode(&'static str, #[source] CodecError), - #[error(transparent)] - RuntimeApiCodecError(#[from] ApiError), + #[error("Error at calling runtime api: {0}")] + RuntimeApiError(#[from] ApiError), #[error("Runtime :code missing in storage")] RuntimeCodeMissing, @@ -153,7 +153,6 @@ pub enum Error { #[error("Failed to get header for hash {0}")] MissingHeader(String), - #[error("State Database error: {0}")] StateDatabase(String), @@ -183,6 +182,15 @@ impl From> for Error { } } +impl From for ApiError { + fn from(err: Error) -> ApiError { + match err { + Error::RuntimeApiError(err) => err, + e => ApiError::Application(Box::new(e)), + } + } +} + impl Error { /// Chain a blockchain error. pub fn from_blockchain(e: Box) -> Self { diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index b20cf45cd4..2f1a716114 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -52,6 +52,7 @@ impl BabeInherentData for InherentData { } /// Provides the slot duration inherent data for BABE. +// TODO: Remove in the future. 
https://github.com/paritytech/substrate/issues/8029 #[cfg(feature = "std")] pub struct InherentDataProvider { slot_duration: u64, diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 4420267899..8c5ae96815 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] thiserror = "1.0.21" -libp2p = { version = "0.34.0", default-features = false } +libp2p = { version = "0.35.1", default-features = false } log = "0.4.8" sp-core = { path= "../../core", version = "3.0.0"} sp-inherents = { version = "3.0.0", path = "../../inherents" } diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 41b5f391f6..00f84501db 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -193,16 +193,21 @@ impl BlockImportParams { if let Some(hash) = self.post_hash { hash } else { - if self.post_digests.is_empty() { - self.header.hash() - } else { - let mut hdr = self.header.clone(); - for digest_item in &self.post_digests { - hdr.digest_mut().push(digest_item.clone()); - } - - hdr.hash() + self.post_header().hash() + } + } + + /// Get the post header. 
+ pub fn post_header(&self) -> Block::Header { + if self.post_digests.is_empty() { + self.header.clone() + } else { + let mut hdr = self.header.clone(); + for digest_item in &self.post_digests { + hdr.digest_mut().push(digest_item.clone()); } + + hdr } } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 541c1ff0f4..1bfcef5fa6 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -62,7 +62,7 @@ impl BasicQueue { verifier: V, block_import: BoxBlockImport, justification_import: Option>, - spawner: &impl sp_core::traits::SpawnNamed, + spawner: &impl sp_core::traits::SpawnEssentialNamed, prometheus_registry: Option<&Registry>, ) -> Self { let (result_sender, result_port) = buffered_link::buffered_link(); @@ -83,7 +83,7 @@ impl BasicQueue { metrics, ); - spawner.spawn_blocking("basic-block-import-worker", future.boxed()); + spawner.spawn_essential_blocking("basic-block-import-worker", future.boxed()); Self { justification_sender, @@ -164,7 +164,13 @@ async fn block_import_process( loop { let worker_messages::ImportBlocks(origin, blocks) = match block_import_receiver.next().await { Some(blocks) => blocks, - None => return, + None => { + log::debug!( + target: "block-import", + "Stopping block import because the import channel was closed!", + ); + return + }, }; let res = import_many_blocks( @@ -236,6 +242,10 @@ impl BlockImportWorker { // If the results sender is closed, that means that the import queue is shutting // down and we should end this future. 
if worker.result_sender.is_closed() { + log::debug!( + target: "block-import", + "Stopping block import because result channel was closed!", + ); return; } @@ -244,7 +254,13 @@ impl BlockImportWorker { match justification { Some(ImportJustification(who, hash, number, justification)) => worker.import_justification(who, hash, number, justification), - None => return, + None => { + log::debug!( + target: "block-import", + "Stopping block import because justification channel was closed!", + ); + return + }, } } @@ -503,7 +519,6 @@ mod tests { #[test] fn prioritizes_finality_work_over_block_import() { let (result_sender, mut result_port) = buffered_link::buffered_link(); - let (worker, mut finality_sender, mut block_import_sender) = BlockImportWorker::new(result_sender, (), Box::new(()), Some(Box::new(())), None); futures::pin_mut!(worker); @@ -531,7 +546,7 @@ mod tests { import_existing: false, }], ))) - .unwrap(); + .unwrap(); hash }; @@ -545,7 +560,7 @@ mod tests { 1, Vec::new(), ))) - .unwrap(); + .unwrap(); hash }; diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 43edf4f777..b3aceb45e1 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -36,7 +36,7 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HashFor}, }; use futures::prelude::*; -pub use sp_inherents::InherentData; +use sp_state_machine::StorageProof; pub mod block_validation; pub mod offline_tracker; @@ -55,6 +55,7 @@ pub use block_import::{ pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; pub use import_queue::DefaultImportQueue; +pub use sp_inherents::InherentData; /// Block status. #[derive(Debug, PartialEq, Eq)] @@ -89,53 +90,81 @@ pub trait Environment { } /// A proposal that is created by a [`Proposer`]. -pub struct Proposal { +pub struct Proposal { /// The block that was build. 
pub block: Block, - /// Optional proof that was recorded while building the block. - pub proof: Option, + /// Proof that was recorded while building the block. + pub proof: Proof, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, } -/// Used as parameter to [`Proposer`] to tell the requirement on recording a proof. +/// Error that is returned when [`ProofRecording`] requested to record a proof, +/// but no proof was recorded. +#[derive(Debug, thiserror::Error)] +#[error("Proof should be recorded, but no proof was provided.")] +pub struct NoProofRecorded; + +/// A trait to express the state of proof recording on type system level. /// -/// When `RecordProof::Yes` is given, all accessed trie nodes should be saved. These recorded -/// trie nodes can be used by a third party to proof this proposal without having access to the -/// full storage. -#[derive(Copy, Clone, PartialEq)] -pub enum RecordProof { - /// `Yes`, record a proof. - Yes, - /// `No`, don't record any proof. - No, +/// This is used by [`Proposer`] to signal if proof recording is enabled. This can be used by +/// downstream users of the [`Proposer`] trait to enforce that proof recording is activated when +/// required. The only two implementations of this trait are [`DisableProofRecording`] and +/// [`EnableProofRecording`]. +/// +/// This trait is sealed and can not be implemented outside of this crate! +pub trait ProofRecording: Send + Sync + private::Sealed + 'static { + /// The proof type that will be used internally. + type Proof: Send + Sync + 'static; + /// Is proof recording enabled? + const ENABLED: bool; + /// Convert the given `storage_proof` into [`Self::Proof`]. + /// + /// Internally Substrate uses `Option` to express the both states of proof + /// recording (for now) and as [`Self::Proof`] is some different type, we need to provide a + /// function to convert this value. 
+ /// + /// If the proof recording was requested, but `None` is given, this will return + /// `Err(NoProofRecorded)`. + fn into_proof(storage_proof: Option) -> Result; } -impl RecordProof { - /// Returns if `Self` == `Yes`. - pub fn yes(&self) -> bool { - match self { - Self::Yes => true, - Self::No => false, - } +/// Express that proof recording is disabled. +/// +/// For more information see [`ProofRecording`]. +pub struct DisableProofRecording; + +impl ProofRecording for DisableProofRecording { + type Proof = (); + const ENABLED: bool = false; + + fn into_proof(_: Option) -> Result { + Ok(()) } } -/// Will return [`RecordProof::No`] as default value. -impl Default for RecordProof { - fn default() -> Self { - Self::No +/// Express that proof recording is enabled. +/// +/// For more information see [`ProofRecording`]. +pub struct EnableProofRecording; + +impl ProofRecording for EnableProofRecording { + type Proof = sp_state_machine::StorageProof; + const ENABLED: bool = true; + + fn into_proof(proof: Option) -> Result { + proof.ok_or_else(|| NoProofRecorded) } } -impl From for RecordProof { - fn from(val: bool) -> Self { - if val { - Self::Yes - } else { - Self::No - } - } +/// Provides `Sealed` trait to prevent implementing trait [`ProofRecording`] outside of this crate. +mod private { + /// Special trait that prevents the implementation of [`super::ProofRecording`] outside of this + /// crate. + pub trait Sealed {} + + impl Sealed for super::DisableProofRecording {} + impl Sealed for super::EnableProofRecording {} } /// Logic for a proposer. @@ -150,8 +179,16 @@ pub trait Proposer { /// The transaction type used by the backend. type Transaction: Default + Send + 'static; /// Future that resolves to a committed proposal with an optional proof. - type Proposal: Future, Self::Error>> + - Send + Unpin + 'static; + type Proposal: + Future, Self::Error>> + + Send + + Unpin + + 'static; + /// The supported proof recording by the implementator of this trait. 
See [`ProofRecording`] + /// for more information. + type ProofRecording: self::ProofRecording + Send + Sync + 'static; + /// The proof type used by [`Self::ProofRecording`]. + type Proof: Send + Sync + 'static; /// Create a proposal. /// @@ -167,7 +204,6 @@ pub trait Proposer { inherent_data: InherentData, inherent_digests: DigestFor, max_duration: Duration, - record_proof: RecordProof, ) -> Self::Proposal; } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 2c375f68eb..95192acc4c 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -224,7 +224,7 @@ pub enum PublicError { /// Key that can be encoded to/from SS58. /// -/// See https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58)#address-type +/// See /// for information on the codec. #[cfg(feature = "full_crypto")] pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { @@ -578,11 +578,16 @@ ss58_address_format!( (46, "reserved46", "Reserved for future use (46).") Reserved47 => (47, "reserved47", "Reserved for future use (47).") + NeatcoinAccount => + (48, "neatcoin", "Neatcoin mainnet, standard account (*25519).") + HydraDXAccount => + (63, "hydradx", "HydraDX standard account (*25519).") AventusAccount => (65, "aventus", "Aventus Chain mainnet, standard account (*25519).") CrustAccount => (66, "crust", "Crust Network, standard account (*25519).") - // Note: 48 and above are reserved. + // Note: 16384 and above are reserved. + ); /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is @@ -595,14 +600,20 @@ pub fn set_default_ss58_version(version: Ss58AddressFormat) { *DEFAULT_VERSION.lock() = version } +#[cfg(feature = "std")] +lazy_static::lazy_static! 
{ + static ref SS58_REGEX: Regex = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") + .expect("constructed from known-good static value; qed"); + static ref SECRET_PHRASE_REGEX: Regex = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + static ref JUNCTION_REGEX: Regex = Regex::new(r"/(/?[^/]+)") + .expect("constructed from known-good static value; qed"); +} + #[cfg(feature = "std")] impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { fn from_string(s: &str) -> Result { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); + let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let s = cap.name("ss58") .map(|r| r.as_str()) .unwrap_or(DEV_ADDRESS); @@ -621,7 +632,7 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { if cap["path"].is_empty() { Ok(addr) } else { - let path = re_junction.captures_iter(&cap["path"]) + let path = JUNCTION_REGEX.captures_iter(&cap["path"]) .map(|f| DeriveJunction::from(&f[1])); addr.derive(path) .ok_or(PublicError::InvalidPath) @@ -629,11 +640,7 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { } fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); + let cap = SS58_REGEX.captures(s).ok_or(PublicError::InvalidFormat)?; let (addr, v) = Self::from_ss58check_with_version( cap.name("ss58") .map(|r| r.as_str()) @@ -642,7 +649,7 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { if 
cap["path"].is_empty() { Ok((addr, v)) } else { - let path = re_junction.captures_iter(&cap["path"]) + let path = JUNCTION_REGEX.captures_iter(&cap["path"]) .map(|f| DeriveJunction::from(&f[1])); addr.derive(path) .ok_or(PublicError::InvalidPath) @@ -999,13 +1006,9 @@ pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { fn from_string_with_seed(s: &str, password_override: Option<&str>) -> Result<(Self, Option), SecretStringError> { - let re = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; + let cap = SECRET_PHRASE_REGEX.captures(s).ok_or(SecretStringError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let path = re_junction.captures_iter(&cap["path"]) + let path = JUNCTION_REGEX.captures_iter(&cap["path"]) .map(|f| DeriveJunction::from(&f[1])); let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index 0b67d33235..ac0eedef69 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -18,7 +18,7 @@ //! Hashing functions. //! //! This module is gated by `full-crypto` feature. If you intend to use any of the functions -//! defined here within your runtime, you should most likely rather use [sp_io::hashing] instead, +//! defined here within your runtime, you should most likely rather use `sp_io::hashing` instead, //! unless you know what you're doing. Using `sp_io` will be more performant, since instead of //! computing the hash in WASM it delegates that computation to the host client. 
diff --git a/primitives/core/src/hexdisplay.rs b/primitives/core/src/hexdisplay.rs index 304b665a72..e590eec0e5 100644 --- a/primitives/core/src/hexdisplay.rs +++ b/primitives/core/src/hexdisplay.rs @@ -71,6 +71,12 @@ impl AsBytesRef for sp_std::vec::Vec { fn as_bytes_ref(&self) -> &[u8] { &self } } +impl AsBytesRef for sp_storage::StorageKey { + fn as_bytes_ref(&self) -> &[u8] { + self.as_ref() + } +} + macro_rules! impl_non_endians { ( $( $t:ty ),* ) => { $( impl AsBytesRef for $t { diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index 1506abb77f..b33f518c32 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -152,3 +152,13 @@ impl crate::traits::SpawnNamed for TaskExecutor { self.0.spawn_ok(future); } } + +#[cfg(feature = "std")] +impl crate::traits::SpawnEssentialNamed for TaskExecutor { + fn spawn_essential_blocking(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + self.0.spawn_ok(future); + } + fn spawn_essential(&self, _: &'static str, future: futures::future::BoxFuture<'static, ()>) { + self.0.spawn_ok(future); + } +} diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 8488a1873c..90f8060f9a 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -34,7 +34,7 @@ pub trait CodeExecutor: Sized + Send + Sync + CallInWasm + Clone + 'static { /// or an execution error) together with a `bool`, which is true if native execution was used. fn call< R: codec::Codec + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, + NC: FnOnce() -> Result> + UnwindSafe, >( &self, ext: &mut dyn Externalities, @@ -205,7 +205,7 @@ sp_externalities::decl_extension! { pub struct RuntimeSpawnExt(Box); } -/// Something that can spawn futures (blocking and non-blocking) with an assigned name. +/// Something that can spawn tasks (blocking and non-blocking) with an assigned name. 
#[dyn_clonable::clonable] pub trait SpawnNamed: Clone + Send + Sync { /// Spawn the given blocking future. @@ -227,3 +227,28 @@ impl SpawnNamed for Box { (**self).spawn(name, future) } } + +/// Something that can spawn essential tasks (blocking and non-blocking) with an assigned name. +/// +/// Essential tasks are special tasks that should take down the node when they end. +#[dyn_clonable::clonable] +pub trait SpawnEssentialNamed: Clone + Send + Sync { + /// Spawn the given blocking future. + /// + /// The given `name` is used to identify the future in tracing. + fn spawn_essential_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>); + /// Spawn the given non-blocking future. + /// + /// The given `name` is used to identify the future in tracing. + fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>); +} + +impl SpawnEssentialNamed for Box { + fn spawn_essential_blocking(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + (**self).spawn_essential_blocking(name, future) + } + + fn spawn_essential(&self, name: &'static str, future: futures::future::BoxFuture<'static, ()>) { + (**self).spawn_essential(name, future) + } +} diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index d7eb49247a..0d3ba80510 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -17,7 +17,7 @@ targets = ["x86_64-unknown-linux-gnu"] proc-macro = true [dependencies] -quote = "1.0.9" +quote = "1.0.3" syn = "1.0.58" proc-macro2 = "1.0" diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index a10ce32bdc..3ee37f5e31 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -139,15 +139,16 @@ pub trait Externalities: ExtensionStore { /// Clear an entire child storage. /// /// Deletes all keys from the overlay and up to `limit` keys from the backend. 
No - /// limit is applied if `limit` is `None`. Returns `true` if the child trie was + /// limit is applied if `limit` is `None`. Returned boolean is `true` if the child trie was /// removed completely and `false` if there are remaining keys after the function - /// returns. + /// returns. Returned `u32` is the number of keys that was removed at the end of the + /// operation. /// /// # Note /// /// An implementation is free to delete more keys than the specified limit as long as /// it is able to do that in constant time. - fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> bool; + fn kill_child_storage(&mut self, child_info: &ChildInfo, limit: Option) -> (bool, u32); /// Clear storage entries which keys are start with the given prefix. fn clear_prefix(&mut self, prefix: &[u8]); diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index c8ff2fc0a2..95aa65c930 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -16,7 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -grandpa = { package = "finality-grandpa", version = "0.13.0", default-features = false, features = ["derive-codec"] } +grandpa = { package = "finality-grandpa", version = "0.14.0", default-features = false, features = ["derive-codec"] } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "3.0.0", default-features = false, path = "../api" } diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 383e4fe371..5b393bd1d8 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -400,7 +400,7 @@ where AuthorityId::ID, &public.to_public_crypto_pair(), &encoded[..], - ).ok()?.try_into().ok()?; + 
).ok().flatten()?.try_into().ok()?; Some(grandpa::SignedMessage { message, diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index 36a1b32775..0110db5680 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -424,7 +424,7 @@ pub trait ProvideInherent { /// - `Err(_)` indicates that this function failed and further operations should be aborted. /// /// CAUTION: This check has a bug when used in pallets that also provide unsigned transactions. - /// See https://github.com/paritytech/substrate/issues/6243 for details. + /// See for details. fn is_inherent_required(_: &InherentData) -> Result, Self::Error> { Ok(None) } /// Check whether the given inherent is valid. Checking the inherent is optional and can be diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 397dd3c217..bc86dd902d 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -57,7 +57,7 @@ use sp_core::{ use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_runtime_interface::{runtime_interface, Pointer}; -use sp_runtime_interface::pass_by::PassBy; +use sp_runtime_interface::pass_by::{PassBy, PassByCodec}; use codec::{Encode, Decode}; @@ -81,6 +81,16 @@ pub enum EcdsaVerifyError { BadSignature, } +/// The outcome of calling [`kill_storage`]. Returned value is the number of storage items +/// removed from the trie from making the `kill_storage` call. +#[derive(PassByCodec, Encode, Decode)] +pub enum KillChildStorageResult { + /// No key remains in the child trie. + AllRemoved(u32), + /// At least one key still resides in the child trie due to the supplied limit. + SomeRemaining(u32), +} + /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { @@ -290,7 +300,7 @@ pub trait DefaultChildStorage { /// The limit can be used to partially delete a child trie in case it is too large /// to delete in one go (block). 
/// - /// It returns false iff some keys are remaining in + /// It returns a boolean false iff some keys are remaining in /// the child trie after the functions returns. /// /// # Note @@ -307,7 +317,41 @@ pub trait DefaultChildStorage { #[version(2)] fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> bool { let child_info = ChildInfo::new_default(storage_key); - self.kill_child_storage(&child_info, limit) + let (all_removed, _num_removed) = self.kill_child_storage(&child_info, limit); + all_removed + } + + /// Clear a child storage key. + /// + /// Deletes all keys from the overlay and up to `limit` keys from the backend if + /// it is set to `Some`. No limit is applied when `limit` is set to `None`. + /// + /// The limit can be used to partially delete a child trie in case it is too large + /// to delete in one go (block). + /// + /// It returns a boolean false iff some keys are remaining in + /// the child trie after the functions returns. Also returns a `u32` with + /// the number of keys removed from the process. + /// + /// # Note + /// + /// Please note that keys that are residing in the overlay for that child trie when + /// issuing this call are all deleted without counting towards the `limit`. Only keys + /// written during the current block are part of the overlay. Deleting with a `limit` + /// mostly makes sense with an empty overlay for that child trie. + /// + /// Calling this function multiple times per block for the same `storage_key` does + /// not make much sense because it is not cumulative when called inside the same block. + /// Use this function to distribute the deletion of a single child trie across multiple + /// blocks. 
+ #[version(3)] + fn storage_kill(&mut self, storage_key: &[u8], limit: Option) -> KillChildStorageResult { + let child_info = ChildInfo::new_default(storage_key); + let (all_removed, num_removed) = self.kill_child_storage(&child_info, limit); + match all_removed { + true => KillChildStorageResult::AllRemoved(num_removed), + false => KillChildStorageResult::SomeRemaining(num_removed), + } } /// Check a child storage key. @@ -474,8 +518,9 @@ pub trait Crypto { let keystore = &***self.extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) } /// Verify `ed25519` signature. @@ -600,8 +645,9 @@ pub trait Crypto { let keystore = &***self.extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) } /// Verify an `sr25519` signature. @@ -646,8 +692,9 @@ pub trait Crypto { let keystore = &***self.extension::() .expect("No `keystore` associated for the current context!"); SyncCryptoStore::sign_with(keystore, id, &pub_key.into(), msg) - .map(|sig| ecdsa::Signature::from_slice(sig.as_slice())) .ok() + .flatten() + .map(|sig| ecdsa::Signature::from_slice(sig.as_slice())) } /// Verify `ecdsa` signature. 
diff --git a/primitives/keystore/src/lib.rs b/primitives/keystore/src/lib.rs index f42f6dd712..2fda3a48c5 100644 --- a/primitives/keystore/src/lib.rs +++ b/primitives/keystore/src/lib.rs @@ -34,9 +34,6 @@ pub enum Error { /// Public key type is not supported #[display(fmt="Key not supported: {:?}", _0)] KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - #[display(fmt="Pair was not found: {}", _0)] - PairNotFound(String), /// Validation error #[display(fmt="Validation error: {}", _0)] ValidationError(String), @@ -125,37 +122,39 @@ pub trait CryptoStore: Send + Sync { /// Signs a message with the private key that matches /// the public key passed. /// - /// Returns the SCALE encoded signature if key is found & supported, - /// an error otherwise. + /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't + /// exist or an error when something failed. async fn sign_with( &self, id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error>; + ) -> Result>, Error>; /// Sign with any key /// /// Given a list of public keys, find the first supported key and /// sign the provided message with that key. /// - /// Returns a tuple of the used key and the SCALE encoded signature. + /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could + /// be found to sign. async fn sign_with_any( &self, id: KeyTypeId, keys: Vec, msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec), Error> { + ) -> Result)>, Error> { if keys.len() == 1 { - return self.sign_with(id, &keys[0], msg).await.map(|s| (keys[0].clone(), s)); + return Ok(self.sign_with(id, &keys[0], msg).await?.map(|s| (keys[0].clone(), s))); } else { for k in self.supported_keys(id, keys).await? 
{ - if let Ok(sign) = self.sign_with(id, &k, msg).await { - return Ok((k, sign)); + if let Ok(Some(sign)) = self.sign_with(id, &k, msg).await { + return Ok(Some((k, sign))); } } } - Err(Error::KeyNotSupported(id)) + + Ok(None) } /// Sign with all keys @@ -164,13 +163,13 @@ pub trait CryptoStore: Send + Sync { /// each key given that the key is supported. /// /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key or a Error for non-supported keys. + /// signature of each key, `None` if the key doesn't exist or a error when something failed. async fn sign_with_all( &self, id: KeyTypeId, keys: Vec, msg: &[u8], - ) -> Result, Error>>, ()> { + ) -> Result>, Error>>, ()> { let futs = keys.iter() .map(|k| self.sign_with(id, k, msg)); @@ -187,16 +186,14 @@ pub trait CryptoStore: Send + Sync { /// Namely, VRFOutput and VRFProof which are returned /// inside the `VRFSignature` container struct. /// - /// This function will return an error in the cases where - /// the public key and key type provided do not match a private - /// key in the keystore. Or, in the context of remote signing - /// an error could be a network one. + /// This function will return `None` if the given `key_type` and `public` combination + /// doesn't exist in the keystore or an `Err` when something failed. async fn sr25519_vrf_sign( &self, key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result; + ) -> Result, Error>; } /// Sync version of the CryptoStore @@ -285,37 +282,41 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// Signs a message with the private key that matches /// the public key passed. /// - /// Returns the SCALE encoded signature if key is found & supported, - /// an error otherwise. + /// Returns the SCALE encoded signature if key is found and supported, `None` if the key doesn't + /// exist or an error when something failed. 
fn sign_with( &self, id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error>; + ) -> Result>, Error>; /// Sign with any key /// /// Given a list of public keys, find the first supported key and /// sign the provided message with that key. /// - /// Returns a tuple of the used key and the SCALE encoded signature. + /// Returns a tuple of the used key and the SCALE encoded signature or `None` if no key could + /// be found to sign. fn sign_with_any( &self, id: KeyTypeId, keys: Vec, msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec), Error> { + ) -> Result)>, Error> { if keys.len() == 1 { - return SyncCryptoStore::sign_with(self, id, &keys[0], msg).map(|s| (keys[0].clone(), s)); + return Ok( + SyncCryptoStore::sign_with(self, id, &keys[0], msg)?.map(|s| (keys[0].clone(), s)), + ) } else { for k in SyncCryptoStore::supported_keys(self, id, keys)? { - if let Ok(sign) = SyncCryptoStore::sign_with(self, id, &k, msg) { - return Ok((k, sign)); + if let Ok(Some(sign)) = SyncCryptoStore::sign_with(self, id, &k, msg) { + return Ok(Some((k, sign))); } } } - Err(Error::KeyNotSupported(id)) + + Ok(None) } /// Sign with all keys @@ -324,13 +325,13 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// each key given that the key is supported. /// /// Returns a list of `Result`s each representing the SCALE encoded - /// signature of each key or a Error for non-supported keys. + /// signature of each key, `None` if the key doesn't exist or an error when something failed. fn sign_with_all( &self, id: KeyTypeId, keys: Vec, msg: &[u8], - ) -> Result, Error>>, ()>{ + ) -> Result>, Error>>, ()> { Ok(keys.iter().map(|k| SyncCryptoStore::sign_with(self, id, k, msg)).collect()) } @@ -344,16 +345,14 @@ pub trait SyncCryptoStore: CryptoStore + Send + Sync { /// Namely, VRFOutput and VRFProof which are returned /// inside the `VRFSignature` container struct. 
/// - /// This function will return an error in the cases where - /// the public key and key type provided do not match a private - /// key in the keystore. Or, in the context of remote signing - /// an error could be a network one. + /// This function will return `None` if the given `key_type` and `public` combination + /// doesn't exist in the keystore or an `Err` when something failed. fn sr25519_vrf_sign( &self, key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result; + ) -> Result, Error>; } /// A pointer to a keystore. diff --git a/primitives/keystore/src/testing.rs b/primitives/keystore/src/testing.rs index 702e2bbc85..caee7178e0 100644 --- a/primitives/keystore/src/testing.rs +++ b/primitives/keystore/src/testing.rs @@ -132,7 +132,7 @@ impl CryptoStore for KeyStore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error> { + ) -> Result>, Error> { SyncCryptoStore::sign_with(self, id, key, msg) } @@ -141,7 +141,7 @@ impl CryptoStore for KeyStore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result { + ) -> Result, Error> { SyncCryptoStore::sr25519_vrf_sign(self, key_type, public, transcript_data) } } @@ -280,27 +280,27 @@ impl SyncCryptoStore for KeyStore { id: KeyTypeId, key: &CryptoTypePublicPair, msg: &[u8], - ) -> Result, Error> { + ) -> Result>, Error> { use codec::Encode; match key.0 { ed25519::CRYPTO_ID => { - let key_pair: ed25519::Pair = self - .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| Error::PairNotFound("ed25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); + let key_pair = self + .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() } sr25519::CRYPTO_ID => { - let key_pair: sr25519::Pair = self - .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| 
Error::PairNotFound("sr25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); + let key_pair = self + .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() } ecdsa::CRYPTO_ID => { - let key_pair: ecdsa::Pair = self - .ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())) - .ok_or_else(|| Error::PairNotFound("ecdsa".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); + let key_pair = self + .ecdsa_key_pair(id, &ecdsa::Public::from_slice(key.1.as_slice())); + + key_pair.map(|k| k.sign(msg).encode()).map(Ok).transpose() } _ => Err(Error::KeyNotSupported(id)) } @@ -311,15 +311,19 @@ impl SyncCryptoStore for KeyStore { key_type: KeyTypeId, public: &sr25519::Public, transcript_data: VRFTranscriptData, - ) -> Result { + ) -> Result, Error> { let transcript = make_transcript(transcript_data); - let pair = self.sr25519_key_pair(key_type, public) - .ok_or_else(|| Error::PairNotFound("Not found".to_owned()))?; + let pair = if let Some(k) = self.sr25519_key_pair(key_type, public) { + k + } else { + return Ok(None) + }; + let (inout, proof, _) = pair.as_ref().vrf_sign(transcript); - Ok(VRFSignature { + Ok(Some(VRFSignature { output: inout.to_output(), proof, - }) + })) } } @@ -394,7 +398,7 @@ mod tests { &key_pair.public(), transcript_data.clone(), ); - assert!(result.is_err()); + assert!(result.unwrap().is_none()); SyncCryptoStore::insert_unknown( &store, @@ -410,6 +414,6 @@ mod tests { transcript_data, ); - assert!(result.is_ok()); + assert!(result.unwrap().is_some()); } } diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 348e3977cd..51732ac631 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -17,7 +17,7 @@ proc-macro = true [dependencies] syn = { version = "1.0.58", features = ["full", "visit", "fold", "extra-traits"] } -quote 
= "1.0.9" +quote = "1.0.3" proc-macro2 = "1.0.3" Inflector = "0.11.4" proc-macro-crate = "0.1.4" diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index 0e4f6168ba..7d33e7fa62 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -22,7 +22,7 @@ sp-application-crypto = { version = "3.0.0", default-features = false, path = ". sp-arithmetic = { version = "3.0.0", default-features = false, path = "../arithmetic" } sp-std = { version = "3.0.0", default-features = false, path = "../std" } sp-io = { version = "3.0.0", default-features = false, path = "../io" } -log = { version = "0.4.8", optional = true } +log = { version = "0.4.14", default-features = false } paste = "1.0" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.2.1" @@ -34,6 +34,9 @@ either = { version = "1.5", default-features = false } serde_json = "1.0.41" rand = "0.7.2" sp-state-machine = { version = "0.9.0", path = "../state-machine" } +sp-api = { version = "3.0.0", path = "../api" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-tracing = { version = "3.0.0", path = "../../primitives/tracing" } [features] bench = [] @@ -43,7 +46,7 @@ std = [ "sp-application-crypto/std", "sp-arithmetic/std", "codec/std", - "log", + "log/std", "sp-core/std", "rand", "sp-std/std", diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 2fb4f7546d..c8b93a083b 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -57,6 +57,7 @@ pub mod transaction_validity; pub mod random_number_generator; mod runtime_string; mod multiaddress; +pub mod runtime_logger; pub use crate::runtime_string::*; diff --git a/primitives/runtime/src/runtime_logger.rs b/primitives/runtime/src/runtime_logger.rs new file mode 100644 index 0000000000..e27dc828cd --- /dev/null +++ b/primitives/runtime/src/runtime_logger.rs @@ -0,0 +1,108 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! A logger that can be used to log from the runtime. +//! +//! See [`RuntimeLogger`] for more docs. + +/// Runtime logger implementation - `log` crate backend. +/// +/// The logger should be initialized if you want to display +/// logs inside the runtime that is not necessarily running natively. +pub struct RuntimeLogger; + +impl RuntimeLogger { + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). + #[cfg(feature = "std")] + pub fn init() {} + + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). + #[cfg(not(feature = "std"))] + pub fn init() { + static LOGGER: RuntimeLogger = RuntimeLogger; + let _ = log::set_logger(&LOGGER); + + // Set max level to `TRACE` to ensure we propagate + // all log entries to the native side that will do the + // final filtering on what should be printed. + // + // If we don't set any level, logging is disabled + // completly. + log::set_max_level(log::LevelFilter::Trace); + } +} + +impl log::Log for RuntimeLogger { + fn enabled(&self, _metadata: &log::Metadata) -> bool { + // to avoid calling to host twice, we pass everything + // and let the host decide what to print. + // If someone is initializing the logger they should + // know what they are doing. 
+ true + } + + fn log(&self, record: &log::Record) { + use sp_std::fmt::Write; + let mut w = sp_std::Writer::default(); + let _ = ::core::write!(&mut w, "{}", record.args()); + + sp_io::logging::log( + record.level().into(), + record.target(), + w.inner(), + ); + } + + fn flush(&self) {} +} + +#[cfg(test)] +mod tests { + use substrate_test_runtime_client::{ + ExecutionStrategy, TestClientBuilderExt, DefaultTestClientBuilderExt, + TestClientBuilder, runtime::TestAPI, + }; + use sp_api::{ProvideRuntimeApi, BlockId}; + + #[test] + fn ensure_runtime_logger_works() { + if std::env::var("RUN_TEST").is_ok() { + sp_tracing::try_init_simple(); + + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(0); + runtime_api.do_trace_log(&block_id).expect("Logging should not fail"); + } else { + let executable = std::env::current_exe().unwrap(); + let output = std::process::Command::new(executable) + .env("RUN_TEST", "1") + .env("RUST_LOG", "trace") + .args(&["--nocapture", "ensure_runtime_logger_works"]) + .output() + .unwrap(); + + let output = dbg!(String::from_utf8(output.stderr).unwrap()); + assert!(output.contains("Hey I'm runtime")); + } + } +} diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 8000c23dd4..9f63d64d41 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -113,7 +113,7 @@ pub fn generate_initial_session_keys( client: std::sync::Arc, at: &BlockId, seeds: Vec, -) -> Result<(), sp_api::ApiErrorFor> +) -> Result<(), sp_api::ApiError> where Block: BlockT, T: ProvideRuntimeApi, diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index 3b26520813..dda8f523b7 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -211,9 +211,9 @@ impl Externalities for BasicExternalities { &mut self, child_info: &ChildInfo, 
_limit: Option, - ) -> bool { - self.inner.children_default.remove(child_info.storage_key()); - true + ) -> (bool, u32) { + let num_removed = self.inner.children_default.remove(child_info.storage_key()).map(|c| c.data.len()).unwrap_or(0); + (true, num_removed as u32) } fn clear_prefix(&mut self, prefix: &[u8]) { @@ -411,6 +411,29 @@ mod tests { assert_eq!(ext.child_storage(child_info, b"doe"), None); } + #[test] + fn kill_child_storage_returns_num_elements_removed() { + let child_info = ChildInfo::new_default(b"storage_key"); + let child_info = &child_info; + let mut ext = BasicExternalities::new(Storage { + top: Default::default(), + children_default: map![ + child_info.storage_key().to_vec() => StorageChild { + data: map![ + b"doe".to_vec() => b"reindeer".to_vec(), + b"dog".to_vec() => b"puppy".to_vec(), + b"hello".to_vec() => b"world".to_vec(), + ], + child_info: child_info.to_owned(), + } + ] + }); + + + let res = ext.kill_child_storage(child_info, None); + assert_eq!(res, (true, 3)); + } + #[test] fn basic_externalities_is_empty() { // Make sure no values are set by default in `BasicExternalities`. 
diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 1e64cd74bc..7907cda6fb 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -391,7 +391,7 @@ where &mut self, child_info: &ChildInfo, limit: Option, - ) -> bool { + ) -> (bool, u32) { trace!(target: "state", "{:04x}: KillChild({})", self.id, HexDisplay::from(&child_info.storage_key()), @@ -399,9 +399,9 @@ where let _guard = guard(); self.mark_dirty(); self.overlay.clear_child_storage(child_info); + let mut num_deleted: u32 = 0; if let Some(limit) = limit { - let mut num_deleted: u32 = 0; let mut all_deleted = true; self.backend.apply_to_child_keys_while(child_info, |key| { if num_deleted == limit { @@ -417,13 +417,14 @@ where self.overlay.set_child_storage(child_info, key.to_vec(), None); true }); - all_deleted + (all_deleted, num_deleted) } else { self.backend.apply_to_child_keys_while(child_info, |key| { + num_deleted = num_deleted.saturating_add(1); self.overlay.set_child_storage(child_info, key.to_vec(), None); true }); - true + (true, num_deleted) } } @@ -575,27 +576,41 @@ where #[cfg(feature = "std")] fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { let _guard = guard(); - let root = self.overlay.changes_trie_root( - self.backend, - self.changes_trie_state.as_ref(), - Decode::decode(&mut &parent_hash[..]).map_err(|e| - trace!( - target: "state", - "Failed to decode changes root parent hash: {}", - e, - ) - )?, - true, - self.storage_transaction_cache, - ); + if let Some(ref root) = self.storage_transaction_cache.changes_trie_transaction_storage_root { + trace!( + target: "state", + "{:04x}: ChangesRoot({})(cached) {:?}", + self.id, + HexDisplay::from(&parent_hash), + root, + ); - trace!(target: "state", "{:04x}: ChangesRoot({}) {:?}", - self.id, - HexDisplay::from(&parent_hash), - root, - ); + Ok(Some(root.encode())) + } else { + let root = self.overlay.changes_trie_root( + self.backend, + 
self.changes_trie_state.as_ref(), + Decode::decode(&mut &parent_hash[..]).map_err(|e| + trace!( + target: "state", + "Failed to decode changes root parent hash: {}", + e, + ) + )?, + true, + self.storage_transaction_cache, + ); - root.map(|r| r.map(|o| o.encode())) + trace!( + target: "state", + "{:04x}: ChangesRoot({}) {:?}", + self.id, + HexDisplay::from(&parent_hash), + root, + ); + + root.map(|r| r.map(|o| o.encode())) + } } fn storage_start_transaction(&mut self) { diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 31d4eacc4e..0167633d48 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -392,7 +392,7 @@ mod execution { bool, ) where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, { let mut cache = StorageTransactionCache::default(); @@ -449,7 +449,7 @@ mod execution { ) -> CallResult where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, Handler: FnOnce( CallResult, CallResult, @@ -485,7 +485,7 @@ mod execution { ) -> CallResult where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, { self.overlay.start_transaction(); let (result, was_native) = self.execute_aux( @@ -522,7 +522,7 @@ mod execution { ) -> Result, Box> where R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, + NC: FnOnce() -> result::Result> + UnwindSafe, Handler: FnOnce( CallResult, CallResult, @@ -869,7 +869,7 @@ mod tests { map, traits::{Externalities, RuntimeCode}, testing::TaskExecutor, }; use sp_runtime::traits::BlakeTwo256; - use std::{result, collections::HashMap}; + use std::{result, collections::HashMap, panic::UnwindSafe}; use codec::Decode; use sp_core::{ storage::ChildInfo, NativeOrEncoded, NeverNativeValue, @@ -891,7 +891,7 @@ mod tests { fn 
call< R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result, + NC: FnOnce() -> result::Result> + UnwindSafe, >( &self, ext: &mut dyn Externalities, @@ -1159,7 +1159,7 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); } assert_eq!( @@ -1199,12 +1199,14 @@ mod tests { changes_trie::disabled_state::<_, u64>(), None, ); - assert_eq!(ext.kill_child_storage(&child_info, Some(0)), false); - assert_eq!(ext.kill_child_storage(&child_info, Some(1)), false); - assert_eq!(ext.kill_child_storage(&child_info, Some(2)), false); - assert_eq!(ext.kill_child_storage(&child_info, Some(3)), false); - assert_eq!(ext.kill_child_storage(&child_info, Some(4)), true); - assert_eq!(ext.kill_child_storage(&child_info, Some(5)), true); + assert_eq!(ext.kill_child_storage(&child_info, Some(0)), (false, 0)); + assert_eq!(ext.kill_child_storage(&child_info, Some(1)), (false, 1)); + assert_eq!(ext.kill_child_storage(&child_info, Some(2)), (false, 2)); + assert_eq!(ext.kill_child_storage(&child_info, Some(3)), (false, 3)); + assert_eq!(ext.kill_child_storage(&child_info, Some(4)), (true, 4)); + // Only 4 items to remove + assert_eq!(ext.kill_child_storage(&child_info, Some(5)), (true, 4)); + assert_eq!(ext.kill_child_storage(&child_info, None), (true, 4)); } #[test] diff --git a/primitives/state-machine/src/read_only.rs b/primitives/state-machine/src/read_only.rs index dee7c9e337..296520900c 100644 --- a/primitives/state-machine/src/read_only.rs +++ b/primitives/state-machine/src/read_only.rs @@ -132,7 +132,7 @@ impl<'a, H: Hasher, B: 'a + Backend> Externalities for ReadOnlyExternalities< &mut self, _child_info: &ChildInfo, _limit: Option, - ) -> bool { + ) -> (bool, u32) { unimplemented!("kill_child_storage is not supported in ReadOnlyExternalities") } diff --git a/primitives/state-machine/src/testing.rs 
b/primitives/state-machine/src/testing.rs index a6f9d06824..f4b0cb6592 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -48,20 +48,22 @@ pub struct TestExternalities where H::Out: codec::Codec + Ord, { + /// The overlay changed storage. overlay: OverlayedChanges, offchain_db: TestPersistentOffchainDB, - storage_transaction_cache: StorageTransactionCache< - as Backend>::Transaction, H, N - >, - backend: InMemoryBackend, + storage_transaction_cache: + StorageTransactionCache< as Backend>::Transaction, H, N>, + /// Storage backend. + pub backend: InMemoryBackend, changes_trie_config: Option, changes_trie_storage: ChangesTrieInMemoryStorage, - extensions: Extensions, + /// Extensions. + pub extensions: Extensions, } impl TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { /// Get externalities implementation. pub fn ext(&mut self) -> Ext> { @@ -324,7 +326,7 @@ mod tests { { let mut ext = ext.ext(); - assert!(!ext.kill_child_storage(&child_info, Some(2)), "Should not delete all keys"); + assert!(!ext.kill_child_storage(&child_info, Some(2)).0, "Should not delete all keys"); assert!(ext.child_storage(&child_info, &b"doe"[..]).is_none()); assert!(ext.child_storage(&child_info, &b"dog"[..]).is_none()); diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 1e9f976607..ced8d8c02a 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -29,12 +29,17 @@ use codec::{Encode, Decode}; /// Storage key. 
#[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode))] pub struct StorageKey( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - pub Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); +impl AsRef<[u8]> for StorageKey { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } +} + /// Storage key with read/write tracking information. #[derive(PartialEq, Eq, RuntimeDebug, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Hash, PartialOrd, Ord))] @@ -102,7 +107,7 @@ impl PrefixedStorageKey { /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone, Encode, Decode))] pub struct StorageData( #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] pub Vec, diff --git a/primitives/tasks/src/async_externalities.rs b/primitives/tasks/src/async_externalities.rs index 249222ec71..5d99ca4368 100644 --- a/primitives/tasks/src/async_externalities.rs +++ b/primitives/tasks/src/async_externalities.rs @@ -119,7 +119,7 @@ impl Externalities for AsyncExternalities { &mut self, _child_info: &ChildInfo, _limit: Option, - ) -> bool { + ) -> (bool, u32) { panic!("`kill_child_storage`: should not be used in async externalities!") } diff --git a/prml/attestation/Cargo.toml b/prml/attestation/Cargo.toml index 0eb4f25d69..2ec71ffc2b 100644 --- a/prml/attestation/Cargo.toml +++ b/prml/attestation/Cargo.toml @@ -1,24 +1,24 @@ [package] name = "prml-attestation" -version = "2.0.0" +version = "3.0.0" authors = ["Centrality Developers "] edition = "2018" [dependencies] -frame-benchmarking = { path = "../../frame/benchmarking", 
default-features = false, optional = true } +serde = { version = "1.0.101", optional = true, features = ["derive"] } codec = { package = "parity-scale-codec", version = "2.0.0", default-features = false, features = ["derive"] } -serde = { version = "1.0.101", optional = true } sp-core = { path = "../../primitives/core", default-features = false } sp-io = { path = "../../primitives/io", default-features = false } sp-runtime = { path = "../../primitives/runtime", default-features = false } sp-std = { path = "../../primitives/std", default-features = false } frame-support = { path = "../../frame/support", default-features = false } frame-system = { path = "../../frame/system", default-features = false } +frame-benchmarking = { version = "3.1.0", default-features = false, path = "../../frame/benchmarking", optional = true } [features] default = ["std"] std = [ - "serde", + "serde/std", "codec/std", "sp-core/std", "sp-runtime/std", diff --git a/prml/attestation/src/benchmarking.rs b/prml/attestation/src/benchmarking.rs index cacb1513cd..937ed08cc1 100644 --- a/prml/attestation/src/benchmarking.rs +++ b/prml/attestation/src/benchmarking.rs @@ -18,7 +18,7 @@ use super::*; -use frame_benchmarking::{account, benchmarks, whitelisted_caller}; +use frame_benchmarking::{account, benchmarks, whitelisted_caller, impl_benchmark_test_suite}; use frame_system::RawOrigin; use crate::Module as Attestation; @@ -26,8 +26,6 @@ use crate::Module as Attestation; const SEED: u32 = 0; benchmarks! { - _{ } - set_claim { let issuer: T::AccountId = whitelisted_caller(); let holder: T::AccountId = account("holder", 0, SEED); @@ -66,23 +64,8 @@ benchmarks! 
{ } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn set_claim() { - ExtBuilder::build().execute_with(|| { - assert_ok!(test_benchmark_set_claim::()); - }); - } - - #[test] - fn remove_claim() { - ExtBuilder::build().execute_with(|| { - assert_ok!(test_benchmark_remove_claim::()); - }); - } -} +impl_benchmark_test_suite!( + Attestation, + crate::mock::new_test_ext(), + crate::mock::Test, +); diff --git a/prml/attestation/src/lib.rs b/prml/attestation/src/lib.rs index 38e2d4bb60..7fad1878c7 100644 --- a/prml/attestation/src/lib.rs +++ b/prml/attestation/src/lib.rs @@ -36,17 +36,15 @@ #![cfg_attr(not(feature = "std"), no_std)] mod benchmarking; -pub mod weights; -pub use weights::WeightInfo; mod mock; +mod weights; use frame_support::sp_std::prelude::*; -use frame_support::{ - decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResult, ensure, -}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResult, ensure}; use frame_system::ensure_signed; -use sp_core::uint::U256; +use sp_core::U256; use sp_runtime::traits::Zero; +use weights::WeightInfo; pub trait Config: frame_system::Config { type Event: From> + Into<::Event>; @@ -57,7 +55,9 @@ type AttestationTopic = U256; type AttestationValue = U256; decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin, system = frame_system { + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + fn deposit_event() = default; /// Create or update an existing claim @@ -176,7 +176,7 @@ impl Module { #[cfg(test)] mod tests { use super::*; - use crate::mock::{Attestation, ExtBuilder, Origin, System, Test, Event}; + use crate::mock::{new_test_ext, Attestation, Event as TestEvent, Origin, System, Test}; use frame_support::{assert_noop, assert_ok}; type AccountId = ::AccountId; @@ -184,7 +184,7 @@ mod tests { #[test] fn initialize_holder_has_no_claims() { let holder = 0xbaa; - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { // Note: without any valid issuers, there is no valid input for topics or value assert_eq!(Attestation::issuers(holder), >::new()); }) @@ -196,7 +196,7 @@ mod tests { let holder = 0xbaa; let topic = AttestationTopic::from(0xf00d); let value = AttestationValue::from(0xb33f); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result = Attestation::set_claim(Origin::signed(issuer), holder, topic, value); assert_ok!(result); @@ -212,7 +212,7 @@ mod tests { let holder = 0x1d107; let topic = AttestationTopic::from(0xf001); let value = AttestationValue::from(0xb01); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result = Attestation::set_claim(Origin::signed(holder), holder, topic, value); assert_ok!(result); @@ -230,7 +230,7 @@ mod tests { let topic = AttestationTopic::from(0xf00d); let value_old = AttestationValue::from(0xb33f); let value_new = AttestationValue::from(0xcabba93); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_old = Attestation::set_claim(Origin::signed(issuer), holder, topic, value_old); assert_ok!(result_old); @@ -251,7 +251,7 @@ mod tests { let value_food = AttestationValue::from(0xb33f); let topic_loot = AttestationTopic::from(0x1007); let 
value_loot = AttestationValue::from(0x901d); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_food = Attestation::set_claim(Origin::signed(issuer), holder, topic_food, value_food); let result_loot = Attestation::set_claim(Origin::signed(issuer), holder, topic_loot, value_loot); @@ -273,7 +273,7 @@ mod tests { let topic_food = AttestationTopic::from(0xf00d); let value_food_foo = AttestationValue::from(0xb33f); let value_food_boa = AttestationValue::from(0x90a7); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_foo = Attestation::set_claim(Origin::signed(issuer_foo), holder, topic_food, value_food_foo); let result_boa = Attestation::set_claim(Origin::signed(issuer_boa), holder, topic_food, value_food_boa); @@ -295,7 +295,7 @@ mod tests { let topic = AttestationTopic::from(0xf00d); let value = AttestationValue::from(0xb33f); let invalid_value = AttestationValue::zero(); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_add = Attestation::set_claim(Origin::signed(issuer), holder, topic, value); let result_remove = Attestation::remove_claim(Origin::signed(issuer), holder, topic); @@ -318,7 +318,7 @@ mod tests { let value_food_foo = AttestationValue::from(0xb33f); let value_food_boa = AttestationValue::from(0x90a7); let invalid_value = AttestationValue::zero(); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_foo = Attestation::set_claim(Origin::signed(issuer_foo), holder, topic_food, value_food_foo); let result_boa = Attestation::set_claim(Origin::signed(issuer_boa), holder, topic_food, value_food_boa); @@ -345,7 +345,7 @@ mod tests { let topic_loot = AttestationTopic::from(0x1007); let value_loot = AttestationValue::from(0x901d); let invalid_value = AttestationValue::zero(); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_food = Attestation::set_claim(Origin::signed(issuer), holder, 
topic_food, value_food); let result_loot = Attestation::set_claim(Origin::signed(issuer), holder, topic_loot, value_loot); @@ -371,7 +371,7 @@ mod tests { let topic_loot = AttestationTopic::from(0x1007); let value_loot = AttestationValue::from(0x901d); let invalid_value = AttestationValue::zero(); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { let result_food = Attestation::set_claim(Origin::signed(issuer), holder, topic_food, value_food); let result_loot = Attestation::set_claim(Origin::signed(issuer), holder, topic_loot, value_loot); @@ -395,7 +395,7 @@ mod tests { let issuer = 0xf00; let holder = 0xbaa; let topic = AttestationTopic::from(0xf00d); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { assert_noop!( Attestation::remove_claim(Origin::signed(issuer), holder, topic), Error::::TopicNotRegistered @@ -409,11 +409,11 @@ mod tests { let holder = 0xbaa; let topic = AttestationTopic::from(0xf00d); let value = AttestationValue::from(0xb33f); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { System::set_block_number(1); assert_ok!(Attestation::set_claim(Origin::signed(issuer), holder, topic, value)); - let expected_event = Event::prml_attestation(RawEvent::ClaimCreated(holder, issuer, topic, value)); + let expected_event = TestEvent::prml_attestation(RawEvent::ClaimCreated(holder, issuer, topic, value)); // Assert assert!(System::events().iter().any(|record| record.event == expected_event)); }) @@ -425,12 +425,12 @@ mod tests { let holder = 0xbaa; let topic = AttestationTopic::from(0xf00d); let value = AttestationValue::from(0xb33f); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { System::set_block_number(1); assert_ok!(Attestation::set_claim(Origin::signed(issuer), holder, topic, value)); assert_ok!(Attestation::remove_claim(Origin::signed(issuer), holder, topic)); - let expected_event = Event::prml_attestation(RawEvent::ClaimRemoved(holder, issuer, topic)); + 
let expected_event = TestEvent::prml_attestation(RawEvent::ClaimRemoved(holder, issuer, topic)); // Assert assert!(System::events().iter().any(|record| record.event == expected_event)); }) @@ -443,12 +443,12 @@ mod tests { let topic = AttestationTopic::from(0xf00d); let value_old = AttestationValue::from(0xb33f); let value_new = AttestationValue::from(0xcabba93); - ExtBuilder::build().execute_with(|| { + new_test_ext().execute_with(|| { System::set_block_number(1); assert_ok!(Attestation::set_claim(Origin::signed(issuer), holder, topic, value_old)); assert_ok!(Attestation::set_claim(Origin::signed(issuer), holder, topic, value_new)); - let expected_event = Event::prml_attestation(RawEvent::ClaimUpdated(holder, issuer, topic, value_new)); + let expected_event = TestEvent::prml_attestation(RawEvent::ClaimUpdated(holder, issuer, topic, value_new)); // Assert assert!(System::events().iter().any(|record| record.event == expected_event)); }) diff --git a/prml/attestation/src/mock.rs b/prml/attestation/src/mock.rs index d6dc2f772c..9431047aeb 100644 --- a/prml/attestation/src/mock.rs +++ b/prml/attestation/src/mock.rs @@ -20,18 +20,18 @@ #![cfg(test)] -use super::*; -use crate as prml_attestation; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, }; -type Block = frame_system::mocking::MockBlock; +use super::*; +use crate as prml_attestation; + type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( pub enum Test where @@ -40,19 +40,18 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Module, Call, Config, Storage, Event}, - Attestation: prml_attestation::{Module, Call, Storage, Event} + Attestation: prml_attestation::{Module, Call, Storage, Event}, } ); parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Config for Test { type BaseCallFilter = (); + type BlockWeights = (); + type BlockLength = (); + type DbWeight = (); type Origin = Origin; type Index = u64; type Call = Call; @@ -64,12 +63,9 @@ impl frame_system::Config for Test { type Header = Header; type Event = Event; type BlockHashCount = BlockHashCount; - type BlockLength = (); - type BlockWeights = (); - type DbWeight = (); type Version = (); - type AccountData = (); type PalletInfo = PalletInfo; + type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); @@ -81,15 +77,9 @@ impl Config for Test { type WeightInfo = (); } -#[derive(Default)] -pub struct ExtBuilder; - -impl ExtBuilder { - // builds genesis config - pub fn build() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() - .unwrap() - .into() - } +pub(crate) fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } diff --git a/prml/generic-asset/Cargo.toml b/prml/generic-asset/Cargo.toml index 8c301d80f9..afae39f6f1 100644 --- a/prml/generic-asset/Cargo.toml +++ b/prml/generic-asset/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "prml-generic-asset" -version = "2.0.0" +version = "3.0.0" authors = ["Centrality Developers "] edition = "2018" license = "GPL-3.0" diff --git a/prml/generic-asset/rpc/Cargo.toml b/prml/generic-asset/rpc/Cargo.toml index 8e6e2b7957..efe87a3bc1 100644 --- a/prml/generic-asset/rpc/Cargo.toml +++ b/prml/generic-asset/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "prml-generic-asset-rpc" -version = "2.0.0" +version = "3.0.0" authors = ["Centrality Developers "] edition = "2018" license = "GPL-3.0" diff --git a/prml/generic-asset/rpc/runtime-api/Cargo.toml 
b/prml/generic-asset/rpc/runtime-api/Cargo.toml index f1c1009888..3544ff8d60 100644 --- a/prml/generic-asset/rpc/runtime-api/Cargo.toml +++ b/prml/generic-asset/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "prml-generic-asset-rpc-runtime-api" -version = "2.0.0" +version = "3.0.0" authors = ["Centrality Developers "] edition = "2018" license = "GPL-3.0" diff --git a/prml/generic-asset/src/benchmarking.rs b/prml/generic-asset/src/benchmarking.rs index fdc4ef4eb2..145e5445c5 100644 --- a/prml/generic-asset/src/benchmarking.rs +++ b/prml/generic-asset/src/benchmarking.rs @@ -18,15 +18,13 @@ use super::*; -use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account, whitelisted_caller}; use crate::Module as GenericAsset; +use frame_benchmarking::{account, benchmarks, impl_benchmark_test_suite, whitelisted_caller}; +use frame_system::RawOrigin; const SEED: u32 = 0; benchmarks! { - _ { } - // Benchmark `transfer` extrinsic with the worst possible conditions: // Transfer will kill the sender account. // Transfer will create the recipient account. @@ -35,11 +33,11 @@ benchmarks! { // spending asset id let asset_id = GenericAsset::::spending_asset_id(); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); GenericAsset::::set_free_balance(asset_id, &caller, initial_balance); let recipient: T::AccountId = account("recipient", 0, SEED); - let transfer_amount = T::Balance::from(5_000_000); + let transfer_amount = T::Balance::from(5_000_000u32); }: transfer(RawOrigin::Signed(caller.clone()), asset_id, recipient.clone(), transfer_amount) verify { assert_eq!(GenericAsset::::free_balance(asset_id, &caller), Zero::zero()); @@ -50,7 +48,7 @@ benchmarks! { // Mint some amount of new asset to an account and burn the asset from it. 
burn { let caller: T::AccountId = whitelisted_caller(); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let asset_id = GenericAsset::::next_asset_id(); let permissions = PermissionLatest::::new(caller.clone()); let asset_options :AssetOptions = AssetOptions { @@ -68,10 +66,10 @@ benchmarks! { let account: T::AccountId = account("bob", 0, SEED); // Mint some asset to the account 'bob' so that 'bob' can burn those - let mint_amount = T::Balance::from(5_000_000); + let mint_amount = T::Balance::from(5_000_000u32); let _ = GenericAsset::::mint(RawOrigin::Signed(caller.clone()).into(), asset_id, account.clone(), mint_amount); - let burn_amount = T::Balance::from(5_000_000); + let burn_amount = T::Balance::from(5_000_000u32); }: burn(RawOrigin::Signed(caller.clone()), asset_id, account.clone(), burn_amount) verify { assert_eq!(GenericAsset::::free_balance(asset_id, &account), Zero::zero()); @@ -81,7 +79,7 @@ benchmarks! { // Benchmark `burn`, GA's create comes from ROOT account. create { let caller: T::AccountId = whitelisted_caller(); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let permissions = PermissionLatest::::new(caller.clone()); let asset_id = GenericAsset::::next_asset_id(); let asset_options :AssetOptions = AssetOptions { @@ -99,7 +97,7 @@ benchmarks! { mint { let caller: T::AccountId = whitelisted_caller(); let mint_to: T::AccountId = account("recipient", 0, SEED); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let asset_id = GenericAsset::::next_asset_id(); let permissions = PermissionLatest::::new(caller.clone()); let asset_options :AssetOptions = AssetOptions { @@ -113,10 +111,10 @@ benchmarks! 
{ AssetInfo::default() ); - let mint_amount = T::Balance::from(1_000_000); + let mint_amount = T::Balance::from(1_000_000u32); }: mint(RawOrigin::Signed(caller.clone()), asset_id, mint_to.clone(), mint_amount ) verify { - let total_issuance = T::Balance::from(6_000_000); + let total_issuance = T::Balance::from(6_000_000u32); assert_eq!(GenericAsset::::total_issuance(&asset_id), total_issuance); assert_eq!(GenericAsset::::free_balance(asset_id, &mint_to.clone()), mint_amount); } @@ -126,10 +124,10 @@ benchmarks! { update_asset_info { let caller: T::AccountId = whitelisted_caller(); let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let asset_id = GenericAsset::::next_asset_id(); let permissions = PermissionLatest::::new(caller.clone()); - let burn_amount = T::Balance::from(5_000); + let burn_amount = T::Balance::from(5_000u32); let asset_options :AssetOptions = AssetOptions { initial_issuance: initial_balance, permissions, @@ -151,7 +149,7 @@ benchmarks! { // Update permission to include update and mint update_permission { let caller: T::AccountId = whitelisted_caller(); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let permissions = PermissionLatest { update: Owner::Address(caller.clone()), mint: Owner::None, @@ -183,11 +181,11 @@ benchmarks! { // Benchmark `create_reserved`, create reserved asset from ROOT account. 
create_reserved { let caller: T::AccountId = whitelisted_caller(); - let initial_balance = T::Balance::from(5_000_000); + let initial_balance = T::Balance::from(5_000_000u32); let permissions = PermissionLatest::::new(caller.clone()); // create reserved asset with asset_id >= next_asset_id should fail so set the next asset id to some value - >::put(T::AssetId::from(10001)); - let asset_id = T::AssetId::from(1000); + >::put(T::AssetId::from(10001u32)); + let asset_id = T::AssetId::from(1000u32); let asset_options :AssetOptions = AssetOptions { initial_issuance: initial_balance, permissions, @@ -196,26 +194,12 @@ benchmarks! { verify { assert_eq!(GenericAsset::::total_issuance(&asset_id), initial_balance); assert_eq!(GenericAsset::::free_balance(asset_id, &T::AccountId::default()), initial_balance); - assert_eq!(asset_id, T::AssetId::from(1000)); + assert_eq!(asset_id, T::AssetId::from(1000u32)); } } -#[cfg(test)] -mod tests { - use super::*; - use crate::mock::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn generic_asset_benchmark_test() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer::()); - assert_ok!(test_benchmark_burn::()); - assert_ok!(test_benchmark_create::()); - assert_ok!(test_benchmark_create_reserved::()); - assert_ok!(test_benchmark_mint::()); - assert_ok!(test_update_asset_info::()); - assert_ok!(test_update_permission::()); - }); - } -} +impl_benchmark_test_suite!( + GenericAsset, + crate::mock::new_test_ext_with_default(), + crate::mock::Test, +); diff --git a/prml/generic-asset/src/imbalances.rs b/prml/generic-asset/src/imbalances.rs index 0cad83a9f0..932ec89494 100644 --- a/prml/generic-asset/src/imbalances.rs +++ b/prml/generic-asset/src/imbalances.rs @@ -24,7 +24,7 @@ // wrapping these imbalances in a private module is necessary to ensure absolute // privacy of the inner member. 
-use crate::{TotalIssuance, Config}; +use crate::{Config, TotalIssuance}; use frame_support::{ storage::StorageMap, traits::{Imbalance, TryDrop}, @@ -215,7 +215,7 @@ pub trait CheckedImbalance: Imbalance { Self::Opposite: CheckedImbalance, { if other.asset_id().is_zero() { - return Ok(OffsetResult::Imbalance(self)) + return Ok(OffsetResult::Imbalance(self)); } if self.asset_id().is_zero() && !self.amount().is_zero() { return Err(Error::ZeroIdWithNonZeroAmount); diff --git a/prml/generic-asset/src/impls.rs b/prml/generic-asset/src/impls.rs index 69b2bc2892..ed7b828180 100644 --- a/prml/generic-asset/src/impls.rs +++ b/prml/generic-asset/src/impls.rs @@ -16,7 +16,7 @@ //! Extra trait implementations for the `GenericAsset` module -use crate::{Error, Module, NegativeImbalance, PositiveImbalance, SpendingAssetIdAuthority, Config}; +use crate::{Config, Error, Module, NegativeImbalance, PositiveImbalance, SpendingAssetIdAuthority}; use frame_support::traits::{ExistenceRequirement, Imbalance, SignedImbalance, WithdrawReasons}; use prml_support::{AssetIdAuthority, MultiCurrencyAccounting}; use sp_runtime::{ @@ -145,13 +145,13 @@ impl MultiCurrencyAccounting for Module { #[cfg(test)] mod tests { use super::*; - use crate::mock::{ExtBuilder, GenericAsset, Test}; + use crate::mock::{new_test_ext_with_balance, new_test_ext_with_default, GenericAsset, Test}; use frame_support::assert_noop; use sp_runtime::traits::Zero; #[test] fn multi_accounting_minimum_balance() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { assert!(::minimum_balance().is_zero()); }); } @@ -159,49 +159,43 @@ mod tests { #[test] fn multi_accounting_total_balance() { let (alice, asset_id, amount) = (&1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, *alice, amount)) - .build() - .execute_with(|| { - assert_eq!( - ::total_balance(alice, Some(asset_id)), - amount - ); - - GenericAsset::reserve(asset_id, alice, amount / 2).ok(); - // total 
balance should include reserved balance - assert_eq!( - ::total_balance(alice, Some(asset_id)), - amount - ); - }); + new_test_ext_with_balance(asset_id, *alice, amount).execute_with(|| { + assert_eq!( + ::total_balance(alice, Some(asset_id)), + amount + ); + + GenericAsset::reserve(asset_id, alice, amount / 2).ok(); + // total balance should include reserved balance + assert_eq!( + ::total_balance(alice, Some(asset_id)), + amount + ); + }); } #[test] fn multi_accounting_free_balance() { let (alice, asset_id, amount) = (&1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, *alice, amount)) - .build() - .execute_with(|| { - assert_eq!( - ::free_balance(alice, Some(asset_id)), - amount - ); - - GenericAsset::reserve(asset_id, alice, amount / 2).ok(); - // free balance should not include reserved balance - assert_eq!( - ::free_balance(alice, Some(asset_id)), - amount / 2 - ); - }); + new_test_ext_with_balance(asset_id, *alice, amount).execute_with(|| { + assert_eq!( + ::free_balance(alice, Some(asset_id)), + amount + ); + + GenericAsset::reserve(asset_id, alice, amount / 2).ok(); + // free balance should not include reserved balance + assert_eq!( + ::free_balance(alice, Some(asset_id)), + amount / 2 + ); + }); } #[test] fn multi_accounting_deposit_creating() { let (alice, asset_id, amount) = (&1, 16000, 100); - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { let imbalance = ::deposit_creating(alice, Some(asset_id), amount); // Check a positive imbalance of `amount` was created assert_eq!(imbalance.peek(), amount); @@ -217,7 +211,7 @@ mod tests { #[test] fn multi_accounting_deposit_into_existing() { let (alice, asset_id, amount) = (&1, 16000, 100); - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { let result = ::deposit_into_existing(alice, Some(asset_id), amount); // Check a positive imbalance of `amount` was created @@ -232,32 +226,29 @@ mod tests { #[test] 
fn multi_accounting_ensure_can_withdraw() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - assert_eq!( - ::ensure_can_withdraw( - &alice, - Some(asset_id), - amount / 2, - WithdrawReasons::empty(), - amount / 2, - ), - Ok(()) - ); - - // check free balance has not decreased - assert_eq!(GenericAsset::free_balance(asset_id, &alice), amount); - // check issuance has not decreased - assert_eq!(GenericAsset::total_issuance(asset_id), amount); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + assert_eq!( + ::ensure_can_withdraw( + &alice, + Some(asset_id), + amount / 2, + WithdrawReasons::all(), + amount / 2, + ), + Ok(()) + ); + + // check free balance has not decreased + assert_eq!(GenericAsset::free_balance(asset_id, &alice), amount); + // check issuance has not decreased + assert_eq!(GenericAsset::total_issuance(asset_id), amount); + }); } #[test] fn multi_accounting_make_free_balance_be() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // Issuance should be `0` initially assert!(GenericAsset::total_issuance(asset_id).is_zero()); @@ -280,46 +271,40 @@ mod tests { fn multi_accounting_transfer() { let (alice, dest_id, asset_id, amount) = (1, 2, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - assert_eq!( - ::transfer( - &alice, - &dest_id, - Some(asset_id), - amount, - ExistenceRequirement::KeepAlive - ), - Ok(()) - ); - assert_eq!(GenericAsset::free_balance(asset_id, &dest_id), amount); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + assert_eq!( + ::transfer( + &alice, + &dest_id, + Some(asset_id), + amount, + ExistenceRequirement::KeepAlive + ), + Ok(()) + ); + assert_eq!(GenericAsset::free_balance(asset_id, &dest_id), amount); + }); } #[test] 
fn multi_accounting_withdraw() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - assert_eq!(GenericAsset::total_issuance(asset_id), amount); - let result = ::withdraw( - &alice, - Some(asset_id), - amount / 2, - WithdrawReasons::empty(), - ExistenceRequirement::KeepAlive, - ); - assert_eq!(result.unwrap().peek(), amount / 2); - - // check free balance of asset has decreased for the account - assert_eq!(GenericAsset::free_balance(asset_id, &alice), amount / 2); - // check global issuance has decreased for the asset - assert_eq!(GenericAsset::total_issuance(asset_id), amount / 2); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + assert_eq!(GenericAsset::total_issuance(asset_id), amount); + let result = ::withdraw( + &alice, + Some(asset_id), + amount / 2, + WithdrawReasons::all(), + ExistenceRequirement::KeepAlive, + ); + assert_eq!(result.unwrap().peek(), amount / 2); + + // check free balance of asset has decreased for the account + assert_eq!(GenericAsset::free_balance(asset_id, &alice), amount / 2); + // check global issuance has decreased for the asset + assert_eq!(GenericAsset::total_issuance(asset_id), amount / 2); + }); } #[test] @@ -327,223 +312,202 @@ mod tests { // Run through all the `MultiAccounting` functions checking that the default currency is // used when the Asset ID is left unspecified (`None`) let (alice, bob, amount) = (&1, &2, 100); - ExtBuilder::default() - .free_balance((16001, *alice, amount)) // `160001` is the spending asset id from genesis config - .build() - .execute_with(|| { - assert_eq!( - ::total_balance(alice, None), - amount - ); - - assert_eq!( - ::free_balance(alice, None), - amount - ); - - // Mint `amount` of default currency into `alice`s account - let _ = ::deposit_creating(alice, None, amount); - // Check balance updated - assert_eq!( - ::total_balance(alice, None), - amount + amount - ); - 
assert_eq!(GenericAsset::total_issuance(16001), amount + amount); - - // Make free balance be equal to `amount` again - let _ = ::make_free_balance_be(alice, None, amount); - assert_eq!( - ::free_balance(alice, None), - amount - ); - assert_eq!(GenericAsset::total_issuance(16001), amount); - - // Mint `amount` of the default currency into `alice`s account. Similar to `deposit_creating` above - let _ = ::deposit_into_existing(alice, None, amount); - // Check balance updated - assert_eq!( - ::total_balance(alice, None), - amount + amount - ); - assert_eq!(GenericAsset::total_issuance(16001), amount + amount); - - // transfer - let _ = ::transfer( - alice, - bob, - None, - amount, - ExistenceRequirement::KeepAlive, - ); - assert_eq!( - ::free_balance(alice, None), - amount - ); - assert_eq!( - ::free_balance(bob, None), - amount - ); - assert_eq!(GenericAsset::total_issuance(16001), amount + amount); - - // ensure can withdraw - assert!(::ensure_can_withdraw( - alice, - None, - amount, - WithdrawReasons::empty(), - amount, - ) - .is_ok()); + new_test_ext_with_balance(16001, *alice, amount).execute_with(|| { + assert_eq!( + ::total_balance(alice, None), + amount + ); - // withdraw - let _ = ::withdraw( - alice, - None, - amount / 2, - WithdrawReasons::empty(), - ExistenceRequirement::KeepAlive, - ); - assert_eq!( - ::free_balance(alice, None), - amount / 2 - ); - }); + assert_eq!( + ::free_balance(alice, None), + amount + ); + + // Mint `amount` of default currency into `alice`s account + let _ = ::deposit_creating(alice, None, amount); + // Check balance updated + assert_eq!( + ::total_balance(alice, None), + amount + amount + ); + assert_eq!(GenericAsset::total_issuance(16001), amount + amount); + + // Make free balance be equal to `amount` again + let _ = ::make_free_balance_be(alice, None, amount); + assert_eq!( + ::free_balance(alice, None), + amount + ); + assert_eq!(GenericAsset::total_issuance(16001), amount); + + // Mint `amount` of the default currency into 
`alice`s account. Similar to `deposit_creating` above + let _ = ::deposit_into_existing(alice, None, amount); + // Check balance updated + assert_eq!( + ::total_balance(alice, None), + amount + amount + ); + assert_eq!(GenericAsset::total_issuance(16001), amount + amount); + + // transfer + let _ = ::transfer( + alice, + bob, + None, + amount, + ExistenceRequirement::KeepAlive, + ); + assert_eq!( + ::free_balance(alice, None), + amount + ); + assert_eq!( + ::free_balance(bob, None), + amount + ); + assert_eq!(GenericAsset::total_issuance(16001), amount + amount); + + // ensure can withdraw + assert!(::ensure_can_withdraw( + alice, + None, + amount, + WithdrawReasons::all(), + amount, + ) + .is_ok()); + + // withdraw + let _ = ::withdraw( + alice, + None, + amount / 2, + WithdrawReasons::all(), + ExistenceRequirement::KeepAlive, + ); + assert_eq!( + ::free_balance(alice, None), + amount / 2 + ); + }); } #[test] fn multi_accounting_transfer_more_than_free_balance_should_fail() { let (alice, dest_id, asset_id, amount) = (1, 2, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - assert_noop!( - ::transfer( - &alice, - &dest_id, - Some(asset_id), - amount * 2, - ExistenceRequirement::KeepAlive - ), - Error::::InsufficientBalance, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + assert_noop!( + ::transfer( + &alice, + &dest_id, + Some(asset_id), + amount * 2, + ExistenceRequirement::KeepAlive + ), + Error::::InsufficientBalance, + ); + }); } #[test] fn multi_accounting_transfer_locked_funds_should_fail() { let (alice, dest_id, asset_id, amount) = (1, 2, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - // Lock alice's funds - GenericAsset::set_lock(1u64.to_be_bytes(), &alice, amount, WithdrawReasons::all()); - - assert_noop!( - ::transfer( - &alice, - &dest_id, - Some(asset_id), - amount, - 
ExistenceRequirement::KeepAlive - ), - Error::::LiquidityRestrictions, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + // Lock alice's funds + GenericAsset::set_lock(1u64.to_be_bytes(), &alice, amount, WithdrawReasons::all()); + + assert_noop!( + ::transfer( + &alice, + &dest_id, + Some(asset_id), + amount, + ExistenceRequirement::KeepAlive + ), + Error::::LiquidityRestrictions, + ); + }); } #[test] fn multi_accounting_transfer_reserved_funds_should_fail() { let (alice, dest_id, asset_id, amount) = (1, 2, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - GenericAsset::reserve(asset_id, &alice, amount).ok(); - assert_noop!( - ::transfer( - &alice, - &dest_id, - Some(asset_id), - amount, - ExistenceRequirement::KeepAlive - ), - Error::::InsufficientBalance, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + GenericAsset::reserve(asset_id, &alice, amount).ok(); + assert_noop!( + ::transfer( + &alice, + &dest_id, + Some(asset_id), + amount, + ExistenceRequirement::KeepAlive + ), + Error::::InsufficientBalance, + ); + }); } #[test] fn multi_accounting_withdraw_more_than_free_balance_should_fail() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - assert_noop!( - ::withdraw( - &alice, - Some(asset_id), - amount * 2, - WithdrawReasons::empty(), - ExistenceRequirement::KeepAlive - ), - Error::::InsufficientBalance, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + assert_noop!( + ::withdraw( + &alice, + Some(asset_id), + amount * 2, + WithdrawReasons::all(), + ExistenceRequirement::KeepAlive + ), + Error::::InsufficientBalance, + ); + }); } #[test] fn multi_accounting_withdraw_locked_funds_should_fail() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, 
amount)) - .build() - .execute_with(|| { - // Lock alice's funds - GenericAsset::set_lock(1u64.to_be_bytes(), &alice, amount, WithdrawReasons::all()); - - assert_noop!( - ::withdraw( - &alice, - Some(asset_id), - amount, - WithdrawReasons::all(), - ExistenceRequirement::KeepAlive - ), - Error::::LiquidityRestrictions, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + // Lock alice's funds + GenericAsset::set_lock(1u64.to_be_bytes(), &alice, amount, WithdrawReasons::all()); + + assert_noop!( + ::withdraw( + &alice, + Some(asset_id), + amount, + WithdrawReasons::all(), + ExistenceRequirement::KeepAlive + ), + Error::::LiquidityRestrictions, + ); + }); } #[test] fn multi_accounting_withdraw_reserved_funds_should_fail() { let (alice, asset_id, amount) = (1, 16000, 100); - ExtBuilder::default() - .free_balance((asset_id, alice, amount)) - .build() - .execute_with(|| { - // Reserve alice's funds - GenericAsset::reserve(asset_id, &alice, amount).ok(); - - assert_noop!( - ::withdraw( - &alice, - Some(asset_id), - amount, - WithdrawReasons::all(), - ExistenceRequirement::KeepAlive - ), - Error::::InsufficientBalance, - ); - }); + new_test_ext_with_balance(asset_id, alice, amount).execute_with(|| { + // Reserve alice's funds + GenericAsset::reserve(asset_id, &alice, amount).ok(); + + assert_noop!( + ::withdraw( + &alice, + Some(asset_id), + amount, + WithdrawReasons::all(), + ExistenceRequirement::KeepAlive + ), + Error::::InsufficientBalance, + ); + }); } #[test] fn multi_accounting_make_free_balance_edge_cases() { let (alice, asset_id) = (&1, 16000); - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { let max_value = u64::max_value(); let min_value = Zero::zero(); diff --git a/prml/generic-asset/src/lib.rs b/prml/generic-asset/src/lib.rs index 2652329b76..c8576e6242 100644 --- a/prml/generic-asset/src/lib.rs +++ b/prml/generic-asset/src/lib.rs @@ -116,8 +116,7 @@ //! ``` //! 
use frame_support::{ //! dispatch, -//! traits::{Currency, ExistenceRequirement, WithdrawReason}, -//! weights::SimpleDispatchInfo, +//! traits::{Currency, ExistenceRequirement, WithdrawReasons}, //! }; //! # pub trait Config: frame_system::Config { //! # type Currency: Currency; @@ -129,7 +128,7 @@ //! T::Currency::withdraw( //! transactor, //! amount, -//! WithdrawReason::TransactionPayment.into(), +//! WithdrawReasons::TRANSACTION_PAYMENT, //! ExistenceRequirement::KeepAlive, //! )?; //! // ... @@ -152,7 +151,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode}; +use codec::{Codec, Decode, Encode, FullCodec}; use sp_runtime::traits::{ AtLeast32BitUnsigned, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, One, Zero, @@ -173,23 +172,30 @@ use sp_std::prelude::*; use sp_std::{cmp, fmt::Debug, result}; mod benchmarking; -pub mod weights; -pub use weights::WeightInfo; mod imbalances; mod impls; mod mock; mod tests; mod types; +mod weights; // Export GA types/traits pub use self::imbalances::{CheckedImbalance, NegativeImbalance, OffsetResult, PositiveImbalance}; pub use types::*; +use weights::WeightInfo; pub trait Config: frame_system::Config { /// The type for asset IDs - type AssetId: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + MaybeSerializeDeserialize; + type AssetId: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + MaybeSerializeDeserialize + Codec; /// The type for asset amounts - type Balance: Parameter + Member + AtLeast32BitUnsigned + Default + Copy + MaybeSerializeDeserialize + Debug; + type Balance: Parameter + + Member + + AtLeast32BitUnsigned + + Default + + Copy + + MaybeSerializeDeserialize + + Debug + + FullCodec; /// The system event type type Event: From> + Into<::Event>; /// Weight information for extrinsics in this module. 
@@ -582,13 +588,7 @@ impl Module { .checked_add(&amount) .ok_or(Error::::TransferOverflow)?; - Self::ensure_can_withdraw( - asset_id, - from, - amount, - WithdrawReasons::TRANSFER, - new_from_balance, - )?; + Self::ensure_can_withdraw(asset_id, from, amount, WithdrawReasons::TRANSFER, new_from_balance)?; if from != to { >::mutate(asset_id, from, |balance| *balance -= amount); diff --git a/prml/generic-asset/src/mock.rs b/prml/generic-asset/src/mock.rs index d787d7a525..3ba2722a9d 100644 --- a/prml/generic-asset/src/mock.rs +++ b/prml/generic-asset/src/mock.rs @@ -20,17 +20,17 @@ #![cfg(test)] -use super::*; -use crate as prml_generic_asset; use crate::{NegativeImbalance, PositiveImbalance}; -use frame_support::{parameter_types, weights::Weight}; +use frame_support::parameter_types; use sp_core::H256; use sp_runtime::{ testing::Header, traits::{BlakeTwo256, IdentityLookup}, - Perbill, }; +use super::*; +use crate as prml_generic_asset; + // test accounts pub const ALICE: u64 = 1; pub const BOB: u64 = 2; @@ -52,11 +52,8 @@ pub const INITIAL_ISSUANCE: u64 = 1000; // iniital balance for seting free balance pub const INITIAL_BALANCE: u64 = 100; -pub type PositiveImbalanceOf = PositiveImbalance; -pub type NegativeImbalanceOf = NegativeImbalance; - -type Block = frame_system::mocking::MockBlock; type UncheckedExtrinsic = frame_system::mocking::MockUncheckedExtrinsic; +type Block = frame_system::mocking::MockBlock; frame_support::construct_runtime!( pub enum Test where @@ -65,36 +62,35 @@ frame_support::construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic, { System: frame_system::{Module, Call, Config, Storage, Event}, - GenericAsset: prml_generic_asset::{Module, Call, Storage, Config, Event} + GenericAsset: prml_generic_asset::{Module, Call, Storage, Config, Event}, } ); +pub type PositiveImbalanceOf = PositiveImbalance; +pub type NegativeImbalanceOf = NegativeImbalance; + parameter_types! 
{ pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); } - impl frame_system::Config for Test { + type BlockWeights = (); + type BlockLength = (); type BaseCallFilter = (); type Origin = Origin; type Index = u64; - type Call = Call; type BlockNumber = u64; + type Call = Call; type Hash = H256; type Hashing = BlakeTwo256; type AccountId = u64; type Lookup = IdentityLookup; type Header = Header; - type Event = Event; type BlockHashCount = BlockHashCount; - type BlockLength = (); - type BlockWeights = (); + type Event = Event; type DbWeight = (); type Version = (); - type AccountData = (); type PalletInfo = PalletInfo; + type AccountData = (); type OnNewAccount = (); type OnKilledAccount = (); type SystemWeightInfo = (); @@ -108,73 +104,50 @@ impl Config for Test { type WeightInfo = (); } -pub struct ExtBuilder { - asset_id: u32, - next_asset_id: u32, - accounts: Vec, +// Build storage for generic asset with some default values +pub(crate) fn new_test_ext( + assets: Vec, + endowed_accounts: Vec, initial_balance: u64, permissions: Vec<(u32, u64)>, -} - -// Returns default values for genesis config -impl Default for ExtBuilder { - fn default() -> Self { - Self { - asset_id: 0, - next_asset_id: ASSET_ID, - accounts: vec![0], - initial_balance: 0, - permissions: vec![], - } - } -} - -impl ExtBuilder { - // Sets free balance to genesis config - pub fn free_balance(mut self, free_balance: (u32, u64, u64)) -> Self { - self.asset_id = free_balance.0; - self.accounts = vec![free_balance.1]; - self.initial_balance = free_balance.2; - self + next_asset_id: u32, +) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + + prml_generic_asset::GenesisConfig:: { + assets, + endowed_accounts, + initial_balance, + next_asset_id, + staking_asset_id: STAKING_ASSET_ID, + spending_asset_id: 
SPENDING_ASSET_ID, + permissions, + asset_meta: vec![ + (TEST1_ASSET_ID, AssetInfo::new(b"TST1".to_vec(), 1)), + (TEST2_ASSET_ID, AssetInfo::new(b"TST 2".to_vec(), 2)), + ], } + .assimilate_storage(&mut t) + .unwrap(); - pub fn permissions(mut self, permissions: Vec<(u32, u64)>) -> Self { - self.permissions = permissions; - self - } + t.into() +} - pub fn next_asset_id(mut self, asset_id: u32) -> Self { - self.next_asset_id = asset_id; - self - } +pub(crate) fn new_test_ext_with_default() -> sp_io::TestExternalities { + new_test_ext(vec![0], vec![], 0, vec![], ASSET_ID) +} +pub(crate) fn new_test_ext_with_balance( + asset_id: u32, + account_id: u64, + initial_balance: u64, +) -> sp_io::TestExternalities { + new_test_ext(vec![asset_id], vec![account_id], initial_balance, vec![], ASSET_ID) +} - // builds genesis config - pub fn build(self) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - prml_generic_asset::GenesisConfig:: { - assets: vec![self.asset_id], - endowed_accounts: self.accounts, - initial_balance: self.initial_balance, - next_asset_id: self.next_asset_id, - staking_asset_id: STAKING_ASSET_ID, - spending_asset_id: SPENDING_ASSET_ID, - permissions: self.permissions, - asset_meta: vec![ - (TEST1_ASSET_ID, AssetInfo::new(b"TST1".to_vec(), 1)), - (TEST2_ASSET_ID, AssetInfo::new(b"TST 2".to_vec(), 2)), - ], - }.assimilate_storage(&mut t).unwrap(); - - t.into() - } +pub(crate) fn new_test_ext_with_next_asset_id(next_asset_id: u32) -> sp_io::TestExternalities { + new_test_ext(vec![0], vec![], 0, vec![], next_asset_id) } -// This function basically just builds a genesis storage key/value store according to -// our desired mockup. 
-pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() - .unwrap() - .into() +pub(crate) fn new_test_ext_with_permissions(permissions: Vec<(u32, u64)>) -> sp_io::TestExternalities { + new_test_ext(vec![0], vec![], 0, permissions, ASSET_ID) } diff --git a/prml/generic-asset/src/tests.rs b/prml/generic-asset/src/tests.rs index ab72878061..3023dbc9cb 100644 --- a/prml/generic-asset/src/tests.rs +++ b/prml/generic-asset/src/tests.rs @@ -21,14 +21,14 @@ #![cfg(test)] use super::*; -use crate::CheckedImbalance; use crate::mock::{ - new_test_ext, Event, ExtBuilder, GenericAsset, NegativeImbalanceOf, Origin, PositiveImbalanceOf, System, - Test, ALICE, ASSET_ID, BOB, CHARLIE, INITIAL_BALANCE, INITIAL_ISSUANCE, SPENDING_ASSET_ID, STAKING_ASSET_ID, - TEST1_ASSET_ID, TEST2_ASSET_ID, + new_test_ext_with_balance, new_test_ext_with_default, new_test_ext_with_next_asset_id, + new_test_ext_with_permissions, Event as TestEvent, GenericAsset, NegativeImbalanceOf, Origin, PositiveImbalanceOf, + System, Test, ALICE, ASSET_ID, BOB, CHARLIE, INITIAL_BALANCE, INITIAL_ISSUANCE, SPENDING_ASSET_ID, + STAKING_ASSET_ID, TEST1_ASSET_ID, TEST2_ASSET_ID, }; -use frame_support::{assert_noop, assert_ok, traits::{Imbalance}}; - +use crate::CheckedImbalance; +use frame_support::{assert_noop, assert_ok, traits::Imbalance}; fn asset_options(permissions: PermissionLatest) -> AssetOptions { AssetOptions { initial_issuance: INITIAL_ISSUANCE, @@ -38,85 +38,76 @@ fn asset_options(permissions: PermissionLatest) -> AssetOptions { #[test] fn issuing_asset_units_to_issuer_should_work() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_eq!(GenericAsset::next_asset_id(), ASSET_ID); - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - 
assert_eq!(GenericAsset::next_asset_id(), ASSET_ID + 1); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); - assert_eq!(GenericAsset::total_issuance(&ASSET_ID), INITIAL_ISSUANCE); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_eq!(GenericAsset::free_balance(STAKING_ASSET_ID, &ALICE), INITIAL_BALANCE); - }); + assert_eq!(GenericAsset::next_asset_id(), ASSET_ID); + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::next_asset_id(), ASSET_ID + 1); + + assert_eq!(GenericAsset::total_issuance(&ASSET_ID), INITIAL_ISSUANCE); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_eq!(GenericAsset::free_balance(STAKING_ASSET_ID, &ALICE), INITIAL_BALANCE); + }); } #[test] fn issuing_with_next_asset_id_overflow_should_fail() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - NextAssetId::::put(u32::max_value()); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + NextAssetId::::put(u32::max_value()); - assert_noop!( - GenericAsset::create(Origin::root(), ALICE, asset_options(permissions), AssetInfo::default()), - Error::::AssetIdExhausted - ); - assert_eq!(GenericAsset::next_asset_id(), u32::max_value()); - }); + assert_noop!( + GenericAsset::create(Origin::root(), ALICE, asset_options(permissions), AssetInfo::default()), + Error::::AssetIdExhausted + ); + assert_eq!(GenericAsset::next_asset_id(), u32::max_value()); + }); } #[test] fn querying_total_supply_should_work() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = 
PermissionLatest::new(ALICE); - let transfer_ammount = 50; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); - - assert_ok!(GenericAsset::transfer( - Origin::signed(ALICE), - ASSET_ID, - BOB, - transfer_ammount - )); - assert_eq!( - GenericAsset::free_balance(ASSET_ID, &ALICE), - INITIAL_ISSUANCE - transfer_ammount - ); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); - - assert_ok!(GenericAsset::transfer( - Origin::signed(BOB), - ASSET_ID, - CHARLIE, - transfer_ammount / 2 - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount / 2); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &CHARLIE), transfer_ammount / 2); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let transfer_ammount = 50; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); + + assert_ok!(GenericAsset::transfer( + Origin::signed(ALICE), + ASSET_ID, + BOB, + transfer_ammount + )); + assert_eq!( + GenericAsset::free_balance(ASSET_ID, &ALICE), + INITIAL_ISSUANCE - transfer_ammount + ); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); + + assert_ok!(GenericAsset::transfer( + Origin::signed(BOB), + ASSET_ID, + CHARLIE, + transfer_ammount / 2 + )); + 
assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount / 2); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &CHARLIE), transfer_ammount / 2); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); + }); } // Given @@ -136,32 +127,29 @@ fn querying_total_supply_should_work() { // - account 2's `free_balance` = 40. #[test] fn transferring_amount_should_work() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let transfer_ammount = 40; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_ok!(GenericAsset::transfer( - Origin::signed(ALICE), - ASSET_ID, - BOB, - transfer_ammount - )); - assert_eq!( - GenericAsset::free_balance(ASSET_ID, &ALICE), - INITIAL_ISSUANCE - transfer_ammount - ); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let transfer_ammount = 40; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_ok!(GenericAsset::transfer( + Origin::signed(ALICE), + ASSET_ID, + BOB, + transfer_ammount + )); + assert_eq!( + GenericAsset::free_balance(ASSET_ID, &ALICE), + INITIAL_ISSUANCE - transfer_ammount + ); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), transfer_ammount); + }); } // Given @@ -180,46 +168,40 @@ fn transferring_amount_should_work() { // - throw error with insufficient balance. 
#[test] fn transferring_amount_more_than_free_balance_should_fail() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_noop!( - GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, INITIAL_ISSUANCE + 1), - Error::::InsufficientBalance - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_noop!( + GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, INITIAL_ISSUANCE + 1), + Error::::InsufficientBalance + ); + }); } #[test] fn transferring_less_than_one_unit_should_fail() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_noop!( - GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, 0), - Error::::ZeroAmount - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_noop!( + 
GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, 0), + Error::::ZeroAmount + ); + }); } // Given @@ -233,57 +215,51 @@ fn transferring_less_than_one_unit_should_fail() { // - Free balance after self transfer should equal to the free balance before self transfer. #[test] fn self_transfer_should_unchanged() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let transfer_ammount = 50; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_ok!(GenericAsset::transfer( - Origin::signed(ALICE), - ASSET_ID, - ALICE, - transfer_ammount - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let transfer_ammount = 50; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_ok!(GenericAsset::transfer( + Origin::signed(ALICE), + ASSET_ID, + ALICE, + transfer_ammount + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); + }); } #[test] fn transferring_more_units_than_total_supply_should_fail() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - 
assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); - assert_noop!( - GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, INITIAL_ISSUANCE + 1), - Error::::InsufficientBalance - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE); + assert_noop!( + GenericAsset::transfer(Origin::signed(ALICE), ASSET_ID, BOB, INITIAL_ISSUANCE + 1), + Error::::InsufficientBalance + ); + }); } // Ensures it uses fake money for staking asset id. #[test] fn staking_asset_id_should_correct() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { assert_eq!(GenericAsset::staking_asset_id(), STAKING_ASSET_ID); }); } @@ -291,7 +267,7 @@ fn staking_asset_id_should_correct() { // Ensures it uses fake money for spending asset id. #[test] fn spending_asset_id_should_correct() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { assert_eq!(GenericAsset::spending_asset_id(), SPENDING_ASSET_ID); }); } @@ -302,7 +278,7 @@ fn spending_asset_id_should_correct() { // -Â total_balance should return 0 #[test] fn total_balance_should_be_zero() { - new_test_ext().execute_with(|| { + new_test_ext_with_default().execute_with(|| { assert_eq!(GenericAsset::total_balance(ASSET_ID, &ALICE), 0); }); } @@ -316,28 +292,25 @@ fn total_balance_should_be_zero() { // -Â total_balance should equals to free balance + reserved balance. 
#[test] fn total_balance_should_be_equal_to_account_balance() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let reserved_amount = 50; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_ok!(GenericAsset::reserve(ASSET_ID, &ALICE, reserved_amount)); - assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), reserved_amount); - assert_eq!( - GenericAsset::free_balance(ASSET_ID, &ALICE), - INITIAL_ISSUANCE - reserved_amount - ); - assert_eq!(GenericAsset::total_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let reserved_amount = 50; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_ok!(GenericAsset::reserve(ASSET_ID, &ALICE, reserved_amount)); + assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), reserved_amount); + assert_eq!( + GenericAsset::free_balance(ASSET_ID, &ALICE), + INITIAL_ISSUANCE - reserved_amount + ); + assert_eq!(GenericAsset::total_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + }); } // Given @@ -350,13 +323,10 @@ fn total_balance_should_be_equal_to_account_balance() { // -Â free_balance should return 50. 
#[test] fn free_balance_should_only_return_account_free_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 50); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 50); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE); + }); } // Given @@ -368,13 +338,10 @@ fn free_balance_should_only_return_account_free_balance() { // -Â total_balance should equals to account balance + free balance. #[test] fn total_balance_should_be_equal_to_sum_of_account_balance_and_free_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 50); - assert_eq!(GenericAsset::total_balance(ASSET_ID, &ALICE), INITIAL_BALANCE + 50); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 50); + assert_eq!(GenericAsset::total_balance(ASSET_ID, &ALICE), INITIAL_BALANCE + 50); + }); } // Given @@ -386,13 +353,10 @@ fn total_balance_should_be_equal_to_sum_of_account_balance_and_free_balance() { // - reserved_balance should return 70. 
#[test] fn reserved_balance_should_only_return_account_reserved_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 70); - assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 70); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 70); + assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 70); + }); } // Given @@ -405,7 +369,7 @@ fn reserved_balance_should_only_return_account_reserved_balance() { // - reserved_balance = amount #[test] fn set_reserved_balance_should_add_balance_as_reserved() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 50); assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 50); }); @@ -421,13 +385,10 @@ fn set_reserved_balance_should_add_balance_as_reserved() { // - New free_balance should replace older free_balance. 
#[test] fn set_free_balance_should_add_amount_as_free_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_free_balance(ASSET_ID, &ALICE, 50); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), 50); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_free_balance(ASSET_ID, &ALICE, 50); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), 50); + }); } // Given @@ -443,14 +404,11 @@ fn set_free_balance_should_add_amount_as_free_balance() { // - new reserved_balance = original free balance + reserved amount #[test] fn reserve_should_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::reserve(ASSET_ID, &ALICE, 70)); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE - 70); - assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 70); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + assert_ok!(GenericAsset::reserve(ASSET_ID, &ALICE, 70)); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE - 70); + assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 70); + }); } // Given @@ -465,17 +423,14 @@ fn reserve_should_moves_amount_from_balance_to_reserved_balance() { // - Should throw an error. 
#[test] fn reserve_should_not_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - assert_noop!( - GenericAsset::reserve(ASSET_ID, &ALICE, INITIAL_BALANCE + 20), - Error::::InsufficientBalance - ); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE); - assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + assert_noop!( + GenericAsset::reserve(ASSET_ID, &ALICE, INITIAL_BALANCE + 20), + Error::::InsufficientBalance + ); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE); + assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); + }); } // Given @@ -489,13 +444,10 @@ fn reserve_should_not_moves_amount_from_balance_to_reserved_balance() { // - unreserved should return 20. #[test] fn unreserve_should_return_subtracted_value_from_unreserved_amount_by_actual_account_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); - assert_eq!(GenericAsset::unreserve(ASSET_ID, &ALICE, 120), 20); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); + assert_eq!(GenericAsset::unreserve(ASSET_ID, &ALICE, 120), 20); + }); } // Given @@ -509,13 +461,10 @@ fn unreserve_should_return_subtracted_value_from_unreserved_amount_by_actual_acc // - unreserved should return None. 
#[test] fn unreserve_should_return_none() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); - assert_eq!(GenericAsset::unreserve(ASSET_ID, &ALICE, 50), 0); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); + assert_eq!(GenericAsset::unreserve(ASSET_ID, &ALICE, 50), 0); + }); } // Given @@ -529,14 +478,11 @@ fn unreserve_should_return_none() { // - free_balance should be 200. #[test] fn unreserve_should_increase_free_balance_by_reserved_balance() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); - GenericAsset::unreserve(ASSET_ID, &ALICE, 120); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE + 100); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); + GenericAsset::unreserve(ASSET_ID, &ALICE, 120); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_BALANCE + 100); + }); } // Given @@ -550,13 +496,10 @@ fn unreserve_should_increase_free_balance_by_reserved_balance() { // - reserved_balance should be 0. 
#[test] fn unreserve_should_deduct_reserved_balance_by_reserved_amount() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - GenericAsset::unreserve(ASSET_ID, &ALICE, 120); - assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + GenericAsset::unreserve(ASSET_ID, &ALICE, 120); + assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); + }); } // Given @@ -570,23 +513,20 @@ fn unreserve_should_deduct_reserved_balance_by_reserved_amount() { // - slash should return None. #[test] fn slash_should_return_slash_reserved_amount() { - ExtBuilder::default() - .free_balance((ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let reserved_amount = 100; - let slash_amount = 70; - GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, reserved_amount); - assert_eq!(GenericAsset::slash(ASSET_ID, &ALICE, slash_amount), None); - assert_eq!( - GenericAsset::free_balance(ASSET_ID, &ALICE), - INITIAL_BALANCE - slash_amount - ); - assert_eq!( - GenericAsset::total_balance(ASSET_ID, &ALICE), - INITIAL_BALANCE + reserved_amount - slash_amount - ); - }); + new_test_ext_with_balance(ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let reserved_amount = 100; + let slash_amount = 70; + GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, reserved_amount); + assert_eq!(GenericAsset::slash(ASSET_ID, &ALICE, slash_amount), None); + assert_eq!( + GenericAsset::free_balance(ASSET_ID, &ALICE), + INITIAL_BALANCE - slash_amount + ); + assert_eq!( + GenericAsset::total_balance(ASSET_ID, &ALICE), + INITIAL_BALANCE + reserved_amount - slash_amount + ); + }); } // Given @@ -597,7 +537,7 @@ fn slash_should_return_slash_reserved_amount() { // - Should return slashed_reserved - reserved_balance. 
#[test] fn slash_reserved_should_deducts_up_to_amount_from_reserved_balance() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); assert_eq!(GenericAsset::slash_reserved(ASSET_ID, &ALICE, 150), Some(50)); assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); @@ -612,7 +552,7 @@ fn slash_reserved_should_deducts_up_to_amount_from_reserved_balance() { // - Should return None. #[test] fn slash_reserved_should_return_none() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); assert_eq!(GenericAsset::slash_reserved(ASSET_ID, &ALICE, 100), None); assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 0); @@ -628,7 +568,7 @@ fn slash_reserved_should_return_none() { // - Should return `remaining`. #[test] fn repatriate_reserved_return_amount_subtracted_by_slash_amount() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); assert_ok!(GenericAsset::repatriate_reserved(ASSET_ID, &ALICE, &ALICE, 130), 30); assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), 100); @@ -644,7 +584,7 @@ fn repatriate_reserved_return_amount_subtracted_by_slash_amount() { // - Should return zero. #[test] fn repatriate_reserved_return_none() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { GenericAsset::set_reserved_balance(ASSET_ID, &ALICE, 100); assert_ok!(GenericAsset::repatriate_reserved(ASSET_ID, &ALICE, &ALICE, 90), 0); assert_eq!(GenericAsset::reserved_balance(ASSET_ID, &ALICE), 10); @@ -660,7 +600,7 @@ fn repatriate_reserved_return_none() { // - Should create a new reserved asset. 
#[test] fn create_reserved_should_create_a_default_account_with_the_balance_given() { - ExtBuilder::default().next_asset_id(1001).build().execute_with(|| { + new_test_ext_with_next_asset_id(1001).execute_with(|| { let permissions = PermissionLatest::new(ALICE); let options = asset_options(permissions); @@ -677,7 +617,7 @@ fn create_reserved_should_create_a_default_account_with_the_balance_given() { #[test] fn create_reserved_with_non_reserved_asset_id_should_failed() { - ExtBuilder::default().next_asset_id(999).build().execute_with(|| { + new_test_ext_with_next_asset_id(999).execute_with(|| { let permissions = PermissionLatest::new(ALICE); let options = asset_options(permissions); @@ -691,7 +631,7 @@ fn create_reserved_with_non_reserved_asset_id_should_failed() { #[test] fn create_reserved_with_a_taken_asset_id_should_failed() { - ExtBuilder::default().next_asset_id(1001).build().execute_with(|| { + new_test_ext_with_next_asset_id(1001).execute_with(|| { let permissions = PermissionLatest::new(ALICE); let options = asset_options(permissions); @@ -722,7 +662,7 @@ fn create_reserved_with_a_taken_asset_id_should_failed() { // - Should throw a permission error #[test] fn mint_without_permission_should_throw_error() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { let amount = 100; assert_noop!( @@ -742,25 +682,22 @@ fn mint_without_permission_should_throw_error() { // - Should not change `origins` free_balance. 
#[test] fn mint_should_increase_asset() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let amount = 100; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, amount)); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), amount); - // Origin's free_balance should not change. - assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE + amount); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let amount = 100; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, amount)); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), amount); + // Origin's free_balance should not change. + assert_eq!(GenericAsset::free_balance(ASSET_ID, &ALICE), INITIAL_ISSUANCE); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE + amount); + }); } // Given @@ -772,17 +709,14 @@ fn mint_should_increase_asset() { // - Should throw a permission error. 
#[test] fn burn_should_throw_permission_error() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let amount = 100; + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let amount = 100; - assert_noop!( - GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, BOB, amount), - Error::::NoBurnPermission, - ); - }); + assert_noop!( + GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, BOB, amount), + Error::::NoBurnPermission, + ); + }); } // Given @@ -795,30 +729,27 @@ fn burn_should_throw_permission_error() { // - Should not change `origin`'s free_balance. #[test] fn burn_should_burn_an_asset() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let mint_amount = 100; - let burn_amount = 40; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, mint_amount)); - assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE + mint_amount); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + let mint_amount = 100; + let burn_amount = 40; - assert_ok!(GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, BOB, burn_amount)); - assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), mint_amount - burn_amount); - assert_eq!( - GenericAsset::total_issuance(ASSET_ID), - INITIAL_ISSUANCE + mint_amount - burn_amount - ); - }); + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, mint_amount)); + assert_eq!(GenericAsset::total_issuance(ASSET_ID), INITIAL_ISSUANCE + mint_amount); + + 
assert_ok!(GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, BOB, burn_amount)); + assert_eq!(GenericAsset::free_balance(ASSET_ID, &BOB), mint_amount - burn_amount); + assert_eq!( + GenericAsset::total_issuance(ASSET_ID), + INITIAL_ISSUANCE + mint_amount - burn_amount + ); + }); } // Given @@ -830,26 +761,23 @@ fn burn_should_burn_an_asset() { // - The account origin should have burn, mint and update permissions. #[test] fn check_permission_should_return_correct_permission() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Burn)); - assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Mint)); - assert!(GenericAsset::check_permission( - ASSET_ID, - &ALICE, - &PermissionType::Update - )); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::new(ALICE); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Burn)); + assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Mint)); + assert!(GenericAsset::check_permission( + ASSET_ID, + &ALICE, + &PermissionType::Update + )); + }); } // Given @@ -861,34 +789,23 @@ fn check_permission_should_return_correct_permission() { // - The account origin should not have burn, mint and update permissions. 
#[test] fn check_permission_should_return_false_for_no_permission() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::default(); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert!(!GenericAsset::check_permission( - ASSET_ID, - &ALICE, - &PermissionType::Burn - )); - assert!(!GenericAsset::check_permission( - ASSET_ID, - &ALICE, - &PermissionType::Mint - )); - assert!(!GenericAsset::check_permission( - ASSET_ID, - &ALICE, - &PermissionType::Update - )); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::default(); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert!(!GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Burn)); + assert!(!GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Mint)); + assert!(!GenericAsset::check_permission( + ASSET_ID, + &ALICE, + &PermissionType::Update + )); + }); } // Given @@ -900,40 +817,33 @@ fn check_permission_should_return_false_for_no_permission() { // - The account origin should have update and mint permissions. 
#[test] fn update_permission_should_change_permission() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest { - update: Owner::Address(ALICE), - mint: Owner::None, - burn: Owner::None, - }; - - let new_permission = PermissionLatest { - update: Owner::Address(ALICE), - mint: Owner::Address(ALICE), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_ok!(GenericAsset::update_permission( - Origin::signed(ALICE), - ASSET_ID, - new_permission - )); - assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Mint)); - assert!(!GenericAsset::check_permission( - ASSET_ID, - &ALICE, - &PermissionType::Burn - )); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest { + update: Owner::Address(ALICE), + mint: Owner::None, + burn: Owner::None, + }; + + let new_permission = PermissionLatest { + update: Owner::Address(ALICE), + mint: Owner::Address(ALICE), + burn: Owner::None, + }; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_ok!(GenericAsset::update_permission( + Origin::signed(ALICE), + ASSET_ID, + new_permission + )); + assert!(GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Mint)); + assert!(!GenericAsset::check_permission(ASSET_ID, &ALICE, &PermissionType::Burn)); + }); } // Given @@ -944,29 +854,26 @@ fn update_permission_should_change_permission() { // - Should throw an error stating "Origin does not have enough permission to update permissions." 
#[test] fn update_permission_should_throw_error_when_lack_of_permissions() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::default(); - - let new_permission = PermissionLatest { - update: Owner::Address(ALICE), - mint: Owner::Address(ALICE), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_noop!( - GenericAsset::update_permission(Origin::signed(ALICE), ASSET_ID, new_permission), - Error::::NoUpdatePermission, - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let permissions = PermissionLatest::default(); + + let new_permission = PermissionLatest { + update: Owner::Address(ALICE), + mint: Owner::Address(ALICE), + burn: Owner::None, + }; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_noop!( + GenericAsset::update_permission(Origin::signed(ALICE), ASSET_ID, new_permission), + Error::::NoUpdatePermission, + ); + }); } // Given @@ -983,7 +890,7 @@ fn update_permission_should_throw_error_when_lack_of_permissions() { // - Permissions must have burn, mint and updatePermission for the given asset_id. #[test] fn create_asset_works_with_given_asset_id_and_from_account() { - ExtBuilder::default().next_asset_id(1001).build().execute_with(|| { + new_test_ext_with_next_asset_id(1001).execute_with(|| { let from_account: Option<::AccountId> = Some(ALICE); let permissions = PermissionLatest::new(ALICE); let expected_permission = PermissionVersions::V1(permissions.clone()); @@ -1009,7 +916,7 @@ fn create_asset_works_with_given_asset_id_and_from_account() { // - `create_asset` should not work. 
#[test] fn create_asset_with_non_reserved_asset_id_should_fail() { - ExtBuilder::default().next_asset_id(999).build().execute_with(|| { + new_test_ext_with_next_asset_id(999).execute_with(|| { let permissions = PermissionLatest::new(ALICE); assert_noop!( @@ -1031,7 +938,7 @@ fn create_asset_with_non_reserved_asset_id_should_fail() { // - `create_asset` should not work. #[test] fn create_asset_with_a_taken_asset_id_should_fail() { - ExtBuilder::default().next_asset_id(1001).build().execute_with(|| { + new_test_ext_with_next_asset_id(1001).execute_with(|| { let permissions = PermissionLatest::new(ALICE); assert_ok!(GenericAsset::create_asset( @@ -1062,7 +969,7 @@ fn create_asset_with_a_taken_asset_id_should_fail() { // - Should create a reserved token. #[test] fn create_asset_should_create_a_reserved_asset_when_from_account_is_none() { - ExtBuilder::default().next_asset_id(1001).build().execute_with(|| { + new_test_ext_with_next_asset_id(1001).execute_with(|| { let from_account: Option<::AccountId> = None; let permissions = PermissionLatest::new(ALICE); let created_account_id = 0; @@ -1094,7 +1001,7 @@ fn create_asset_should_create_a_reserved_asset_when_from_account_is_none() { // - Should not create a `reserved_asset`. 
#[test] fn create_asset_should_create_a_user_asset() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { let from_account: Option<::AccountId> = None; let permissions = PermissionLatest::new(ALICE); let reserved_asset_id = 1001; @@ -1115,89 +1022,83 @@ fn create_asset_should_create_a_user_asset() { #[test] fn update_permission_should_raise_event() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions.clone()), - AssetInfo::default() - )); - assert_ok!(GenericAsset::update_permission( - Origin::signed(ALICE), - ASSET_ID, - permissions.clone() - )); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + System::set_block_number(1); + + let permissions = PermissionLatest::new(ALICE); + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions.clone()), + AssetInfo::default() + )); + assert_ok!(GenericAsset::update_permission( + Origin::signed(ALICE), + ASSET_ID, + permissions.clone() + )); - let expected_event = Event::prml_generic_asset(RawEvent::PermissionUpdated(ASSET_ID, permissions)); - assert!(System::events().iter().any(|record| record.event == expected_event)); - }); + let expected_event = TestEvent::prml_generic_asset(RawEvent::PermissionUpdated(ASSET_ID, permissions)); + assert!(System::events().iter().any(|record| record.event == expected_event)); + }); } #[test] fn mint_should_raise_event() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let amount = 100; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - 
assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, amount)); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + System::set_block_number(1); - let expected_event = Event::prml_generic_asset(RawEvent::Minted(ASSET_ID, BOB, amount)); - assert!(System::events().iter().any(|record| record.event == expected_event)); - }); + let permissions = PermissionLatest::new(ALICE); + let amount = 100; + + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_ok!(GenericAsset::mint(Origin::signed(ALICE), ASSET_ID, BOB, amount)); + + let expected_event = TestEvent::prml_generic_asset(RawEvent::Minted(ASSET_ID, BOB, amount)); + assert!(System::events().iter().any(|record| record.event == expected_event)); + }); } #[test] fn burn_should_raise_event() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let permissions = PermissionLatest::new(ALICE); - let amount = 100; - - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(permissions), - AssetInfo::default() - )); - assert_ok!(GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, ALICE, amount)); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + System::set_block_number(1); + + let permissions = PermissionLatest::new(ALICE); + let amount = 100; - let expected_event = Event::prml_generic_asset(RawEvent::Burned(ASSET_ID, ALICE, amount)); - assert!(System::events().iter().any(|record| record.event == expected_event)); - }); + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(permissions), + AssetInfo::default() + )); + assert_ok!(GenericAsset::burn(Origin::signed(ALICE), ASSET_ID, ALICE, amount)); + + let expected_event = TestEvent::prml_generic_asset(RawEvent::Burned(ASSET_ID, ALICE, amount)); + assert!(System::events().iter().any(|record| record.event == 
expected_event)); + }); } #[test] fn can_set_asset_owner_permissions_in_genesis() { - ExtBuilder::default() - .permissions(vec![(ASSET_ID, ALICE)]) - .build() - .execute_with(|| { - let expected: PermissionVersions<_> = PermissionsV1::new(ALICE).into(); - let actual = GenericAsset::get_permission(ASSET_ID); - assert_eq!(expected, actual); - }); + new_test_ext_with_permissions(vec![(ASSET_ID, ALICE)]).execute_with(|| { + let expected: PermissionVersions<_> = PermissionsV1::new(ALICE).into(); + let actual = GenericAsset::get_permission(ASSET_ID); + assert_eq!(expected, actual); + }); } #[test] fn zero_asset_id_should_updated_after_negative_imbalance_operations() { let asset_id = 16000; - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // generate empty negative imbalance let negative_im = NegativeImbalanceOf::zero(); let other = NegativeImbalanceOf::new(100, asset_id); @@ -1209,7 +1110,7 @@ fn zero_asset_id_should_updated_after_negative_imbalance_operations() { assert_eq!(merged_im.asset_id(), asset_id); assert_eq!(merged_im.peek(), 100); - let negative_im = NegativeImbalanceOf::new(100, asset_id); + let negative_im = NegativeImbalanceOf::new(100, asset_id); let other = NegativeImbalanceOf::new(100, asset_id); // If assets are same, the amount can be merged safely let merged_im = negative_im.checked_merge(other).unwrap(); @@ -1229,7 +1130,7 @@ fn zero_asset_id_should_updated_after_negative_imbalance_operations() { assert_eq!(negative_im.asset_id(), asset_id); assert_eq!(negative_im.peek(), 100); - negative_im = NegativeImbalanceOf::new(100, asset_id); + negative_im = NegativeImbalanceOf::new(100, asset_id); // subsume other with same asset id should work let other = NegativeImbalanceOf::new(100, asset_id); negative_im.checked_subsume(other).unwrap(); @@ -1246,7 +1147,7 @@ fn zero_asset_id_should_updated_after_negative_imbalance_operations() { #[test] fn zero_asset_id_should_updated_after_positive_imbalance_operations() 
{ let asset_id = 16000; - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // generate empty positive imbalance let positive_im = PositiveImbalanceOf::zero(); let other = PositiveImbalanceOf::new(100, asset_id); @@ -1257,7 +1158,7 @@ fn zero_asset_id_should_updated_after_positive_imbalance_operations() { assert_eq!(merged_im.asset_id(), asset_id); assert_eq!(merged_im.peek(), 100); - let positive_im = PositiveImbalanceOf::new(10, asset_id); + let positive_im = PositiveImbalanceOf::new(10, asset_id); let other = PositiveImbalanceOf::new(100, asset_id); // If assets are same, the amount can be merged safely let merged_im = positive_im.checked_merge(other).unwrap(); @@ -1276,7 +1177,7 @@ fn zero_asset_id_should_updated_after_positive_imbalance_operations() { assert_eq!(positive_im.asset_id(), asset_id); assert_eq!(positive_im.peek(), 100); - positive_im = PositiveImbalanceOf::new(100, asset_id); + positive_im = PositiveImbalanceOf::new(100, asset_id); // subsume other with same asset id should work let other = PositiveImbalanceOf::new(100, asset_id); positive_im.checked_subsume(other).unwrap(); @@ -1295,7 +1196,7 @@ fn zero_asset_id_should_updated_after_positive_imbalance_operations() { #[test] fn negative_imbalance_merge_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let negative_im = NegativeImbalanceOf::new(100, 1); let other = NegativeImbalanceOf::new(50, 2); @@ -1314,7 +1215,7 @@ fn negative_imbalance_merge_with_incompatible_asset_id_should_fail() { #[test] fn positive_imbalance_merge_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let positive_im = PositiveImbalanceOf::new(100, 1); let other = PositiveImbalanceOf::new(50, 2); @@ 
-1334,7 +1235,7 @@ fn positive_imbalance_merge_with_incompatible_asset_id_should_fail() { #[test] fn negative_imbalance_subsume_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let mut negative_im = NegativeImbalanceOf::new(100, 1); let other = NegativeImbalanceOf::new(50, 2); @@ -1355,7 +1256,7 @@ fn negative_imbalance_subsume_with_incompatible_asset_id_should_fail() { #[test] fn positive_imbalance_subsume_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let mut positive_im = PositiveImbalanceOf::new(100, 1); let other = PositiveImbalanceOf::new(50, 2); @@ -1376,7 +1277,7 @@ fn positive_imbalance_subsume_with_incompatible_asset_id_should_fail() { #[test] fn negative_imbalance_offset_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let negative_im = NegativeImbalanceOf::new(100, 1); let opposite_im = PositiveImbalanceOf::new(50, 2); @@ -1395,7 +1296,7 @@ fn negative_imbalance_offset_with_incompatible_asset_id_should_fail() { #[test] fn positive_imbalance_offset_with_incompatible_asset_id_should_fail() { - ExtBuilder::default().build().execute_with(|| { + new_test_ext_with_default().execute_with(|| { // create two mew imbalances with different asset id let positive_im = PositiveImbalanceOf::new(100, 1); let opposite_im = NegativeImbalanceOf::new(50, 2); @@ -1416,168 +1317,147 @@ fn positive_imbalance_offset_with_incompatible_asset_id_should_fail() { fn total_issuance_should_update_after_positive_imbalance_dropped() { let asset_id = 16000; let balance = 100000; - ExtBuilder::default() - .free_balance((asset_id, 1, balance)) - .build() - 
.execute_with(|| { - assert_eq!(GenericAsset::total_issuance(&asset_id), balance); - // generate empty positive imbalance - let positive_im = PositiveImbalanceOf::new(0, asset_id); - let other = PositiveImbalanceOf::new(100, asset_id); - // merge - let merged_im = positive_im.checked_merge(other); - // explitically drop `imbalance` so issuance is managed - drop(merged_im); - assert_eq!(GenericAsset::total_issuance(&asset_id), balance + 100); - }); + new_test_ext_with_balance(asset_id, 1, balance).execute_with(|| { + assert_eq!(GenericAsset::total_issuance(&asset_id), balance); + // generate empty positive imbalance + let positive_im = PositiveImbalanceOf::new(0, asset_id); + let other = PositiveImbalanceOf::new(100, asset_id); + // merge + let merged_im = positive_im.checked_merge(other); + // explitically drop `imbalance` so issuance is managed + drop(merged_im); + assert_eq!(GenericAsset::total_issuance(&asset_id), balance + 100); + }); } #[test] fn total_issuance_should_update_after_negative_imbalance_dropped() { let asset_id = 16000; let balance = 100000; - ExtBuilder::default() - .free_balance((asset_id, 1, balance)) - .build() - .execute_with(|| { - assert_eq!(GenericAsset::total_issuance(&asset_id), balance); - // generate empty positive imbalance - let positive_im = NegativeImbalanceOf::new(0, asset_id); - let other = NegativeImbalanceOf::new(100, asset_id); - // merge - let merged_im = positive_im.checked_merge(other); - // explitically drop `imbalance` so issuance is managed - drop(merged_im); - assert_eq!(GenericAsset::total_issuance(&asset_id), balance - 100); - }); + new_test_ext_with_balance(asset_id, 1, balance).execute_with(|| { + assert_eq!(GenericAsset::total_issuance(&asset_id), balance); + // generate empty positive imbalance + let positive_im = NegativeImbalanceOf::new(0, asset_id); + let other = NegativeImbalanceOf::new(100, asset_id); + // merge + let merged_im = positive_im.checked_merge(other); + // explitically drop `imbalance` so issuance 
is managed + drop(merged_im); + assert_eq!(GenericAsset::total_issuance(&asset_id), balance - 100); + }); } #[test] fn query_pre_existing_asset_info() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - assert_eq!( - GenericAsset::registered_assets(), - vec![ - (TEST1_ASSET_ID, AssetInfo::new(b"TST1".to_vec(), 1)), - (TEST2_ASSET_ID, AssetInfo::new(b"TST 2".to_vec(), 2)) - ] - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + assert_eq!( + GenericAsset::registered_assets(), + vec![ + (TEST1_ASSET_ID, AssetInfo::new(b"TST1".to_vec(), 1)), + (TEST2_ASSET_ID, AssetInfo::new(b"TST 2".to_vec(), 2)) + ] + ); + }); } #[test] fn no_asset_info() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - // Asset STAKING_ASSET_ID exists but no info is stored for that - assert_eq!(>::get(STAKING_ASSET_ID), AssetInfo::default()); - // Asset STAKING_ASSET_ID doesn't exist - assert!(!>::contains_key(ASSET_ID)); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + // Asset STAKING_ASSET_ID exists but no info is stored for that + assert_eq!(>::get(STAKING_ASSET_ID), AssetInfo::default()); + // Asset STAKING_ASSET_ID doesn't exist + assert!(!>::contains_key(ASSET_ID)); + }); } #[test] fn non_owner_not_permitted_update_asset_info() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); - - // Should fail as ASSET_ID doesn't exist - assert_noop!( - GenericAsset::update_asset_info(Origin::signed(ALICE), ASSET_ID, web3_asset_info.clone()), - Error::::AssetIdNotExist - ); - - // Should fail as ALICE hasn't got the permission to update this asset's info - assert_noop!( - GenericAsset::update_asset_info(Origin::signed(ALICE), STAKING_ASSET_ID, 
web3_asset_info,), - Error::::NoUpdatePermission - ); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); + + // Should fail as ASSET_ID doesn't exist + assert_noop!( + GenericAsset::update_asset_info(Origin::signed(ALICE), ASSET_ID, web3_asset_info.clone()), + Error::::AssetIdNotExist + ); + + // Should fail as ALICE hasn't got the permission to update this asset's info + assert_noop!( + GenericAsset::update_asset_info(Origin::signed(ALICE), STAKING_ASSET_ID, web3_asset_info,), + Error::::NoUpdatePermission + ); + }); } #[test] fn owner_update_asset_info() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); - - // Should succeed and set ALICE as the owner of ASSET_ID - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(PermissionLatest::new(ALICE)), - web3_asset_info.clone() - )); - - // Should return the same info as ALICE set for the asset while creating it - assert_eq!(>::get(ASSET_ID), web3_asset_info); - - let web3_asset_info = AssetInfo::new(b"WEB3.1".to_vec(), 5); - // Should succeed as ALICE is the owner of this asset - assert_ok!(GenericAsset::update_asset_info( - Origin::signed(ALICE), - ASSET_ID, - web3_asset_info.clone(), - )); - - assert_eq!(>::get(ASSET_ID), web3_asset_info); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); + + // Should succeed and set ALICE as the owner of ASSET_ID + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(PermissionLatest::new(ALICE)), + web3_asset_info.clone() + )); + + // Should return the same info as ALICE set for the asset while creating it + assert_eq!(>::get(ASSET_ID), web3_asset_info); + + let web3_asset_info = 
AssetInfo::new(b"WEB3.1".to_vec(), 5); + // Should succeed as ALICE is the owner of this asset + assert_ok!(GenericAsset::update_asset_info( + Origin::signed(ALICE), + ASSET_ID, + web3_asset_info.clone(), + )); + + assert_eq!(>::get(ASSET_ID), web3_asset_info); + }); } #[test] fn non_owner_permitted_update_asset_info() { - ExtBuilder::default() - .free_balance((STAKING_ASSET_ID, ALICE, INITIAL_BALANCE)) - .build() - .execute_with(|| { - let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); - - // Should succeed and set ALICE as the owner of ASSET_ID - assert_ok!(GenericAsset::create( - Origin::root(), - ALICE, - asset_options(PermissionLatest::new(ALICE)), - web3_asset_info.clone(), - )); - - // Should succeed as ALICE could update the asset info - assert_eq!(>::get(ASSET_ID), web3_asset_info); - - let web3_asset_info = AssetInfo::new(b"WEB3.1".to_vec(), 5); - // Should fail as BOB hasn't got the permission - assert_noop!( - GenericAsset::update_asset_info(Origin::signed(BOB), ASSET_ID, web3_asset_info.clone()), - Error::::NoUpdatePermission - ); - - let bob_update_permission = PermissionLatest { - update: Owner::Address(BOB), - mint: Owner::None, - burn: Owner::None, - }; - assert_ok!(GenericAsset::update_permission( - Origin::signed(ALICE), - ASSET_ID, - bob_update_permission - )); - // Should succeed as Bob has now got the update permission - assert_ok!(GenericAsset::update_asset_info( - Origin::signed(BOB), - ASSET_ID, - web3_asset_info.clone() - )); - - // Should succeed as BOB could update the asset info - assert_eq!(>::get(ASSET_ID), web3_asset_info); - }); + new_test_ext_with_balance(STAKING_ASSET_ID, ALICE, INITIAL_BALANCE).execute_with(|| { + let web3_asset_info = AssetInfo::new(b"WEB3.0".to_vec(), 3); + + // Should succeed and set ALICE as the owner of ASSET_ID + assert_ok!(GenericAsset::create( + Origin::root(), + ALICE, + asset_options(PermissionLatest::new(ALICE)), + web3_asset_info.clone(), + )); + + // Should succeed as ALICE could update the 
asset info + assert_eq!(>::get(ASSET_ID), web3_asset_info); + + let web3_asset_info = AssetInfo::new(b"WEB3.1".to_vec(), 5); + // Should fail as BOB hasn't got the permission + assert_noop!( + GenericAsset::update_asset_info(Origin::signed(BOB), ASSET_ID, web3_asset_info.clone()), + Error::::NoUpdatePermission + ); + + let bob_update_permission = PermissionLatest { + update: Owner::Address(BOB), + mint: Owner::None, + burn: Owner::None, + }; + assert_ok!(GenericAsset::update_permission( + Origin::signed(ALICE), + ASSET_ID, + bob_update_permission + )); + // Should succeed as Bob has now got the update permission + assert_ok!(GenericAsset::update_asset_info( + Origin::signed(BOB), + ASSET_ID, + web3_asset_info.clone() + )); + + // Should succeed as BOB could update the asset info + assert_eq!(>::get(ASSET_ID), web3_asset_info); + }); } diff --git a/prml/generic-asset/src/types.rs b/prml/generic-asset/src/types.rs index 35966a6665..bd2ad521a7 100644 --- a/prml/generic-asset/src/types.rs +++ b/prml/generic-asset/src/types.rs @@ -122,8 +122,8 @@ impl Encode for PermissionVersions { fn encode_to(&self, dest: &mut T) { match self { PermissionVersions::V1(payload) => { - &PermissionVersionNumber::V1.encode_to(dest); - payload.encode_to(dest); + dest.write(&PermissionVersionNumber::V1.encode()); + dest.write(&payload.encode()); } } } diff --git a/prml/generic-asset/src/weights.rs b/prml/generic-asset/src/weights.rs index 7ea6d5fed0..60b3929a5d 100644 --- a/prml/generic-asset/src/weights.rs +++ b/prml/generic-asset/src/weights.rs @@ -19,7 +19,7 @@ #![allow(unused_parens)] #![allow(unused_imports)] -use frame_support::weights::{Weight, constants::RocksDbWeight as DbWeight}; +use frame_support::weights::{constants::RocksDbWeight as DbWeight, Weight}; pub trait WeightInfo { fn burn() -> Weight; diff --git a/prml/support/src/lib.rs b/prml/support/src/lib.rs index 30ebcc6f07..90982ab4ec 100644 --- a/prml/support/src/lib.rs +++ b/prml/support/src/lib.rs @@ -22,7 +22,6 @@ // - 
'token' / 'asset' / 'currency' and // - 'balance' / 'value' / 'amount' // are used interchangeably as they make more sense in certain contexts. -use codec::{Codec, FullCodec}; use frame_support::traits::{ExistenceRequirement, Imbalance, SignedImbalance, WithdrawReasons}; use sp_runtime::{ traits::{AtLeast32BitUnsigned, MaybeSerializeDeserialize, Saturating, Zero}, @@ -42,11 +41,11 @@ pub trait AssetIdAuthority { /// Currencies in the system are identifiable by a unique `CurrencyId` pub trait MultiCurrencyAccounting { /// The ID type for an account in the system - type AccountId: Codec + Debug + Default; + type AccountId: Debug + Default; /// The balance of an account for a particular currency - type Balance: AtLeast32BitUnsigned + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default + Saturating; + type Balance: AtLeast32BitUnsigned + Copy + MaybeSerializeDeserialize + Debug + Default + Saturating; /// The ID type of a currency in the system - type CurrencyId: Codec + Debug + Default; + type CurrencyId: Debug + Default; /// A type the is aware of the default network currency ID /// When the currency ID is not specified for a `MultiCurrencyAccounting` method, it will be used /// by default diff --git a/prml/validator-manager/src/lib.rs b/prml/validator-manager/src/lib.rs index c01d6e163f..c7cae73aa4 100644 --- a/prml/validator-manager/src/lib.rs +++ b/prml/validator-manager/src/lib.rs @@ -53,7 +53,7 @@ use sp_staking::SessionIndex; use sp_std::prelude::*; /// The module's config trait. -pub trait Trait: frame_system::Config + pallet_session::Config { +pub trait Trait: frame_system::Config + pallet_session::Trait { /// The overarching event type. type Event: From> + Into<::Event>; /// The minimum number of validators persisted in storage to ensure block production continues. 
diff --git a/ss58-registry.json b/ss58-registry.json index cae6577e21..23ea3f8b6e 100644 --- a/ss58-registry.json +++ b/ss58-registry.json @@ -442,6 +442,24 @@ "standardAccount": null, "website": null }, + { + "prefix": 48, + "network": "neatcoin", + "displayName": "Neatcoin Mainnet", + "symbols": ["NEAT"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://neatcoin.org" + }, + { + "prefix": 63, + "network": "hydradx", + "displayName": "HydraDX", + "symbols": ["HDX"], + "decimals": [12], + "standardAccount": "*25519", + "website": "https://hydradx.io" + }, { "prefix": 65, "network": "aventus", diff --git a/test-utils/derive/Cargo.toml b/test-utils/derive/Cargo.toml index d1148dacda..8f9a37f8db 100644 --- a/test-utils/derive/Cargo.toml +++ b/test-utils/derive/Cargo.toml @@ -9,7 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate test utilities macros" [dependencies] -quote = "1.0.9" +quote = "1.0.6" syn = { version = "1.0.58", features = ["full"] } proc-macro-crate = "0.1.4" diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 1a841ac075..bdb847ae56 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -47,7 +47,7 @@ sp-externalities = { version = "0.9.0", default-features = false, path = "../../ # 3rd party cfg-if = "1.0" -log = { version = "0.4.8", optional = true } +log = { version = "0.4.14", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } [dev-dependencies] @@ -71,7 +71,7 @@ std = [ "frame-executive/std", "sp-inherents/std", "sp-keyring", - "log", + "log/std", "memory-db/std", "sp-offchain/std", "sp-core/std", @@ -97,3 +97,5 @@ std = [ "sp-transaction-pool/std", "trie-db/std", ] +# Special feature to disable logging +disable-logging = [ "sp-api/disable-logging" ] diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index 1de18d32b0..50c455b4ad 100644 --- 
a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -26,5 +26,13 @@ fn main() { // depend on the stack-size. .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") .import_memory() - .build() + .build(); + + WasmBuilder::new() + .with_current_project() + .export_heap_base() + .import_memory() + .set_file_name("wasm_binary_logging_disabled.rs") + .enable_feature("disable-logging") + .build(); } diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index 9dc27c6414..bb0f2d400b 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -43,7 +43,7 @@ pub trait BlockBuilderExt { impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + + A::Api: BlockBuilderApi + ApiExt< substrate_test_runtime::Block, StateBackend = backend::StateBackendFor diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 5800203cf7..c8d11c9b62 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -92,6 +92,7 @@ pub struct GenesisParameters { changes_trie_config: Option, heap_pages_override: Option, extra_storage: Storage, + wasm_code: Option>, } impl GenesisParameters { @@ -113,6 +114,11 @@ impl GenesisParameters { self.extra_storage.clone(), ) } + + /// Set the wasm code that should be used at genesis. 
+ pub fn set_wasm_code(&mut self, code: Vec) { + self.wasm_code = Some(code); + } } impl substrate_test_client::GenesisInit for GenesisParameters { @@ -121,6 +127,10 @@ impl substrate_test_client::GenesisInit for GenesisParameters { let mut storage = self.genesis_config().genesis_map(); + if let Some(ref code) = self.wasm_code { + storage.top.insert(sp_core::storage::well_known_keys::CODE.to_vec(), code.clone()); + } + let child_roots = storage.children_default.iter().map(|(_sk, child_content)| { let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( child_content.data.clone().into_iter().collect() diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index b349d1266b..ef7a51d28d 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -69,6 +69,12 @@ pub type AuraId = sp_consensus_aura::sr25519::AuthorityId; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. +#[cfg(feature = "std")] +pub mod wasm_binary_logging_disabled { + include!(concat!(env!("OUT_DIR"), "/wasm_binary_logging_disabled.rs")); +} + /// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. #[cfg(feature = "std")] pub fn wasm_binary_unwrap() -> &'static [u8] { @@ -76,6 +82,16 @@ pub fn wasm_binary_unwrap() -> &'static [u8] { supported with the flag disabled.") } +/// Wasm binary unwrapped. If built with `SKIP_WASM_BUILD`, the function panics. +#[cfg(feature = "std")] +pub fn wasm_binary_logging_disabled_unwrap() -> &'static [u8] { + wasm_binary_logging_disabled::WASM_BINARY + .expect( + "Development wasm binary is not available. Testing is only supported with the flag \ + disabled." + ) +} + /// Test runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("test"), @@ -742,8 +758,7 @@ cfg_if! 
{ } fn do_trace_log() { - frame_support::debug::RuntimeLogger::init(); - frame_support::debug::trace!("Hey I'm runtime"); + log::trace!("Hey I'm runtime"); } } @@ -1001,8 +1016,7 @@ cfg_if! { } fn do_trace_log() { - frame_support::debug::RuntimeLogger::init(); - frame_support::debug::trace!("Hey I'm runtime"); + log::error!("Hey I'm runtime: {}", log::STATIC_MAX_LEVEL); } } diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index a2e83fe7b0..b3a0f322a6 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -72,7 +72,7 @@ macro_rules! assert_eq_uvec { macro_rules! __assert_eq_uvec { ( $x:expr, $y:expr ) => { $x.iter().for_each(|e| { - if !$y.contains(e) { panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); } + if !$y.contains(e) { panic!("vectors not equal: {:?} != {:?}", $x, $y); } }); } } diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index ea9dfc9674..b72f2e973b 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -24,7 +24,7 @@ use sc_service::{ GenericChainSpec, RuntimeGenesis, KeepBlocks, TransactionStorageMode, }; -use sc_telemetry::{TelemetryHandle, TelemetrySpan}; +use sc_telemetry::TelemetryHandle; use sc_tracing::logging::LoggerBuilder; use wasm_bindgen::prelude::*; use futures::{ @@ -72,7 +72,6 @@ where allow_private_ipv4: true, enable_mdns: false, }; - let telemetry_span = telemetry_handle.as_ref().map(|_| TelemetrySpan::new()); let config = Configuration { network, @@ -84,7 +83,6 @@ where }).into(), telemetry_external_transport: Some(transport), telemetry_handle, - telemetry_span, role: Role::Light, database: { info!("Opening Indexed DB database '{}'...", name); diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index c810bd4d57..51290e5f44 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -13,7 +13,7 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] 
-frame-benchmarking = { version = "3.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "3.1.0", path = "../../../frame/benchmarking" } sp-core = { version = "3.0.0", path = "../../../primitives/core" } sc-service = { version = "0.9.0", default-features = false, path = "../../../client/service" } sc-cli = { version = "0.9.0", path = "../../../client/cli" } diff --git a/utils/frame/remote-externalities/Cargo.toml b/utils/frame/remote-externalities/Cargo.toml new file mode 100644 index 0000000000..d4825211d8 --- /dev/null +++ b/utils/frame/remote-externalities/Cargo.toml @@ -0,0 +1,34 @@ +[package] +name = "remote-externalities" +version = "0.9.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "An externalities provided environemnt that can load itself from remote nodes or cache files" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +jsonrpc-core-client = { version = "15.1.0", features = ["http"] } +sc-rpc-api = { version = "0.9.0", path = "../../../client/rpc-api" } +sc-rpc = { version = "3.0.0", path = "../../../client/rpc" } +futures = "0.3" + +hex-literal = "0.3.1" +env_logger = "0.8.2" +log = "0.4.11" +codec = { package = "parity-scale-codec", version = "2.0.0" } +tokio = "0.1.22" + +sp-io = { version = "3.0.0", path = "../../../primitives/io" } +sp-core = { version = "3.0.0", path = "../../../primitives/core" } + +[dev-dependencies] +async-std = { version = "1.6.5", features = ["attributes"] } + +[features] +remote-test = [] diff --git a/utils/frame/remote-externalities/src/lib.rs b/utils/frame/remote-externalities/src/lib.rs new file mode 100644 index 0000000000..ab26226253 --- /dev/null +++ b/utils/frame/remote-externalities/src/lib.rs @@ -0,0 +1,455 @@ +// This file is part of Substrate. 
+ +// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! # Remote Externalities +//! +//! An equivalent of `sp_io::TestExternalities` that can load its state from a remote substrate +//! based chain, or a local cache file. +//! +//! #### Runtime to Test Against +//! +//! While not absolutely necessary, you most likely need a `Runtime` equivalent in your test setup +//! through which you can infer storage types. There are two options here: +//! +//! 1. Build a mock runtime, similar how to you would build one in a pallet test (see example +//! below). The very important point here is that this mock needs to hold real values for types +//! that matter for you, based on the chain of interest. Some typical ones are: +//! +//! - `sp_runtime::AccountId32` as `AccountId`. +//! - `u32` as `BlockNumber`. +//! - `u128` as Balance. +//! +//! Once you have your `Runtime`, you can use it for storage type resolution and do things like +//! `>::storage_getter()` or `>::get()`. +//! +//! 2. Or, you can use a real runtime. +//! +//! ### Example +//! +//! With a test runtime +//! +//! ```ignore +//! use remote_externalities::Builder; +//! +//! #[derive(Clone, Eq, PartialEq, Debug, Default)] +//! pub struct TestRuntime; +//! +//! use frame_system as system; +//! impl_outer_origin! { +//! pub enum Origin for TestRuntime {} +//! } +//! +//! 
impl frame_system::Config for TestRuntime { +//! .. +//! // we only care about these two for now. The rest can be mock. The block number type of +//! // kusama is u32. +//! type BlockNumber = u32; +//! type Header = Header; +//! .. +//! } +//! +//! #[test] +//! fn test_runtime_works() { +//! let hash: Hash = +//! hex!["f9a4ce984129569f63edc01b1c13374779f9384f1befd39931ffdcc83acf63a7"].into(); +//! let parent: Hash = +//! hex!["540922e96a8fcaf945ed23c6f09c3e189bd88504ec945cc2171deaebeaf2f37e"].into(); +//! Builder::new() +//! .at(hash) +//! .module("System") +//! .build() +//! .execute_with(|| { +//! assert_eq!( +//! // note: the hash corresponds to 3098546. We can check only the parent. +//! // https://polkascan.io/kusama/block/3098546 +//! >::block_hash(3098545u32), +//! parent, +//! ) +//! }); +//! } +//! ``` +//! +//! Or with the real kusama runtime. +//! +//! ```ignore +//! use remote_externalities::Builder; +//! use kusama_runtime::Runtime; +//! +//! #[test] +//! fn test_runtime_works() { +//! let hash: Hash = +//! hex!["f9a4ce984129569f63edc01b1c13374779f9384f1befd39931ffdcc83acf63a7"].into(); +//! Builder::new() +//! .at(hash) +//! .module("Staking") +//! .build() +//! .execute_with(|| assert_eq!(>::validator_count(), 400)); +//! } +//! ``` + +use std::{ + fs, + path::{Path, PathBuf}, +}; +use log::*; +use sp_core::{hashing::twox_128}; +pub use sp_io::TestExternalities; +use sp_core::{ + hexdisplay::HexDisplay, + storage::{StorageKey, StorageData}, +}; +use futures::{ + compat::Future01CompatExt, + TryFutureExt, +}; +use codec::{Encode, Decode}; + +type KeyPair = (StorageKey, StorageData); +type Number = u32; +type Hash = sp_core::H256; +// TODO: make these two generic. + +const LOG_TARGET: &'static str = "remote-ext"; + +/// The execution mode. +#[derive(Clone)] +pub enum Mode { + /// Online. + Online(OnlineConfig), + /// Offline. Uses a cached file and needs not any client config. + Offline(OfflineConfig), +} + +/// configuration of the online execution. 
+/// +/// A cache config must be present. +#[derive(Clone)] +pub struct OfflineConfig { + /// The configuration of the cache file to use. It must be present. + pub cache: CacheConfig, +} + +/// Configuration of the online execution. +/// +/// A cache config may be present and will be written to in that case. +#[derive(Clone)] +pub struct OnlineConfig { + /// The HTTP uri to use. + pub uri: String, + /// The block number at which to connect. Will be latest finalized head if not provided. + pub at: Option, + /// An optional cache file to WRITE to, not for reading. Not cached if set to `None`. + pub cache: Option, + /// The modules to scrape. If empty, entire chain state will be scraped. + pub modules: Vec, +} + +impl Default for OnlineConfig { + fn default() -> Self { + Self { + uri: "http://localhost:9933".into(), + at: None, + cache: None, + modules: Default::default(), + } + } +} + +/// Configuration of the cache. +#[derive(Clone)] +pub struct CacheConfig { + // TODO: I could mix these two into one filed, but I think separate is better bc one can be + // configurable while one not. + /// File name. + pub name: String, + /// Base directory. + pub directory: String, +} + +impl Default for CacheConfig { + fn default() -> Self { + Self { name: "CACHE".into(), directory: ".".into() } + } +} + +impl CacheConfig { + fn path(&self) -> PathBuf { + Path::new(&self.directory).join(self.name.clone()) + } +} + +/// Builder for remote-externalities. 
+pub struct Builder { + inject: Vec, + mode: Mode, +} + +impl Default for Builder { + fn default() -> Self { + Self { + inject: Default::default(), + mode: Mode::Online(OnlineConfig { + at: None, + uri: "http://localhost:9933".into(), + cache: None, + modules: Default::default(), + }), + } + } +} + +// Mode methods +impl Builder { + fn as_online(&self) -> &OnlineConfig { + match &self.mode { + Mode::Online(config) => &config, + _ => panic!("Unexpected mode: Online"), + } + } + + fn as_online_mut(&mut self) -> &mut OnlineConfig { + match &mut self.mode { + Mode::Online(config) => config, + _ => panic!("Unexpected mode: Online"), + } + } +} + +// RPC methods +impl Builder { + async fn rpc_get_head(&self) -> Result { + let uri = self.as_online().uri.clone(); + trace!(target: LOG_TARGET, "rpc: finalized_head"); + let client: sc_rpc_api::chain::ChainClient = + jsonrpc_core_client::transports::http::connect(&uri) + .compat() + .map_err(|_| "client initialization failed") + .await?; + client.finalized_head().compat().map_err(|_| "rpc finalized_head failed.").await + } + + /// Relay the request to `state_getPairs` rpc endpoint. + /// + /// Note that this is an unsafe RPC. + async fn rpc_get_pairs( + &self, + prefix: StorageKey, + at: Hash, + ) -> Result, &'static str> { + let uri = self.as_online().uri.clone(); + trace!(target: LOG_TARGET, "rpc: storage_pairs: {:?} / {:?}", prefix, at); + let client: sc_rpc_api::state::StateClient = + jsonrpc_core_client::transports::http::connect(&uri) + .compat() + .map_err(|_| "client initialization failed") + .await?; + client + .storage_pairs(prefix, Some(at)) + .compat() + .map_err(|_| "rpc finalized_head failed.") + .await + } +} + +// Internal methods +impl Builder { + /// Save the given data as cache. 
+ fn save_cache(&self, data: &[KeyPair], path: &Path) -> Result<(), &'static str> { + info!(target: LOG_TARGET, "writing to cache file {:?}", path); + fs::write(path, data.encode()).map_err(|_| "fs::write failed.")?; + Ok(()) + } + + /// initialize `Self` from cache. Panics if the file does not exist. + fn load_cache(&self, path: &Path) -> Result, &'static str> { + info!(target: LOG_TARGET, "scraping keypairs from cache {:?}", path,); + let bytes = fs::read(path).map_err(|_| "fs::read failed.")?; + Decode::decode(&mut &*bytes).map_err(|_| "decode failed") + } + + /// Build `Self` from a network node denoted by `uri`. + async fn load_remote(&self) -> Result, &'static str> { + let config = self.as_online(); + let at = self + .as_online() + .at + .expect("online config must be initialized by this point; qed.") + .clone(); + info!(target: LOG_TARGET, "scraping keypairs from remote node {} @ {:?}", config.uri, at); + + let keys_and_values = if config.modules.len() > 0 { + let mut filtered_kv = vec![]; + for f in config.modules.iter() { + let hashed_prefix = StorageKey(twox_128(f.as_bytes()).to_vec()); + let module_kv = self.rpc_get_pairs(hashed_prefix.clone(), at).await?; + info!( + target: LOG_TARGET, + "downloaded data for module {} (count: {} / prefix: {:?}).", + f, + module_kv.len(), + HexDisplay::from(&hashed_prefix), + ); + filtered_kv.extend(module_kv); + } + filtered_kv + } else { + info!(target: LOG_TARGET, "downloading data for all modules."); + self.rpc_get_pairs(StorageKey(vec![]), at).await?.into_iter().collect::>() + }; + + Ok(keys_and_values) + } + + async fn init_remote_client(&mut self) -> Result<(), &'static str> { + let at = self.rpc_get_head().await?; + self.as_online_mut().at = Some(at); + Ok(()) + } + + async fn pre_build(mut self) -> Result, &'static str> { + let mut base_kv = match self.mode.clone() { + Mode::Offline(config) => self.load_cache(&config.cache.path())?, + Mode::Online(config) => { + self.init_remote_client().await?; + let kp = 
self.load_remote().await?; + if let Some(c) = config.cache { + self.save_cache(&kp, &c.path())?; + } + kp + } + }; + + info!( + target: LOG_TARGET, + "extending externalities with {} manually injected keys", + self.inject.len() + ); + base_kv.extend(self.inject.clone()); + Ok(base_kv) + } +} + +// Public methods +impl Builder { + /// Create a new builder. + pub fn new() -> Self { + Default::default() + } + + /// Inject a manual list of key and values to the storage. + pub fn inject(mut self, injections: &[KeyPair]) -> Self { + for i in injections { + self.inject.push(i.clone()); + } + self + } + + /// Configure a cache to be used. + pub fn mode(mut self, mode: Mode) -> Self { + self.mode = mode; + self + } + + /// Build the test externalities. + pub async fn build(self) -> Result { + let kv = self.pre_build().await?; + let mut ext = TestExternalities::new_empty(); + + info!(target: LOG_TARGET, "injecting a total of {} keys", kv.len()); + for (k, v) in kv { + let (k, v) = (k.0, v.0); + ext.insert(k, v); + } + Ok(ext) + } +} + +#[cfg(feature = "remote-test")] +#[cfg(test)] +mod tests { + use super::*; + + fn init_logger() { + let _ = env_logger::Builder::from_default_env() + .format_module_path(false) + .format_level(true) + .try_init(); + } + + #[async_std::test] + async fn can_build_one_pallet() { + init_logger(); + Builder::new() + .mode(Mode::Online(OnlineConfig { + modules: vec!["Proxy".into()], + ..Default::default() + })) + .build() + .await + .unwrap() + .execute_with(|| {}); + } + + #[async_std::test] + async fn can_load_cache() { + init_logger(); + Builder::new() + .mode(Mode::Offline(OfflineConfig { + cache: CacheConfig { name: "proxy_test".into(), ..Default::default() }, + })) + .build() + .await + .unwrap() + .execute_with(|| {}); + } + + #[async_std::test] + async fn can_create_cache() { + init_logger(); + Builder::new() + .mode(Mode::Online(OnlineConfig { + cache: Some(CacheConfig { + name: "test_cache_to_remove.bin".into(), + ..Default::default() + 
}), + ..Default::default() + })) + .build() + .await + .unwrap() + .execute_with(|| {}); + + let to_delete = std::fs::read_dir(CacheConfig::default().directory) + .unwrap() + .into_iter() + .map(|d| d.unwrap()) + .filter(|p| p.path().extension().unwrap_or_default() == "bin") + .collect::>(); + + assert!(to_delete.len() > 0); + + for d in to_delete { + std::fs::remove_file(d.path()).unwrap(); + } + } + + #[async_std::test] + async fn can_build_all() { + init_logger(); + Builder::new().build().await.unwrap().execute_with(|| {}); + } +} diff --git a/utils/frame/try-runtime/cli/Cargo.toml b/utils/frame/try-runtime/cli/Cargo.toml new file mode 100644 index 0000000000..592d0a5b99 --- /dev/null +++ b/utils/frame/try-runtime/cli/Cargo.toml @@ -0,0 +1,32 @@ +[package] +name = "try-runtime-cli" +version = "0.9.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "Apache-2.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Cli command runtime testing and dry-running" +readme = "README.md" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] + +[dependencies] +log = "0.4.8" +parity-scale-codec = { version = "2.0.0" } + +sc-service = { version = "0.9.0", default-features = false, path = "../../../../client/service" } +sc-cli = { version = "0.9.0", path = "../../../../client/cli" } +sc-executor = { path = "../../../../client/executor" } +sc-client-api = { version = "3.0.0", path = "../../../../client/api" } +structopt = "0.3.8" +sp-state-machine = { version = "0.9.0", path = "../../../../primitives/state-machine" } +sp-api = { version = "3.0.0", path = "../../../../primitives/api" } +sp-blockchain = { version = "3.0.0", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "3.0.0", path = "../../../../primitives/runtime" } +sp-externalities = { version = "0.9.0", path = "../../../../primitives/externalities" } +sp-core = { version = "3.0.0", path = 
"../../../../primitives/core" } +frame-try-runtime = { version = "0.9.0", path = "../../../../frame/try-runtime" } + +remote-externalities = { path = "../../remote-externalities" } diff --git a/utils/frame/try-runtime/cli/src/lib.rs b/utils/frame/try-runtime/cli/src/lib.rs new file mode 100644 index 0000000000..4ab38692a5 --- /dev/null +++ b/utils/frame/try-runtime/cli/src/lib.rs @@ -0,0 +1,178 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! `Structopt`-ready struct for `try-runtime`. + +use parity_scale_codec::Decode; +use std::{fmt::Debug, str::FromStr}; +use sc_service::Configuration; +use sc_cli::{CliConfiguration, ExecutionStrategy, WasmExecutionMethod}; +use sc_executor::NativeExecutor; +use sc_service::NativeExecutionDispatch; +use sp_state_machine::StateMachine; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_core::storage::{StorageData, StorageKey, well_known_keys}; + +/// Various commands to try out the new runtime, over configurable states. +/// +/// For now this only assumes running the `on_runtime_upgrade` hooks. +#[derive(Debug, structopt::StructOpt)] +pub struct TryRuntimeCmd { + /// The shared parameters + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: sc_cli::SharedParams, + + /// The state to use to run the migration. Should be a valid FILE or HTTP URI. 
+ #[structopt(short, long, default_value = "http://localhost:9933")] + pub state: State, + + /// The execution strategy that should be used for benchmarks + #[structopt( + long = "execution", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = "Native", + )] + pub execution: ExecutionStrategy, + + /// Method for executing Wasm runtime code. + #[structopt( + long = "wasm-execution", + value_name = "METHOD", + possible_values = &WasmExecutionMethod::enabled_variants(), + case_insensitive = true, + default_value = "Interpreted" + )] + pub wasm_method: WasmExecutionMethod, +} + +/// The state to use for a migration dry-run. +#[derive(Debug)] +pub enum State { + /// A snapshot. Inner value is a file path. + Snap(String), + + /// A live chain. Inner value is the HTTP uri. + Live(String), +} + +impl FromStr for State { + type Err = &'static str; + fn from_str(s: &str) -> Result { + match s.get(..7) { + // could use Url crate as well, but lets keep it simple for now. + Some("http://") => Ok(State::Live(s.to_string())), + Some("file://") => s + .split("//") + .collect::>() + .get(1) + .map(|s| State::Snap(s.to_string())) + .ok_or("invalid file URI"), + _ => Err("invalid format. Must be a valid HTTP or File URI"), + } + } +} + +impl TryRuntimeCmd { + pub async fn run(&self, config: Configuration) -> sc_cli::Result<()> + where + B: BlockT, + ExecDispatch: NativeExecutionDispatch + 'static, + { + let spec = config.chain_spec; + let genesis_storage = spec.build_storage()?; + + let code = StorageData( + genesis_storage + .top + .get(well_known_keys::CODE) + .expect("code key must exist in genesis storage; qed") + .to_vec(), + ); + let code_key = StorageKey(well_known_keys::CODE.to_vec()); + + let wasm_method = self.wasm_method; + let execution = self.execution; + + let mut changes = Default::default(); + // don't really care about these -- use the default values. 
+ let max_runtime_instances = config.max_runtime_instances; + let heap_pages = config.default_heap_pages; + let executor = NativeExecutor::::new( + wasm_method.into(), + heap_pages, + max_runtime_instances, + ); + + let ext = { + use remote_externalities::{Builder, Mode, CacheConfig, OfflineConfig, OnlineConfig}; + let builder = match &self.state { + State::Snap(file_path) => Builder::new().mode(Mode::Offline(OfflineConfig { + cache: CacheConfig { name: file_path.into(), ..Default::default() }, + })), + State::Live(http_uri) => Builder::new().mode(Mode::Online(OnlineConfig { + uri: http_uri.into(), + ..Default::default() + })), + }; + + // inject the code into this ext. + builder.inject(&[(code_key, code)]).build().await? + }; + + let encoded_result = StateMachine::<_, _, NumberFor, _>::new( + &ext.backend, + None, + &mut changes, + &executor, + "TryRuntime_on_runtime_upgrade", + &[], + ext.extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&ext.backend) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(execution.into()) + .map_err(|e| format!("failed to execute 'TryRuntime_on_runtime_upgrade' due to {:?}", e))?; + + let (weight, total_weight) = <(u64, u64) as Decode>::decode(&mut &*encoded_result) + .map_err(|e| format!("failed to decode output due to {:?}", e))?; + log::info!( + "try-runtime executed without errors. 
Consumed weight = {}, total weight = {} ({})", + weight, + total_weight, + weight as f64 / total_weight as f64 + ); + + Ok(()) + } +} + +impl CliConfiguration for TryRuntimeCmd { + fn shared_params(&self) -> &sc_cli::SharedParams { + &self.shared_params + } + + fn chain_id(&self, _is_dev: bool) -> sc_cli::Result { + Ok(match self.shared_params.chain { + Some(ref chain) => chain.clone(), + None => "dev".into(), + }) + } +} diff --git a/utils/wasm-builder/src/builder.rs b/utils/wasm-builder/src/builder.rs index 8ef6c95324..bfbc4030ad 100644 --- a/utils/wasm-builder/src/builder.rs +++ b/utils/wasm-builder/src/builder.rs @@ -43,6 +43,7 @@ impl WasmBuilderSelectProject { rust_flags: Vec::new(), file_name: None, project_cargo_toml: get_manifest_dir().join("Cargo.toml"), + features_to_enable: Vec::new(), } } @@ -60,6 +61,7 @@ impl WasmBuilderSelectProject { rust_flags: Vec::new(), file_name: None, project_cargo_toml: path, + features_to_enable: Vec::new(), }) } else { Err("Project path must point to the `Cargo.toml` of the project") @@ -88,6 +90,8 @@ pub struct WasmBuilder { /// The path to the `Cargo.toml` of the project that should be built /// for wasm. project_cargo_toml: PathBuf, + /// Features that should be enabled when building the wasm binary. + features_to_enable: Vec, } impl WasmBuilder { @@ -132,10 +136,20 @@ impl WasmBuilder { self } + /// Enable the given feature when building the wasm binary. + /// + /// `feature` needs to be a valid feature that is defined in the project `Cargo.toml`. + pub fn enable_feature(mut self, feature: impl Into) -> Self { + self.features_to_enable.push(feature.into()); + self + } + /// Build the WASM binary. 
pub fn build(self) { let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); + let file_path = out_dir.join( + self.file_name.clone().unwrap_or_else(|| "wasm_binary.rs".into()), + ); if check_skip_build() { // If we skip the build, we still want to make sure to be called when an env variable @@ -151,6 +165,8 @@ impl WasmBuilder { file_path, self.project_cargo_toml, self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect(), + self.features_to_enable, + self.file_name, ); // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't @@ -200,10 +216,15 @@ fn generate_rerun_if_changed_instructions() { /// constant `WASM_BINARY`, which contains the built WASM binary. /// `project_cargo_toml` - The path to the `Cargo.toml` of the project that should be built. /// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. +/// `features_to_enable` - Features that should be enabled for the project. +/// `wasm_binary_name` - The optional wasm binary name that is extended with `.compact.wasm`. +/// If `None`, the project name will be used. 
fn build_project( file_name: PathBuf, project_cargo_toml: PathBuf, default_rustflags: String, + features_to_enable: Vec, + wasm_binary_name: Option, ) { let cargo_cmd = match crate::prerequisites::check() { Ok(cmd) => cmd, @@ -217,6 +238,8 @@ fn build_project( &project_cargo_toml, &default_rustflags, cargo_cmd, + features_to_enable, + wasm_binary_name, ); let (wasm_binary, wasm_binary_bloaty) = if let Some(wasm_binary) = wasm_binary { diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 73dc2e13af..0392546575 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -97,6 +97,8 @@ pub(crate) fn create_and_compile( project_cargo_toml: &Path, default_rustflags: &str, cargo_cmd: CargoCommandVersioned, + features_to_enable: Vec, + wasm_binary_name: Option, ) -> (Option, WasmBinaryBloaty) { let wasm_workspace_root = get_wasm_workspace_root(); let wasm_workspace = wasm_workspace_root.join("wbuild"); @@ -108,12 +110,14 @@ pub(crate) fn create_and_compile( &wasm_workspace, &crate_metadata, &crate_metadata.workspace_root, + features_to_enable, ); build_project(&project, default_rustflags, cargo_cmd); let (wasm_binary, bloaty) = compact_wasm_file( &project, project_cargo_toml, + wasm_binary_name, ); wasm_binary.as_ref().map(|wasm_binary| @@ -199,7 +203,7 @@ fn create_project_cargo_toml( crate_name: &str, crate_path: &Path, wasm_binary: &str, - enabled_features: &[String], + enabled_features: impl Iterator, ) { let mut workspace_toml: Table = toml::from_str( &fs::read_to_string( @@ -265,7 +269,7 @@ fn create_project_cargo_toml( wasm_project.insert("package".into(), crate_name.into()); wasm_project.insert("path".into(), crate_path.display().to_string().into()); wasm_project.insert("default-features".into(), false.into()); - wasm_project.insert("features".into(), enabled_features.to_vec().into()); + wasm_project.insert("features".into(), enabled_features.collect::>().into()); 
dependencies.insert("wasm-project".into(), wasm_project.into()); @@ -339,6 +343,7 @@ fn create_project( wasm_workspace: &Path, crate_metadata: &Metadata, workspace_root_path: &Path, + features_to_enable: Vec, ) -> PathBuf { let crate_name = get_crate_name(project_cargo_toml); let crate_path = project_cargo_toml.parent().expect("Parent path exists; qed"); @@ -354,13 +359,16 @@ fn create_project( enabled_features.push("runtime-wasm".into()); } + let mut enabled_features = enabled_features.into_iter().collect::>(); + enabled_features.extend(features_to_enable.into_iter()); + create_project_cargo_toml( &wasm_project_folder, workspace_root_path, &crate_name, &crate_path, &wasm_binary, - &enabled_features, + enabled_features.into_iter(), ); write_file_if_changed( @@ -437,16 +445,22 @@ fn build_project(project: &Path, default_rustflags: &str, cargo_cmd: CargoComman fn compact_wasm_file( project: &Path, cargo_manifest: &Path, + wasm_binary_name: Option, ) -> (Option, WasmBinaryBloaty) { let is_release_build = is_release_build(); let target = if is_release_build { "release" } else { "debug" }; - let wasm_binary = get_wasm_binary_name(cargo_manifest); + let default_wasm_binary_name = get_wasm_binary_name(cargo_manifest); let wasm_file = project.join("target/wasm32-unknown-unknown") .join(target) - .join(format!("{}.wasm", wasm_binary)); + .join(format!("{}.wasm", default_wasm_binary_name)); let wasm_compact_file = if is_release_build { - let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); + let wasm_compact_file = project.join( + format!( + "{}.compact.wasm", + wasm_binary_name.clone().unwrap_or_else(|| default_wasm_binary_name.clone()), + ) + ); wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) .expect("Failed to compact generated WASM binary."); Some(WasmBinary(wasm_compact_file)) @@ -454,7 +468,16 @@ fn compact_wasm_file( None }; - (wasm_compact_file, WasmBinaryBloaty(wasm_file)) + let bloaty_file_name = if let Some(name) = 
wasm_binary_name { + format!("{}.wasm", name) + } else { + format!("{}.wasm", default_wasm_binary_name) + }; + + let bloaty_file = project.join(bloaty_file_name); + fs::copy(wasm_file, &bloaty_file).expect("Copying the bloaty file to the project dir."); + + (wasm_compact_file, WasmBinaryBloaty(bloaty_file)) } /// Custom wrapper for a [`cargo_metadata::Package`] to store it in