diff --git a/Cargo.lock b/Cargo.lock
index 6a4d8f4b83047..f7adabc8c6bcd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -16026,6 +16026,7 @@ dependencies = [
  "sc-client-api",
  "sc-client-db",
  "sc-consensus",
+ "sc-consensus-aura",
  "sc-consensus-manual-seal",
  "sc-executor",
  "sc-keystore",
diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml
index fa97d36c68572..7f877a47929c4 100644
--- a/cumulus/polkadot-omni-node/lib/Cargo.toml
+++ b/cumulus/polkadot-omni-node/lib/Cargo.toml
@@ -104,6 +104,7 @@ cumulus-primitives-aura = { workspace = true, default-features = true }
 cumulus-primitives-core = { workspace = true, default-features = true }
 cumulus-relay-chain-interface = { workspace = true, default-features = true }
 futures-timer = { workspace = true }
+sc-consensus-aura = { workspace = true }
 
 [dev-dependencies]
 assert_cmd = { workspace = true }
diff --git a/cumulus/polkadot-omni-node/lib/src/command.rs b/cumulus/polkadot-omni-node/lib/src/command.rs
index 0f667b1970498..22b27cd601543 100644
--- a/cumulus/polkadot-omni-node/lib/src/command.rs
+++ b/cumulus/polkadot-omni-node/lib/src/command.rs
@@ -22,12 +22,12 @@ use crate::{
             AuraConsensusId, Consensus, Runtime, RuntimeResolver as RuntimeResolverT,
             RuntimeResolver,
         },
+        spec::DynNodeSpec,
         types::Block,
         NodeBlock, NodeExtraArgs,
     },
     extra_subcommand::DefaultExtraSubcommands,
     fake_runtime_api,
-    nodes::DynNodeSpecExt,
     runtime::BlockNumber,
 };
 use clap::{CommandFactory, FromArgMatches};
@@ -63,7 +63,7 @@ impl RunConfig {
 pub fn new_aura_node_spec(
     aura_id: AuraConsensusId,
     extra_args: &NodeExtraArgs,
-) -> Box
+) -> Box
 where
     Block: NodeBlock,
 {
@@ -85,7 +85,7 @@ fn new_node_spec(
     config: &sc_service::Configuration,
     runtime_resolver: &Box,
     extra_args: &NodeExtraArgs,
-) -> std::result::Result, sc_cli::Error> {
+) -> std::result::Result, sc_cli::Error> {
     let runtime = runtime_resolver.runtime(config.chain_spec.as_ref())?;
 
     Ok(match runtime {
@@ -136,7 +136,7 @@ where
         // Handle the extra, and return - subcommands are self contained,
         // no need to handle the rest of the CLI or node running.
         extra.handle(&cmd_config)?;
-        return Ok(())
+        return Ok(());
     }
 
     // If matching on the extra subcommands fails, match on the rest of the node CLI as usual.
@@ -304,8 +304,6 @@ where
         new_node_spec(&config, &cmd_config.runtime_resolver, &cli.node_extra_args())?;
 
     if cli.run.base.is_dev()? {
-        // Set default dev block time to 3000ms if not set.
-        // TODO: take block time from AURA config if set.
         let dev_block_time = cli.dev_block_time.unwrap_or(DEFAULT_DEV_BLOCK_TIME_MS);
         return node_spec
             .start_manual_seal_node(config, dev_block_time)
diff --git a/cumulus/polkadot-omni-node/lib/src/common/spec.rs b/cumulus/polkadot-omni-node/lib/src/common/spec.rs
index f0d4cc0e0a88d..30e5fd587941b 100644
--- a/cumulus/polkadot-omni-node/lib/src/common/spec.rs
+++ b/cumulus/polkadot-omni-node/lib/src/common/spec.rs
@@ -299,6 +299,13 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
     const SYBIL_RESISTANCE: CollatorSybilResistance;
 
+    fn start_manual_seal_node(
+        _config: Configuration,
+        _block_time: u64,
+    ) -> sc_service::error::Result {
+        Err(sc_service::Error::Other("Manual seal not supported for this node type".into()))
+    }
+
     /// Start a node with the given parachain spec.
     ///
     /// This is the actual implementation that is abstract over the executor and the runtime api.
@@ -546,6 +553,14 @@ pub(crate) trait NodeSpec: BaseNodeSpec {
 }
 
 pub(crate) trait DynNodeSpec: NodeCommandRunner {
+    /// Start node with manual-seal consensus.
+    fn start_manual_seal_node(
+        self: Box,
+        config: Configuration,
+        block_time: u64,
+    ) -> sc_service::error::Result;
+
+    /// Start the node.
     fn start_node(
         self: Box,
         parachain_config: Configuration,
@@ -560,6 +575,14 @@ impl DynNodeSpec for T
 where
     T: NodeSpec + NodeCommandRunner,
 {
+    fn start_manual_seal_node(
+        self: Box,
+        config: Configuration,
+        block_time: u64,
+    ) -> sc_service::error::Result {
+        ::start_manual_seal_node(config, block_time)
+    }
+
     fn start_node(
         self: Box,
         parachain_config: Configuration,
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
index ed53440272e31..797f8d47c5af8 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs
@@ -18,10 +18,10 @@ use crate::{
     cli::AuthoringPolicy,
     common::{
         aura::{AuraIdT, AuraRuntimeApi},
-        rpc::BuildParachainRpcExtensions,
+        rpc::{BuildParachainRpcExtensions, BuildRpcExtensions},
         spec::{
-            BaseNodeSpec, BuildImportQueue, ClientBlockImport, InitBlockImport, NodeSpec,
-            StartConsensus,
+            BaseNodeSpec, BuildImportQueue, ClientBlockImport, DynNodeSpec, InitBlockImport,
+            NodeSpec, StartConsensus,
         },
         types::{
             AccountId, Balance, Hash, Nonce, ParachainBackend, ParachainBlockImport,
         },
         ConstructNodeRuntimeApi, NodeBlock, NodeExtraArgs,
     },
-    nodes::DynNodeSpecExt,
 };
+use codec::Encode;
 use cumulus_client_collator::service::{
     CollatorService, ServiceInterface as CollatorServiceInterface,
 };
@@ -47,28 +47,34 @@ use cumulus_client_consensus_aura::{
 };
 use cumulus_client_consensus_proposer::ProposerInterface;
 use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier;
+use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider;
 use cumulus_client_service::CollatorSybilResistance;
-use cumulus_primitives_core::{relay_chain::ValidationCode, GetParachainInfo, ParaId};
+use cumulus_primitives_core::{
+    relay_chain::ValidationCode, CollectCollationInfo, GetParachainInfo, ParaId,
+};
 use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface};
-use futures::prelude::*;
-use polkadot_primitives::CollatorPair;
+use futures::{prelude::*, FutureExt};
+use polkadot_primitives::{CollatorPair, UpgradeGoAhead};
 use prometheus_endpoint::Registry;
-use sc_client_api::BlockchainEvents;
+use sc_client_api::{Backend, BlockchainEvents};
 use sc_client_db::DbHash;
 use sc_consensus::{
     import_queue::{BasicQueue, Verifier as VerifierT},
-    BlockImportParams, DefaultImportQueue,
+    BlockImportParams, DefaultImportQueue, LongestChain,
 };
-use sc_service::{Configuration, Error, TaskManager};
+use sc_consensus_manual_seal::consensus::aura::AuraConsensusDataProvider;
+use sc_network::{config::FullNetworkConfiguration, NotificationMetrics};
+use sc_service::{Configuration, Error, PartialComponents, TaskManager};
 use sc_telemetry::TelemetryHandle;
 use sc_transaction_pool::TransactionPoolHandle;
+use sc_transaction_pool_api::OffchainTransactionPoolFactory;
 use sp_api::ProvideRuntimeApi;
 use sp_core::traits::SpawnNamed;
 use sp_inherents::CreateInherentDataProviders;
 use sp_keystore::KeystorePtr;
 use sp_runtime::{
     app_crypto::AppCrypto,
-    traits::{Block as BlockT, Header as HeaderT},
+    traits::{Block as BlockT, Header as HeaderT, UniqueSaturatedInto},
 };
 use std::{marker::PhantomData, sync::Arc, time::Duration};
@@ -197,18 +203,236 @@ where
             InitBlockImport::BlockImport,
             InitBlockImport::BlockImportAuxiliaryData,
         > + 'static,
-    InitBlockImport: self::InitBlockImport + Send,
+    InitBlockImport: self::InitBlockImport + Send + 'static,
     InitBlockImport::BlockImport: sc_consensus::BlockImport + 'static,
 {
     type BuildRpcExtensions = BuildParachainRpcExtensions;
     type StartConsensus = StartConsensus;
     const SYBIL_RESISTANCE: CollatorSybilResistance = CollatorSybilResistance::Resistant;
+
+    fn start_manual_seal_node(
+        mut config: Configuration,
+        block_time: u64,
+    ) -> sc_service::error::Result {
+        let PartialComponents {
+            client,
+            backend,
+            mut task_manager,
+            import_queue,
+            keystore_container,
+            select_chain: _,
+            transaction_pool,
+            other: (_, mut telemetry, _, _),
+        } = Self::new_partial(&config)?;
+
+        // Since this is a dev node, prevent it from connecting to peers.
+        config.network.default_peers_set.in_peers = 0;
+        config.network.default_peers_set.out_peers = 0;
+        let net_config = FullNetworkConfiguration::<_, _, sc_network::Litep2pNetworkBackend>::new(
+            &config.network,
+            None,
+        );
+
+        let (network, system_rpc_tx, tx_handler_controller, sync_service) =
+            sc_service::build_network(sc_service::BuildNetworkParams {
+                config: &config,
+                client: client.clone(),
+                transaction_pool: transaction_pool.clone(),
+                spawn_handle: task_manager.spawn_handle(),
+                import_queue,
+                net_config,
+                block_announce_validator_builder: None,
+                warp_sync_config: None,
+                block_relay: None,
+                metrics: NotificationMetrics::new(None),
+            })?;
+
+        if config.offchain_worker.enabled {
+            let offchain_workers =
+                sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
+                    runtime_api_provider: client.clone(),
+                    keystore: Some(keystore_container.keystore()),
+                    offchain_db: backend.offchain_storage(),
+                    transaction_pool: Some(OffchainTransactionPoolFactory::new(
+                        transaction_pool.clone(),
+                    )),
+                    network_provider: Arc::new(network.clone()),
+                    is_validator: config.role.is_authority(),
+                    enable_http_requests: true,
+                    custom_extensions: move |_| vec![],
+                })?;
+            task_manager.spawn_handle().spawn(
+                "offchain-workers-runner",
+                "offchain-work",
+                offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(),
+            );
+        }
+
+        let proposer = sc_basic_authorship::ProposerFactory::new(
+            task_manager.spawn_handle(),
+            client.clone(),
+            transaction_pool.clone(),
+            None,
+            None,
+        );
+
+        let (manual_seal_sink, manual_seal_stream) = futures::channel::mpsc::channel(1024);
+        let mut manual_seal_sink_clone = manual_seal_sink.clone();
+        task_manager
+            .spawn_essential_handle()
+            .spawn("block_authoring", None, async move {
+                loop {
+                    futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await;
+                    manual_seal_sink_clone
+                        .try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock {
+                            create_empty: true,
+                            finalize: true,
+                            parent_hash: None,
+                            sender: None,
+                        })
+                        .unwrap();
+                }
+            });
+
+        // Note: Changing slot durations is currently not supported.
+        let slot_duration = sc_consensus_aura::slot_duration(&*client)
+            .expect("slot_duration is always present; qed.");
+
+        // The aura digest provider will provide digests that match the provided timestamp data.
+        // Without this, the AURA parachain runtimes complain about slot mismatches.
+        let aura_digest_provider = AuraConsensusDataProvider::new_with_slot_duration(slot_duration);
+
+        let para_id =
+            Self::parachain_id(&client, &config).ok_or("Failed to retrieve the parachain id")?;
+        let create_inherent_data_providers = Self::create_manual_seal_inherent_data_providers(
+            client.clone(),
+            para_id,
+            slot_duration,
+        );
+
+        let params = sc_consensus_manual_seal::ManualSealParams {
+            block_import: client.clone(),
+            env: proposer,
+            client: client.clone(),
+            pool: transaction_pool.clone(),
+            select_chain: LongestChain::new(backend.clone()),
+            commands_stream: Box::pin(manual_seal_stream),
+            consensus_data_provider: Some(Box::new(aura_digest_provider)),
+            create_inherent_data_providers,
+        };
+
+        let authorship_future = sc_consensus_manual_seal::run_manual_seal(params);
+        task_manager.spawn_essential_handle().spawn_blocking(
+            "manual-seal",
+            None,
+            authorship_future,
+        );
+
+        let rpc_extensions_builder = {
+            let client = client.clone();
+            let transaction_pool = transaction_pool.clone();
+            let backend_for_rpc = backend.clone();
+
+            Box::new(move |_| {
+                let module = Self::BuildRpcExtensions::build_rpc_extensions(
+                    client.clone(),
+                    backend_for_rpc.clone(),
+                    transaction_pool.clone(),
+                    None,
+                )?;
+                Ok(module)
+            })
+        };
+
+        let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
+            network,
+            client,
+            keystore: keystore_container.keystore(),
+            task_manager: &mut task_manager,
+            transaction_pool,
+            rpc_builder: rpc_extensions_builder,
+            backend,
+            system_rpc_tx,
+            tx_handler_controller,
+            sync_service,
+            config,
+            telemetry: telemetry.as_mut(),
+        })?;
+
+        Ok(task_manager)
+    }
+}
+
+impl
+    AuraNode
+where
+    Block: NodeBlock,
+    RuntimeApi: ConstructNodeRuntimeApi>,
+    RuntimeApi::RuntimeApi: AuraRuntimeApi,
+    AuraId: AuraIdT + Sync,
+{
+    /// Creates the inherent data providers for manual seal consensus.
+    ///
+    /// This function sets up the timestamp and parachain validation data providers
+    /// required for manual seal block production in a parachain environment.
+    fn create_manual_seal_inherent_data_providers(
+        client: Arc>,
+        para_id: ParaId,
+        slot_duration: sp_consensus_aura::SlotDuration,
+    ) -> impl Fn(
+        Hash,
+        (),
+    ) -> future::Ready<
+        Result<
+            (sp_timestamp::InherentDataProvider, MockValidationDataInherentDataProvider<()>),
+            Box,
+        >,
+    > + Send
+      + Sync {
+        move |block: Hash, ()| {
+            let current_para_head = client
+                .header(block)
+                .expect("Header lookup should succeed")
+                .expect("Header passed in as parent should be present in backend.");
+
+            let should_send_go_ahead = client
+                .runtime_api()
+                .collect_collation_info(block, &current_para_head)
+                .map(|info| info.new_validation_code.is_some())
+                .unwrap_or_default();
+
+            let current_para_block_head =
+                Some(polkadot_primitives::HeadData(current_para_head.encode()));
+            let current_block_number =
+                UniqueSaturatedInto::::unique_saturated_into(*current_para_head.number()) + 1;
+            log::info!("Current block number: {current_block_number}");
+
+            let mocked_parachain = MockValidationDataInherentDataProvider::<()> {
+                current_para_block: current_block_number,
+                para_id,
+                current_para_block_head,
+                relay_blocks_per_para_block: 1,
+                para_blocks_per_relay_epoch: 10,
+                upgrade_go_ahead: should_send_go_ahead.then(|| {
+                    log::info!("Detected pending validation code, sending go-ahead signal.");
+                    UpgradeGoAhead::GoAhead
+                }),
+                ..Default::default()
+            };
+
+            let timestamp_provider = sp_timestamp::InherentDataProvider::new(
+                (slot_duration.as_millis() * current_block_number as u64).into(),
+            );
+
+            futures::future::ready(Ok((timestamp_provider, mocked_parachain)))
+        }
+    }
 }
 
 pub fn new_aura_node_spec(
     extra_args: &NodeExtraArgs,
-) -> Box
+) -> Box
 where
     Block: NodeBlock,
     RuntimeApi: ConstructNodeRuntimeApi>,
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs b/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
deleted file mode 100644
index edd19ac597939..0000000000000
--- a/cumulus/polkadot-omni-node/lib/src/nodes/manual_seal.rs
+++ /dev/null
@@ -1,290 +0,0 @@
-// Copyright (C) Parity Technologies (UK) Ltd.
-// This file is part of Cumulus.
-// SPDX-License-Identifier: Apache-2.0
-
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use crate::common::{
-    rpc::BuildRpcExtensions as BuildRpcExtensionsT,
-    spec::{BaseNodeSpec, BuildImportQueue, ClientBlockImport, NodeSpec as NodeSpecT},
-    types::{Hash, ParachainBlockImport, ParachainClient},
-};
-use codec::Encode;
-use cumulus_client_parachain_inherent::{MockValidationDataInherentDataProvider, MockXcmConfig};
-use cumulus_primitives_aura::AuraUnincludedSegmentApi;
-use cumulus_primitives_core::CollectCollationInfo;
-use futures::FutureExt;
-use polkadot_primitives::UpgradeGoAhead;
-use sc_client_api::Backend;
-use sc_consensus::{DefaultImportQueue, LongestChain};
-use sc_consensus_manual_seal::rpc::{ManualSeal, ManualSealApiServer};
-use sc_network::NetworkBackend;
-use sc_service::{Configuration, PartialComponents, TaskManager};
-use sc_telemetry::TelemetryHandle;
-use sc_transaction_pool_api::OffchainTransactionPoolFactory;
-use sp_api::{ApiExt, ProvideRuntimeApi};
-use sp_runtime::traits::Header;
-use std::{marker::PhantomData, sync::Arc};
-
-pub struct ManualSealNode(PhantomData);
-
-impl
-    BuildImportQueue<
-        NodeSpec::Block,
-        NodeSpec::RuntimeApi,
-        Arc>,
-    > for ManualSealNode
-{
-    fn build_import_queue(
-        client: Arc>,
-        _block_import: ParachainBlockImport<
-            NodeSpec::Block,
-            Arc>,
-        >,
-        config: &Configuration,
-        _telemetry_handle: Option,
-        task_manager: &TaskManager,
-    ) -> sc_service::error::Result> {
-        Ok(sc_consensus_manual_seal::import_queue(
-            Box::new(client.clone()),
-            &task_manager.spawn_essential_handle(),
-            config.prometheus_registry(),
-        ))
-    }
-}
-
-impl BaseNodeSpec for ManualSealNode {
-    type Block = NodeSpec::Block;
-    type RuntimeApi = NodeSpec::RuntimeApi;
-    type BuildImportQueue = Self;
-    type InitBlockImport = ClientBlockImport;
-}
-
-impl ManualSealNode {
-    pub fn new() -> Self {
-        Self(Default::default())
-    }
-
-    pub fn start_node(
-        &self,
-        mut config: Configuration,
-        block_time: u64,
-    ) -> sc_service::error::Result
-    where
-        Net: NetworkBackend,
-    {
-        let PartialComponents {
-            client,
-            backend,
-            mut task_manager,
-            import_queue,
-            keystore_container,
-            select_chain: _,
-            transaction_pool,
-            other: (_, mut telemetry, _, _),
-        } = Self::new_partial(&config)?;
-        let select_chain = LongestChain::new(backend.clone());
-
-        let para_id =
-            Self::parachain_id(&client, &config).ok_or("Failed to retrieve the parachain id")?;
-
-        // Since this is a dev node, prevent it from connecting to peers.
-        config.network.default_peers_set.in_peers = 0;
-        config.network.default_peers_set.out_peers = 0;
-        let net_config = sc_network::config::FullNetworkConfiguration::<_, _, Net>::new(
-            &config.network,
-            config.prometheus_config.as_ref().map(|cfg| cfg.registry.clone()),
-        );
-        let metrics = Net::register_notification_metrics(
-            config.prometheus_config.as_ref().map(|cfg| &cfg.registry),
-        );
-
-        let (network, system_rpc_tx, tx_handler_controller, sync_service) =
-            sc_service::build_network(sc_service::BuildNetworkParams {
-                config: &config,
-                client: client.clone(),
-                transaction_pool: transaction_pool.clone(),
-                spawn_handle: task_manager.spawn_handle(),
-                import_queue,
-                net_config,
-                block_announce_validator_builder: None,
-                warp_sync_config: None,
-                block_relay: None,
-                metrics,
-            })?;
-
-        if config.offchain_worker.enabled {
-            let offchain_workers =
-                sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
-                    runtime_api_provider: client.clone(),
-                    keystore: Some(keystore_container.keystore()),
-                    offchain_db: backend.offchain_storage(),
-                    transaction_pool: Some(OffchainTransactionPoolFactory::new(
-                        transaction_pool.clone(),
-                    )),
-                    network_provider: Arc::new(network.clone()),
-                    is_validator: config.role.is_authority(),
-                    enable_http_requests: true,
-                    custom_extensions: move |_| vec![],
-                })?;
-            task_manager.spawn_handle().spawn(
-                "offchain-workers-runner",
-                "offchain-work",
-                offchain_workers.run(client.clone(), task_manager.spawn_handle()).boxed(),
-            );
-        }
-
-        let proposer = sc_basic_authorship::ProposerFactory::new(
-            task_manager.spawn_handle(),
-            client.clone(),
-            transaction_pool.clone(),
-            None,
-            None,
-        );
-
-        let (manual_seal_sink, manual_seal_stream) = futures::channel::mpsc::channel(1024);
-        let mut manual_seal_sink_clone = manual_seal_sink.clone();
-        task_manager
-            .spawn_essential_handle()
-            .spawn("block_authoring", None, async move {
-                loop {
-                    futures_timer::Delay::new(std::time::Duration::from_millis(block_time)).await;
-                    manual_seal_sink_clone
-                        .try_send(sc_consensus_manual_seal::EngineCommand::SealNewBlock {
-                            create_empty: true,
-                            finalize: true,
-                            parent_hash: None,
-                            sender: None,
-                        })
-                        .unwrap();
-                }
-            });
-
-        let client_for_cidp = client.clone();
-        let params = sc_consensus_manual_seal::ManualSealParams {
-            block_import: client.clone(),
-            env: proposer,
-            client: client.clone(),
-            pool: transaction_pool.clone(),
-            select_chain,
-            commands_stream: Box::pin(manual_seal_stream),
-            consensus_data_provider: None,
-            create_inherent_data_providers: move |block: Hash, ()| {
-                let current_para_head = client_for_cidp
-                    .header(block)
-                    .expect("Header lookup should succeed")
-                    .expect("Header passed in as parent should be present in backend.");
-
-                let should_send_go_ahead = client_for_cidp
-                    .runtime_api()
-                    .collect_collation_info(block, &current_para_head)
-                    .map(|info| info.new_validation_code.is_some())
-                    .unwrap_or_default();
-
-                // The API version is relevant here because the constraints in the runtime changed
-                // in https://github.com/paritytech/polkadot-sdk/pull/6825. In general, the logic
-                // here assumes that we are using the aura-ext consensushook in the parachain
-                // runtime.
-                let requires_relay_progress = client_for_cidp
-                    .runtime_api()
-                    .has_api_with::, _>(
-                        block,
-                        |version| version > 1,
-                    )
-                    .ok()
-                    .unwrap_or_default();
-
-                let current_para_block_head =
-                    Some(polkadot_primitives::HeadData(current_para_head.encode()));
-                let client_for_xcm = client_for_cidp.clone();
-                async move {
-                    use sp_runtime::traits::UniqueSaturatedInto;
-
-                    let mocked_parachain = MockValidationDataInherentDataProvider {
-                        // When using manual seal we start from block 0, and it's very unlikely to
-                        // reach a block number > u32::MAX.
-                        current_para_block: UniqueSaturatedInto::::unique_saturated_into(
-                            *current_para_head.number(),
-                        ),
-                        para_id,
-                        current_para_block_head,
-                        relay_offset: 0,
-                        relay_blocks_per_para_block: requires_relay_progress
-                            .then(|| 1)
-                            .unwrap_or_default(),
-                        para_blocks_per_relay_epoch: 10,
-                        relay_randomness_config: (),
-                        xcm_config: MockXcmConfig::new(&*client_for_xcm, block, Default::default()),
-                        raw_downward_messages: vec![],
-                        raw_horizontal_messages: vec![],
-                        additional_key_values: None,
-                        upgrade_go_ahead: should_send_go_ahead.then(|| {
-                            log::info!(
-                                "Detected pending validation code, sending go-ahead signal."
-                            );
-                            UpgradeGoAhead::GoAhead
-                        }),
-                    };
-                    Ok((
-                        // This is intentional, as the runtime that we expect to run against this
-                        // will never receive the aura-related inherents/digests, and providing
-                        // real timestamps would cause aura <> timestamp checking to fail.
-                        sp_timestamp::InherentDataProvider::new(sp_timestamp::Timestamp::new(0)),
-                        mocked_parachain,
-                    ))
-                }
-            },
-        };
-        let authorship_future = sc_consensus_manual_seal::run_manual_seal(params);
-        task_manager.spawn_essential_handle().spawn_blocking(
-            "manual-seal",
-            None,
-            authorship_future,
-        );
-        let rpc_extensions_builder = {
-            let client = client.clone();
-            let transaction_pool = transaction_pool.clone();
-            let backend_for_rpc = backend.clone();
-
-            Box::new(move |_| {
-                let mut module = NodeSpec::BuildRpcExtensions::build_rpc_extensions(
-                    client.clone(),
-                    backend_for_rpc.clone(),
-                    transaction_pool.clone(),
-                    None,
-                )?;
-                module
-                    .merge(ManualSeal::new(manual_seal_sink.clone()).into_rpc())
-                    .map_err(|e| sc_service::Error::Application(e.into()))?;
-                Ok(module)
-            })
-        };
-
-        let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
-            network,
-            client: client.clone(),
-            keystore: keystore_container.keystore(),
-            task_manager: &mut task_manager,
-            transaction_pool: transaction_pool.clone(),
-            rpc_builder: rpc_extensions_builder,
-            backend,
-            system_rpc_tx,
-            tx_handler_controller,
-            sync_service,
-            config,
-            telemetry: telemetry.as_mut(),
-        })?;
-
-        Ok(task_manager)
-    }
-}
diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs
index fbd98bafba2c1..4257eca5912bd 100644
--- a/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs
+++ b/cumulus/polkadot-omni-node/lib/src/nodes/mod.rs
@@ -15,45 +15,8 @@
 // limitations under the License.
 
 pub mod aura;
-mod manual_seal;
-
-use crate::common::spec::{DynNodeSpec, NodeSpec as NodeSpecT};
-use manual_seal::ManualSealNode;
-use sc_service::{Configuration, TaskManager};
 
 /// The current node version for cumulus official binaries, which takes the basic
 /// SemVer form `..`. It should correspond to the latest
 /// `polkadot` version of a stable release.
 pub const NODE_VERSION: &'static str = "1.19.2";
-
-/// Trait that extends the `DynNodeSpec` trait with manual seal related logic.
-///
-/// We need it in order to be able to access both the `DynNodeSpec` and the manual seal logic
-/// through dynamic dispatch.
-pub trait DynNodeSpecExt: DynNodeSpec {
-    fn start_manual_seal_node(
-        &self,
-        config: Configuration,
-        block_time: u64,
-    ) -> sc_service::error::Result;
-}
-
-impl DynNodeSpecExt for T
-where
-    T: NodeSpecT + DynNodeSpec,
-{
-    #[sc_tracing::logging::prefix_logs_with("Parachain")]
-    fn start_manual_seal_node(
-        &self,
-        config: Configuration,
-        block_time: u64,
-    ) -> sc_service::error::Result {
-        let node = ManualSealNode::::new();
-        match config.network.network_backend {
-            sc_network::config::NetworkBackendType::Libp2p =>
-                node.start_node::>(config, block_time),
-            sc_network::config::NetworkBackendType::Litep2p =>
-                node.start_node::(config, block_time),
-        }
-    }
-}
diff --git a/prdoc/pr_9885.prdoc b/prdoc/pr_9885.prdoc
new file mode 100644
index 0000000000000..c8e87fb4aafb6
--- /dev/null
+++ b/prdoc/pr_9885.prdoc
@@ -0,0 +1,19 @@
+title: 'Omni-node: Manual-seal uses proper aura digests'
+doc:
+- audience:
+  - Runtime Dev
+  - Runtime User
+  description: |-
+    Omni-node in dev mode now operates closer to reality: the runtime is supplied with appropriate AURA digests.
+    Breaking changes:
+    - `sc-consensus-manual-seal`: The trait bounds have been relaxed, so they should be easier to accommodate now. Code might break since the generics changed.
+    - `polkadot-omni-node-lib`: The `DynNodeSpecExt` trait has been removed and can no longer be used. Implementers of the `NodeSpec` trait now need to provide an implementation for starting a dev node.
+crates:
+- name: polkadot-omni-node-lib
+  bump: major
+- name: sc-consensus-manual-seal
+  bump: major
+- name: frame-benchmarking-cli
+  bump: patch
+
+
diff --git a/substrate/client/consensus/manual-seal/src/consensus/aura.rs b/substrate/client/consensus/manual-seal/src/consensus/aura.rs
index 566a2266c701b..1d66ab20e40f3 100644
--- a/substrate/client/consensus/manual-seal/src/consensus/aura.rs
+++ b/substrate/client/consensus/manual-seal/src/consensus/aura.rs
@@ -23,7 +23,6 @@ use crate::{ConsensusDataProvider, Error};
 use sc_client_api::{AuxStore, UsageProvider};
 use sc_consensus::BlockImportParams;
 use sp_api::ProvideRuntimeApi;
-use sp_blockchain::{HeaderBackend, HeaderMetadata};
 use sp_consensus_aura::{
     digests::CompatibleDigestItem,
     sr25519::{AuthorityId, AuthoritySignature},
@@ -34,39 +33,42 @@ use sp_runtime::{traits::Block as BlockT, Digest, DigestItem};
 use sp_timestamp::TimestampInherentData;
 use std::{marker::PhantomData, sync::Arc};
 
-/// Consensus data provider for Aura.
-pub struct AuraConsensusDataProvider {
+/// Consensus data provider for Aura. This allows manual-seal-driven nodes to author valid
+/// AURA blocks. It will inspect incoming [`InherentData`] and look for included timestamps. Based
+/// on these timestamps, the [`AuraConsensusDataProvider`] will emit fitting digest items.
+pub struct AuraConsensusDataProvider {
     // slot duration
     slot_duration: SlotDuration,
     // phantom data for required generics
-    _phantom: PhantomData<(B, C, P)>,
+    _phantom: PhantomData<(B, P)>,
 }
 
-impl AuraConsensusDataProvider
+impl AuraConsensusDataProvider
 where
     B: BlockT,
-    C: AuxStore + ProvideRuntimeApi + UsageProvider,
-    C::Api: AuraApi,
 {
     /// Creates a new instance of the [`AuraConsensusDataProvider`], requires that `client`
     /// implements [`sp_consensus_aura::AuraApi`]
-    pub fn new(client: Arc) -> Self {
+    pub fn new(client: Arc) -> Self
+    where
+        C: AuxStore + ProvideRuntimeApi + UsageProvider,
+        C::Api: AuraApi,
+    {
         let slot_duration = sc_consensus_aura::slot_duration(&*client)
            .expect("slot_duration is always present; qed.");
 
         Self { slot_duration, _phantom: PhantomData }
     }
+
+    /// Creates a new instance of the [`AuraConsensusDataProvider`]
+    pub fn new_with_slot_duration(slot_duration: SlotDuration) -> Self {
+        Self { slot_duration, _phantom: PhantomData }
+    }
 }
 
-impl ConsensusDataProvider for AuraConsensusDataProvider
+impl ConsensusDataProvider for AuraConsensusDataProvider
 where
     B: BlockT,
-    C: AuxStore
-        + HeaderBackend
-        + HeaderMetadata
-        + UsageProvider
-        + ProvideRuntimeApi,
-    C::Api: AuraApi,
     P: Send + Sync,
 {
     type Proof = P;
diff --git a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
index ccf33daca55fe..c8b61fe92b617 100644
--- a/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
+++ b/substrate/utils/frame/benchmarking-cli/src/overhead/command.rs
@@ -208,7 +208,7 @@ fn create_inherent_data + HeaderBackend, Blo
     let parachain_validation_data_provider = MockValidationDataInherentDataProvider::<()> {
         para_id: ParaId::from(*para_id),
         current_para_block_head: Some(header.encode().into()),
-        relay_offset: 1,
+        relay_offset: 0,
         ..Default::default()
     };
     let _ = futures::executor::block_on(