frontier_template_node/service.rs

//! Service and ServiceFactory implementation. Specialized wrapper over the Substrate service.

use std::{cell::RefCell, path::Path, sync::Arc, time::Duration};

use futures::{channel::mpsc, prelude::*};
// Substrate
use prometheus_endpoint::Registry;
use sc_client_api::{Backend as BackendT, BlockBackend};
use sc_consensus::{BasicQueue, BoxBlockImport};
use sc_consensus_grandpa::BlockNumberOps;
use sc_executor::HostFunctions as HostFunctionsT;
use sc_network_sync::strategy::warp::{WarpSyncConfig, WarpSyncProvider};
use sc_service::{error::Error as ServiceError, Configuration, PartialComponents, TaskManager};
use sc_telemetry::{Telemetry, TelemetryHandle, TelemetryWorker};
use sc_transaction_pool::TransactionPoolHandle;
use sc_transaction_pool_api::OffchainTransactionPoolFactory;
use sp_api::ConstructRuntimeApi;
use sp_consensus_aura::sr25519::{AuthorityId as AuraId, AuthorityPair as AuraPair};
use sp_core::{H256, U256};
use sp_runtime::traits::{Block as BlockT, NumberFor};
// Runtime
use frontier_template_runtime::{
	opaque::Block, AccountId, Balance, Nonce, RuntimeApi, TransactionConverter,
};

pub use crate::eth::{db_config_dir, EthConfiguration};
use crate::{
	cli::Sealing,
	client::{BaseRuntimeApiCollection, FullBackend, FullClient, RuntimeApiCollection},
	eth::{
		new_frontier_partial, spawn_frontier_tasks, BackendType, EthCompatRuntimeApiCollection,
		FrontierBackend, FrontierBlockImport, FrontierPartialComponents, StorageOverride,
		StorageOverrideHandler,
	},
};

/// Only enable the benchmarking host functions when we actually want to benchmark.
#[cfg(feature = "runtime-benchmarks")]
pub type HostFunctions = (
	sp_io::SubstrateHostFunctions,
	frame_benchmarking::benchmarking::HostFunctions,
	cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions,
);
/// Otherwise we only use the default Substrate host functions plus proof-size recording.
#[cfg(not(feature = "runtime-benchmarks"))]
pub type HostFunctions = (
	sp_io::SubstrateHostFunctions,
	cumulus_primitives_proof_size_hostfunction::storage_proof_size::HostFunctions,
);

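// Concrete type aliases binding the template's opaque block, runtime API, and
// host functions to the generic client and backend types.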
pub type Backend = FullBackend<Block>;
pub type Client = FullClient<Block, RuntimeApi, HostFunctions>;

type FullSelectChain<B> = sc_consensus::LongestChain<FullBackend<B>, B>;
type GrandpaBlockImport<B, C> =
	sc_consensus_grandpa::GrandpaBlockImport<FullBackend<B>, B, C, FullSelectChain<B>>;
type GrandpaLinkHalf<B, C> = sc_consensus_grandpa::LinkHalf<B, C, FullSelectChain<B>>;

/// The minimum period of blocks on which justifications will be
/// imported and generated.
const GRANDPA_JUSTIFICATION_PERIOD: u32 = 512;

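/// Builds the partial components shared by every service flavour: client,
/// backend, keystore, task manager, transaction pool, telemetry, the GRANDPA
/// block import/link, and the Frontier backend. Import-queue construction is
/// delegated to the `build_import_queue` closure so that both the
/// Aura + GRANDPA and manual-seal setups can reuse this function.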
pub fn new_partial<B, RA, HF, BIQ>(
	config: &Configuration,
	eth_config: &EthConfiguration,
	build_import_queue: BIQ,
) -> Result<
	PartialComponents<
		FullClient<B, RA, HF>,
		FullBackend<B>,
		FullSelectChain<B>,
		BasicQueue<B>,
		sc_transaction_pool::TransactionPoolHandle<B, FullClient<B, RA, HF>>,
		(
			Option<Telemetry>,
			BoxBlockImport<B>,
			GrandpaLinkHalf<B, FullClient<B, RA, HF>>,
			FrontierBackend<B, FullClient<B, RA, HF>>,
			Arc<dyn StorageOverride<B>>,
		),
	>,
	ServiceError,
>
where
	B: BlockT<Hash = H256>,
	RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
	RA: Send + Sync + 'static,
	RA::RuntimeApi: BaseRuntimeApiCollection<B> + EthCompatRuntimeApiCollection<B>,
	HF: HostFunctionsT + 'static,
	BIQ: FnOnce(
		Arc<FullClient<B, RA, HF>>,
		&Configuration,
		&EthConfiguration,
		&TaskManager,
		Option<TelemetryHandle>,
		GrandpaBlockImport<B, FullClient<B, RA, HF>>,
	) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>,
{
	let telemetry = config
		.telemetry_endpoints
		.clone()
		.filter(|x| !x.is_empty())
		.map(|endpoints| -> Result<_, sc_telemetry::Error> {
			let worker = TelemetryWorker::new(16)?;
			let telemetry = worker.handle().new_telemetry(endpoints);
			Ok((worker, telemetry))
		})
		.transpose()?;

	let executor = sc_service::new_wasm_executor(&config.executor);

	let (client, backend, keystore_container, task_manager) =
		sc_service::new_full_parts_record_import::<B, RA, _>(
			config,
			telemetry.as_ref().map(|(_, telemetry)| telemetry.handle()),
			executor,
			true,
		)?;
	let client = Arc::new(client);

	let telemetry = telemetry.map(|(worker, telemetry)| {
		task_manager
			.spawn_handle()
			.spawn("telemetry", None, worker.run());
		telemetry
	});

	let select_chain = sc_consensus::LongestChain::new(backend.clone());
	let (grandpa_block_import, grandpa_link) = sc_consensus_grandpa::block_import(
		client.clone(),
		GRANDPA_JUSTIFICATION_PERIOD,
		&client,
		select_chain.clone(),
		telemetry.as_ref().map(|x| x.handle()),
	)?;

	let storage_override = Arc::new(StorageOverrideHandler::<B, _, _>::new(client.clone()));
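	// Frontier can index Ethereum data either in a key-value database or in
	// SQLite, selected through `eth_config.frontier_backend_type`.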
	let frontier_backend = match eth_config.frontier_backend_type {
		BackendType::KeyValue => FrontierBackend::KeyValue(Arc::new(fc_db::kv::Backend::open(
			Arc::clone(&client),
			&config.database,
			&db_config_dir(config),
		)?)),
		BackendType::Sql => {
			let db_path = db_config_dir(config).join("sql");
			std::fs::create_dir_all(&db_path).expect("failed creating sql db directory");
			let backend = futures::executor::block_on(fc_db::sql::Backend::new(
				fc_db::sql::BackendConfig::Sqlite(fc_db::sql::SqliteBackendConfig {
					path: Path::new("sqlite:///")
						.join(db_path)
						.join("frontier.db3")
						.to_str()
						.unwrap(),
					create_if_missing: true,
					thread_count: eth_config.frontier_sql_backend_thread_count,
					cache_size: eth_config.frontier_sql_backend_cache_size,
				}),
				eth_config.frontier_sql_backend_pool_size,
				std::num::NonZeroU32::new(eth_config.frontier_sql_backend_num_ops_timeout),
				storage_override.clone(),
			))
			.unwrap_or_else(|err| panic!("failed creating sql backend: {err:?}"));
			FrontierBackend::Sql(Arc::new(backend))
		}
	};

	let (import_queue, block_import) = build_import_queue(
		client.clone(),
		config,
		eth_config,
		&task_manager,
		telemetry.as_ref().map(|x| x.handle()),
		grandpa_block_import,
	)?;

	let transaction_pool = Arc::from(
		sc_transaction_pool::Builder::new(
			task_manager.spawn_essential_handle(),
			client.clone(),
			config.role.is_authority().into(),
		)
		.with_options(config.transaction_pool.clone())
		.with_prometheus(config.prometheus_registry())
		.build(),
	);

	Ok(PartialComponents {
		client,
		backend,
		keystore_container,
		task_manager,
		select_chain,
		import_queue,
		transaction_pool,
		other: (
			telemetry,
			block_import,
			grandpa_link,
			frontier_backend,
			storage_override,
		),
	})
}

/// Build the import queue for the template runtime (aura + grandpa).
pub fn build_aura_grandpa_import_queue<B, RA, HF>(
	client: Arc<FullClient<B, RA, HF>>,
	config: &Configuration,
	eth_config: &EthConfiguration,
	task_manager: &TaskManager,
	telemetry: Option<TelemetryHandle>,
	grandpa_block_import: GrandpaBlockImport<B, FullClient<B, RA, HF>>,
) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>
where
	B: BlockT,
	NumberFor<B>: BlockNumberOps,
	RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
	RA: Send + Sync + 'static,
	RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
	HF: HostFunctionsT + 'static,
{
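	// Wrap the GRANDPA block import with Frontier's block import, which lets
	// Frontier hook its Ethereum-related processing into block import.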
	let frontier_block_import =
		FrontierBlockImport::new(grandpa_block_import.clone(), client.clone());

	let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
	let target_gas_price = eth_config.target_gas_price;
	let create_inherent_data_providers = move |_, ()| async move {
		let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
		let slot =
			sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
				*timestamp,
				slot_duration,
			);
		let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price));
		Ok((slot, timestamp, dynamic_fee))
	};

	let import_queue = sc_consensus_aura::import_queue::<AuraPair, _, _, _, _, _>(
		sc_consensus_aura::ImportQueueParams {
			block_import: frontier_block_import.clone(),
			justification_import: Some(Box::new(grandpa_block_import)),
			client,
			create_inherent_data_providers,
			spawner: &task_manager.spawn_essential_handle(),
			registry: config.prometheus_registry(),
			check_for_equivocation: Default::default(),
			telemetry,
			compatibility_mode: sc_consensus_aura::CompatibilityMode::None,
		},
	)
	.map_err::<ServiceError, _>(Into::into)?;

	Ok((import_queue, Box::new(frontier_block_import)))
}

/// Build the import queue for the template runtime (manual seal).
pub fn build_manual_seal_import_queue<B, RA, HF>(
	client: Arc<FullClient<B, RA, HF>>,
	config: &Configuration,
	_eth_config: &EthConfiguration,
	task_manager: &TaskManager,
	_telemetry: Option<TelemetryHandle>,
	_grandpa_block_import: GrandpaBlockImport<B, FullClient<B, RA, HF>>,
) -> Result<(BasicQueue<B>, BoxBlockImport<B>), ServiceError>
where
	B: BlockT,
	RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
	RA: Send + Sync + 'static,
	RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
	HF: HostFunctionsT + 'static,
{
	let frontier_block_import = FrontierBlockImport::new(client.clone(), client);
	Ok((
		sc_consensus_manual_seal::import_queue(
			Box::new(frontier_block_import.clone()),
			&task_manager.spawn_essential_handle(),
			config.prometheus_registry(),
		),
		Box::new(frontier_block_import),
	))
}

/// Builds a new service for a full client.
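///
/// When `sealing` is `Some`, the node authors blocks via manual/instant seal,
/// and both GRANDPA and warp sync are disabled; otherwise it runs the normal
/// Aura + GRANDPA pipeline.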
pub async fn new_full<B, RA, HF, NB>(
	mut config: Configuration,
	eth_config: EthConfiguration,
	sealing: Option<Sealing>,
) -> Result<TaskManager, ServiceError>
where
	B: BlockT<Hash = H256>,
	NumberFor<B>: BlockNumberOps,
	<B as BlockT>::Header: Unpin,
	RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
	RA: Send + Sync + 'static,
	RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
	HF: HostFunctionsT + 'static,
	NB: sc_network::NetworkBackend<B, <B as BlockT>::Hash>,
{
	let build_import_queue = if sealing.is_some() {
		build_manual_seal_import_queue::<B, RA, HF>
	} else {
		build_aura_grandpa_import_queue::<B, RA, HF>
	};

	let PartialComponents {
		client,
		backend,
		mut task_manager,
		import_queue,
		keystore_container,
		select_chain,
		transaction_pool,
		other: (mut telemetry, block_import, grandpa_link, frontier_backend, storage_override),
	} = new_partial(&config, &eth_config, build_import_queue)?;

	let FrontierPartialComponents {
		filter_pool,
		fee_history_cache,
		fee_history_cache_limit,
	} = new_frontier_partial(&eth_config)?;

	let maybe_registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry);
	let mut net_config = sc_network::config::FullNetworkConfiguration::<_, _, NB>::new(
		&config.network,
		maybe_registry.cloned(),
	);
	let peer_store_handle = net_config.peer_store_handle();
	let metrics = NB::register_notification_metrics(maybe_registry);

	let grandpa_protocol_name = sc_consensus_grandpa::protocol_standard_name(
		&client
			.block_hash(0u32.into())
			.ok()
			.flatten()
			.expect("Genesis block exists; qed"),
		&config.chain_spec,
	);

	let (grandpa_protocol_config, grandpa_notification_service) =
		sc_consensus_grandpa::grandpa_peers_set_config::<_, NB>(
			grandpa_protocol_name.clone(),
			metrics.clone(),
			peer_store_handle,
		);
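
	// Warp sync relies on GRANDPA justifications, so it is only enabled when
	// the node runs the full Aura + GRANDPA pipeline; manual seal disables it.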
	let warp_sync_config = if sealing.is_some() {
		None
	} else {
		net_config.add_notification_protocol(grandpa_protocol_config);
		let warp_sync: Arc<dyn WarpSyncProvider<B>> =
			Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
				backend.clone(),
				grandpa_link.shared_authority_set().clone(),
				Vec::new(),
			));
		Some(WarpSyncConfig::WithProvider(warp_sync))
	};

	let (network, system_rpc_tx, tx_handler_controller, sync_service) =
		sc_service::build_network(sc_service::BuildNetworkParams {
			config: &config,
			net_config,
			client: client.clone(),
			transaction_pool: transaction_pool.clone(),
			spawn_handle: task_manager.spawn_handle(),
			import_queue,
			block_announce_validator_builder: None,
			warp_sync_config,
			block_relay: None,
			metrics,
		})?;

	if config.offchain_worker.enabled {
		let offchain_workers =
			sc_offchain::OffchainWorkers::new(sc_offchain::OffchainWorkerOptions {
				runtime_api_provider: client.clone(),
				is_validator: config.role.is_authority(),
				keystore: Some(keystore_container.keystore()),
				offchain_db: backend.offchain_storage(),
				transaction_pool: Some(OffchainTransactionPoolFactory::new(
					transaction_pool.clone(),
				)),
				network_provider: Arc::new(network.clone()),
				enable_http_requests: true,
				custom_extensions: |_| vec![],
			})?;
		task_manager.spawn_handle().spawn(
			"offchain-workers-runner",
			"offchain-worker",
			offchain_workers
				.run(client.clone(), task_manager.spawn_handle())
				.boxed(),
		);
	}

	let role = config.role;
	let force_authoring = config.force_authoring;
	let name = config.network.node_name.clone();
	let frontier_backend = Arc::new(frontier_backend);
	let enable_grandpa = !config.disable_grandpa && sealing.is_none();
	let prometheus_registry = config.prometheus_registry().cloned();

	// Channel for the rpc handler to communicate with the authorship task.
	let (command_sink, commands_stream) = mpsc::channel(1000);

	// Sinks for pub-sub notifications.
	// Every time a new subscription is created, a new mpsc channel is added to the sink pool.
	// The MappingSyncWorker sends through the channel on block import, and the subscription
	// emits a notification to the subscriber upon receiving a message through this channel.
	// This way we avoid race conditions when using the native Substrate block import
	// notification stream.
	let pubsub_notification_sinks: fc_mapping_sync::EthereumBlockNotificationSinks<
		fc_mapping_sync::EthereumBlockNotification<B>,
	> = Default::default();
	let pubsub_notification_sinks = Arc::new(pubsub_notification_sinks);

	// For Ethereum-compatibility RPC: use Ethereum-style subscription IDs.
	config.rpc.id_provider = Some(Box::new(fc_rpc::EthereumSubIdProvider));

	let rpc_builder = {
		let client = client.clone();
		let pool = transaction_pool.clone();
		let network = network.clone();
		let sync_service = sync_service.clone();

		let is_authority = role.is_authority();
		let enable_dev_signer = eth_config.enable_dev_signer;
		let max_past_logs = eth_config.max_past_logs;
		let execute_gas_limit_multiplier = eth_config.execute_gas_limit_multiplier;
		let filter_pool = filter_pool.clone();
		let frontier_backend = frontier_backend.clone();
		let pubsub_notification_sinks = pubsub_notification_sinks.clone();
		let storage_override = storage_override.clone();
		let fee_history_cache = fee_history_cache.clone();
		let block_data_cache = Arc::new(fc_rpc::EthBlockDataCacheTask::new(
			task_manager.spawn_handle(),
			storage_override.clone(),
			eth_config.eth_log_block_cache,
			eth_config.eth_statuses_cache,
			prometheus_registry.clone(),
		));

		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
		let target_gas_price = eth_config.target_gas_price;
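		// Inherent data providers for "pending" block requests: the timestamp
		// is pushed one slot ahead so the pending block appears to occupy the
		// next Aura slot.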
		let pending_create_inherent_data_providers = move |_, ()| async move {
			let current = sp_timestamp::InherentDataProvider::from_system_time();
			let next_slot = current.timestamp().as_millis() + slot_duration.as_millis();
			let timestamp = sp_timestamp::InherentDataProvider::new(next_slot.into());
			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
				*timestamp,
				slot_duration,
			);
			let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price));
			Ok((slot, timestamp, dynamic_fee))
		};

		Box::new(move |subscription_task_executor| {
			let eth_deps = crate::rpc::EthDeps {
				client: client.clone(),
				pool: pool.clone(),
				graph: pool.clone(),
				converter: Some(TransactionConverter::<B>::default()),
				is_authority,
				enable_dev_signer,
				network: network.clone(),
				sync: sync_service.clone(),
				frontier_backend: match &*frontier_backend {
					fc_db::Backend::KeyValue(b) => b.clone(),
					fc_db::Backend::Sql(b) => b.clone(),
				},
				storage_override: storage_override.clone(),
				block_data_cache: block_data_cache.clone(),
				filter_pool: filter_pool.clone(),
				max_past_logs,
				fee_history_cache: fee_history_cache.clone(),
				fee_history_cache_limit,
				execute_gas_limit_multiplier,
				forced_parent_hashes: None,
				pending_create_inherent_data_providers,
			};
			let deps = crate::rpc::FullDeps {
				client: client.clone(),
				pool: pool.clone(),
				command_sink: if sealing.is_some() {
					Some(command_sink.clone())
				} else {
					None
				},
				eth: eth_deps,
			};
			crate::rpc::create_full(
				deps,
				subscription_task_executor,
				pubsub_notification_sinks.clone(),
			)
			.map_err(Into::into)
		})
	};

	let _rpc_handlers = sc_service::spawn_tasks(sc_service::SpawnTasksParams {
		config,
		client: client.clone(),
		backend: backend.clone(),
		task_manager: &mut task_manager,
		keystore: keystore_container.keystore(),
		transaction_pool: transaction_pool.clone(),
		rpc_builder,
		network: network.clone(),
		system_rpc_tx,
		tx_handler_controller,
		sync_service: sync_service.clone(),
		telemetry: telemetry.as_mut(),
	})?;

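	// Spawn the Frontier background tasks: the mapping-sync worker that keeps
	// the Ethereum schema in sync with imported blocks, plus the filter-pool
	// and fee-history cache maintenance tasks.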
	spawn_frontier_tasks(
		&task_manager,
		client.clone(),
		backend,
		frontier_backend,
		filter_pool,
		storage_override,
		fee_history_cache,
		fee_history_cache_limit,
		sync_service.clone(),
		pubsub_notification_sinks,
	)
	.await;

	if role.is_authority() {
		// manual-seal authorship
		if let Some(sealing) = sealing {
			run_manual_seal_authorship(
				&eth_config,
				sealing,
				client,
				transaction_pool,
				select_chain,
				block_import,
				&task_manager,
				prometheus_registry.as_ref(),
				telemetry.as_ref(),
				commands_stream,
			)?;

			log::info!("Manual Seal Ready");
			return Ok(task_manager);
		}

		let proposer_factory = sc_basic_authorship::ProposerFactory::new(
			task_manager.spawn_handle(),
			client.clone(),
			transaction_pool.clone(),
			prometheus_registry.as_ref(),
			telemetry.as_ref().map(|x| x.handle()),
		);

		let slot_duration = sc_consensus_aura::slot_duration(&*client)?;
		let target_gas_price = eth_config.target_gas_price;
		let create_inherent_data_providers = move |_, ()| async move {
			let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
			let slot = sp_consensus_aura::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
				*timestamp,
				slot_duration,
			);
			let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price));
			Ok((slot, timestamp, dynamic_fee))
		};

		let aura = sc_consensus_aura::start_aura::<AuraPair, _, _, _, _, _, _, _, _, _, _>(
			sc_consensus_aura::StartAuraParams {
				slot_duration,
				client,
				select_chain,
				block_import,
				proposer_factory,
				sync_oracle: sync_service.clone(),
				justification_sync_link: sync_service.clone(),
				create_inherent_data_providers,
				force_authoring,
				backoff_authoring_blocks: Option::<()>::None,
				keystore: keystore_container.keystore(),
				block_proposal_slot_portion: sc_consensus_aura::SlotProportion::new(2f32 / 3f32),
				max_block_proposal_slot_portion: None,
				telemetry: telemetry.as_ref().map(|x| x.handle()),
				compatibility_mode: sc_consensus_aura::CompatibilityMode::None,
			},
		)?;
		// the AURA authoring task is considered essential, i.e. if it
		// fails we take down the service with it.
		task_manager
			.spawn_essential_handle()
			.spawn_blocking("aura", Some("block-authoring"), aura);
	}

	if enable_grandpa {
		// if the node isn't actively participating in consensus then it doesn't
		// need a keystore, regardless of which protocol we use below.
		let keystore = if role.is_authority() {
			Some(keystore_container.keystore())
		} else {
			None
		};

		let grandpa_config = sc_consensus_grandpa::Config {
			// FIXME #1578 make this available through chainspec
			gossip_duration: Duration::from_millis(333),
			justification_generation_period: GRANDPA_JUSTIFICATION_PERIOD,
			name: Some(name),
			observer_enabled: false,
			keystore,
			local_role: role,
			telemetry: telemetry.as_ref().map(|x| x.handle()),
			protocol_name: grandpa_protocol_name,
		};

		// start the full GRANDPA voter
		// NOTE: non-authorities could run the GRANDPA observer protocol, but at
		// this point the full voter should provide better guarantees of block
		// and vote data availability than the observer. The observer has not
		// been tested extensively yet and having most nodes in a network run it
		// could lead to finality stalls.
		let grandpa_voter =
			sc_consensus_grandpa::run_grandpa_voter(sc_consensus_grandpa::GrandpaParams {
				config: grandpa_config,
				link: grandpa_link,
				network,
				sync: sync_service,
				notification_service: grandpa_notification_service,
				voting_rule: sc_consensus_grandpa::VotingRulesBuilder::default().build(),
				prometheus_registry,
				shared_voter_state: sc_consensus_grandpa::SharedVoterState::empty(),
				telemetry: telemetry.as_ref().map(|x| x.handle()),
				offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool),
			})?;

		// the GRANDPA voter task is considered infallible, i.e.
		// if it fails we take down the service with it.
		task_manager
			.spawn_essential_handle()
			.spawn_blocking("grandpa-voter", None, grandpa_voter);
	}

	Ok(task_manager)
}

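/// Runs the manual/instant-seal authorship task. In manual mode, blocks are
/// produced on `EngineCommand`s received over `commands_stream` (typically
/// issued via the `engine_createBlock` RPC); in instant mode, a block is
/// sealed as soon as a transaction enters the pool.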
fn run_manual_seal_authorship<B, RA, HF>(
	eth_config: &EthConfiguration,
	sealing: Sealing,
	client: Arc<FullClient<B, RA, HF>>,
	transaction_pool: Arc<TransactionPoolHandle<B, FullClient<B, RA, HF>>>,
	select_chain: FullSelectChain<B>,
	block_import: BoxBlockImport<B>,
	task_manager: &TaskManager,
	prometheus_registry: Option<&Registry>,
	telemetry: Option<&Telemetry>,
	commands_stream: mpsc::Receiver<
		sc_consensus_manual_seal::rpc::EngineCommand<<B as BlockT>::Hash>,
	>,
) -> Result<(), ServiceError>
where
	B: BlockT,
	RA: ConstructRuntimeApi<B, FullClient<B, RA, HF>>,
	RA: Send + Sync + 'static,
	RA::RuntimeApi: RuntimeApiCollection<B, AuraId, AccountId, Nonce, Balance>,
	HF: HostFunctionsT + 'static,
{
	let proposer_factory = sc_basic_authorship::ProposerFactory::new(
		task_manager.spawn_handle(),
		client.clone(),
		transaction_pool.clone(),
		prometheus_registry,
		telemetry.as_ref().map(|x| x.handle()),
	);

	thread_local!(static TIMESTAMP: RefCell<u64> = const { RefCell::new(0) });

	/// Provides a mock timestamp, starting at 0 milliseconds, for the timestamp inherent.
	/// Each call increments the timestamp by `SLOT_DURATION`, making Aura think time has passed.
	struct MockTimestampInherentDataProvider;

	#[async_trait::async_trait]
	impl sp_inherents::InherentDataProvider for MockTimestampInherentDataProvider {
		async fn provide_inherent_data(
			&self,
			inherent_data: &mut sp_inherents::InherentData,
		) -> Result<(), sp_inherents::Error> {
			TIMESTAMP.with(|x| {
				*x.borrow_mut() += frontier_template_runtime::SLOT_DURATION;
				inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, &*x.borrow())
			})
		}

		async fn try_handle_error(
			&self,
			_identifier: &sp_inherents::InherentIdentifier,
			_error: &[u8],
		) -> Option<Result<(), sp_inherents::Error>> {
			// The pallet never reports an error.
			None
		}
	}

	let target_gas_price = eth_config.target_gas_price;
	let create_inherent_data_providers = move |_, ()| async move {
		let timestamp = MockTimestampInherentDataProvider;
		let dynamic_fee = fp_dynamic_fee::InherentDataProvider(U256::from(target_gas_price));
		Ok((timestamp, dynamic_fee))
	};

	let manual_seal = match sealing {
		Sealing::Manual => future::Either::Left(sc_consensus_manual_seal::run_manual_seal(
			sc_consensus_manual_seal::ManualSealParams {
				block_import,
				env: proposer_factory,
				client,
				pool: transaction_pool,
				commands_stream,
				select_chain,
				consensus_data_provider: None,
				create_inherent_data_providers,
			},
		)),
		Sealing::Instant => future::Either::Right(sc_consensus_manual_seal::run_instant_seal(
			sc_consensus_manual_seal::InstantSealParams {
				block_import,
				env: proposer_factory,
				client,
				pool: transaction_pool,
				select_chain,
				consensus_data_provider: None,
				create_inherent_data_providers,
			},
		)),
	};

	// We spawn the future on a background thread managed by the service.
	task_manager
		.spawn_essential_handle()
		.spawn_blocking("manual-seal", None, manual_seal);
	Ok(())
}

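/// Builds the full service with the template's concrete block, runtime API,
/// host functions, and network backend types.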
pub async fn build_full(
	config: Configuration,
	eth_config: EthConfiguration,
	sealing: Option<Sealing>,
) -> Result<TaskManager, ServiceError> {
	new_full::<Block, RuntimeApi, HostFunctions, sc_network::NetworkWorker<_, _>>(
		config, eth_config, sealing,
	)
	.await
}

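/// Builds the client, backend, import queue, and Frontier backend needed for
/// chain operations (e.g. block import/export, check, and revert), using an
/// in-memory keystore.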
pub fn new_chain_ops(
	config: &mut Configuration,
	eth_config: &EthConfiguration,
) -> Result<
	(
		Arc<Client>,
		Arc<Backend>,
		BasicQueue<Block>,
		TaskManager,
		FrontierBackend<Block, Client>,
	),
	ServiceError,
> {
	config.keystore = sc_service::config::KeystoreConfig::InMemory;
	let PartialComponents {
		client,
		backend,
		import_queue,
		task_manager,
		other,
		..
	} = new_partial::<Block, RuntimeApi, HostFunctions, _>(
		config,
		eth_config,
		build_aura_grandpa_import_queue,
	)?;
	Ok((client, backend, import_queue, task_manager, other.3))
}