fc_db/kv/
mod.rs

// This file is part of Frontier.

// Copyright (C) Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0

// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

mod parity_db_adapter;
mod upgrade;
mod utils;

use std::{
	marker::PhantomData,
	path::{Path, PathBuf},
	sync::Arc,
};

use parking_lot::Mutex;
use scale_codec::{Decode, Encode};
// Substrate
pub use sc_client_db::DatabaseSource;
use sp_blockchain::HeaderBackend;
use sp_core::{H160, H256};
pub use sp_database::Database;
use sp_runtime::traits::Block as BlockT;
// Frontier
use fc_api::{FilteredLog, TransactionMetadata};
use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA_CACHE};

const DB_HASH_LEN: usize = 32;
/// Hash type that this backend uses for the database.
pub type DbHash = [u8; DB_HASH_LEN];

/// Database settings.
pub struct DatabaseSettings {
	/// Where to find the database.
	pub source: DatabaseSource,
}

pub(crate) mod columns {
	pub const NUM_COLUMNS: u32 = 4;

	pub const META: u32 = 0;
	pub const BLOCK_MAPPING: u32 = 1;
	pub const TRANSACTION_MAPPING: u32 = 2;
	pub const SYNCED_MAPPING: u32 = 3;
}

pub mod static_keys {
	pub const CURRENT_SYNCING_TIPS: &[u8] = b"CURRENT_SYNCING_TIPS";
}

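/// The Frontier key-value database backend: a client handle plus the metadata store,
/// the Ethereum-to-Substrate mapping store and a (non-indexing) log indexer.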
#[derive(Clone)]
pub struct Backend<Block, C> {
	client: Arc<C>,
	meta: Arc<MetaDb<Block>>,
	mapping: Arc<MappingDb<Block>>,
	log_indexer: LogIndexerBackend<Block>,
}

#[async_trait::async_trait]
impl<Block: BlockT, C: HeaderBackend<Block>> fc_api::Backend<Block> for Backend<Block, C> {
	async fn block_hash(
		&self,
		ethereum_block_hash: &H256,
	) -> Result<Option<Vec<Block::Hash>>, String> {
		self.mapping().block_hash(ethereum_block_hash)
	}

	async fn transaction_metadata(
		&self,
		ethereum_transaction_hash: &H256,
	) -> Result<Vec<TransactionMetadata<Block>>, String> {
		self.mapping()
			.transaction_metadata(ethereum_transaction_hash)
	}

	fn log_indexer(&self) -> &dyn fc_api::LogIndexerBackend<Block> {
		&self.log_indexer
	}

	async fn first_block_hash(&self) -> Result<Block::Hash, String> {
		Ok(self.client.info().genesis_hash)
	}

	async fn latest_block_hash(&self) -> Result<Block::Hash, String> {
		Ok(self.client.info().best_hash)
	}
}
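
// Illustrative only (not part of the original file): an RPC layer holding this backend
// behind `Arc<dyn fc_api::Backend<Block>>` can resolve an Ethereum block hash to the
// Substrate block hashes that contain it, e.g.
//
//     let substrate_hashes = backend.block_hash(&ethereum_block_hash).await?;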
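/// Log indexer placeholder for the key-value backend; it does not index logs, and
/// `filter_logs` always returns an error.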
#[derive(Clone, Default)]
pub struct LogIndexerBackend<Block>(PhantomData<Block>);

#[async_trait::async_trait]
impl<Block: BlockT> fc_api::LogIndexerBackend<Block> for LogIndexerBackend<Block> {
	fn is_indexed(&self) -> bool {
		false
	}

	async fn filter_logs(
		&self,
		_from_block: u64,
		_to_block: u64,
		_addresses: Vec<H160>,
		_topics: Vec<Vec<Option<H256>>>,
	) -> Result<Vec<FilteredLog<Block>>, String> {
		Err("KeyValue db does not index logs".into())
	}
}

/// Returns the frontier database directory.
pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf {
	db_config_dir.join("frontier").join(db_path)
}
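
// For example (illustrative values), `frontier_database_dir(Path::new("/node/chain-data"), "paritydb")`
// returns `/node/chain-data/frontier/paritydb`.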

impl<Block: BlockT, C: HeaderBackend<Block>> Backend<Block, C> {
	pub fn open(
		client: Arc<C>,
		database: &DatabaseSource,
		db_config_dir: &Path,
	) -> Result<Self, String> {
		Self::new(
			client,
			&DatabaseSettings {
				source: match database {
					DatabaseSource::Auto { .. } => DatabaseSource::Auto {
						rocksdb_path: frontier_database_dir(db_config_dir, "db"),
						paritydb_path: frontier_database_dir(db_config_dir, "paritydb"),
						cache_size: 0,
					},
					#[cfg(feature = "rocksdb")]
					DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
						path: frontier_database_dir(db_config_dir, "db"),
						cache_size: 0,
					},
					DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
						path: frontier_database_dir(db_config_dir, "paritydb"),
					},
					_ => {
						return Err(
							"Supported db sources: `auto` | `rocksdb` | `paritydb`".to_string()
						)
					}
				},
			},
		)
	}

	pub fn new(client: Arc<C>, config: &DatabaseSettings) -> Result<Self, String> {
		let db = utils::open_database::<Block, C>(client.clone(), config)?;

		Ok(Self {
			client,
			mapping: Arc::new(MappingDb {
				db: db.clone(),
				write_lock: Arc::new(Mutex::new(())),
				_marker: PhantomData,
			}),
			meta: Arc::new(MetaDb {
				db: db.clone(),
				_marker: PhantomData,
			}),
			log_indexer: LogIndexerBackend(PhantomData),
		})
	}

	pub fn mapping(&self) -> &Arc<MappingDb<Block>> {
		&self.mapping
	}

	pub fn meta(&self) -> &Arc<MetaDb<Block>> {
		&self.meta
	}
}
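
// Illustrative construction (not part of the original file); `client` is assumed to be
// an `Arc<C>` with `C: HeaderBackend<Block>`, and `database_source`/`db_config_dir`
// come from the node configuration:
//
//     let frontier_backend = Backend::open(client.clone(), &database_source, &db_config_dir)?;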
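/// Metadata store, backed by the `META` column: holds the current syncing tips and
/// the cached `PALLET_ETHEREUM_SCHEMA_CACHE` entries.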
pub struct MetaDb<Block> {
	db: Arc<dyn Database<DbHash>>,
	_marker: PhantomData<Block>,
}

impl<Block: BlockT> MetaDb<Block> {
	pub fn current_syncing_tips(&self) -> Result<Vec<Block::Hash>, String> {
		match self
			.db
			.get(columns::META, static_keys::CURRENT_SYNCING_TIPS)
		{
			Some(raw) => Ok(Vec::<Block::Hash>::decode(&mut &raw[..]).map_err(|e| e.to_string())?),
			None => Ok(Vec::new()),
		}
	}

	pub fn write_current_syncing_tips(&self, tips: Vec<Block::Hash>) -> Result<(), String> {
		let mut transaction = sp_database::Transaction::new();

		transaction.set(
			columns::META,
			static_keys::CURRENT_SYNCING_TIPS,
			&tips.encode(),
		);

		self.db.commit(transaction).map_err(|e| e.to_string())?;

		Ok(())
	}

	pub fn ethereum_schema(&self) -> Result<Option<Vec<(EthereumStorageSchema, H256)>>, String> {
		match self
			.db
			.get(columns::META, &PALLET_ETHEREUM_SCHEMA_CACHE.encode())
		{
			Some(raw) => Ok(Some(
				Decode::decode(&mut &raw[..]).map_err(|e| e.to_string())?,
			)),
			None => Ok(None),
		}
	}

	pub fn write_ethereum_schema(
		&self,
		new_cache: Vec<(EthereumStorageSchema, H256)>,
	) -> Result<(), String> {
		let mut transaction = sp_database::Transaction::new();

		transaction.set(
			columns::META,
			&PALLET_ETHEREUM_SCHEMA_CACHE.encode(),
			&new_cache.encode(),
		);

		self.db.commit(transaction).map_err(|e| e.to_string())?;

		Ok(())
	}
}
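
// Illustrative round trip (not part of the original file), where `tip` is a
// placeholder `Block::Hash`:
//
//     meta.write_current_syncing_tips(vec![tip])?;
//     assert_eq!(meta.current_syncing_tips()?, vec![tip]);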
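/// A mapping commitment for a single block: the Substrate block hash together with
/// the Ethereum block hash and transaction hashes it contains.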
#[derive(Debug)]
pub struct MappingCommitment<Block: BlockT> {
	pub block_hash: Block::Hash,
	pub ethereum_block_hash: H256,
	pub ethereum_transaction_hashes: Vec<H256>,
}

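/// Mapping store: maps Ethereum block and transaction hashes to their Substrate
/// counterparts. Writes are serialized through `write_lock`.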
pub struct MappingDb<Block> {
	db: Arc<dyn Database<DbHash>>,
	write_lock: Arc<Mutex<()>>,
	_marker: PhantomData<Block>,
}

impl<Block: BlockT> MappingDb<Block> {
	pub fn is_synced(&self, block_hash: &Block::Hash) -> Result<bool, String> {
		match self.db.get(columns::SYNCED_MAPPING, &block_hash.encode()) {
			Some(raw) => Ok(bool::decode(&mut &raw[..]).map_err(|e| format!("{e:?}"))?),
			None => Ok(false),
		}
	}

	pub fn block_hash(
		&self,
		ethereum_block_hash: &H256,
	) -> Result<Option<Vec<Block::Hash>>, String> {
		match self
			.db
			.get(columns::BLOCK_MAPPING, &ethereum_block_hash.encode())
		{
			Some(raw) => Ok(Some(
				Vec::<Block::Hash>::decode(&mut &raw[..]).map_err(|e| format!("{e:?}"))?,
			)),
			None => Ok(None),
		}
	}

	pub fn transaction_metadata(
		&self,
		ethereum_transaction_hash: &H256,
	) -> Result<Vec<TransactionMetadata<Block>>, String> {
		match self.db.get(
			columns::TRANSACTION_MAPPING,
			&ethereum_transaction_hash.encode(),
		) {
			Some(raw) => Ok(Vec::<TransactionMetadata<Block>>::decode(&mut &raw[..])
				.map_err(|e| e.to_string())?),
			None => Ok(Vec::new()),
		}
	}

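	/// Marks `block_hash` as synced without writing any Ethereum mapping data.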
	pub fn write_none(&self, block_hash: Block::Hash) -> Result<(), String> {
		let _lock = self.write_lock.lock();

		let mut transaction = sp_database::Transaction::new();

		transaction.set(
			columns::SYNCED_MAPPING,
			&block_hash.encode(),
			&true.encode(),
		);

		self.db.commit(transaction).map_err(|e| e.to_string())?;

		Ok(())
	}

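	/// Writes the Ethereum block and transaction mappings contained in `commitment`
	/// and marks the Substrate block as synced. If the Ethereum block hash is already
	/// mapped to a different Substrate hash, the new hash is appended and a possible
	/// equivocation is logged.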
	pub fn write_hashes(&self, commitment: MappingCommitment<Block>) -> Result<(), String> {
		let _lock = self.write_lock.lock();

		let mut transaction = sp_database::Transaction::new();

		let substrate_hashes = match self.block_hash(&commitment.ethereum_block_hash) {
			Ok(Some(mut data)) => {
				if !data.contains(&commitment.block_hash) {
					data.push(commitment.block_hash);
					log::warn!(
						target: "fc-db",
						"Possible equivocation at ethereum block hash {} {:?}",
						&commitment.ethereum_block_hash,
						&data
					);
				}
				data
			}
			_ => vec![commitment.block_hash],
		};

		transaction.set(
			columns::BLOCK_MAPPING,
			&commitment.ethereum_block_hash.encode(),
			&substrate_hashes.encode(),
		);

		for (i, ethereum_transaction_hash) in commitment
			.ethereum_transaction_hashes
			.into_iter()
			.enumerate()
		{
			let mut metadata = self.transaction_metadata(&ethereum_transaction_hash)?;
			metadata.push(TransactionMetadata::<Block> {
				substrate_block_hash: commitment.block_hash,
				ethereum_block_hash: commitment.ethereum_block_hash,
				ethereum_index: i as u32,
			});
			transaction.set(
				columns::TRANSACTION_MAPPING,
				&ethereum_transaction_hash.encode(),
				&metadata.encode(),
			);
		}

		transaction.set(
			columns::SYNCED_MAPPING,
			&commitment.block_hash.encode(),
			&true.encode(),
		);

		self.db.commit(transaction).map_err(|e| e.to_string())?;

		Ok(())
	}
}
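
// Illustrative usage (not part of the original file): committing the mapping for a
// newly imported block, where `substrate_hash`, `eth_block_hash` and `eth_tx_hashes`
// are placeholders supplied by the caller:
//
//     backend.mapping().write_hashes(MappingCommitment {
//         block_hash: substrate_hash,
//         ethereum_block_hash: eth_block_hash,
//         ethereum_transaction_hashes: eth_tx_hashes,
//     })?;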