Skip to content

Commit

Permalink
Move open_frontier_backend to fc_db (paritytech#711)
Browse files Browse the repository at this point in the history
* Move open_frontier_backend to fc_db

Signed-off-by: koushiro <koushiro.cqx@gmail.com>

* Some nits

Signed-off-by: koushiro <koushiro.cqx@gmail.com>
  • Loading branch information
koushiro authored Jun 6, 2022
1 parent 99671eb commit 39db580
Show file tree
Hide file tree
Showing 4 changed files with 55 additions and 46 deletions.
2 changes: 1 addition & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,4 @@ members = [
"template/node",
"template/runtime",
]
resolver = "2"
resolver = "2"
33 changes: 32 additions & 1 deletion client/db/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,11 @@
mod parity_db_adapter;
mod utils;

use std::{marker::PhantomData, sync::Arc};
use std::{
marker::PhantomData,
path::{Path, PathBuf},
sync::Arc,
};

use codec::{Decode, Encode};
use fp_storage::{EthereumStorageSchema, PALLET_ETHEREUM_SCHEMA_CACHE};
Expand Down Expand Up @@ -58,7 +62,34 @@ pub struct Backend<Block: BlockT> {
mapping: Arc<MappingDb<Block>>,
}

/// Returns the frontier database directory, i.e. `<db_config_dir>/frontier/<db_path>`.
pub fn frontier_database_dir(db_config_dir: &Path, db_path: &str) -> PathBuf {
	// Build the path step by step instead of chaining `join` calls.
	let mut dir = db_config_dir.to_path_buf();
	dir.push("frontier");
	dir.push(db_path);
	dir
}

impl<Block: BlockT> Backend<Block> {
/// Opens the frontier backend using the same database family as `database`
/// (`RocksDb`, `ParityDb` or `Auto`), rooted under `db_config_dir/frontier`.
///
/// Returns an error string for any other database source.
pub fn open(database: &DatabaseSource, db_config_dir: &Path) -> Result<Self, String> {
	// Mirror the node's database choice for the frontier database,
	// pointing each variant at its own subdirectory.
	let source = match database {
		DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
			path: frontier_database_dir(db_config_dir, "db"),
			cache_size: 0,
		},
		DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
			path: frontier_database_dir(db_config_dir, "paritydb"),
		},
		DatabaseSource::Auto { .. } => DatabaseSource::Auto {
			rocksdb_path: frontier_database_dir(db_config_dir, "db"),
			paritydb_path: frontier_database_dir(db_config_dir, "paritydb"),
			cache_size: 0,
		},
		_ => return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string()),
	};
	Self::new(&DatabaseSettings { source })
}

pub fn new(config: &DatabaseSettings) -> Result<Self, String> {
let db = utils::open_database(config)?;

Expand Down
8 changes: 5 additions & 3 deletions template/node/src/command.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
use std::sync::Arc;

use clap::Parser;
use fc_db::frontier_database_dir;
use frame_benchmarking_cli::BenchmarkCmd;
use frontier_template_runtime::Block;
use sc_cli::{ChainSpec, RuntimeVersion, SubstrateCli};
Expand All @@ -27,7 +28,7 @@ use crate::{
chain_spec,
cli::{Cli, Subcommand},
command_helper::{inherent_benchmark_data, BenchmarkExtrinsicBuilder},
service::{self, frontier_database_dir},
service::{self, db_config_dir},
};

impl SubstrateCli for Cli {
Expand Down Expand Up @@ -130,13 +131,14 @@ pub fn run() -> sc_cli::Result<()> {
let runner = cli.create_runner(cmd)?;
runner.sync_run(|config| {
// Remove Frontier offchain db
let db_config_dir = db_config_dir(&config);
let frontier_database_config = match config.database {
DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
path: frontier_database_dir(&config, "db"),
path: frontier_database_dir(&db_config_dir, "db"),
cache_size: 0,
},
DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
path: frontier_database_dir(&config, "paritydb"),
path: frontier_database_dir(&db_config_dir, "paritydb"),
},
_ => {
return Err(format!("Cannot purge `{:?}` database", config.database).into())
Expand Down
58 changes: 17 additions & 41 deletions template/node/src/service.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
use std::{
collections::BTreeMap,
path::PathBuf,
sync::{Arc, Mutex},
time::Duration,
};
Expand All @@ -17,7 +18,7 @@ use sc_telemetry::{Telemetry, TelemetryWorker};
use sp_core::U256;
// Frontier
use fc_consensus::FrontierBlockImport;
use fc_db::DatabaseSource;
use fc_db::Backend as FrontierBackend;
use fc_mapping_sync::{MappingSyncWorker, SyncStrategy};
use fc_rpc::{EthTask, OverrideHandle};
use fc_rpc_core::types::{FeeHistoryCache, FeeHistoryCacheLimit, FilterPool};
Expand Down Expand Up @@ -69,40 +70,15 @@ pub type ConsensusResult = (
Sealing,
);

pub fn frontier_database_dir(config: &Configuration, path: &str) -> std::path::PathBuf {
let config_dir = config
pub(crate) fn db_config_dir(config: &Configuration) -> PathBuf {
config
.base_path
.as_ref()
.map(|base_path| base_path.config_dir(config.chain_spec.id()))
.unwrap_or_else(|| {
BasePath::from_project("", "", &crate::cli::Cli::executable_name())
BasePath::from_project("", "", &Cli::executable_name())
.config_dir(config.chain_spec.id())
});
config_dir.join("frontier").join(path)
}

pub fn open_frontier_backend(config: &Configuration) -> Result<Arc<fc_db::Backend<Block>>, String> {
Ok(Arc::new(fc_db::Backend::<Block>::new(
&fc_db::DatabaseSettings {
source: match config.database {
DatabaseSource::RocksDb { .. } => DatabaseSource::RocksDb {
path: frontier_database_dir(config, "db"),
cache_size: 0,
},
DatabaseSource::ParityDb { .. } => DatabaseSource::ParityDb {
path: frontier_database_dir(config, "paritydb"),
},
DatabaseSource::Auto { .. } => DatabaseSource::Auto {
rocksdb_path: frontier_database_dir(config, "db"),
paritydb_path: frontier_database_dir(config, "paritydb"),
cache_size: 0,
},
_ => {
return Err("Supported db sources: `rocksdb` | `paritydb` | `auto`".to_string())
}
},
},
)?))
})
}

pub fn new_partial(
Expand All @@ -118,7 +94,7 @@ pub fn new_partial(
(
Option<Telemetry>,
ConsensusResult,
Arc<fc_db::Backend<Block>>,
Arc<FrontierBackend<Block>>,
Option<FilterPool>,
(FeeHistoryCache, FeeHistoryCacheLimit),
),
Expand Down Expand Up @@ -174,7 +150,10 @@ pub fn new_partial(
client.clone(),
);

let frontier_backend = open_frontier_backend(config)?;
let frontier_backend = Arc::new(FrontierBackend::open(
&config.database,
&db_config_dir(config),
)?);
let filter_pool: Option<FilterPool> = Some(Arc::new(Mutex::new(BTreeMap::new())));
let fee_history_cache: FeeHistoryCache = Arc::new(Mutex::new(BTreeMap::new()));
let fee_history_cache_limit: FeeHistoryCacheLimit = cli.run.fee_history_limit;
Expand Down Expand Up @@ -286,7 +265,6 @@ fn remote_keystore(_url: &str) -> Result<Arc<LocalKeystore>, &'static str> {
#[cfg(feature = "aura")]
pub fn new_full(mut config: Configuration, cli: &Cli) -> Result<TaskManager, ServiceError> {
use sc_client_api::{BlockBackend, ExecutorProvider};
use sc_network::warp_request_handler::WarpSyncProvider;
use sp_consensus_aura::sr25519::AuthorityPair as AuraPair;

// Use ethereum style for subscription ids
Expand Down Expand Up @@ -337,12 +315,10 @@ pub fn new_full(mut config: Configuration, cli: &Cli) -> Result<TaskManager, Ser
grandpa_protocol_name.clone(),
));

let warp_sync: Option<Arc<dyn WarpSyncProvider<Block>>> = Some(Arc::new(
sc_finality_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
consensus_result.1.shared_authority_set().clone(),
Vec::default(),
),
let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new(
backend.clone(),
consensus_result.1.shared_authority_set().clone(),
Vec::default(),
));

let (network, system_rpc_tx, network_starter) =
Expand All @@ -353,7 +329,7 @@ pub fn new_full(mut config: Configuration, cli: &Cli) -> Result<TaskManager, Ser
spawn_handle: task_manager.spawn_handle(),
import_queue,
block_announce_validator_builder: None,
warp_sync,
warp_sync: Some(warp_sync),
})?;

if config.offchain_worker.enabled {
Expand Down Expand Up @@ -753,7 +729,7 @@ fn spawn_frontier_tasks(
task_manager: &TaskManager,
client: Arc<FullClient>,
backend: Arc<FullBackend>,
frontier_backend: Arc<fc_db::Backend<Block>>,
frontier_backend: Arc<FrontierBackend<Block>>,
filter_pool: Option<FilterPool>,
overrides: Arc<OverrideHandle<Block>>,
fee_history_cache: FeeHistoryCache,
Expand Down

0 comments on commit 39db580

Please sign in to comment.