Decouple rocksdb dependency from ethcore (#8320)
* Move client DB opening logic to the CLI.
* Move restoration DB opening logic to the CLI. This adds a KeyValueDBHandler trait that handles opening a new database, which lets us move the restoration DB opening logic out of ethcore.
* Move the rocksdb CompactionProfile conversion to the CLI.
* Make kvdb_rocksdb a test-only dependency of ethcore.
* Fix tests affected by the interface change.
* Fix service tests.
* Remove the unused migration dependency from ethcore.
This commit is contained in: parent 9436e88d27 · commit c039ab79b5
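The heart of the change is that ethcore's non-test code no longer names kvdb-rocksdb at all: callers hand it an already opened Arc<KeyValueDB> for the client database and a Box<KeyValueDBHandler> that can open the restoration database on demand. Below is a minimal sketch of the boundary ethcore now depends on, using only the types introduced in the util/kvdb hunk at the end of this diff; the function name is illustrative, not part of the commit.

// Sketch only: ethcore-side code is written against the kvdb traits, so the
// choice of backing store (rocksdb, in-memory, ...) stays with the caller.
extern crate kvdb;

use std::path::Path;
use std::sync::Arc;

use kvdb::{Error, KeyValueDB, KeyValueDBHandler};

fn open_restoration_db(handler: &KeyValueDBHandler, path: &Path) -> Result<Arc<KeyValueDB>, Error> {
	// The handler was constructed by the CLI with its own DatabaseConfig;
	// ethcore only tells it where to open the database.
	handler.open(path)
}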
@@ -535,7 +535,6 @@ dependencies = [
  "macros 0.1.0",
  "memory-cache 0.1.0",
  "memorydb 0.1.1",
- "migration 0.1.0",
  "num 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-machine 0.1.0",
@@ -51,12 +51,10 @@ rlp = { path = "../util/rlp" }
 rlp_compress = { path = "../util/rlp_compress" }
 rlp_derive = { path = "../util/rlp_derive" }
 kvdb = { path = "../util/kvdb" }
-kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
 kvdb-memorydb = { path = "../util/kvdb-memorydb" }
 util-error = { path = "../util/error" }
 snappy = { git = "https://github.com/paritytech/rust-snappy" }
 stop-guard = { path = "../util/stop-guard" }
-migration = { path = "../util/migration" }
 macros = { path = "../util/macros" }
 rust-crypto = "0.2.34"
 rustc-hex = "1.0"
@@ -74,6 +72,7 @@ journaldb = { path = "../util/journaldb" }
 [dev-dependencies]
 tempdir = "0.3"
 trie-standardmap = { path = "../util/trie-standardmap" }
+kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }

 [features]
 evm-debug = ["slow-blocks"]
@@ -8,9 +8,9 @@ ansi_term = "0.10"
 ethcore = { path = ".." }
 ethcore-io = { path = "../../util/io" }
 kvdb = { path = "../../util/kvdb" }
-kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
 log = "0.3"
 stop-guard = { path = "../../util/stop-guard" }

 [dev-dependencies]
 tempdir = "0.3"
+kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }
@@ -18,7 +18,6 @@ extern crate ansi_term;
 extern crate ethcore;
 extern crate ethcore_io as io;
 extern crate kvdb;
-extern crate kvdb_rocksdb;
 extern crate stop_guard;

 #[macro_use]
@@ -27,6 +26,9 @@ extern crate log;
 #[cfg(test)]
 extern crate tempdir;

+#[cfg(test)]
+extern crate kvdb_rocksdb;
+
 mod service;

 pub use service::ClientService;
@@ -21,12 +21,10 @@ use std::path::Path;

 use ansi_term::Colour;
 use io::{IoContext, TimerToken, IoHandler, IoService, IoError};
-use kvdb::KeyValueDB;
-use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
 use stop_guard::StopGuard;

-use ethcore::client::{self, Client, ClientConfig, ChainNotify, ClientIoMessage};
-use ethcore::db;
+use ethcore::client::{Client, ClientConfig, ChainNotify, ClientIoMessage};
 use ethcore::error::Error;
 use ethcore::miner::Miner;
 use ethcore::snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
@@ -38,7 +36,7 @@ pub struct ClientService {
 	io_service: Arc<IoService<ClientIoMessage>>,
 	client: Arc<Client>,
 	snapshot: Arc<SnapshotService>,
-	database: Arc<Database>,
+	database: Arc<KeyValueDB>,
 	_stop_guard: StopGuard,
 }

@@ -47,8 +45,9 @@ impl ClientService {
 	pub fn start(
 		config: ClientConfig,
 		spec: &Spec,
-		client_path: &Path,
+		client_db: Arc<KeyValueDB>,
 		snapshot_path: &Path,
+		restoration_db_handler: Box<KeyValueDBHandler>,
 		_ipc_path: &Path,
 		miner: Arc<Miner>,
 	) -> Result<ClientService, Error>
@@ -57,25 +56,13 @@ impl ClientService {

 		info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));

-		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
-
-		db_config.memory_budget = config.db_cache_size;
-		db_config.compaction = config.db_compaction.compaction_profile(client_path);
-		db_config.wal = config.db_wal;
-
-		let db = Arc::new(Database::open(
-			&db_config,
-			&client_path.to_str().expect("DB path could not be converted to string.")
-		).map_err(client::Error::Database)?);
-
-
 		let pruning = config.pruning;
-		let client = Client::new(config, &spec, db.clone(), miner, io_service.channel())?;
+		let client = Client::new(config, &spec, client_db.clone(), miner, io_service.channel())?;

 		let snapshot_params = SnapServiceParams {
 			engine: spec.engine.clone(),
 			genesis_block: spec.genesis_block(),
-			db_config: db_config.clone(),
+			restoration_db_handler: restoration_db_handler,
 			pruning: pruning,
 			channel: io_service.channel(),
 			snapshot_root: snapshot_path.into(),
@@ -97,7 +84,7 @@ impl ClientService {
 			io_service: Arc::new(io_service),
 			client: client,
 			snapshot: snapshot,
-			database: db,
+			database: client_db,
 			_stop_guard: stop_guard,
 		})
 	}
@@ -208,6 +195,9 @@ mod tests {
 	use ethcore::client::ClientConfig;
 	use ethcore::miner::Miner;
 	use ethcore::spec::Spec;
+	use ethcore::db::NUM_COLUMNS;
+	use kvdb::Error;
+	use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
 	use super::*;

 	#[test]
@@ -216,12 +206,39 @@ mod tests {
 		let client_path = tempdir.path().join("client");
 		let snapshot_path = tempdir.path().join("snapshot");

+		let client_config = ClientConfig::default();
+		let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
+
+		client_db_config.memory_budget = client_config.db_cache_size;
+		client_db_config.compaction = CompactionProfile::auto(&client_path);
+		client_db_config.wal = client_config.db_wal;
+
+		let client_db = Arc::new(Database::open(
+			&client_db_config,
+			&client_path.to_str().expect("DB path could not be converted to string.")
+		).unwrap());
+
+		struct RestorationDBHandler {
+			config: DatabaseConfig,
+		}
+
+		impl KeyValueDBHandler for RestorationDBHandler {
+			fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+				Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+			}
+		}
+
+		let restoration_db_handler = Box::new(RestorationDBHandler {
+			config: client_db_config,
+		});
+
 		let spec = Spec::new_test();
 		let service = ClientService::start(
 			ClientConfig::default(),
 			&spec,
-			&client_path,
+			client_db,
 			&snapshot_path,
+			restoration_db_handler,
 			tempdir.path(),
 			Arc::new(Miner::with_spec(&spec)),
 		);
@@ -15,13 +15,11 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use std::str::FromStr;
-use std::path::Path;
 use std::fmt::{Display, Formatter, Error as FmtError};

 use mode::Mode as IpcMode;
 use verification::{VerifierType, QueueConfig};
 use journaldb;
-use kvdb_rocksdb::CompactionProfile;

 pub use std::time::Duration;
 pub use blockchain::Config as BlockChainConfig;
@@ -45,17 +43,6 @@ impl Default for DatabaseCompactionProfile {
 	}
 }

-impl DatabaseCompactionProfile {
-	/// Returns corresponding compaction profile.
-	pub fn compaction_profile(&self, db_path: &Path) -> CompactionProfile {
-		match *self {
-			DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
-			DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
-			DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
-		}
-	}
-}
-
 impl FromStr for DatabaseCompactionProfile {
 	type Err = String;

@@ -93,11 +93,9 @@ extern crate triehash;
 extern crate ansi_term;
 extern crate unexpected;
 extern crate kvdb;
-extern crate kvdb_rocksdb;
 extern crate kvdb_memorydb;
 extern crate util_error;
 extern crate snappy;
-extern crate migration;

 extern crate ethabi;
 #[macro_use]
@@ -130,6 +128,9 @@ extern crate trace_time;
 #[cfg_attr(test, macro_use)]
 extern crate evm;

+#[cfg(test)]
+extern crate kvdb_rocksdb;
+
 pub extern crate ethstore;

 pub mod account_provider;
@@ -39,7 +39,7 @@ use parking_lot::{Mutex, RwLock, RwLockReadGuard};
 use util_error::UtilError;
 use bytes::Bytes;
 use journaldb::Algorithm;
-use kvdb_rocksdb::{Database, DatabaseConfig};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
 use snappy;

 /// Helper for removing directories in case of error.
@@ -79,14 +79,13 @@ struct Restoration {
 	snappy_buffer: Bytes,
 	final_state_root: H256,
 	guard: Guard,
-	db: Arc<Database>,
+	db: Arc<KeyValueDB>,
 }

 struct RestorationParams<'a> {
 	manifest: ManifestData, // manifest to base restoration on.
 	pruning: Algorithm, // pruning algorithm for the database.
-	db_path: PathBuf, // database path
-	db_config: &'a DatabaseConfig, // configuration for the database.
+	db: Arc<KeyValueDB>, // database
 	writer: Option<LooseWriter>, // writer for recovered snapshot.
 	genesis: &'a [u8], // genesis block of the chain.
 	guard: Guard, // guard for the restoration directory.
@@ -101,8 +100,7 @@ impl Restoration {
 		let state_chunks = manifest.state_hashes.iter().cloned().collect();
 		let block_chunks = manifest.block_hashes.iter().cloned().collect();

-		let raw_db = Arc::new(Database::open(params.db_config, &*params.db_path.to_string_lossy())
-			.map_err(UtilError::from)?);
+		let raw_db = params.db;

 		let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone());
 		let components = params.engine.snapshot_components()
@@ -211,10 +209,10 @@ pub struct ServiceParams {
 	pub engine: Arc<EthEngine>,
 	/// The chain's genesis block.
 	pub genesis_block: Bytes,
-	/// Database configuration options.
-	pub db_config: DatabaseConfig,
 	/// State pruning algorithm.
 	pub pruning: Algorithm,
+	/// Handler for opening a restoration DB.
+	pub restoration_db_handler: Box<KeyValueDBHandler>,
 	/// Async IO channel for sending messages.
 	pub channel: Channel,
 	/// The directory to put snapshots in.
@@ -228,8 +226,8 @@ pub struct ServiceParams {
 /// This controls taking snapshots and restoring from them.
 pub struct Service {
 	restoration: Mutex<Option<Restoration>>,
+	restoration_db_handler: Box<KeyValueDBHandler>,
 	snapshot_root: PathBuf,
-	db_config: DatabaseConfig,
 	io_channel: Mutex<Channel>,
 	pruning: Algorithm,
 	status: Mutex<RestorationStatus>,
@@ -249,8 +247,8 @@ impl Service {
 	pub fn new(params: ServiceParams) -> Result<Self, Error> {
 		let mut service = Service {
 			restoration: Mutex::new(None),
+			restoration_db_handler: params.restoration_db_handler,
 			snapshot_root: params.snapshot_root,
-			db_config: params.db_config,
 			io_channel: Mutex::new(params.channel),
 			pruning: params.pruning,
 			status: Mutex::new(RestorationStatus::Inactive),
@@ -437,8 +435,7 @@ impl Service {
 		let params = RestorationParams {
 			manifest: manifest,
 			pruning: self.pruning,
-			db_path: self.restoration_db(),
-			db_config: &self.db_config,
+			db: self.restoration_db_handler.open(&self.restoration_db())?,
 			writer: writer,
 			genesis: &self.genesis_block,
 			guard: Guard::new(rest_dir),
@@ -638,6 +635,7 @@ mod tests {
 	use snapshot::{ManifestData, RestorationStatus, SnapshotService};
 	use super::*;
 	use tempdir::TempDir;
+	use tests::helpers::restoration_db_handler;

 	struct NoopDBRestore;
 	impl DatabaseRestore for NoopDBRestore {
@@ -657,7 +655,7 @@ mod tests {
 		let snapshot_params = ServiceParams {
 			engine: spec.engine.clone(),
 			genesis_block: spec.genesis_block(),
-			db_config: Default::default(),
+			restoration_db_handler: restoration_db_handler(Default::default()),
 			pruning: Algorithm::Archive,
 			channel: service.channel(),
 			snapshot_root: dir,
@@ -709,8 +707,7 @@ mod tests {
 				block_hash: H256::default(),
 			},
 			pruning: Algorithm::Archive,
-			db_path: tempdir.path().to_owned(),
-			db_config: &db_config,
+			db: restoration_db_handler(db_config).open(&tempdir.path().to_owned()).unwrap(),
 			writer: None,
 			genesis: &gb,
 			guard: Guard::benign(),
@@ -24,7 +24,7 @@ use ids::BlockId;
 use snapshot::service::{Service, ServiceParams};
 use snapshot::{self, ManifestData, SnapshotService};
 use spec::Spec;
-use tests::helpers::generate_dummy_client_with_spec_and_data;
+use tests::helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler};

 use io::IoChannel;
 use kvdb_rocksdb::{Database, DatabaseConfig};
@@ -65,7 +65,7 @@ fn restored_is_equivalent() {
 	let service_params = ServiceParams {
 		engine: spec.engine.clone(),
 		genesis_block: spec.genesis_block(),
-		db_config: db_config,
+		restoration_db_handler: restoration_db_handler(db_config),
 		pruning: ::journaldb::Algorithm::Archive,
 		channel: IoChannel::disconnected(),
 		snapshot_root: path,
@@ -107,7 +107,7 @@ fn guards_delete_folders() {
 	let service_params = ServiceParams {
 		engine: spec.engine.clone(),
 		genesis_block: spec.genesis_block(),
-		db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
+		restoration_db_handler: restoration_db_handler(DatabaseConfig::with_columns(::db::NUM_COLUMNS)),
 		pruning: ::journaldb::Algorithm::Archive,
 		channel: IoChannel::disconnected(),
 		snapshot_root: tempdir.path().to_owned(),
@@ -33,8 +33,11 @@ use spec::Spec;
 use state_db::StateDB;
 use state::*;
 use std::sync::Arc;
+use std::path::Path;
 use transaction::{Action, Transaction, SignedTransaction};
 use views::BlockView;
+use kvdb::{KeyValueDB, KeyValueDBHandler};
+use kvdb_rocksdb::{Database, DatabaseConfig};

 pub fn create_test_block(header: &Header) -> Bytes {
 	let mut rlp = RlpStream::new_list(3);
@@ -349,3 +352,19 @@ impl ChainNotify for TestNotify {
 		self.messages.write().push(data);
 	}
 }
+
+pub fn restoration_db_handler(config: DatabaseConfig) -> Box<KeyValueDBHandler> {
+	use kvdb::Error;
+
+	struct RestorationDBHandler {
+		config: DatabaseConfig,
+	}
+
+	impl KeyValueDBHandler for RestorationDBHandler {
+		fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+			Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+		}
+	}
+
+	Box::new(RestorationDBHandler { config })
+}
@@ -35,7 +35,7 @@ use cache::CacheConfig;
 use informant::{Informant, FullNodeInformantData, MillisecondDuration};
 use kvdb_rocksdb::{Database, DatabaseConfig};
 use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
-use helpers::{to_client_config, execute_upgrades};
+use helpers::{to_client_config, execute_upgrades, open_client_db, client_db_config, restoration_db_handler, compaction_profile};
 use dir::Directories;
 use user_defaults::UserDefaults;
 use fdlimit;
@@ -186,7 +186,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
 	let client_path = db_dirs.client_path(algorithm);

 	// execute upgrades
-	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
+	let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
 	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction)?;

 	// create dirs used by parity
@@ -352,7 +352,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;

 	// create dirs used by parity
 	cmd.dirs.create_dirs(false, false, false)?;
@@ -376,12 +376,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {

 	client_config.queue.verifier_settings = cmd.verifier_settings;

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
 	// build client
 	let service = ClientService::start(
 		client_config,
 		&spec,
-		&client_path,
+		client_db,
 		&snapshot_path,
+		restoration_db_handler,
 		&cmd.dirs.ipc_path(),
 		Arc::new(Miner::with_spec(&spec)),
 	).map_err(|e| format!("Client service error: {:?}", e))?;
@@ -537,7 +542,7 @@ fn start_client(
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction_profile(&compaction, db_dirs.db_root_path().as_path()))?;

 	// create dirs used by parity
 	dirs.create_dirs(false, false, false)?;
@@ -559,11 +564,16 @@ fn start_client(
 		true,
 	);

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
 	let service = ClientService::start(
 		client_config,
 		&spec,
-		&client_path,
+		client_db,
 		&snapshot_path,
+		restoration_db_handler,
 		&dirs.ipc_path(),
 		Arc::new(Miner::with_spec(&spec)),
 	).map_err(|e| format!("Client service error: {:?}", e))?;
@@ -25,7 +25,7 @@ use light::client::fetch::Unavailable as UnavailableDataFetcher;
 use light::Cache as LightDataCache;

 use params::{SpecType, Pruning};
-use helpers::execute_upgrades;
+use helpers::{execute_upgrades, compaction_profile};
 use dir::Directories;
 use cache::CacheConfig;
 use user_defaults::UserDefaults;
@@ -66,7 +66,7 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

-	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
+	let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());

 	// execute upgrades
 	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
@@ -18,11 +18,13 @@ use std::io;
 use std::io::{Write, BufReader, BufRead};
 use std::time::Duration;
 use std::fs::File;
+use std::sync::Arc;
+use std::path::Path;
 use ethereum_types::{U256, clean_0x, Address};
-use kvdb_rocksdb::CompactionProfile;
 use journaldb::Algorithm;
 use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
 use ethcore::miner::{PendingSet, GasLimit};
+use ethcore::db::NUM_COLUMNS;
 use miner::transaction_queue::PrioritizationStrategy;
 use cache::CacheConfig;
 use dir::DatabaseDirectories;
@@ -30,6 +32,8 @@ use dir::helpers::replace_home;
 use upgrade::{upgrade, upgrade_data_paths};
 use migration::migrate;
 use ethsync::{validate_node_url, self};
+use kvdb::{KeyValueDB, KeyValueDBHandler};
+use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
 use path;

 pub fn to_duration(s: &str) -> Result<Duration, String> {
@@ -255,6 +259,52 @@ pub fn to_client_config(
 	client_config
 }

+// We assume client db has similar config as restoration db.
+pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig {
+	let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
+
+	client_db_config.memory_budget = client_config.db_cache_size;
+	client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path);
+	client_db_config.wal = client_config.db_wal;
+
+	client_db_config
+}
+
+pub fn open_client_db(client_path: &Path, client_db_config: &DatabaseConfig) -> Result<Arc<KeyValueDB>, String> {
+	let client_db = Arc::new(Database::open(
+		&client_db_config,
+		&client_path.to_str().expect("DB path could not be converted to string.")
+	).map_err(|e| format!("Client service database error: {:?}", e))?);
+
+	Ok(client_db)
+}
+
+pub fn restoration_db_handler(client_db_config: DatabaseConfig) -> Box<KeyValueDBHandler> {
+	use kvdb::Error;
+
+	struct RestorationDBHandler {
+		config: DatabaseConfig,
+	}
+
+	impl KeyValueDBHandler for RestorationDBHandler {
+		fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
+			Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
+		}
+	}
+
+	Box::new(RestorationDBHandler {
+		config: client_db_config,
+	})
+}
+
+pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile {
+	match profile {
+		&DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
+		&DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
+		&DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
+	}
+}
+
 pub fn execute_upgrades(
 	base_path: &str,
 	dirs: &DatabaseDirectories,
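For orientation, the helpers added above are used together in the same way at every call site that follows (the blockchain, run, and snapshot commands). Below is a condensed sketch of that start-up sequence; it assumes it lives inside the parity CLI crate so the helpers module and ethcore_service are in scope, and the wrapper function name is illustrative rather than part of the commit.

use std::path::Path;
use std::sync::Arc;

use ethcore::client::ClientConfig;
use ethcore::miner::Miner;
use ethcore::spec::Spec;
use ethcore_service::ClientService;
use helpers::{client_db_config, open_client_db, restoration_db_handler};

fn start_service(
	client_config: ClientConfig,
	spec: &Spec,
	client_path: &Path,
	snapshot_path: &Path,
	ipc_path: &Path,
	miner: Arc<Miner>,
) -> Result<ClientService, String> {
	// Build the rocksdb config once; the restoration DB reuses it.
	let client_db_config = client_db_config(client_path, &client_config);
	// The CLI owns kvdb-rocksdb: it opens the client DB itself...
	let client_db = open_client_db(client_path, &client_db_config)?;
	// ...and passes ethcore a factory for opening the restoration DB later.
	let restoration_db_handler = restoration_db_handler(client_db_config);

	ClientService::start(
		client_config,
		spec,
		client_db,
		snapshot_path,
		restoration_db_handler,
		ipc_path,
		miner,
	).map_err(|e| format!("Client service error: {:?}", e))
}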
@@ -55,7 +55,7 @@ use params::{
 	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
 	tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
 };
-use helpers::{to_client_config, execute_upgrades, passwords_from_files};
+use helpers::{to_client_config, execute_upgrades, passwords_from_files, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
 use upgrade::upgrade_key_location;
 use dir::{Directories, DatabaseDirectories};
 use cache::CacheConfig;
@@ -206,7 +206,7 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);

-	let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path());
+	let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());

 	// execute upgrades
 	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
@@ -471,7 +471,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+	execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;

 	// create dirs used by parity
 	cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?;
@@ -609,12 +609,17 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
 	// set network path.
 	net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());

+	let client_db_config = client_db_config(&client_path, &client_config);
+	let client_db = open_client_db(&client_path, &client_db_config)?;
+	let restoration_db_handler = restoration_db_handler(client_db_config);
+
 	// create client service.
 	let service = ClientService::start(
 		client_config,
 		&spec,
-		&client_path,
+		client_db,
 		&snapshot_path,
+		restoration_db_handler,
 		&cmd.dirs.ipc_path(),
 		miner.clone(),
 	).map_err(|e| format!("Client service error: {:?}", e))?;
@@ -31,7 +31,7 @@ use ethcore_service::ClientService;

 use cache::CacheConfig;
 use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
-use helpers::{to_client_config, execute_upgrades};
+use helpers::{to_client_config, execute_upgrades, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
 use dir::Directories;
 use user_defaults::UserDefaults;
 use fdlimit;
@@ -162,7 +162,7 @@ impl SnapshotCommand {
 		let snapshot_path = db_dirs.snapshot_path();

 		// execute upgrades
-		execute_upgrades(&self.dirs.base, &db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path()))?;
+		execute_upgrades(&self.dirs.base, &db_dirs, algorithm, compaction_profile(&self.compaction, db_dirs.db_root_path().as_path()))?;

 		// prepare client config
 		let client_config = to_client_config(
@@ -181,11 +181,16 @@ impl SnapshotCommand {
 			true
 		);

+		let client_db_config = client_db_config(&client_path, &client_config);
+		let client_db = open_client_db(&client_path, &client_db_config)?;
+		let restoration_db_handler = restoration_db_handler(client_db_config);
+
 		let service = ClientService::start(
 			client_config,
 			&spec,
-			&client_path,
+			client_db,
 			&snapshot_path,
+			restoration_db_handler,
 			&self.dirs.ipc_path(),
 			Arc::new(Miner::with_spec(&spec))
 		).map_err(|e| format!("Client service error: {:?}", e))?;
@@ -22,6 +22,8 @@ extern crate elastic_array;
 extern crate ethcore_bytes as bytes;

 use std::io;
+use std::path::Path;
+use std::sync::Arc;
 use elastic_array::{ElasticArray128, ElasticArray32};
 use bytes::Bytes;

@@ -176,3 +178,10 @@ pub trait KeyValueDB: Sync + Send {
 	/// Attempt to replace this database with a new one located at the given path.
 	fn restore(&self, new_db: &str) -> Result<()>;
 }
+
+/// Generic key-value database handler. This trait contains one function `open`. When called, it opens database with a
+/// predefined config.
+pub trait KeyValueDBHandler: Send + Sync {
+	/// Open the predefined key-value database.
+	fn open(&self, path: &Path) -> Result<Arc<KeyValueDB>>;
+}
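On the provider side of that trait, the CLI and the test helpers in this commit implement it over kvdb-rocksdb. The following is a consolidated sketch of that pattern, mirroring the RestorationDBHandler implementations in the hunks above; the names here are illustrative, and an in-memory KeyValueDB could back a handler the same way for tests.

extern crate kvdb;
extern crate kvdb_rocksdb;

use std::path::Path;
use std::sync::Arc;

use kvdb::{Error, KeyValueDB, KeyValueDBHandler};
use kvdb_rocksdb::{Database, DatabaseConfig};

// A handler that opens rocksdb-backed databases with a fixed configuration.
struct RocksdbHandler {
	config: DatabaseConfig,
}

impl KeyValueDBHandler for RocksdbHandler {
	fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
		// The config is captured at construction time; callers only choose the path.
		Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
	}
}

pub fn rocksdb_handler(config: DatabaseConfig) -> Box<KeyValueDBHandler> {
	Box::new(RocksdbHandler { config })
}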