Unify and limit rocksdb dependency places (#8371)

* secret_store: remove kvdb_rocksdb dependency

* cli: init db mod for open dispatch

* cli: move db, client_db, restoration_db, secretstore_db to a separate mod

* migration: rename to migration-rocksdb and remove ethcore-migrations

* ethcore: re-move kvdb-rocksdb dep to test

* mark test_helpers as test only and fix migration mod naming

* Move restoration_db_handler to test_helpers_internal

* Fix missing preambles in test_helpers_internal and rocksdb/helpers

* Move test crates downward

* Fix missing docs

* cli, db::open_db: move each argument to a separate line

* Use featuregate instead of dead code for `open_secretstore_db`

* Move pathbuf import to open_secretstore_db

Because it's only used there behind a feature gate
Wei Tang 2018-04-14 03:14:53 +08:00 committed by Afri Schoedon
parent 3f677c6168
commit 897a94641e
27 changed files with 295 additions and 261 deletions
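The last two commit-message bullets describe feature-gating `open_secretstore_db` instead of leaving it behind `#[allow(dead_code)]`. A minimal standalone sketch of the pattern (illustrative only; `secretstore_db_path` is a hypothetical name, and the real function in parity/db/rocksdb/mod.rs below opens an actual database): the item simply does not exist when the `secretstore` feature is off, and `PathBuf` is imported inside the function because nothing else uses it.

// Sketch of the gating pattern (hypothetical standalone example).
#[cfg(feature = "secretstore")]
pub fn secretstore_db_path(data_path: &str) -> Result<String, String> {
	// The import lives here because it is only used behind the gate.
	use std::path::PathBuf;

	let mut db_path = PathBuf::from(data_path);
	db_path.push("db");
	db_path.to_str()
		.map(|s| s.to_owned())
		.ok_or_else(|| "Invalid secretstore path".to_string())
}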

Cargo.lock (generated)

@@ -641,13 +641,6 @@ dependencies = [
"time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "ethcore-migrations"
version = "0.1.0"
dependencies = [
"migration 0.1.0",
]
[[package]]
name = "ethcore-miner"
version = "1.11.0"
@@ -1630,7 +1623,7 @@ dependencies = [
]
[[package]]
name = "migration"
name = "migration-rocksdb"
version = "0.1.0"
dependencies = [
"error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1933,7 +1926,6 @@ dependencies = [
"ethcore-io 1.11.0",
"ethcore-light 1.11.0",
"ethcore-logger 1.11.0",
"ethcore-migrations 0.1.0",
"ethcore-miner 1.11.0",
"ethcore-network 1.11.0",
"ethcore-private-tx 1.0.0",
@@ -1956,7 +1948,7 @@ dependencies = [
"kvdb-rocksdb 0.1.0",
"log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
"mem 0.1.0",
"migration 0.1.0",
"migration-rocksdb 0.1.0",
"node-filter 1.11.0",
"node-health 0.1.0",
"num_cpus 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",


@@ -37,7 +37,6 @@ ethcore-bytes = { path = "util/bytes" }
ethcore-io = { path = "util/io" }
ethcore-light = { path = "ethcore/light" }
ethcore-logger = { path = "logger" }
ethcore-migrations = { path = "ethcore/migrations" }
ethcore-miner = { path = "miner" }
ethcore-network = { path = "util/network" }
ethcore-private-tx = { path = "ethcore/private-tx" }
@@ -64,7 +63,7 @@ path = { path = "util/path" }
dir = { path = "util/dir" }
panic_hook = { path = "util/panic_hook" }
keccak-hash = { path = "util/hash" }
migration = { path = "util/migration" }
migration-rocksdb = { path = "util/migration-rocksdb" }
kvdb = { path = "util/kvdb" }
kvdb-rocksdb = { path = "util/kvdb-rocksdb" }
journaldb = { path = "util/journaldb" }


@@ -51,7 +51,6 @@ rlp_compress = { path = "../util/rlp_compress" }
rlp_derive = { path = "../util/rlp_derive" }
kvdb = { path = "../util/kvdb" }
kvdb-memorydb = { path = "../util/kvdb-memorydb" }
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
util-error = { path = "../util/error" }
snappy = { git = "https://github.com/paritytech/rust-snappy" }
stop-guard = { path = "../util/stop-guard" }
@@ -71,6 +70,7 @@ journaldb = { path = "../util/journaldb" }
[dev-dependencies]
tempdir = "0.3"
trie-standardmap = { path = "../util/trie-standardmap" }
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
[features]
# Display EVM debug traces.


@@ -1,7 +0,0 @@
[package]
name = "ethcore-migrations"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
migration = { path = "../../util/migration" }


@@ -92,7 +92,6 @@ extern crate ansi_term;
extern crate unexpected;
extern crate kvdb;
extern crate kvdb_memorydb;
extern crate kvdb_rocksdb;
extern crate util_error;
extern crate snappy;
@@ -128,6 +127,9 @@ extern crate evm;
pub extern crate ethstore;
#[cfg(test)]
extern crate kvdb_rocksdb;
pub mod account_provider;
pub mod block;
pub mod client;
@@ -167,6 +169,8 @@ mod tests;
#[cfg(test)]
#[cfg(feature="json-tests")]
mod json_tests;
#[cfg(test)]
mod test_helpers_internal;
pub use types::*;
pub use executive::contract_address;


@@ -635,7 +635,7 @@ mod tests {
use snapshot::{ManifestData, RestorationStatus, SnapshotService};
use super::*;
use tempdir::TempDir;
use test_helpers::restoration_db_handler;
use test_helpers_internal::restoration_db_handler;
struct NoopDBRestore;
impl DatabaseRestore for NoopDBRestore {


@@ -24,7 +24,8 @@ use ids::BlockId;
use snapshot::service::{Service, ServiceParams};
use snapshot::{self, ManifestData, SnapshotService};
use spec::Spec;
use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler};
use test_helpers::generate_dummy_client_with_spec_and_data;
use test_helpers_internal::restoration_db_handler;
use io::IoChannel;
use kvdb_rocksdb::{Database, DatabaseConfig};


@@ -35,11 +35,8 @@ use spec::Spec;
use state_db::StateDB;
use state::*;
use std::sync::Arc;
use std::path::Path;
use transaction::{Action, Transaction, SignedTransaction};
use views::BlockView;
use kvdb::{KeyValueDB, KeyValueDBHandler};
use kvdb_rocksdb::{Database, DatabaseConfig};
/// Creates test block with corresponding header
pub fn create_test_block(header: &Header) -> Bytes {
@@ -402,20 +399,3 @@ impl ChainNotify for TestNotify {
self.messages.write().push(data);
}
}
/// Creates new instance of KeyValueDBHandler
pub fn restoration_db_handler(config: DatabaseConfig) -> Box<KeyValueDBHandler> {
use kvdb::Error;
struct RestorationDBHandler {
config: DatabaseConfig,
}
impl KeyValueDBHandler for RestorationDBHandler {
fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
}
}
Box::new(RestorationDBHandler { config })
}


@@ -0,0 +1,39 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Internal helpers for client tests
use std::path::Path;
use std::sync::Arc;
use kvdb::{KeyValueDB, KeyValueDBHandler};
use kvdb_rocksdb::{Database, DatabaseConfig};
/// Creates new instance of KeyValueDBHandler
pub fn restoration_db_handler(config: DatabaseConfig) -> Box<KeyValueDBHandler> {
use kvdb::Error;
struct RestorationDBHandler {
config: DatabaseConfig,
}
impl KeyValueDBHandler for RestorationDBHandler {
fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
}
}
Box::new(RestorationDBHandler { config })
}
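A sketch of how a test might consume the handler, reusing the imports above (assumed usage; the snapshot service tests updated later in this diff import it the same way):

// Assumed test-side usage of restoration_db_handler (sketch only).
fn open_restoration_db(path: &Path) -> Arc<KeyValueDB> {
	// One handler can open any number of databases; the snapshot service
	// picks the concrete path during restoration.
	let handler = restoration_db_handler(DatabaseConfig::with_columns(Some(1)));
	handler.open(path).expect("restoration DB should open")
}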


@@ -27,20 +27,19 @@ use bytes::ToPretty;
use rlp::PayloadInfo;
use ethcore::account_provider::AccountProvider;
use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, Nonce, Balance, BlockChainClient, BlockId, BlockInfo, ImportBlock};
use ethcore::db::NUM_COLUMNS;
use ethcore::error::ImportError;
use ethcore::miner::Miner;
use ethcore::verification::queue::VerifierSettings;
use ethcore_service::ClientService;
use cache::CacheConfig;
use informant::{Informant, FullNodeInformantData, MillisecondDuration};
use kvdb_rocksdb::{Database, DatabaseConfig};
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
use helpers::{to_client_config, execute_upgrades, open_client_db, client_db_config, restoration_db_handler, compaction_profile};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit;
use ethcore_private_tx;
use db;
#[derive(Debug, PartialEq)]
pub enum DataFormat {
@@ -188,8 +187,7 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
let client_path = db_dirs.client_path(algorithm);
// execute upgrades
let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction)?;
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
// create dirs used by parity
cmd.dirs.create_dirs(false, false, false)?;
@@ -210,19 +208,10 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
config.queue.verifier_settings = cmd.verifier_settings;
// initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&client_path.to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Failed to open database: {}", e))?)
};
let db = db::open_db(&client_path.to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
cmd.wal)?;
// TODO: could epoch signals be available at the end of the file?
let fetch = ::light::client::fetch::unavailable();
@@ -354,7 +343,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
// create dirs used by parity
cmd.dirs.create_dirs(false, false, false)?;
@@ -378,9 +367,8 @@ fn execute_import(cmd: ImportBlockchain) -> Result<(), String> {
client_config.queue.verifier_settings = cmd.verifier_settings;
let client_db_config = client_db_config(&client_path, &client_config);
let client_db = open_client_db(&client_path, &client_db_config)?;
let restoration_db_handler = restoration_db_handler(client_db_config);
let client_db = db::open_client_db(&client_path, &client_config)?;
let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// build client
let service = ClientService::start(
@@ -549,7 +537,7 @@ fn start_client(
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
execute_upgrades(&dirs.base, &db_dirs, algorithm, compaction_profile(&compaction, db_dirs.db_root_path().as_path()))?;
execute_upgrades(&dirs.base, &db_dirs, algorithm, &compaction)?;
// create dirs used by parity
dirs.create_dirs(false, false, false)?;
@@ -571,9 +559,8 @@ fn start_client(
true,
);
let client_db_config = client_db_config(&client_path, &client_config);
let client_db = open_client_db(&client_path, &client_db_config)?;
let restoration_db_handler = restoration_db_handler(client_db_config);
let client_db = db::open_client_db(&client_path, &client_config)?;
let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
let service = ClientService::start(
client_config,


@@ -1,4 +1,4 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
@@ -14,24 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Database migrations.
//! Database-related operations.
extern crate migration;
#[path="rocksdb/mod.rs"]
mod impls;
use migration::ChangeColumns;
pub use self::impls::{open_db, open_client_db, restoration_db_handler, migrate};
/// The migration from v10 to v11.
/// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
pre_columns: Some(6),
post_columns: Some(7),
version: 11,
};
/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
pre_columns: Some(7),
post_columns: Some(8),
version: 12,
};
#[cfg(feature = "secretstore")]
pub use self::impls::open_secretstore_db;
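The `#[path = "rocksdb/mod.rs"]` indirection is the seam that confines rocksdb-specific code to a single submodule. A hypothetical sketch, not part of this commit, of how another backend could be slotted in behind the same facade:

// Hypothetical backend switch behind the same facade (sketch only;
// no memory/mod.rs backend exists in this commit).
#[cfg(feature = "rocksdb")]
#[path = "rocksdb/mod.rs"]
mod impls;

#[cfg(not(feature = "rocksdb"))]
#[path = "memory/mod.rs"]
mod impls;

// Callers keep using db::open_db and friends either way.
pub use self::impls::{open_db, open_client_db, restoration_db_handler, migrate};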


@@ -0,0 +1,38 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::Path;
use ethcore::db::NUM_COLUMNS;
use ethcore::client::{ClientConfig, DatabaseCompactionProfile};
use super::kvdb_rocksdb::{CompactionProfile, DatabaseConfig};
pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile {
match profile {
&DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
&DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
&DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
}
}
pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig {
let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
client_db_config.memory_budget = client_config.db_cache_size;
client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path);
client_db_config.wal = client_config.db_wal;
client_db_config
}
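In effect `client_db_config` forwards three knobs from `ClientConfig`, as this assumed test-style sketch spells out (it reuses the imports above and is not part of the commit):

// Sketch: the mapping client_db_config performs.
fn mapping_holds(client_path: &Path, client_config: &ClientConfig) {
	let cfg = client_db_config(client_path, client_config);
	assert_eq!(cfg.memory_budget, client_config.db_cache_size);
	assert_eq!(cfg.wal, client_config.db_wal);
	// cfg.compaction is derived from client_config.db_compaction plus the path.
}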


@@ -18,9 +18,28 @@ use std::fs;
use std::io::{Read, Write, Error as IoError, ErrorKind};
use std::path::{Path, PathBuf};
use std::fmt::{Display, Formatter, Error as FmtError};
use migr::{self, Manager as MigrationManager, Config as MigrationConfig};
use kvdb_rocksdb::CompactionProfile;
use migrations;
use super::migration_rocksdb::{self, Manager as MigrationManager, Config as MigrationConfig, ChangeColumns};
use super::kvdb_rocksdb::CompactionProfile;
use ethcore::client::DatabaseCompactionProfile;
use super::helpers;
/// The migration from v10 to v11.
/// Adds a column for node info.
pub const TO_V11: ChangeColumns = ChangeColumns {
pre_columns: Some(6),
post_columns: Some(7),
version: 11,
};
/// The migration from v11 to v12.
/// Adds a column for light chain storage.
pub const TO_V12: ChangeColumns = ChangeColumns {
pre_columns: Some(7),
post_columns: Some(8),
version: 12,
};
/// Database is assumed to be at default version, when no version file is found.
const DEFAULT_VERSION: u32 = 5;
@@ -43,7 +62,7 @@ pub enum Error {
/// Migration is not possible.
MigrationImpossible,
/// Internal migration error.
Internal(migr::Error),
Internal(migration_rocksdb::Error),
/// Migration was completed successfully,
/// but there was a problem with io.
Io(IoError),
@@ -69,10 +88,10 @@ impl From<IoError> for Error {
}
}
impl From<migr::Error> for Error {
fn from(err: migr::Error) -> Self {
impl From<migration_rocksdb::Error> for Error {
fn from(err: migration_rocksdb::Error) -> Self {
match err.into() {
migr::ErrorKind::Io(e) => Error::Io(e),
migration_rocksdb::ErrorKind::Io(e) => Error::Io(e),
err => Error::Internal(err.into()),
}
}
@@ -134,8 +153,8 @@ pub fn default_migration_settings(compaction_profile: &CompactionProfile) -> Mig
/// Migrations on the consolidated database.
fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> Result<MigrationManager, Error> {
let mut manager = MigrationManager::new(default_migration_settings(compaction_profile));
manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(TO_V11).map_err(|_| Error::MigrationImpossible)?;
manager.add_migration(TO_V12).map_err(|_| Error::MigrationImpossible)?;
Ok(manager)
}
@@ -176,7 +195,9 @@ fn exists(path: &Path) -> bool {
}
/// Migrates the database.
pub fn migrate(path: &Path, compaction_profile: CompactionProfile) -> Result<(), Error> {
pub fn migrate(path: &Path, compaction_profile: &DatabaseCompactionProfile) -> Result<(), Error> {
let compaction_profile = helpers::compaction_profile(&compaction_profile, path);
// read version file.
let version = current_version(path)?;
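Callers now hand over the CLI-level `DatabaseCompactionProfile` and let `migrate` derive the rocksdb `CompactionProfile` itself, which is what lets them drop their kvdb-rocksdb dependency. A sketch of the new call shape (hypothetical caller, assuming the module's existing `Display` impl for `Error`; the real caller is `execute_upgrades` in parity/helpers.rs):

// Hypothetical caller of the new migrate() signature (sketch only).
fn upgrade_database(db_root: &Path) -> Result<(), String> {
	migrate(db_root, &DatabaseCompactionProfile::Auto)
		.map_err(|err| format!("database migration failed: {}", err))
}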

parity/db/rocksdb/mod.rs (new file)

@@ -0,0 +1,91 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate kvdb_rocksdb;
extern crate migration_rocksdb;
use std::sync::Arc;
use std::path::Path;
use ethcore::db::NUM_COLUMNS;
use ethcore::client::{ClientConfig, DatabaseCompactionProfile};
use kvdb::{KeyValueDB, KeyValueDBHandler};
use self::kvdb_rocksdb::{Database, DatabaseConfig};
use cache::CacheConfig;
mod migration;
mod helpers;
pub use self::migration::migrate;
/// Open a secret store DB using the given secret store data path. The DB path is one level beneath the data path.
#[cfg(feature = "secretstore")]
pub fn open_secretstore_db(data_path: &str) -> Result<Arc<KeyValueDB>, String> {
use std::path::PathBuf;
let mut db_path = PathBuf::from(data_path);
db_path.push("db");
let db_path = db_path.to_str().ok_or_else(|| "Invalid secretstore path".to_string())?;
Ok(Arc::new(Database::open_default(&db_path).map_err(|e| format!("Error opening database: {:?}", e))?))
}
/// Open a new client DB.
pub fn open_client_db(client_path: &Path, client_config: &ClientConfig) -> Result<Arc<KeyValueDB>, String> {
let client_db_config = helpers::client_db_config(client_path, client_config);
let client_db = Arc::new(Database::open(
&client_db_config,
&client_path.to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Client service database error: {:?}", e))?);
Ok(client_db)
}
/// Create a restoration db handler using the config generated by `client_path` and `client_config`.
pub fn restoration_db_handler(client_path: &Path, client_config: &ClientConfig) -> Box<KeyValueDBHandler> {
use kvdb::Error;
let client_db_config = helpers::client_db_config(client_path, client_config);
struct RestorationDBHandler {
config: DatabaseConfig,
}
impl KeyValueDBHandler for RestorationDBHandler {
fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
}
}
Box::new(RestorationDBHandler {
config: client_db_config,
})
}
/// Open a new main DB.
pub fn open_db(client_path: &str, cache_config: &CacheConfig, compaction: &DatabaseCompactionProfile, wal: bool) -> Result<Arc<KeyValueDB>, String> {
let db_config = DatabaseConfig {
memory_budget: Some(cache_config.blockchain() as usize * 1024 * 1024),
compaction: helpers::compaction_profile(&compaction, &Path::new(client_path)),
wal: wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Ok(Arc::new(Database::open(
&db_config,
client_path
).map_err(|e| format!("Failed to open database: {}", e))?))
}


@@ -18,17 +18,16 @@ use std::sync::Arc;
use std::time::Duration;
use ethcore::client::DatabaseCompactionProfile;
use ethcore::db::NUM_COLUMNS;
use ethcore::spec::{SpecParams, OptimizeFor};
use kvdb_rocksdb::{Database, DatabaseConfig};
use light::client::fetch::Unavailable as UnavailableDataFetcher;
use light::Cache as LightDataCache;
use params::{SpecType, Pruning};
use helpers::{execute_upgrades, compaction_profile};
use helpers::execute_upgrades;
use dir::Directories;
use cache::CacheConfig;
use user_defaults::UserDefaults;
use db;
// Number of minutes before a given gas price corpus should expire.
// Light client only.
@@ -66,10 +65,8 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
// create dirs used by parity
cmd.dirs.create_dirs(false, false, false)?;
@@ -90,19 +87,10 @@ pub fn execute(cmd: ExportHsyncCmd) -> Result<String, String> {
config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
// initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Error opening database: {}", e))?)
};
let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
cmd.wal)?;
let service = light_client::Service::start(config, &spec, UnavailableDataFetcher, db, cache)
.map_err(|e| format!("Error starting light client: {}", e))?;


@@ -18,22 +18,17 @@ use std::io;
use std::io::{Write, BufReader, BufRead};
use std::time::Duration;
use std::fs::File;
use std::sync::Arc;
use std::path::Path;
use ethereum_types::{U256, clean_0x, Address};
use journaldb::Algorithm;
use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientConfig, VerifierType};
use ethcore::db::NUM_COLUMNS;
use ethcore::miner::{PendingSet, Penalization};
use miner::pool::PrioritizationStrategy;
use cache::CacheConfig;
use dir::DatabaseDirectories;
use dir::helpers::replace_home;
use upgrade::{upgrade, upgrade_data_paths};
use migration::migrate;
use sync::{validate_node_url, self};
use kvdb::{KeyValueDB, KeyValueDBHandler};
use kvdb_rocksdb::{Database, DatabaseConfig, CompactionProfile};
use db::migrate;
use path;
pub fn to_duration(s: &str) -> Result<Duration, String> {
@@ -258,57 +253,11 @@ pub fn to_client_config(
client_config
}
// We assume client db has similar config as restoration db.
pub fn client_db_config(client_path: &Path, client_config: &ClientConfig) -> DatabaseConfig {
let mut client_db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
client_db_config.memory_budget = client_config.db_cache_size;
client_db_config.compaction = compaction_profile(&client_config.db_compaction, &client_path);
client_db_config.wal = client_config.db_wal;
client_db_config
}
pub fn open_client_db(client_path: &Path, client_db_config: &DatabaseConfig) -> Result<Arc<KeyValueDB>, String> {
let client_db = Arc::new(Database::open(
&client_db_config,
&client_path.to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Client service database error: {:?}", e))?);
Ok(client_db)
}
pub fn restoration_db_handler(client_db_config: DatabaseConfig) -> Box<KeyValueDBHandler> {
use kvdb::Error;
struct RestorationDBHandler {
config: DatabaseConfig,
}
impl KeyValueDBHandler for RestorationDBHandler {
fn open(&self, db_path: &Path) -> Result<Arc<KeyValueDB>, Error> {
Ok(Arc::new(Database::open(&self.config, &db_path.to_string_lossy())?))
}
}
Box::new(RestorationDBHandler {
config: client_db_config,
})
}
pub fn compaction_profile(profile: &DatabaseCompactionProfile, db_path: &Path) -> CompactionProfile {
match profile {
&DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
&DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
&DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
}
}
pub fn execute_upgrades(
base_path: &str,
dirs: &DatabaseDirectories,
pruning: Algorithm,
compaction_profile: CompactionProfile
compaction_profile: &DatabaseCompactionProfile
) -> Result<(), String> {
upgrade_data_paths(base_path, dirs, pruning);


@@ -50,7 +50,6 @@ extern crate ethcore_bytes as bytes;
extern crate ethcore_io as io;
extern crate ethcore_light as light;
extern crate ethcore_logger;
extern crate ethcore_migrations as migrations;
extern crate ethcore_miner as miner;
extern crate ethcore_network as network;
extern crate ethcore_private_tx;
@@ -60,8 +59,6 @@ extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate ethkey;
extern crate kvdb;
extern crate kvdb_rocksdb;
extern crate migration as migr;
extern crate node_health;
extern crate panic_hook;
extern crate parity_hash_fetch as hash_fetch;
@@ -112,7 +109,6 @@ mod deprecated;
mod helpers;
mod informant;
mod light_helpers;
mod migration;
mod modules;
mod params;
mod presale;
@@ -126,6 +122,7 @@ mod upgrade;
mod url;
mod user_defaults;
mod whisper;
mod db;
#[cfg(feature="stratum")]
mod stratum;


@@ -25,7 +25,6 @@ use ansi_term::{Colour, Style};
use ctrlc::CtrlC;
use ethcore::account_provider::{AccountProvider, AccountProviderSettings};
use ethcore::client::{Client, Mode, DatabaseCompactionProfile, VMType, BlockChainClient, BlockInfo};
use ethcore::db::NUM_COLUMNS;
use ethcore::ethstore::ethkey;
use ethcore::miner::{stratum, Miner, MinerService, MinerOptions};
use ethcore::snapshot;
@@ -40,7 +39,6 @@ use futures_cpupool::CpuPool;
use hash_fetch::{self, fetch};
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm;
use kvdb_rocksdb::{Database, DatabaseConfig};
use light::Cache as LightDataCache;
use miner::external::ExternalMiner;
use node_filter::NodeFilter;
@@ -55,7 +53,7 @@ use params::{
SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
};
use helpers::{to_client_config, execute_upgrades, passwords_from_files, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
use upgrade::upgrade_key_location;
use dir::{Directories, DatabaseDirectories};
use cache::CacheConfig;
@@ -68,6 +66,7 @@ use rpc_apis;
use secretstore;
use signer;
use url;
use db;
// how often to take periodic snapshots.
const SNAPSHOT_PERIOD: u64 = 5000;
@@ -210,10 +209,8 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
// select pruning algorithm
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
let compaction = compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path());
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?;
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
// create dirs used by parity
cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?;
@@ -249,19 +246,10 @@ fn execute_light_impl(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<Runnin
};
// initialize database.
let db = {
let db_config = DatabaseConfig {
memory_budget: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024),
compaction: compaction,
wal: cmd.wal,
.. DatabaseConfig::with_columns(NUM_COLUMNS)
};
Arc::new(Database::open(
&db_config,
&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string.")
).map_err(|e| format!("Error opening database: {}", e))?)
};
let db = db::open_db(&db_dirs.client_path(algorithm).to_str().expect("DB path could not be converted to string."),
&cmd.cache_config,
&cmd.compaction,
cmd.wal)?;
let service = light_client::Service::start(config, &spec, fetch, db, cache.clone())
.map_err(|e| format!("Error starting light client: {}", e))?;
@@ -477,7 +465,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction_profile(&cmd.compaction, db_dirs.db_root_path().as_path()))?;
execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, &cmd.compaction)?;
// create dirs used by parity
cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.ui_conf.enabled, cmd.secretstore_conf.enabled)?;
@@ -623,9 +611,8 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
// set network path.
net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned());
let client_db_config = client_db_config(&client_path, &client_config);
let client_db = open_client_db(&client_path, &client_db_config)?;
let restoration_db_handler = restoration_db_handler(client_db_config);
let client_db = db::open_client_db(&client_path, &client_config)?;
let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
// create client service.
let service = ClientService::start(


@@ -117,6 +117,7 @@ mod server {
use ethcore_secretstore;
use ethkey::KeyPair;
use ansi_term::Colour::Red;
use db;
use super::{Configuration, Dependencies, NodeSecretKey, ContractAddress};
fn into_service_contract_address(address: ContractAddress) -> ethcore_secretstore::ContractAddress {
@@ -173,7 +174,6 @@
service_contract_srv_retr_address: conf.service_contract_srv_retr_address.map(into_service_contract_address),
service_contract_doc_store_address: conf.service_contract_doc_store_address.map(into_service_contract_address),
service_contract_doc_sretr_address: conf.service_contract_doc_sretr_address.map(into_service_contract_address),
data_path: conf.data_path.clone(),
acl_check_enabled: conf.acl_check_enabled,
cluster_config: ethcore_secretstore::ClusterConfiguration {
threads: 4,
@@ -193,7 +193,8 @@ mod server {
cconf.cluster_config.nodes.insert(self_secret.public().clone(), cconf.cluster_config.listener_address.clone());
let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf)
let db = db::open_secretstore_db(&conf.data_path)?;
let key_server = ethcore_secretstore::start(deps.client, deps.sync, deps.miner, self_secret, cconf, db)
.map_err(|e| format!("Error starting KeyServer {}: {}", key_server_name, e))?;
Ok(KeyServer {


@@ -32,11 +32,12 @@ use ethcore_service::ClientService;
use cache::CacheConfig;
use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
use helpers::{to_client_config, execute_upgrades, client_db_config, open_client_db, restoration_db_handler, compaction_profile};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
use user_defaults::UserDefaults;
use fdlimit;
use ethcore_private_tx;
use db;
/// Kinds of snapshot commands.
#[derive(Debug, PartialEq, Clone, Copy)]
@@ -164,7 +165,7 @@ impl SnapshotCommand {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
execute_upgrades(&self.dirs.base, &db_dirs, algorithm, compaction_profile(&self.compaction, db_dirs.db_root_path().as_path()))?;
execute_upgrades(&self.dirs.base, &db_dirs, algorithm, &self.compaction)?;
// prepare client config
let client_config = to_client_config(
@@ -183,9 +184,8 @@ impl SnapshotCommand {
true
);
let client_db_config = client_db_config(&client_path, &client_config);
let client_db = open_client_db(&client_path, &client_db_config)?;
let restoration_db_handler = restoration_db_handler(client_db_config);
let client_db = db::open_client_db(&client_path, &client_config)?;
let restoration_db_handler = db::restoration_db_handler(&client_path, &client_config);
let service = ClientService::start(
client_config,


@@ -31,7 +31,6 @@ ethcore-sync = { path = "../ethcore/sync" }
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.3"
kvdb = { path = "../util/kvdb" }
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
keccak-hash = { path = "../util/hash" }
ethkey = { path = "../ethkey" }
lazy_static = "1.0"
@@ -41,3 +40,4 @@ ethabi-contract = "5.0"
[dev-dependencies]
tempdir = "0.3"
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }


@@ -14,14 +14,14 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::PathBuf;
use std::collections::BTreeMap;
use std::sync::Arc;
use serde_json;
use tiny_keccak::Keccak;
use ethereum_types::{H256, Address};
use ethkey::{Secret, Public, public_to_address};
use kvdb_rocksdb::{Database, DatabaseIterator};
use types::all::{Error, ServiceConfiguration, ServerKeyId, NodeId};
use kvdb::KeyValueDB;
use types::all::{Error, ServerKeyId, NodeId};
use serialization::{SerializablePublic, SerializableSecret, SerializableH256, SerializableAddress};
/// Key of version value.
@@ -82,17 +82,17 @@ pub trait KeyStorage: Send + Sync {
/// Persistent document encryption keys storage
pub struct PersistentKeyStorage {
db: Database,
db: Arc<KeyValueDB>,
}
/// Persistent document encryption keys storage iterator
pub struct PersistentKeyStorageIterator<'a> {
iter: Option<DatabaseIterator<'a>>,
iter: Box<Iterator<Item=(Box<[u8]>, Box<[u8]>)> + 'a>,
}
/// V0 of encrypted key share, as it is stored by key storage on the single key server.
#[derive(Serialize, Deserialize)]
struct SerializableDocumentKeyShareV0 {
pub struct SerializableDocumentKeyShareV0 {
/// Decryption threshold (at least threshold + 1 nodes are required to decrypt data).
pub threshold: usize,
/// Nodes ids numbers.
@@ -172,12 +172,7 @@ type SerializableDocumentKeyShareVersionV3 = SerializableDocumentKeyShareVersion
impl PersistentKeyStorage {
/// Create new persistent document encryption keys storage
pub fn new(config: &ServiceConfiguration) -> Result<Self, Error> {
let mut db_path = PathBuf::from(&config.data_path);
db_path.push("db");
let db_path = db_path.to_str().ok_or_else(|| Error::Database("Invalid secretstore path".to_owned()))?;
let db = Database::open_default(&db_path)?;
pub fn new(db: Arc<KeyValueDB>) -> Result<Self, Error> {
let db = upgrade_db(db)?;
Ok(PersistentKeyStorage {
@@ -186,14 +181,14 @@ impl PersistentKeyStorage {
}
}
fn upgrade_db(db: Database) -> Result<Database, Error> {
fn upgrade_db(db: Arc<KeyValueDB>) -> Result<Arc<KeyValueDB>, Error> {
let version = db.get(None, DB_META_KEY_VERSION)?;
let version = version.and_then(|v| v.get(0).cloned()).unwrap_or(0);
match version {
0 => {
let mut batch = db.transaction();
batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
let v0_key = serde_json::from_slice::<SerializableDocumentKeyShareV0>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
let current_key = CurrentSerializableDocumentKeyShare {
// author is used in separate generation + encrypt sessions.
@@ -218,7 +213,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
1 => {
let mut batch = db.transaction();
batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
let v1_key = serde_json::from_slice::<SerializableDocumentKeyShareV1>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
let current_key = CurrentSerializableDocumentKeyShare {
author: public_to_address(&v1_key.author).into(), // added in v1 + changed in v3
@@ -241,7 +236,7 @@ fn upgrade_db(db: Database) -> Result<Database, Error> {
2 => {
let mut batch = db.transaction();
batch.put(None, DB_META_KEY_VERSION, &[CURRENT_VERSION]);
for (db_key, db_value) in db.iter(None).into_iter().flat_map(|inner| inner).filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
for (db_key, db_value) in db.iter(None).into_iter().filter(|&(ref k, _)| **k != *DB_META_KEY_VERSION) {
let v2_key = serde_json::from_slice::<SerializableDocumentKeyShareV2>(&db_value).map_err(|e| Error::Database(e.to_string()))?;
let current_key = CurrentSerializableDocumentKeyShare {
author: public_to_address(&v2_key.author).into(), // changed in v3
@@ -319,11 +314,10 @@ impl<'a> Iterator for PersistentKeyStorageIterator<'a> {
type Item = (ServerKeyId, DocumentKeyShare);
fn next(&mut self) -> Option<(ServerKeyId, DocumentKeyShare)> {
self.iter.as_mut()
.and_then(|iter| iter.next()
self.iter.as_mut().next()
.and_then(|(db_key, db_val)| serde_json::from_slice::<CurrentSerializableDocumentKeyShare>(&db_val)
.ok()
.map(|key| ((*db_key).into(), key.into()))))
.map(|key| ((*db_key).into(), key.into())))
}
}
@@ -417,14 +411,15 @@ impl From<SerializableDocumentKeyShareV3> for DocumentKeyShare {
pub mod tests {
extern crate tempdir;
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use std::sync::Arc;
use parking_lot::RwLock;
use serde_json;
use self::tempdir::TempDir;
use ethereum_types::{Address, H256};
use ethkey::{Random, Generator, Public, Secret, public_to_address};
use kvdb_rocksdb::Database;
use types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, ServerKeyId};
use types::all::{Error, ServerKeyId};
use super::{DB_META_KEY_VERSION, CURRENT_VERSION, KeyStorage, PersistentKeyStorage, DocumentKeyShare,
DocumentKeyShareVersion, CurrentSerializableDocumentKeyShare, upgrade_db, SerializableDocumentKeyShareV0,
SerializableDocumentKeyShareV1, SerializableDocumentKeyShareV2, SerializableDocumentKeyShareVersionV2};
@@ -472,27 +467,6 @@ pub mod tests {
#[test]
fn persistent_key_storage() {
let tempdir = TempDir::new("").unwrap();
let config = ServiceConfiguration {
listener_address: None,
service_contract_address: None,
service_contract_srv_gen_address: None,
service_contract_srv_retr_address: None,
service_contract_doc_store_address: None,
service_contract_doc_sretr_address: None,
acl_check_enabled: true,
data_path: tempdir.path().display().to_string(),
cluster_config: ClusterConfiguration {
threads: 1,
listener_address: NodeAddress {
address: "0.0.0.0".to_owned(),
port: 8083,
},
nodes: BTreeMap::new(),
allow_connecting_to_higher_nodes: false,
admin_public: None,
auto_migrate_enabled: false,
},
};
let key1 = ServerKeyId::from(1);
let value1 = DocumentKeyShare {
@@ -526,7 +500,9 @@ };
};
let key3 = ServerKeyId::from(3);
let key_storage = PersistentKeyStorage::new(&config).unwrap();
let db = Database::open_default(&tempdir.path().display().to_string()).unwrap();
let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
key_storage.insert(key1.clone(), value1.clone()).unwrap();
key_storage.insert(key2.clone(), value2.clone()).unwrap();
assert_eq!(key_storage.get(&key1), Ok(Some(value1.clone())));
@@ -534,7 +510,9 @@ assert_eq!(key_storage.get(&key2), Ok(Some(value2.clone())));
assert_eq!(key_storage.get(&key3), Ok(None));
drop(key_storage);
let key_storage = PersistentKeyStorage::new(&config).unwrap();
let db = Database::open_default(&tempdir.path().display().to_string()).unwrap();
let key_storage = PersistentKeyStorage::new(Arc::new(db)).unwrap();
assert_eq!(key_storage.get(&key1), Ok(Some(value1)));
assert_eq!(key_storage.get(&key2), Ok(Some(value2)));
assert_eq!(key_storage.get(&key3), Ok(None));
@@ -563,7 +541,7 @@ }
}
// upgrade database
let db = upgrade_db(db).unwrap();
let db = upgrade_db(Arc::new(db)).unwrap();
// check upgrade
assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
@@ -606,7 +584,7 @@ }
}
// upgrade database
let db = upgrade_db(db).unwrap();
let db = upgrade_db(Arc::new(db)).unwrap();
// check upgrade
assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
@@ -654,7 +632,7 @@ }
}
// upgrade database
let db = upgrade_db(db).unwrap();
let db = upgrade_db(Arc::new(db)).unwrap();
// check upgrade
assert_eq!(db.get(None, DB_META_KEY_VERSION).unwrap().unwrap()[0], CURRENT_VERSION);
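Because `PersistentKeyStorage` now sees only the `KeyValueDB` trait, a test could in principle avoid rocksdb entirely. A hypothetical sketch using kvdb-memorydb (assuming its `create(num_cols)` constructor; the tests in this commit still open a real `Database`):

// Hypothetical: back the key storage with an in-memory KeyValueDB (sketch).
fn in_memory_key_storage() -> PersistentKeyStorage {
	// No extra columns: the secret store only uses the default column (None).
	let db = Arc::new(kvdb_memorydb::create(0));
	PersistentKeyStorage::new(db).expect("fresh in-memory DB upgrades cleanly")
}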


@@ -28,7 +28,6 @@ extern crate futures_cpupool;
extern crate hyper;
extern crate keccak_hash as hash;
extern crate kvdb;
extern crate kvdb_rocksdb;
extern crate parking_lot;
extern crate rustc_hex;
extern crate serde;
@@ -54,6 +53,9 @@ extern crate lazy_static;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate kvdb_rocksdb;
mod key_server_cluster;
mod types;
mod helpers;
@@ -69,6 +71,7 @@ mod listener;
mod trusted_client;
use std::sync::Arc;
use kvdb::KeyValueDB;
use ethcore::client::Client;
use ethcore::miner::Miner;
use sync::SyncProvider;
@@ -79,7 +82,7 @@ pub use traits::{NodeKeyPair, KeyServer};
pub use self::node_key_pair::{PlainNodeKeyPair, KeyStoreNodeKeyPair};
/// Start new key server instance
pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration) -> Result<Box<KeyServer>, Error> {
pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, self_key_pair: Arc<NodeKeyPair>, config: ServiceConfiguration, db: Arc<KeyValueDB>) -> Result<Box<KeyServer>, Error> {
let trusted_client = trusted_client::TrustedClient::new(self_key_pair.clone(), client.clone(), sync, miner);
let acl_storage: Arc<acl_storage::AclStorage> = if config.acl_check_enabled {
acl_storage::OnChainAclStorage::new(trusted_client.clone())?
@@ -89,7 +92,7 @@ pub fn start(client: Arc<Client>, sync: Arc<SyncProvider>, miner: Arc<Miner>, se
let key_server_set = key_server_set::OnChainKeyServerSet::new(trusted_client.clone(), self_key_pair.clone(),
config.cluster_config.auto_migrate_enabled, config.cluster_config.nodes.clone())?;
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?);
let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(db)?);
let key_server = Arc::new(key_server::KeyServerImpl::new(&config.cluster_config, key_server_set.clone(), self_key_pair.clone(), acl_storage.clone(), key_storage.clone())?);
let cluster = key_server.cluster();
let key_server: Arc<KeyServer> = key_server;


@@ -91,8 +91,6 @@ pub struct ServiceConfiguration {
pub service_contract_doc_sretr_address: Option<ContractAddress>,
/// Is ACL check enabled. If false, everyone has access to all keys. Useful for tests only.
pub acl_check_enabled: bool,
/// Data directory path for secret store
pub data_path: String,
/// Cluster configuration.
pub cluster_config: ClusterConfiguration,
}


@@ -1,5 +1,5 @@
[package]
name = "migration"
name = "migration-rocksdb"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]


@@ -22,7 +22,7 @@
extern crate macros;
extern crate tempdir;
extern crate kvdb_rocksdb;
extern crate migration;
extern crate migration_rocksdb as migration;
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};