From e4763e90bc16f46ecfa81c438e95616460eb22e2 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 13:03:34 +0200 Subject: [PATCH 1/8] compaction struct and helpers --- util/src/kvdb.rs | 39 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 37 insertions(+), 2 deletions(-) diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index e472305a7..5d82dd6d6 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -20,8 +20,6 @@ use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; -const DB_FILE_SIZE_BASE: u64 = 32 * 1024 * 1024; -const DB_FILE_SIZE_MULTIPLIER: i32 = 2; const DB_BACKGROUND_FLUSHES: i32 = 2; const DB_BACKGROUND_COMPACTIONS: i32 = 2; @@ -53,6 +51,36 @@ impl DBTransaction { } } +/// Compaction profile for the database settings +pub struct CompactionProfile { + /// L0-L1 target file size + pub initial_file_size: u64, + /// L2-LN target file size multiplier + pub file_size_multiplier: i32, + /// rate limiter for background flushes and compactions, bytes/sec, if any + pub write_rate_limit: Option, +} + +impl CompactionProfile { + /// Default profile suitable for most storage + pub fn default() -> CompactionProfile { + CompactionProfile { + initial_file_size: 32 * 1024 * 1024, + file_size_multiplier: 2, + write_rate_limit: None, + } + } + + /// Slow hdd compaction profile + pub fn hdd(&self) -> CompactionProfile { + CompactionProfile { + initial_file_size: 192 * 1024 * 1024, + file_size_multiplier: 1, + write_rate_limit: Some(128 * 1024 * 1024), + } + } +} + /// Database configuration pub struct DatabaseConfig { /// Optional prefix size in bytes. Allows lookup by partial key. @@ -61,6 +89,8 @@ pub struct DatabaseConfig { pub max_open_files: i32, /// Cache-size pub cache_size: Option, + /// Compaction profile + pub compaction: CompactionProfile, } impl DatabaseConfig { @@ -72,6 +102,11 @@ impl DatabaseConfig { max_open_files: -1, } } + + pub fn compaction(mut self, profile: CompactionProfile) -> Self { + self.compaction = profile; + self + } } impl Default for DatabaseConfig { From 07098fd16fdc0c9e09dfcee9472e2b71fd5ad1ef Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 13:14:40 +0200 Subject: [PATCH 2/8] extra helpers for prefix --- util/src/kvdb.rs | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 5d82dd6d6..10c7a3276 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -76,7 +76,7 @@ impl CompactionProfile { CompactionProfile { initial_file_size: 192 * 1024 * 1024, file_size_multiplier: 1, - write_rate_limit: Some(128 * 1024 * 1024), + write_rate_limit: Some(8 * 1024 * 1024), } } } @@ -100,6 +100,7 @@ impl DatabaseConfig { cache_size: Some(cache_size), prefix_size: None, max_open_files: -1, + compaction: CompactionProfile::default(), } } @@ -107,6 +108,11 @@ impl DatabaseConfig { self.compaction = profile; self } + + pub fn prefix(mut self, prefix_size: usize) -> Self { + self.prefix_size = Some(prefix_size); + self + } } impl Default for DatabaseConfig { @@ -115,6 +121,7 @@ impl Default for DatabaseConfig { cache_size: None, prefix_size: None, max_open_files: -1, + compaction: CompactionProfile::default(), } } } @@ -146,13 +153,18 @@ impl Database { /// Open database file. Creates if it does not exist. 
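Patch 1 replaces the hard-coded DB_FILE_SIZE_BASE / DB_FILE_SIZE_MULTIPLIER constants with a CompactionProfile carried inside DatabaseConfig. A minimal usage sketch of the two presets, assuming the util crate from this series and the final shape of the API (patch 2 adds the compaction field to the DatabaseConfig constructors, patch 4 turns hdd(&self) into an associated hdd() constructor); the Option field types are assumed to be Option<u64>/Option<usize>, since the angle brackets did not survive this rendering:

```rust
use util::kvdb::{CompactionProfile, DatabaseConfig};

fn main() {
    // default(): 32 MiB L0-L1 target file size, 2x multiplier per level,
    // no background write rate limit -- the values of the removed constants.
    let ssd = DatabaseConfig::default().compaction(CompactionProfile::default());

    // hdd(): 192 MiB target file size, flat 1x multiplier, plus a background
    // write rate limit (128 MiB/s in this patch; patch 2 lowers it to 8 MiB/s).
    let hdd = DatabaseConfig::default().compaction(CompactionProfile::hdd());

    let _ = (ssd, hdd);
}
```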
pub fn open(config: &DatabaseConfig, path: &str) -> Result { let mut opts = Options::new(); - try!(opts.set_parsed_options("rate_limiter_bytes_per_sec=256000000")); + if let Some(rate_limit) = config.compaction.write_rate_limit { + try!(opts.set_parsed_options(&format!("rate_limiter_bytes_per_sec={}", rate_limit))); + } opts.set_max_open_files(config.max_open_files); opts.create_if_missing(true); opts.set_use_fsync(false); + + // compaction settings opts.set_compaction_style(DBCompactionStyle::DBUniversalCompaction); - opts.set_target_file_size_base(DB_FILE_SIZE_BASE); - opts.set_target_file_size_multiplier(DB_FILE_SIZE_MULTIPLIER); + opts.set_target_file_size_base(config.compaction.initial_file_size); + opts.set_target_file_size_multiplier(config.compaction.file_size_multiplier); + opts.set_max_background_flushes(DB_BACKGROUND_FLUSHES); opts.set_max_background_compactions(DB_BACKGROUND_COMPACTIONS); if let Some(cache_size) = config.cache_size { From 627b67db0acdeafbc1041036a6e32f7f067dd226 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 13:23:50 +0200 Subject: [PATCH 3/8] jdb to new settings config --- util/src/journaldb/archivedb.rs | 9 ++------- util/src/journaldb/earlymergedb.rs | 9 ++------- util/src/journaldb/mod.rs | 11 ++++++----- util/src/journaldb/overlayrecentdb.rs | 13 ++++--------- util/src/journaldb/refcounteddb.rs | 9 ++------- util/src/kvdb.rs | 2 ++ 6 files changed, 18 insertions(+), 35 deletions(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index c7f6b92fa..8372f99de 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -43,13 +43,8 @@ const DB_VERSION : u32 = 0x103; impl ArchiveDB { /// Create a new instance from file - pub fn new(path: &str, cache_size: Option) -> ArchiveDB { - let opts = DatabaseConfig { - // this must match account_db prefix - prefix_size: Some(DB_PREFIX_LEN), - max_open_files: 256, - cache_size: cache_size, - }; + pub fn new(path: &str, config: DatabaseConfig) -> ArchiveDB { + let opts = config.prefix(DB_PREFIX_LEN); let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 71959fa35..5c267679b 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -73,13 +73,8 @@ const PADDING : [u8; 10] = [ 0u8; 10 ]; impl EarlyMergeDB { /// Create a new instance from file - pub fn new(path: &str, cache_size: Option) -> EarlyMergeDB { - let opts = DatabaseConfig { - // this must match account_db prefix - prefix_size: Some(DB_PREFIX_LEN), - max_open_files: 256, - cache_size: cache_size, - }; + pub fn new(path: &str, config: DatabaseConfig) -> EarlyMergeDB { + let opts = config.prefix(DB_PREFIX_LEN); let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 973dd9eb0..4715273dd 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -17,6 +17,7 @@ //! `JournalDB` interface and implementation. use common::*; +use kvdb::DatabaseConfig; /// Export the journaldb module. pub mod traits; @@ -71,12 +72,12 @@ impl fmt::Display for Algorithm { } /// Create a new `JournalDB` trait object. 
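The Database::open change in patch 2 is where the profile actually reaches RocksDB: the optional write_rate_limit becomes a rate_limiter_bytes_per_sec parsed option, while initial_file_size and file_size_multiplier feed set_target_file_size_base and set_target_file_size_multiplier. A self-contained sketch of just the option-string step (the helper name is illustrative; the format string is the one used in the patch):

```rust
// Turn the profile's optional write rate limit into the RocksDB option
// string that Database::open passes to set_parsed_options.
fn rate_limiter_option(write_rate_limit: Option<u64>) -> Option<String> {
    write_rate_limit.map(|limit| format!("rate_limiter_bytes_per_sec={}", limit))
}

fn main() {
    // The hdd() preset caps background flushes/compactions at 8 MiB/s.
    assert_eq!(
        rate_limiter_option(Some(8 * 1024 * 1024)),
        Some("rate_limiter_bytes_per_sec=8388608".to_string())
    );
    // The default profile leaves the rate limiter unset entirely.
    assert_eq!(rate_limiter_option(None), None);
}
```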
-pub fn new(path: &str, algorithm: Algorithm, cache_size: Option) -> Box { +pub fn new(path: &str, algorithm: Algorithm, config: DatabaseConfig) -> Box { match algorithm { - Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path, cache_size)), - Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path, cache_size)), - Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path, cache_size)), - Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path, cache_size)), + Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path, config)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path, config)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path, config)), + Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path, config)), } } diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 158b771fb..09ec3da88 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -98,18 +98,13 @@ const PADDING : [u8; 10] = [ 0u8; 10 ]; impl OverlayRecentDB { /// Create a new instance from file - pub fn new(path: &str, cache_size: Option) -> OverlayRecentDB { - Self::from_prefs(path, cache_size) + pub fn new(path: &str, config: DatabaseConfig) -> OverlayRecentDB { + Self::from_prefs(path, config) } /// Create a new instance from file - pub fn from_prefs(path: &str, cache_size: Option) -> OverlayRecentDB { - let opts = DatabaseConfig { - // this must match account_db prefix - prefix_size: Some(DB_PREFIX_LEN), - max_open_files: 256, - cache_size: cache_size, - }; + pub fn from_prefs(path: &str, config: DatabaseConfig) -> OverlayRecentDB { + let opts = config.prefix(DB_PREFIX_LEN); let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index fc7da1541..d2092b2a6 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -46,13 +46,8 @@ const PADDING : [u8; 10] = [ 0u8; 10 ]; impl RefCountedDB { /// Create a new instance given a `backing` database. 
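Patch 3 changes every journaldb constructor from taking a bare optional cache size to taking a DatabaseConfig, with each backend applying the account_db prefix itself via config.prefix(DB_PREFIX_LEN). A sketch of the new call shape from a caller's point of view (the function name is illustrative, and the JournalDB trait path is assumed to be the traits module also used by the tests below):

```rust
use util::journaldb::{self, Algorithm};
use util::journaldb::traits::JournalDB;
use util::kvdb::{CompactionProfile, DatabaseConfig};

fn open_state_db(path: &str, on_hdd: bool) -> Box<JournalDB> {
    // Callers now build the whole config; the account_db prefix_size is
    // filled in inside each backend, so it is no longer set here.
    let mut config = DatabaseConfig::default();
    if on_hdd {
        config = config.compaction(CompactionProfile::hdd());
    }
    journaldb::new(path, Algorithm::OverlayRecent, config)
}
```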
- pub fn new(path: &str, cache_size: Option) -> RefCountedDB { - let opts = DatabaseConfig { - // this must match account_db prefix - prefix_size: Some(DB_PREFIX_LEN), - max_open_files: 256, - cache_size: cache_size, - }; + pub fn new(path: &str, config: DatabaseConfig) -> RefCountedDB { + let opts = config.prefix(DB_PREFIX_LEN); let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 10c7a3276..9cf4313f2 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -104,11 +104,13 @@ impl DatabaseConfig { } } + /// Modify the compaction profile pub fn compaction(mut self, profile: CompactionProfile) -> Self { self.compaction = profile; self } + /// Modify the prefix of the db pub fn prefix(mut self, prefix_size: usize) -> Self { self.prefix_size = Some(prefix_size); self From 2e5d5f12dd68cdb1f3f8702d905af33fd95346a6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 13:58:12 +0200 Subject: [PATCH 4/8] ethcore client config --- ethcore/src/client/client.rs | 13 +++++++++++-- ethcore/src/client/config.rs | 15 +++++++++++++++ ethcore/src/client/mod.rs | 4 ++-- parity/cli.rs | 9 ++++++++- parity/configuration.rs | 7 +++++++ util/src/kvdb.rs | 2 +- 6 files changed, 44 insertions(+), 6 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4ccfb7360..b69558d52 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -38,7 +38,7 @@ use filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; -use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics}; +use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics}; use client::Error as ClientError; use env_info::EnvInfo; use executive::{Executive, Executed, TransactOptions, contract_address}; @@ -146,10 +146,19 @@ impl Client where V: Verifier { let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path)); let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone()))); + let mut state_db_config = match config.db_cache_size { + None => DatabaseConfig::default(), + Some(cache_size) => DatabaseConfig::with_cache(cache_size), + }; + + if config.db_compaction == DatabaseCompactionProfile::HDD { + state_db_config = state_db_config.compaction(CompactionProfile::hdd()); + } + let mut state_db = journaldb::new( &append_path(&path, "state"), config.pruning, - config.db_cache_size); + state_db_config); if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) { state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 6353e324f..7d7f8e524 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -20,6 +20,19 @@ pub use trace::{Config as TraceConfig, Switch}; pub use evm::VMType; use util::journaldb; +/// Client state db compaction profile +#[derive(Debug, PartialEq)] +pub enum DatabaseCompactionProfile { + /// Default compaction profile + Default, + /// HDD or other slow storage io compaction profile + HDD, +} + +impl Default for DatabaseCompactionProfile { + fn default() -> Self { 
DatabaseCompactionProfile::Default } +} + /// Client configuration. Includes configs for all sub-systems. #[derive(Debug, Default)] pub struct ClientConfig { @@ -37,4 +50,6 @@ pub struct ClientConfig { pub name: String, /// State db cache-size if not default pub db_cache_size: Option, + /// State db compaction profile + pub db_compaction: DatabaseCompactionProfile, } diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 80625ad85..4ba22c1ed 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -23,7 +23,7 @@ mod test_client; mod trace; pub use self::client::*; -pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig, Switch, VMType}; +pub use self::config::{ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType}; pub use self::error::Error; pub use types::ids::*; pub use self::test_client::{TestBlockChainClient, EachBlockWith}; @@ -245,7 +245,7 @@ pub trait BlockChainClient : Sync + Send { } else { None } - } + } } /// Extended client interface used for mining diff --git a/parity/cli.rs b/parity/cli.rs index 29333fe9a..c42719cc6 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -170,7 +170,13 @@ Footprint Options: --cache MEGABYTES Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options. - --db-cache-size MB Database cache size. + +Database Options: + --db-cache-size MB Database cache size. Default if not specified. + --db-compaction TYPE Database compaction type. TYPE may be one of default, hdd + default - suitable for ssd backing storage/fast hdd + hdd - sutable for slow storage + [default: default]. Import/Export Options: --from BLOCK Export from block BLOCK, which may be an index or @@ -323,6 +329,7 @@ pub struct Args { pub flag_ipcpath: Option, pub flag_ipcapi: Option, pub flag_db_cache_size: Option, + pub flag_db_compaction: String, } pub fn print_version() { diff --git a/parity/configuration.rs b/parity/configuration.rs index 43a7fcd3c..8a11ebf4d 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -278,6 +278,13 @@ impl Configuration { // forced state db cache size if provided client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); + // compaction profile + client_config.db_compaction_profile = match self.args.flag_db_compaction.as_str() { + "default" => DatabaseCompactionProfile::Default, + "hdd" => DatabaseCompactionProfile::HDD, + _ => { die!("Invalid compaction profile given (--db-compaction argument), expected hdd/default."); } + }; + if self.args.flag_jitvm { client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity built without jit vm.")) } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 9cf4313f2..4e7bdb26c 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -72,7 +72,7 @@ impl CompactionProfile { } /// Slow hdd compaction profile - pub fn hdd(&self) -> CompactionProfile { + pub fn hdd() -> CompactionProfile { CompactionProfile { initial_file_size: 192 * 1024 * 1024, file_size_multiplier: 1, From f9f25fd14767491b4ff888313b257f7b25d44467 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 14:25:50 +0200 Subject: [PATCH 5/8] cli config --- parity/configuration.rs | 6 +++--- parity/migration.rs | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/parity/configuration.rs b/parity/configuration.rs index 8a11ebf4d..ad2578478 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -26,7 +26,7 @@ use die::*; use util::*; 
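Patch 4 exposes the choice as a DatabaseCompactionProfile enum on ClientConfig plus a --db-compaction flag, and the client.rs hunk above maps both new fields onto the state database config. A condensed mirror of that logic (function name illustrative, Option<usize> assumed for db_cache_size since the type parameter was lost in this rendering, crate paths as used inside the workspace):

```rust
use ethcore::client::DatabaseCompactionProfile;
use util::kvdb::{CompactionProfile, DatabaseConfig};

fn state_db_config(db_cache_size: Option<usize>,
                   db_compaction: &DatabaseCompactionProfile) -> DatabaseConfig {
    // Base config: honour a forced cache size when one was given.
    let config = match db_cache_size {
        None => DatabaseConfig::default(),
        Some(cache_size) => DatabaseConfig::with_cache(cache_size),
    };
    // Only the HDD profile overrides the default compaction settings.
    match *db_compaction {
        DatabaseCompactionProfile::HDD => config.compaction(CompactionProfile::hdd()),
        DatabaseCompactionProfile::Default => config,
    }
}
```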
use ethcore::account_provider::AccountProvider; use util::network_settings::NetworkSettings; -use ethcore::client::{append_path, get_db_path, ClientConfig, Switch, VMType}; +use ethcore::client::{append_path, get_db_path, ClientConfig, DatabaseCompactionProfile, Switch, VMType}; use ethcore::ethereum; use ethcore::spec::Spec; use ethsync::SyncConfig; @@ -227,7 +227,7 @@ impl Configuration { let mut latest_era = None; let jdb_types = [journaldb::Algorithm::Archive, journaldb::Algorithm::EarlyMerge, journaldb::Algorithm::OverlayRecent, journaldb::Algorithm::RefCounted]; for i in jdb_types.into_iter() { - let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i, None); + let db = journaldb::new(&append_path(&get_db_path(Path::new(&self.path()), *i, spec.genesis_header().hash()), "state"), *i, kvdb::DatabaseConfig::default()); trace!(target: "parity", "Looking for best DB: {} at {:?}", i, db.latest_era()); match (latest_era, db.latest_era()) { (Some(best), Some(this)) if best >= this => {} @@ -279,7 +279,7 @@ impl Configuration { client_config.db_cache_size = self.args.flag_db_cache_size.and_then(|cs| Some(cs / 4)); // compaction profile - client_config.db_compaction_profile = match self.args.flag_db_compaction.as_str() { + client_config.db_compaction = match self.args.flag_db_compaction.as_str() { "default" => DatabaseCompactionProfile::Default, "hdd" => DatabaseCompactionProfile::HDD, _ => { die!("Invalid compaction profile given (--db-compaction argument), expected hdd/default."); } diff --git a/parity/migration.rs b/parity/migration.rs index 4198f7ed0..df2c2116f 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -20,7 +20,7 @@ use std::io::{Read, Write, Error as IoError, ErrorKind}; use std::path::PathBuf; use std::fmt::{Display, Formatter, Error as FmtError}; use util::migration::{Manager as MigrationManager, Config as MigrationConfig, MigrationIterator}; -use util::kvdb::{Database, DatabaseConfig}; +use util::kvdb::{Database, DatabaseConfig, CompactionProfile}; use ethcore::migrations; /// Database is assumed to be at default version, when no version file is found. 
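The migration code in patch 5 (next hunk) builds DatabaseConfig as a struct literal rather than through the constructors, so the new compaction field has to be spelled out there; CompactionProfile::default() preserves the previous behaviour. The same literal as a standalone sketch (function name illustrative):

```rust
use util::kvdb::{CompactionProfile, DatabaseConfig};

// Migration wants a small open-file budget, no cache and no prefix, which
// none of the builder constructors provide, hence the explicit literal.
fn migration_db_config() -> DatabaseConfig {
    DatabaseConfig {
        prefix_size: None,
        max_open_files: 64,
        cache_size: None,
        compaction: CompactionProfile::default(),
    }
}
```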
@@ -163,6 +163,7 @@ fn migrate_database(version: u32, path: PathBuf, migrations: MigrationManager) - prefix_size: None, max_open_files: 64, cache_size: None, + compaction: CompactionProfile::default(), }; // open old database From 9f4bfd9e7a7a14b0873310810d105102d3c3b0ae Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 27 Jun 2016 18:47:50 +0200 Subject: [PATCH 6/8] fix tests --- ethcore/src/tests/helpers.rs | 4 +-- util/src/journaldb/archivedb.rs | 21 ++++++++-------- util/src/journaldb/earlymergedb.rs | 35 ++++++++++++++------------- util/src/journaldb/overlayrecentdb.rs | 35 ++++++++++++++------------- util/src/journaldb/refcounteddb.rs | 2 +- util/src/kvdb.rs | 8 +++--- 6 files changed, 54 insertions(+), 51 deletions(-) diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 1fefd5830..15b346919 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -303,7 +303,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, None); + let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()); GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -320,7 +320,7 @@ pub fn get_temp_state() -> GuardedTempResult { } pub fn get_temp_journal_db_in(path: &Path) -> Box { - journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, None) + journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge, DatabaseConfig::default()) } pub fn get_temp_state_in(path: &Path) -> State { diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 8372f99de..a01ed807f 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -70,7 +70,7 @@ impl ArchiveDB { fn new_temp() -> ArchiveDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), None) + Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) } fn payload(&self, key: &H256) -> Option { @@ -182,6 +182,7 @@ mod tests { use super::*; use hashdb::*; use journaldb::traits::JournalDB; + use kvdb::DatabaseConfig; #[test] fn insert_same_in_fork() { @@ -323,7 +324,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -332,13 +333,13 @@ mod tests { }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -351,7 +352,7 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -365,7 +366,7 @@ mod tests { }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), 
None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.contains(&foo)); @@ -380,7 +381,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, _, _) = { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -395,7 +396,7 @@ mod tests { }; { - let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), None); + let mut jdb = ArchiveDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.contains(&foo)); } @@ -406,14 +407,14 @@ mod tests { let temp = ::devtools::RandomTempPath::new(); let key = { - let mut jdb = ArchiveDB::new(temp.as_str(), None); + let mut jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default()); let key = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); key }; { - let jdb = ArchiveDB::new(temp.as_str(), None); + let jdb = ArchiveDB::new(temp.as_str(), DatabaseConfig::default()); let state = jdb.state(&key); assert!(state.is_some()); } diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 5c267679b..2b87f8345 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -102,7 +102,7 @@ impl EarlyMergeDB { fn new_temp() -> EarlyMergeDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), None) + Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) } fn morph_key(key: &H256, index: u8) -> Bytes { @@ -532,6 +532,7 @@ mod tests { use super::super::traits::JournalDB; use hashdb::*; use log::init_log; + use kvdb::DatabaseConfig; #[test] fn insert_same_in_fork() { @@ -709,7 +710,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -737,7 +738,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -765,7 +766,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -803,7 +804,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -813,14 +814,14 @@ mod tests { }; { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { 
- let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -835,7 +836,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); @@ -864,7 +865,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); @@ -913,7 +914,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -944,7 +945,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -984,7 +985,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -1005,7 +1006,7 @@ mod tests { assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -1013,14 +1014,14 @@ mod tests { assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -1033,7 +1034,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -1051,7 +1052,7 @@ mod tests { }; { - let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), None); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); diff --git 
a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 09ec3da88..0ffea308b 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -130,7 +130,7 @@ impl OverlayRecentDB { pub fn new_temp() -> OverlayRecentDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), None) + Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) } #[cfg(test)] @@ -364,6 +364,7 @@ mod tests { use hashdb::*; use log::init_log; use journaldb::JournalDB; + use kvdb::DatabaseConfig; #[test] fn insert_same_in_fork() { @@ -521,7 +522,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -549,7 +550,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -577,7 +578,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -615,7 +616,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -625,14 +626,14 @@ mod tests { }; { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); assert!(jdb.contains(&foo)); assert!(jdb.contains(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -647,7 +648,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); @@ -676,7 +677,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); @@ -725,7 +726,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -756,7 +757,7 @@ mod tests { let mut dir = 
::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -796,7 +797,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -817,7 +818,7 @@ mod tests { assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -825,14 +826,14 @@ mod tests { assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); // incantation to reopen the db - }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -845,7 +846,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -863,7 +864,7 @@ mod tests { }; { - let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), None); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap(), DatabaseConfig::default()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.contains(&foo)); diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index d2092b2a6..4be6a1eda 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -77,7 +77,7 @@ impl RefCountedDB { fn new_temp() -> RefCountedDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap(), None) + Self::new(dir.to_str().unwrap(), DatabaseConfig::default()) } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 4e7bdb26c..bc0acc2dd 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -293,10 +293,10 @@ mod tests { let path = RandomTempPath::create_dir(); let smoke = Database::open_default(path.as_path().to_str().unwrap()).unwrap(); assert!(smoke.is_empty()); - test_db(&DatabaseConfig { prefix_size: None, max_open_files: 256, cache_size: None, }); - test_db(&DatabaseConfig { prefix_size: Some(1), max_open_files: 256, cache_size: None, }); - test_db(&DatabaseConfig { prefix_size: Some(8), max_open_files: 256, cache_size: None, }); - test_db(&DatabaseConfig { prefix_size: Some(32), max_open_files: 256, cache_size: None, }); + test_db(&DatabaseConfig::default()); + test_db(&DatabaseConfig::default().prefix(12)); + test_db(&DatabaseConfig::default().prefix(22)); + 
test_db(&DatabaseConfig::default().prefix(8)); } } From 7b5eeb1dd71d9a7b086b9c9934a9d80f2d86ee59 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 27 Jun 2016 19:22:28 +0200 Subject: [PATCH 7/8] Update cli.rs [ci:skip] --- parity/cli.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/parity/cli.rs b/parity/cli.rs index c42719cc6..0761962a0 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -172,11 +172,10 @@ Footprint Options: options. Database Options: - --db-cache-size MB Database cache size. Default if not specified. - --db-compaction TYPE Database compaction type. TYPE may be one of default, hdd - default - suitable for ssd backing storage/fast hdd - hdd - sutable for slow storage - [default: default]. + --db-cache-size MB Override RocksDB database cache size. + --db-compaction TYPE Database compaction type. TYPE may be one of: + ssd - suitable for SSDs and fast HDDs; + hdd - suitable for slow HDDs [default: ssd]. Import/Export Options: --from BLOCK Export from block BLOCK, which may be an index or From dc79e63db734f4baf08ddbbeced8d293c0480e21 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 27 Jun 2016 19:22:54 +0200 Subject: [PATCH 8/8] Update configuration.rs --- parity/configuration.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/configuration.rs b/parity/configuration.rs index ad2578478..6b145095d 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -280,7 +280,7 @@ impl Configuration { // compaction profile client_config.db_compaction = match self.args.flag_db_compaction.as_str() { - "default" => DatabaseCompactionProfile::Default, + "ssd" => DatabaseCompactionProfile::Default, "hdd" => DatabaseCompactionProfile::HDD, _ => { die!("Invalid compaction profile given (--db-compaction argument), expected hdd/default."); } };
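Patches 7 and 8 rename the first --db-compaction value from default to ssd and make it the docopt default (the die! message left in place after patch 8 still says "expected hdd/default"). Put together, the series routes the flag all the way down to the RocksDB options; an illustrative end-to-end sketch (function names are not from the patches, crate paths as used inside the workspace):

```rust
use util::kvdb::{CompactionProfile, DatabaseConfig};

// Stand-in for the --db-compaction handling in configuration.rs, which maps
// the string onto DatabaseCompactionProfile and die!()s on anything else.
fn profile_from_flag(flag: &str) -> Result<CompactionProfile, String> {
    match flag {
        "ssd" => Ok(CompactionProfile::default()),
        "hdd" => Ok(CompactionProfile::hdd()),
        other => Err(format!("invalid --db-compaction value: {}", other)),
    }
}

fn main() {
    // --db-compaction hdd -> 192 MiB target files, flat multiplier,
    // 8 MiB/s background write limit.
    let config = DatabaseConfig::default()
        .compaction(profile_from_flag("hdd").unwrap());
    // This config is what journaldb::new and Database::open consume; open()
    // translates it into target_file_size_base, the size multiplier and,
    // when set, rate_limiter_bytes_per_sec (patch 2).
    let _ = config;
}
```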