diff --git a/Cargo.lock b/Cargo.lock
index fa48eafcd..e716b85b8 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -554,6 +554,7 @@ dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index 69b9d9efe..b87da437f 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::str::FromStr;
+use std::path::Path;
pub use std::time::Duration;
pub use blockchain::Config as BlockChainConfig;
pub use trace::Config as TraceConfig;
@@ -26,23 +27,26 @@ use util::{journaldb, CompactionProfile};
/// Client state db compaction profile
#[derive(Debug, PartialEq)]
pub enum DatabaseCompactionProfile {
- /// Default compaction profile
- Default,
+ /// Try to determine compaction profile automatically
+ Auto,
+ /// SSD compaction profile
+ SSD,
/// HDD or other slow storage io compaction profile
HDD,
}
impl Default for DatabaseCompactionProfile {
fn default() -> Self {
- DatabaseCompactionProfile::Default
+ DatabaseCompactionProfile::Auto
}
}
impl DatabaseCompactionProfile {
/// Returns corresponding compaction profile.
- pub fn compaction_profile(&self) -> CompactionProfile {
+ pub fn compaction_profile(&self, db_path: &Path) -> CompactionProfile {
match *self {
- DatabaseCompactionProfile::Default => Default::default(),
+ DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
+ DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
}
}
@@ -53,9 +57,10 @@ impl FromStr for DatabaseCompactionProfile {
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
- "ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
+ "auto" => Ok(DatabaseCompactionProfile::Auto),
+ "ssd" => Ok(DatabaseCompactionProfile::SSD),
"hdd" => Ok(DatabaseCompactionProfile::HDD),
- _ => Err("Invalid compaction profile given. Expected hdd/ssd (default).".into()),
+ _ => Err("Invalid compaction profile given. Expected auto/hdd/ssd.".into()),
}
}
}
@@ -120,13 +125,13 @@ mod test {
#[test]
fn test_default_compaction_profile() {
- assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default);
+ assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Auto);
}
#[test]
fn test_parsing_compaction_profile() {
- assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap());
- assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap());
+ assert_eq!(DatabaseCompactionProfile::Auto, "auto".parse().unwrap());
+ assert_eq!(DatabaseCompactionProfile::SSD, "ssd".parse().unwrap());
assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap());
}
diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs
index b2dd18cd5..95cbe745e 100644
--- a/ethcore/src/service.rs
+++ b/ethcore/src/service.rs
@@ -87,7 +87,7 @@ impl ClientService {
db_config.set_cache(::db::COL_STATE, size);
}
- db_config.compaction = config.db_compaction.compaction_profile();
+ db_config.compaction = config.db_compaction.compaction_profile(&client_path);
db_config.wal = config.db_wal;
let pruning = config.pruning;
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
index 94343cdf8..fbf25e1cf 100644
--- a/parity/blockchain.rs
+++ b/parity/blockchain.rs
@@ -155,7 +155,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
- try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+ try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
// prepare client config
let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm, cmd.pruning_history);
@@ -306,7 +306,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
- try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+ try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
// prepare client config
let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm, cmd.pruning_history);
diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs
index 2935560c9..a8e1cdba7 100644
--- a/parity/cli/mod.rs
+++ b/parity/cli/mod.rs
@@ -225,7 +225,7 @@ usage! {
or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some),
flag_fast_and_loose: bool = false,
or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
- flag_db_compaction: String = "ssd",
+ flag_db_compaction: String = "auto",
or |c: &Config| otry!(c.footprint).db_compaction.clone(),
flag_fat_db: String = "auto",
or |c: &Config| otry!(c.footprint).fat_db.clone(),
diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt
index c602c0c8a..8bf99c5e7 100644
--- a/parity/cli/usage.txt
+++ b/parity/cli/usage.txt
@@ -232,7 +232,8 @@ Footprint Options:
but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
--db-compaction TYPE Database compaction type. TYPE may be one of:
ssd - suitable for SSDs and fast HDDs;
- hdd - suitable for slow HDDs (default: {flag_db_compaction}).
+ hdd - suitable for slow HDDs;
+ auto - determine automatically (default: {flag_db_compaction}).
--fat-db BOOL Build appropriate information to allow enumeration
of all accounts and storage keys. Doubles the size
of the state database. BOOL may be one of on, off
diff --git a/parity/dir.rs b/parity/dir.rs
index 5a87f8dac..d7638e33b 100644
--- a/parity/dir.rs
+++ b/parity/dir.rs
@@ -77,7 +77,8 @@ pub struct DatabaseDirectories {
}
impl DatabaseDirectories {
- fn fork_path(&self) -> PathBuf {
+ /// Base DB directory for the given fork.
+ pub fn fork_path(&self) -> PathBuf {
let mut dir = Path::new(&self.path).to_path_buf();
dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
dir
diff --git a/parity/run.rs b/parity/run.rs
index 4d6b92600..9d0caf751 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -134,7 +134,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
- try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+ try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
// run in daemon mode
if let Some(pid_file) = cmd.daemon {
diff --git a/parity/snapshot.rs b/parity/snapshot.rs
index 709dafe5f..dc146d8fe 100644
--- a/parity/snapshot.rs
+++ b/parity/snapshot.rs
@@ -160,7 +160,7 @@ impl SnapshotCommand {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
- try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
+ try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path())));
// prepare client config
let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history);
diff --git a/util/Cargo.toml b/util/Cargo.toml
index d6cdce45f..ce2992fe5 100644
--- a/util/Cargo.toml
+++ b/util/Cargo.toml
@@ -35,6 +35,7 @@ table = { path = "table" }
ansi_term = "0.7"
tiny-keccak= "1.0"
ethcore-bloom-journal = { path = "bloom" }
+regex = "0.1"
[features]
default = []
diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs
index e5507b78f..7d6ea399c 100644
--- a/util/src/kvdb.rs
+++ b/util/src/kvdb.rs
@@ -24,6 +24,12 @@ use std::path::PathBuf;
use rlp::{UntrustedRlp, RlpType, View, Compressible};
use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
+#[cfg(target_os = "linux")]
+use regex::Regex;
+#[cfg(target_os = "linux")]
+use std::process::Command;
+#[cfg(target_os = "linux")]
+use std::fs::File;
const DB_BACKGROUND_FLUSHES: i32 = 2;
const DB_BACKGROUND_COMPACTIONS: i32 = 2;
@@ -110,7 +116,7 @@ enum KeyState {
}
/// Compaction profile for the database settings
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, PartialEq, Debug)]
pub struct CompactionProfile {
/// L0-L1 target file size
pub initial_file_size: u64,
@@ -123,16 +129,73 @@ pub struct CompactionProfile {
impl Default for CompactionProfile {
/// Default profile suitable for most storage
fn default() -> CompactionProfile {
+ CompactionProfile::ssd()
+ }
+}
+
+/// Given output of df command return Linux rotational flag file path.
+#[cfg(target_os = "linux")]
+pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
+ str::from_utf8(df_out.as_slice())
+ .ok()
+ // Get the drive name.
+ .and_then(|df_str| Regex::new(r"/dev/(sd[[:alpha:]]{1,2})")
+ .ok()
+ .and_then(|re| re.captures(df_str))
+ .and_then(|captures| captures.at(1)))
+ // Generate path e.g. /sys/block/sda/queue/rotational
+ .map(|drive_path| {
+ let mut p = PathBuf::from("/sys/block");
+ p.push(drive_path);
+ p.push("queue/rotational");
+ p
+ })
+}
+
+impl CompactionProfile {
+ /// Attempt to determine the best profile automatically, only Linux for now.
+ #[cfg(target_os = "linux")]
+ pub fn auto(db_path: &Path) -> CompactionProfile {
+ let hdd_check_file = db_path
+ .to_str()
+ .and_then(|path_str| Command::new("df").arg(path_str).output().ok())
+ .and_then(|df_res| match df_res.status.success() {
+ true => Some(df_res.stdout),
+ false => None,
+ })
+ .and_then(rotational_from_df_output);
+ // Read out the file and match compaction profile.
+ if let Some(hdd_check) = hdd_check_file {
+ if let Ok(mut file) = File::open(hdd_check.as_path()) {
+ let mut buffer = [0; 1];
+ if file.read_exact(&mut buffer).is_ok() {
+ // 0 means not rotational.
+ if buffer == [48] { return Self::ssd(); }
+ // 1 means rotational.
+ if buffer == [49] { return Self::hdd(); }
+ }
+ }
+ }
+ // Fallback if drive type was not determined.
+ Self::default()
+ }
+
+ /// Just default for other platforms.
+ #[cfg(not(target_os = "linux"))]
+ pub fn auto(_db_path: &Path) -> CompactionProfile {
+ Self::default()
+ }
+
+ /// Default profile suitable for SSD storage
+ pub fn ssd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 32 * 1024 * 1024,
file_size_multiplier: 2,
write_rate_limit: None,
}
}
-}
-impl CompactionProfile {
- /// Slow hdd compaction profile
+ /// Slow HDD compaction profile
pub fn hdd() -> CompactionProfile {
CompactionProfile {
initial_file_size: 192 * 1024 * 1024,
@@ -538,6 +601,7 @@ mod tests {
use super::*;
use devtools::*;
use std::str::FromStr;
+ use std::path::PathBuf;
fn test_db(config: &DatabaseConfig) {
let path = RandomTempPath::create_dir();
@@ -598,4 +662,13 @@ mod tests {
let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
test_db(&DatabaseConfig::default());
}
+
+ #[test]
+ #[cfg(target_os = "linux")]
+ fn df_to_rotational() {
+ // Example df output.
+ let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
+ let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
+ assert_eq!(rotational_from_df_output(example_df), expected_output);
+ }
}
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 17c2f5151..bebb2819a 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -104,6 +104,7 @@ extern crate parking_lot;
extern crate ansi_term;
extern crate tiny_keccak;
extern crate rlp;
+extern crate regex;
#[macro_use]
extern crate heapsize;