Automatic compaction selection on Linux (#2785)

* add auto compaction types

* pass db paths

* detect drive type on Linux

* use base db path

* add docstring

* limit the test to be side effect free

* use base db path

* more docs

* fix parsing test

* update error

* detect only on Linux

* make test Linux only

* add second device letter, update cli doc

* use spaces in cli doc

* import only on linux

* default->auto
Authored by keorn on 2016-10-21 22:21:57 +01:00; committed by Arkadiy Paronyan
parent df799362bf
commit 479657b23b
12 changed files with 105 additions and 22 deletions
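
In short: the --db-compaction flag gains an `auto` value, which becomes the default. On Linux, `auto` runs `df` against the database path to find the backing block device and reads the kernel's rotational flag from sysfs, picking the HDD profile for spinning disks and the SSD profile otherwise; on other platforms, and whenever detection fails, it falls back to the SSD profile. The file-by-file changes follow.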

Cargo.lock (generated)

@@ -554,6 +554,7 @@ dependencies = [
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.1.0",
  "rocksdb 0.4.5 (git+https://github.com/ethcore/rust-rocksdb)",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",

------------------------------------------------------------

@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 use std::str::FromStr;
+use std::path::Path;
 pub use std::time::Duration;
 pub use blockchain::Config as BlockChainConfig;
 pub use trace::Config as TraceConfig;
@@ -26,23 +27,26 @@ use util::{journaldb, CompactionProfile};
 /// Client state db compaction profile
 #[derive(Debug, PartialEq)]
 pub enum DatabaseCompactionProfile {
-	/// Default compaction profile
-	Default,
+	/// Try to determine compaction profile automatically
+	Auto,
+	/// SSD compaction profile
+	SSD,
 	/// HDD or other slow storage io compaction profile
 	HDD,
 }

 impl Default for DatabaseCompactionProfile {
 	fn default() -> Self {
-		DatabaseCompactionProfile::Default
+		DatabaseCompactionProfile::Auto
 	}
 }

 impl DatabaseCompactionProfile {
 	/// Returns corresponding compaction profile.
-	pub fn compaction_profile(&self) -> CompactionProfile {
+	pub fn compaction_profile(&self, db_path: &Path) -> CompactionProfile {
 		match *self {
-			DatabaseCompactionProfile::Default => Default::default(),
+			DatabaseCompactionProfile::Auto => CompactionProfile::auto(db_path),
+			DatabaseCompactionProfile::SSD => CompactionProfile::ssd(),
 			DatabaseCompactionProfile::HDD => CompactionProfile::hdd(),
 		}
 	}
@@ -53,9 +57,10 @@ impl FromStr for DatabaseCompactionProfile {
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
 		match s {
-			"ssd" | "default" => Ok(DatabaseCompactionProfile::Default),
+			"auto" => Ok(DatabaseCompactionProfile::Auto),
+			"ssd" => Ok(DatabaseCompactionProfile::SSD),
 			"hdd" => Ok(DatabaseCompactionProfile::HDD),
-			_ => Err("Invalid compaction profile given. Expected hdd/ssd (default).".into()),
+			_ => Err("Invalid compaction profile given. Expected auto/hdd/ssd.".into()),
 		}
 	}
 }
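
Note that the old "default" spelling no longer parses; only `auto`, `ssd`, and `hdd` are accepted. A standalone sketch of the new parsing contract (the enum is re-declared here for illustration, not imported from the crate):

    use std::str::FromStr;

    #[derive(Debug, PartialEq)]
    enum DatabaseCompactionProfile { Auto, SSD, HDD }

    impl FromStr for DatabaseCompactionProfile {
        type Err = String;
        fn from_str(s: &str) -> Result<Self, Self::Err> {
            match s {
                "auto" => Ok(DatabaseCompactionProfile::Auto),
                "ssd" => Ok(DatabaseCompactionProfile::SSD),
                "hdd" => Ok(DatabaseCompactionProfile::HDD),
                _ => Err("Invalid compaction profile given. Expected auto/hdd/ssd.".into()),
            }
        }
    }

    fn main() {
        assert_eq!("auto".parse(), Ok(DatabaseCompactionProfile::Auto));
        // "default" was accepted before this change; it now fails to parse.
        assert!("default".parse::<DatabaseCompactionProfile>().is_err());
    }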
@@ -120,13 +125,13 @@ mod test {
 	#[test]
 	fn test_default_compaction_profile() {
-		assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Default);
+		assert_eq!(DatabaseCompactionProfile::default(), DatabaseCompactionProfile::Auto);
 	}

 	#[test]
 	fn test_parsing_compaction_profile() {
-		assert_eq!(DatabaseCompactionProfile::Default, "ssd".parse().unwrap());
-		assert_eq!(DatabaseCompactionProfile::Default, "default".parse().unwrap());
+		assert_eq!(DatabaseCompactionProfile::Auto, "auto".parse().unwrap());
+		assert_eq!(DatabaseCompactionProfile::SSD, "ssd".parse().unwrap());
 		assert_eq!(DatabaseCompactionProfile::HDD, "hdd".parse().unwrap());
 	}

------------------------------------------------------------

@@ -87,7 +87,7 @@ impl ClientService {
 			db_config.set_cache(::db::COL_STATE, size);
 		}
-		db_config.compaction = config.db_compaction.compaction_profile();
+		db_config.compaction = config.db_compaction.compaction_profile(&client_path);
 		db_config.wal = config.db_wal;

 		let pruning = config.pruning;

------------------------------------------------------------

@@ -155,7 +155,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));

 	// prepare client config
 	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm, cmd.pruning_history);
@@ -306,7 +306,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));

 	// prepare client config
 	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm, cmd.pruning_history);

------------------------------------------------------------

@@ -225,7 +225,7 @@ usage! {
 			or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some),
 		flag_fast_and_loose: bool = false,
 			or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
-		flag_db_compaction: String = "ssd",
+		flag_db_compaction: String = "auto",
 			or |c: &Config| otry!(c.footprint).db_compaction.clone(),
 		flag_fat_db: String = "auto",
 			or |c: &Config| otry!(c.footprint).fat_db.clone(),

------------------------------------------------------------

@@ -232,7 +232,8 @@ Footprint Options:
                                 but means an unclean exit is unrecoverable. (default: {flag_fast_and_loose})
   --db-compaction TYPE          Database compaction type. TYPE may be one of:
                                 ssd - suitable for SSDs and fast HDDs;
-                                hdd - suitable for slow HDDs (default: {flag_db_compaction}).
+                                hdd - suitable for slow HDDs;
+                                auto - determine automatically (default: {flag_db_compaction}).
   --fat-db BOOL                 Build appropriate information to allow enumeration
                                 of all accounts and storage keys. Doubles the size
                                 of the state database. BOOL may be one of on, off
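
For example (invocation shape taken from the help text above; the binary name is assumed):

    parity --db-compaction auto    # detect drive type; same as omitting the flag
    parity --db-compaction hdd     # force the slow-storage profile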

------------------------------------------------------------

@@ -77,7 +77,8 @@ pub struct DatabaseDirectories {
 }

 impl DatabaseDirectories {
-	fn fork_path(&self) -> PathBuf {
+	/// Base DB directory for the given fork.
+	pub fn fork_path(&self) -> PathBuf {
 		let mut dir = Path::new(&self.path).to_path_buf();
 		dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
 		dir

------------------------------------------------------------

@@ -134,7 +134,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));

 	// run in daemon mode
 	if let Some(pid_file) = cmd.daemon {

------------------------------------------------------------

@@ -160,7 +160,7 @@ impl SnapshotCommand {
 	let snapshot_path = db_dirs.snapshot_path();

 	// execute upgrades
-	try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path())));

 	// prepare client config
 	let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history);

------------------------------------------------------------

@@ -35,6 +35,7 @@ table = { path = "table" }
 ansi_term = "0.7"
 tiny-keccak= "1.0"
 ethcore-bloom-journal = { path = "bloom" }
+regex = "0.1"

 [features]
 default = []

------------------------------------------------------------

@@ -24,6 +24,12 @@ use std::path::PathBuf;
 use rlp::{UntrustedRlp, RlpType, View, Compressible};
 use rocksdb::{DB, Writable, WriteBatch, WriteOptions, IteratorMode, DBIterator,
 	Options, DBCompactionStyle, BlockBasedOptions, Direction, Cache, Column, ReadOptions};
+#[cfg(target_os = "linux")]
+use regex::Regex;
+#[cfg(target_os = "linux")]
+use std::process::Command;
+#[cfg(target_os = "linux")]
+use std::fs::File;

 const DB_BACKGROUND_FLUSHES: i32 = 2;
 const DB_BACKGROUND_COMPACTIONS: i32 = 2;
@@ -110,7 +116,7 @@ enum KeyState {
 }

 /// Compaction profile for the database settings
-#[derive(Clone, Copy)]
+#[derive(Clone, Copy, PartialEq, Debug)]
 pub struct CompactionProfile {
 	/// L0-L1 target file size
 	pub initial_file_size: u64,
@@ -123,16 +129,73 @@ pub struct CompactionProfile {
 impl Default for CompactionProfile {
 	/// Default profile suitable for most storage
 	fn default() -> CompactionProfile {
+		CompactionProfile::ssd()
+	}
+}
+
+/// Given output of df command return Linux rotational flag file path.
+#[cfg(target_os = "linux")]
+pub fn rotational_from_df_output(df_out: Vec<u8>) -> Option<PathBuf> {
+	str::from_utf8(df_out.as_slice())
+		.ok()
+		// Get the drive name.
+		.and_then(|df_str| Regex::new(r"/dev/(sd[[:alpha:]]{1,2})")
+			.ok()
+			.and_then(|re| re.captures(df_str))
+			.and_then(|captures| captures.at(1)))
+		// Generate path e.g. /sys/block/sda/queue/rotational.
+		.map(|drive_path| {
+			let mut p = PathBuf::from("/sys/block");
+			p.push(drive_path);
+			p.push("queue/rotational");
+			p
+		})
+}
+
+impl CompactionProfile {
+	/// Attempt to determine the best profile automatically, only Linux for now.
+	#[cfg(target_os = "linux")]
+	pub fn auto(db_path: &Path) -> CompactionProfile {
+		let hdd_check_file = db_path
+			.to_str()
+			.and_then(|path_str| Command::new("df").arg(path_str).output().ok())
+			.and_then(|df_res| match df_res.status.success() {
+				true => Some(df_res.stdout),
+				false => None,
+			})
+			.and_then(rotational_from_df_output);
+		// Read out the file and match compaction profile.
+		if let Some(hdd_check) = hdd_check_file {
+			if let Ok(mut file) = File::open(hdd_check.as_path()) {
+				let mut buffer = [0; 1];
+				if file.read_exact(&mut buffer).is_ok() {
+					// 0 (ASCII 48) means not rotational.
+					if buffer == [48] { return Self::ssd(); }
+					// 1 (ASCII 49) means rotational.
+					if buffer == [49] { return Self::hdd(); }
+				}
+			}
+		}
+		// Fallback if drive type was not determined.
+		Self::default()
+	}
+
+	/// Just default for other platforms.
+	#[cfg(not(target_os = "linux"))]
+	pub fn auto(_db_path: &Path) -> CompactionProfile {
+		Self::default()
+	}
+
+	/// Default profile suitable for SSD storage
+	pub fn ssd() -> CompactionProfile {
 		CompactionProfile {
 			initial_file_size: 32 * 1024 * 1024,
 			file_size_multiplier: 2,
 			write_rate_limit: None,
 		}
 	}
-}

-impl CompactionProfile {
-	/// Slow hdd compaction profile
+	/// Slow HDD compaction profile
 	pub fn hdd() -> CompactionProfile {
 		CompactionProfile {
 			initial_file_size: 192 * 1024 * 1024,
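
Taken together, the detection pipeline is: run `df <db_path>`, extract the `/dev/sdXY` device name from its output, and read `/sys/block/<drive>/queue/rotational`, where the kernel reports 1 for spinning disks and 0 otherwise. A self-contained sketch of the same idea (modern `regex` API and a hypothetical `is_rotational` helper; not the crate's regex-0.1 code):

    use std::fs;
    use regex::Regex;

    /// Classify the device named in a df(1) report.
    /// Some(true) = rotational (HDD), Some(false) = SSD, None = undetermined.
    fn is_rotational(df_output: &str) -> Option<bool> {
        // POSIX classes need the double-bracket form, [[:alpha:]], inside a class.
        let re = Regex::new(r"/dev/(sd[[:alpha:]]{1,2})").ok()?;
        let drive = re.captures(df_output)?.get(1)?.as_str();
        let flag = fs::read_to_string(format!("/sys/block/{}/queue/rotational", drive)).ok()?;
        match flag.trim() {
            "0" => Some(false), // non-rotational: use the SSD profile
            "1" => Some(true),  // rotational: use the HDD profile
            _ => None,
        }
    }

    fn main() {
        let df_output = "Filesystem 1K-blocks Used Available Use% Mounted on\n\
                         /dev/sda1  61409300 38822236 19444616 67% /\n";
        println!("rotational: {:?}", is_rotational(df_output));
    }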
@@ -538,6 +601,7 @@ mod tests {
 	use super::*;
 	use devtools::*;
 	use std::str::FromStr;
+	use std::path::PathBuf;

 	fn test_db(config: &DatabaseConfig) {
 		let path = RandomTempPath::create_dir();
@@ -598,4 +662,15 @@ mod tests {
 		let _ = Database::open_default(path.as_path().to_str().unwrap()).unwrap();
 		test_db(&DatabaseConfig::default());
 	}
+
+	#[test]
+	#[cfg(target_os = "linux")]
+	fn df_to_rotational() {
+		// Example df output; the bytes are the UTF-8 encoding of:
+		//   Filesystem     1K-blocks     Used Available Use% Mounted on
+		//   /dev/sda1       61409300 38822236  19444616  67% /
+		let example_df = vec![70, 105, 108, 101, 115, 121, 115, 116, 101, 109, 32, 32, 32, 32, 32, 49, 75, 45, 98, 108, 111, 99, 107, 115, 32, 32, 32, 32, 32, 85, 115, 101, 100, 32, 65, 118, 97, 105, 108, 97, 98, 108, 101, 32, 85, 115, 101, 37, 32, 77, 111, 117, 110, 116, 101, 100, 32, 111, 110, 10, 47, 100, 101, 118, 47, 115, 100, 97, 49, 32, 32, 32, 32, 32, 32, 32, 54, 49, 52, 48, 57, 51, 48, 48, 32, 51, 56, 56, 50, 50, 50, 51, 54, 32, 32, 49, 57, 52, 52, 52, 54, 49, 54, 32, 32, 54, 55, 37, 32, 47, 10];
+		let expected_output = Some(PathBuf::from("/sys/block/sda/queue/rotational"));
+		assert_eq!(rotational_from_df_output(example_df), expected_output);
+	}
 }

------------------------------------------------------------

@@ -104,6 +104,7 @@ extern crate parking_lot;
 extern crate ansi_term;
 extern crate tiny_keccak;
 extern crate rlp;
+extern crate regex;

 #[macro_use]
 extern crate heapsize;