diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 217464500..ad2e952e7 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -53,8 +53,7 @@ use types::filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; -use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, - DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient, +use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient, MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode, ChainNotify}; use client::Error as ClientError; use env_info::EnvInfo; @@ -168,9 +167,7 @@ impl Client { Some(cache_size) => DatabaseConfig::with_cache(cache_size), }; - if config.db_compaction == DatabaseCompactionProfile::HDD { - state_db_config = state_db_config.compaction(CompactionProfile::hdd()); - } + state_db_config = state_db_config.compaction(config.db_compaction.compaction_profile()); let mut state_db = journaldb::new( &append_path(&path, "state"), diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 90b4ac405..220f6f1eb 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -21,7 +21,7 @@ pub use blockchain::Config as BlockChainConfig; pub use trace::{Config as TraceConfig, Switch}; pub use evm::VMType; pub use verification::VerifierType; -use util::journaldb; +use util::{journaldb, CompactionProfile}; use util::trie::TrieSpec; /// Client state db compaction profile @@ -39,6 +39,16 @@ impl Default for DatabaseCompactionProfile { } } +impl DatabaseCompactionProfile { + /// Returns corresponding compaction profile. 
+ pub fn compaction_profile(&self) -> CompactionProfile { + match *self { + DatabaseCompactionProfile::Default => Default::default(), + DatabaseCompactionProfile::HDD => CompactionProfile::hdd(), + } + } +} + impl FromStr for DatabaseCompactionProfile { type Err = String; diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 2302d881c..0e036cf35 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -126,7 +126,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> { let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm)); + try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); // prepare client config let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.vm_type, "".into(), spec.fork_name.as_ref()); @@ -237,7 +237,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> { let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm)); + try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); // prepare client config let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, VMType::default(), "".into(), spec.fork_name.as_ref()); diff --git a/parity/helpers.rs b/parity/helpers.rs index 1018437c5..76d8250e5 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -19,7 +19,7 @@ use std::io::{Write, Read, BufReader, BufRead}; use std::time::Duration; use std::path::Path; use std::fs::File; -use util::{clean_0x, U256, 
Uint, Address, path, is_valid_node_url, H256, CompactionProfile}; use util::journaldb::Algorithm; use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig}; use ethcore::miner::PendingSet; @@ -220,7 +220,14 @@ pub fn to_client_config( client_config } -pub fn execute_upgrades(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> Result<(), String> { +pub fn execute_upgrades( + dirs: &Directories, + genesis_hash: H256, + fork_name: Option<&String>, + pruning: Algorithm, + compaction_profile: CompactionProfile +) -> Result<(), String> { + match upgrade(Some(&dirs.db)) { Ok(upgrades_applied) if upgrades_applied > 0 => { debug!("Executed {} upgrade scripts - ok", upgrades_applied); @@ -232,7 +239,7 @@ pub fn execute_upgrades(dirs: &Directories, genesis_hash: H256, fork_name: Optio } let client_path = dirs.client_path(genesis_hash, fork_name, pruning); - migrate(&client_path, pruning).map_err(|e| format!("{}", e)) + migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) } /// Prompts user asking for password. diff --git a/parity/migration.rs b/parity/migration.rs index 590a9f45c..33a000c56 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -21,6 +21,7 @@ use std::path::{Path, PathBuf}; use std::fmt::{Display, Formatter, Error as FmtError}; use util::journaldb::Algorithm; use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError}; +use util::kvdb::CompactionProfile; use ethcore::migrations; /// Database is assumed to be at default version, when no version file is found. @@ -140,29 +141,30 @@ fn backup_database_path(path: &Path) -> PathBuf { } /// Default migration settings. 
-fn default_migration_settings() -> MigrationConfig { +fn default_migration_settings(compaction_profile: CompactionProfile) -> MigrationConfig { MigrationConfig { batch_size: BATCH_SIZE, + compaction_profile: compaction_profile, } } /// Migrations on the blocks database. -fn blocks_database_migrations() -> Result<MigrationManager, Error> { - let mut manager = MigrationManager::new(default_migration_settings()); +fn blocks_database_migrations(compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); try!(manager.add_migration(migrations::blocks::V8::default()).map_err(|_| Error::MigrationImpossible)); Ok(manager) } /// Migrations on the extras database. -fn extras_database_migrations() -> Result<MigrationManager, Error> { - let mut manager = MigrationManager::new(default_migration_settings()); +fn extras_database_migrations(compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); try!(manager.add_migration(migrations::extras::ToV6).map_err(|_| Error::MigrationImpossible)); Ok(manager) } /// Migrations on the state database. -fn state_database_migrations(pruning: Algorithm) -> Result<MigrationManager, Error> { - let mut manager = MigrationManager::new(default_migration_settings()); +fn state_database_migrations(pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<MigrationManager, Error> { + let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); let res = match pruning { Algorithm::Archive => manager.add_migration(migrations::state::ArchiveV7::default()), Algorithm::OverlayRecent => manager.add_migration(migrations::state::OverlayRecentV7::default()), @@ -208,7 +210,7 @@ fn exists(path: &Path) -> bool { } /// Migrates the database. -pub fn migrate(path: &Path, pruning: Algorithm) -> Result<(), Error> { +pub fn migrate(path: &Path, pruning: Algorithm, compaction_profile: CompactionProfile) -> Result<(), Error> { // read version file. 
let version = try!(current_version(path)); @@ -216,9 +218,9 @@ pub fn migrate(path: &Path, pruning: Algorithm) -> Result<(), Error> { // main db directory may already exists, so let's check if we have blocks dir if version < CURRENT_VERSION && exists(&blocks_database_path(path)) { println!("Migrating database from version {} to {}", version, CURRENT_VERSION); - try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations()))); - try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations()))); - try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning)))); + try!(migrate_database(version, blocks_database_path(path), try!(blocks_database_migrations(compaction_profile.clone())))); + try!(migrate_database(version, extras_database_path(path), try!(extras_database_migrations(compaction_profile.clone())))); + try!(migrate_database(version, state_database_path(path), try!(state_database_migrations(pruning, compaction_profile)))); println!("Migration finished"); } else if version > CURRENT_VERSION { return Err(Error::FutureDBVersion); diff --git a/parity/run.rs b/parity/run.rs index d531806de..53356b414 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -108,7 +108,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm); // execute upgrades - try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm)); + try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile())); // run in daemon mode if let Some(pid_file) = cmd.daemon { diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 8461f1782..f3dd5fa12 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -52,6 +52,7 @@ impl DBTransaction { } /// Compaction profile for the database settings +#[derive(Clone)] pub struct CompactionProfile { /// L0-L1 target file size pub 
initial_file_size: u64, @@ -61,16 +62,18 @@ pub struct CompactionProfile { pub write_rate_limit: Option<u64>, } -impl CompactionProfile { +impl Default for CompactionProfile { /// Default profile suitable for most storage - pub fn default() -> CompactionProfile { + fn default() -> CompactionProfile { CompactionProfile { initial_file_size: 32 * 1024 * 1024, file_size_multiplier: 2, write_rate_limit: None, } } +} +impl CompactionProfile { /// Slow hdd compaction profile pub fn hdd() -> CompactionProfile { CompactionProfile { diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs index 258a20b65..de62c0519 100644 --- a/util/src/migration/mod.rs +++ b/util/src/migration/mod.rs @@ -29,12 +29,15 @@ use ::kvdb::{CompactionProfile, Database, DatabaseConfig, DBTransaction}; pub struct Config { /// Defines how many elements should be migrated at once. pub batch_size: usize, + /// Database compaction profile. + pub compaction_profile: CompactionProfile, } impl Default for Config { fn default() -> Self { Config { batch_size: 1024, + compaction_profile: Default::default(), } } } @@ -199,7 +202,7 @@ impl Manager { let db_config = DatabaseConfig { max_open_files: 64, cache_size: None, - compaction: CompactionProfile::default(), + compaction: config.compaction_profile.clone(), }; let db_root = database_path(old_path);