user defaults (#2014)

* user defaults

* finished user defaults

* user defaults are network-dependent

* added tests for newly added functions, logger is initialized first

* dir cleanup in progress

* user_file is placed next to snapshots
Author: Marek Kotewicz, 2016-09-26 19:21:25 +02:00, committed by GitHub
parent 598e9cea85
commit 56eb97abbf
16 changed files with 371 additions and 253 deletions

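Editor's note: based on the Serialize impl in parity/user_defaults.rs at the bottom of this diff, the persisted user defaults are a flat JSON map written with serde_json. For a node using archive pruning with tracing off, the file would look roughly like this (illustrative; key order follows the BTreeMap):

	{"pruning":"archive","tracing":false}

is_first_launch is deliberately not persisted: it is true only via UserDefaults::default (no file on disk yet), and the deserializer sets it to false whenever an existing file is read.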
Cargo.lock (generated, 2 lines changed)

@@ -37,6 +37,8 @@ dependencies = [
 "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
 "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
+"serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
 "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
 "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",

Cargo.toml

@@ -44,6 +44,8 @@ json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" }
 ethcore-dapps = { path = "dapps", optional = true }
 clippy = { version = "0.0.90", optional = true}
 ethcore-stratum = { path = "stratum" }
+serde = "0.8.0"
+serde_json = "0.8.0"
 
 [target.'cfg(windows)'.dependencies]
 winapi = "0.2"

ethcore/src/client/client.rs

@@ -170,7 +170,7 @@ impl Client {
 		let db = Arc::new(try!(Database::open(&db_config, &path.to_str().unwrap()).map_err(ClientError::Database)));
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
-		let tracedb = RwLock::new(try!(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone())));
+		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
 		let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) {
@@ -687,7 +687,7 @@ impl snapshot::DatabaseRestore for Client {
 		*state_db = journaldb::new(db.clone(), self.pruning, ::db::COL_STATE);
 		*chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone()));
-		*tracedb = try!(TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()).map_err(ClientError::from));
+		*tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone());
 		Ok(())
 	}
 }

ethcore/src/client/config.rs

@@ -18,7 +18,7 @@ use std::str::FromStr;
 pub use std::time::Duration;
 pub use block_queue::BlockQueueConfig;
 pub use blockchain::Config as BlockChainConfig;
-pub use trace::{Config as TraceConfig, Switch};
+pub use trace::Config as TraceConfig;
 pub use evm::VMType;
 pub use verification::VerifierType;
 use util::{journaldb, CompactionProfile};

ethcore/src/client/mod.rs

@@ -23,7 +23,7 @@ mod trace;
 mod client;
 pub use self::client::*;
-pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
+pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, VMType};
 pub use self::error::Error;
 pub use types::ids::*;
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};

ethcore/src/trace/config.rs

@@ -15,57 +15,14 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 //! Traces config.
-use std::str::FromStr;
 use bloomchain::Config as BloomConfig;
-use trace::Error;
-
-/// 3-value enum.
-#[derive(Debug, Clone, Copy, PartialEq)]
-pub enum Switch {
-	/// True.
-	On,
-	/// False.
-	Off,
-	/// Auto.
-	Auto,
-}
-
-impl Default for Switch {
-	fn default() -> Self {
-		Switch::Auto
-	}
-}
-
-impl FromStr for Switch {
-	type Err = String;
-
-	fn from_str(s: &str) -> Result<Self, Self::Err> {
-		match s {
-			"on" => Ok(Switch::On),
-			"off" => Ok(Switch::Off),
-			"auto" => Ok(Switch::Auto),
-			other => Err(format!("Invalid switch value: {}", other))
-		}
-	}
-}
-
-impl Switch {
-	/// Tries to turn old switch to new value.
-	pub fn turn_to(&self, to: Switch) -> Result<bool, Error> {
-		match (*self, to) {
-			(Switch::On, Switch::On) | (Switch::On, Switch::Auto) | (Switch::Auto, Switch::On) => Ok(true),
-			(Switch::Off, Switch::On) => Err(Error::ResyncRequired),
-			_ => Ok(false),
-		}
-	}
-}
 
 /// Traces config.
 #[derive(Debug, PartialEq, Clone)]
 pub struct Config {
 	/// Indicates if tracing should be enabled or not.
 	/// If it's None, it will be automatically configured.
-	pub enabled: Switch,
+	pub enabled: bool,
 	/// Traces blooms configuration.
 	pub blooms: BloomConfig,
 	/// Preferef cache-size.
@@ -77,7 +34,7 @@ pub struct Config {
 impl Default for Config {
 	fn default() -> Self {
 		Config {
-			enabled: Switch::default(),
+			enabled: false,
 			blooms: BloomConfig {
 				levels: 3,
 				elements_per_index: 16,
@@ -87,20 +44,3 @@ impl Default for Config {
 		}
 	}
 }
-
-#[cfg(test)]
-mod tests {
-	use super::Switch;
-
-	#[test]
-	fn test_switch_parsing() {
-		assert_eq!(Switch::On, "on".parse().unwrap());
-		assert_eq!(Switch::Off, "off".parse().unwrap());
-		assert_eq!(Switch::Auto, "auto".parse().unwrap());
-	}
-
-	#[test]
-	fn test_switch_default() {
-		assert_eq!(Switch::default(), Switch::Auto);
-	}
-}

ethcore/src/trace/db.rs

@@ -22,7 +22,7 @@ use bloomchain::{Number, Config as BloomConfig};
 use bloomchain::group::{BloomGroupDatabase, BloomGroupChain, GroupPosition, BloomGroup};
 use util::{H256, H264, Database, DBTransaction, RwLock, HeapSizeOf};
 use header::BlockNumber;
-use trace::{LocalizedTrace, Config, Switch, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras, Error};
+use trace::{LocalizedTrace, Config, Filter, Database as TraceDatabase, ImportRequest, DatabaseExtras};
 use db::{self, Key, Writable, Readable, CacheUpdatePolicy};
 use blooms;
 use super::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@@ -126,38 +126,20 @@ impl<T> BloomGroupDatabase for TraceDB<T> where T: DatabaseExtras {
 impl<T> TraceDB<T> where T: DatabaseExtras {
 	/// Creates new instance of `TraceDB`.
-	pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Result<Self, Error> {
-		// check if in previously tracing was enabled
-		let old_tracing = match tracesdb.get(db::COL_TRACE, b"enabled").unwrap() {
-			Some(ref value) if value as &[u8] == &[0x1] => Switch::On,
-			Some(ref value) if value as &[u8] == &[0x0] => Switch::Off,
-			Some(_) => { panic!("tracesdb is corrupted") },
-			None => Switch::Auto,
-		};
-
-		let enabled = try!(old_tracing.turn_to(config.enabled));
-
-		let encoded_tracing = match enabled {
-			true => [0x1],
-			false => [0x0]
-		};
+	pub fn new(config: Config, tracesdb: Arc<Database>, extras: Arc<T>) -> Self {
 		let mut batch = DBTransaction::new(&tracesdb);
-		batch.put(db::COL_TRACE, b"enabled", &encoded_tracing);
 		batch.put(db::COL_TRACE, b"version", TRACE_DB_VER);
 		tracesdb.write(batch).unwrap();
 
-		let db = TraceDB {
+		TraceDB {
 			traces: RwLock::new(HashMap::new()),
 			blooms: RwLock::new(HashMap::new()),
 			cache_manager: RwLock::new(CacheManager::new(config.pref_cache_size, config.max_cache_size, 10 * 1024)),
 			tracesdb: tracesdb,
 			bloom_config: config.blooms,
-			enabled: enabled,
+			enabled: config.enabled,
 			extras: extras,
-		};
-
-		Ok(db)
+		}
 	}
 
 	fn cache_size(&self) -> usize {
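Editor's note: with the tri-state Switch resolved to a plain bool before ethcore is involved, TraceDB::new is now infallible, so call sites lose the try!/.unwrap() wrapping, as the updated tests below show. A minimal sketch of the new call shape (setup elided; NoopExtras is the test helper from this file):

	let mut config = Config::default();
	config.enabled = true;
	let tracedb = TraceDB::new(config, db.clone(), Arc::new(NoopExtras));
	assert_eq!(tracedb.tracing_enabled(), true);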
@@ -419,7 +401,7 @@ mod tests {
 	use util::{Address, U256, H256, Database, DatabaseConfig, DBTransaction};
 	use devtools::RandomTempPath;
 	use header::BlockNumber;
-	use trace::{Config, Switch, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
+	use trace::{Config, TraceDB, Database as TraceDatabase, DatabaseExtras, ImportRequest};
 	use trace::{Filter, LocalizedTrace, AddressesFilter, TraceError};
 	use trace::trace::{Call, Action, Res};
 	use trace::flat::{FlatTrace, FlatBlockTraces, FlatTransactionTraces};
@@ -474,22 +456,10 @@ mod tests {
 		let mut config = Config::default();
 
 		// set autotracing
-		config.enabled = Switch::Auto;
+		config.enabled = false;
 
 		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), false);
-		}
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), false);
-		}
-
-		config.enabled = Switch::Off;
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
 			assert_eq!(tracedb.tracing_enabled(), false);
 		}
 	}
@@ -501,50 +471,12 @@ mod tests {
 		let mut config = Config::default();
 
 		// set tracing on
-		config.enabled = Switch::On;
+		config.enabled = true;
 
 		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
+			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras));
 			assert_eq!(tracedb.tracing_enabled(), true);
 		}
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), true);
-		}
-
-		config.enabled = Switch::Auto;
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), true);
-		}
-
-		config.enabled = Switch::Off;
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), false);
-		}
-	}
-
-	#[test]
-	#[should_panic]
-	fn test_invalid_reopening_db() {
-		let temp = RandomTempPath::new();
-		let db = new_db(temp.as_str());
-		let mut config = Config::default();
-
-		// set tracing on
-		config.enabled = Switch::Off;
-
-		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap();
-			assert_eq!(tracedb.tracing_enabled(), true);
-		}
-
-		config.enabled = Switch::On;
-		TraceDB::new(config.clone(), db.clone(), Arc::new(NoopExtras)).unwrap(); // should panic!
 	}
 
 	fn create_simple_import_request(block_number: BlockNumber, block_hash: H256) -> ImportRequest {
@@ -595,7 +527,7 @@ mod tests {
 		let temp = RandomTempPath::new();
 		let db = Arc::new(Database::open(&DatabaseConfig::with_columns(::db::NUM_COLUMNS), temp.as_str()).unwrap());
 		let mut config = Config::default();
-		config.enabled = Switch::On;
+		config.enabled = true;
 		let block_0 = H256::from(0xa1);
 		let block_1 = H256::from(0xa2);
 		let tx_0 = H256::from(0xff);
@@ -607,7 +539,7 @@ mod tests {
 		extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
 		extras.transaction_hashes.insert(1, vec![tx_1.clone()]);
 
-		let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras)).unwrap();
+		let tracedb = TraceDB::new(config, db.clone(), Arc::new(extras));
 
 		// import block 0
 		let request = create_simple_import_request(0, block_0.clone());
@@ -679,10 +611,10 @@ mod tests {
 		extras.transaction_hashes.insert(0, vec![tx_0.clone()]);
 
 		// set tracing on
-		config.enabled = Switch::On;
+		config.enabled = true;
 
 		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone())).unwrap();
+			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras.clone()));
 
 			// import block 0
 			let request = create_simple_import_request(0, block_0.clone());
@@ -692,7 +624,7 @@ mod tests {
 		}
 
 		{
-			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras)).unwrap();
+			let tracedb = TraceDB::new(config.clone(), db.clone(), Arc::new(extras));
 			let traces = tracedb.transaction_traces(0, 0);
 			assert_eq!(traces.unwrap(), vec![create_simple_localized_trace(0, block_0, tx_0)]);
 		}

ethcore/src/trace/mod.rs

@@ -26,7 +26,7 @@ mod noop_tracer;
 pub use types::trace_types::{filter, flat, localized, trace};
 pub use types::trace_types::error::Error as TraceError;
-pub use self::config::{Config, Switch};
+pub use self::config::Config;
 pub use self::db::TraceDB;
 pub use self::error::Error;
 pub use types::trace_types::trace::{VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff};

parity/blockchain.rs

@@ -26,15 +26,16 @@ use io::{PanicHandler, ForwardPanic};
 use util::{ToPretty, Uint};
 use rlp::PayloadInfo;
 use ethcore::service::ClientService;
-use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType, BlockImportError, BlockChainClient, BlockID};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID};
 use ethcore::error::ImportError;
 use ethcore::miner::Miner;
 use cache::CacheConfig;
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use informant::{Informant, MillisecondDuration};
 use io_handler::ImportIoHandler;
-use params::{SpecType, Pruning};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
+use user_defaults::UserDefaults;
 use fdlimit;
 
@@ -113,29 +114,44 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	// Setup panic handler
 	let panic_handler = PanicHandler::new_in_arc();
 
+	// Setup logging
+	let _logger = setup_log(&cmd.logger_config);
+
+	// create dirs used by parity
+	try!(cmd.dirs.create_dirs());
+
 	// load spec file
 	let spec = try!(cmd.spec.spec());
 
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
 
-	// Setup logging
-	let _logger = setup_log(&cmd.logger_config);
+	// database paths
+	let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+
+	// user defaults path
+	let user_defaults_path = db_dirs.user_defaults_path();
+
+	// load user defaults
+	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
 
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
 	// prepare client and snapshot paths.
-	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+	let client_path = db_dirs.client_path(algorithm);
+	let snapshot_path = db_dirs.snapshot_path();
 
 	// execute upgrades
-	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), spec.fork_name.as_ref());
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
 
 	// build client
 	let service = try!(ClientService::start(
@@ -220,6 +236,12 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 		}
 	}
 	client.flush_queue();
 
+	// save user defaults
+	user_defaults.pruning = algorithm;
+	user_defaults.tracing = tracing;
+	try!(user_defaults.save(&user_defaults_path));
+
 	let report = client.report();
 	let ms = timer.elapsed().as_milliseconds();
@@ -238,6 +260,12 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	// Setup panic handler
 	let panic_handler = PanicHandler::new_in_arc();
 
+	// Setup logging
+	let _logger = setup_log(&cmd.logger_config);
+
+	// create dirs used by parity
+	try!(cmd.dirs.create_dirs());
+
 	let format = cmd.format.unwrap_or_default();
 
 	// load spec file
@@ -246,23 +274,32 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
 
-	// Setup logging
-	let _logger = setup_log(&cmd.logger_config);
+	// database paths
+	let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+
+	// user defaults path
+	let user_defaults_path = db_dirs.user_defaults_path();
+
+	// load user defaults
+	let user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
 
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, spec.fork_name.as_ref());
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
 	// prepare client and snapshot paths.
-	let client_path = cmd.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+	let client_path = db_dirs.client_path(algorithm);
+	let snapshot_path = db_dirs.snapshot_path();
 
 	// execute upgrades
-	try!(execute_upgrades(&cmd.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, &cmd.dirs, genesis_hash, cmd.mode, cmd.tracing, cmd.pruning, cmd.compaction, cmd.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
 
 	let service = try!(ClientService::start(
 		client_config,

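Editor's note: the startup sequence added to execute_import and execute_export above recurs in run.rs and snapshot.rs below: derive the per-chain database directories, load the persisted defaults, resolve the tracing switch and pruning algorithm against them, and (where the commands mutate state) save the resolved values back. A condensed sketch of the shared flow, using only names from this diff:

	let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
	let user_defaults_path = db_dirs.user_defaults_path();
	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
	// ... build the client and do the work ...
	user_defaults.pruning = algorithm;
	user_defaults.tracing = tracing;
	try!(user_defaults.save(&user_defaults_path));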
parity/dir.rs

@@ -52,32 +52,13 @@ impl Directories {
 		Ok(())
 	}
 
-	/// Get the chain's root path.
-	pub fn chain_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-		let mut dir = Path::new(&self.db).to_path_buf();
-		dir.push(format!("{:?}{}", H64::from(genesis_hash), fork_name.map(|f| format!("-{}", f)).unwrap_or_default()));
-		dir
-	}
-
-	/// Get the root path for database
-	pub fn db_version_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-		let mut dir = self.chain_path(genesis_hash, fork_name);
-		dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
-		dir
-	}
-
-	/// Get the path for the databases given the genesis_hash and information on the databases.
-	pub fn client_path(&self, genesis_hash: H256, fork_name: Option<&String>, pruning: Algorithm) -> PathBuf {
-		let mut dir = self.db_version_path(genesis_hash, fork_name, pruning);
-		dir.push("db");
-		dir
-	}
-
-	/// Get the path for the snapshot directory given the genesis hash and fork name.
-	pub fn snapshot_path(&self, genesis_hash: H256, fork_name: Option<&String>) -> PathBuf {
-		let mut dir = self.chain_path(genesis_hash, fork_name);
-		dir.push("snapshot");
-		dir
+	/// Database paths.
+	pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
+		DatabaseDirectories {
+			path: self.db.clone(),
+			genesis_hash: genesis_hash,
+			fork_name: fork_name,
+		}
 	}
 
 	/// Get the ipc sockets path
@@ -88,6 +69,49 @@ impl Directories {
 	}
 }
 
+#[derive(Debug, PartialEq)]
+pub struct DatabaseDirectories {
+	pub path: String,
+	pub genesis_hash: H256,
+	pub fork_name: Option<String>,
+}
+
+impl DatabaseDirectories {
+	fn fork_path(&self) -> PathBuf {
+		let mut dir = Path::new(&self.path).to_path_buf();
+		dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
+		dir
+	}
+
+	/// Get the root path for database
+	pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
+		dir
+	}
+
+	/// Get the path for the databases given the genesis_hash and information on the databases.
+	pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
+		let mut dir = self.version_path(pruning);
+		dir.push("db");
+		dir
+	}
+
+	/// Get user defaults path
+	pub fn user_defaults_path(&self) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push("user_defaults");
+		dir
+	}
+
+	/// Get the path for the snapshot directory given the genesis hash and fork name.
+	pub fn snapshot_path(&self) -> PathBuf {
+		let mut dir = self.fork_path();
+		dir.push("snapshot");
+		dir
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use super::Directories;

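Editor's note: for orientation, DatabaseDirectories roots everything at fork_path(), an 8-byte digest of the genesis hash plus an optional fork suffix. With a hypothetical digest rendering as deadbeefcafebabe and no fork name, the layout under the configured db root would be roughly:

	<db>/deadbeefcafebabe/user_defaults
	<db>/deadbeefcafebabe/snapshot
	<db>/deadbeefcafebabe/v<LEGACY_CLIENT_DB_VER_STR>-sec-<pruning>/db

which is what the commit message means by "user_file is placed next to snapshots": the defaults file sits beside the snapshot directory, above the pruning-specific database directories.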
parity/helpers.rs

@@ -19,13 +19,12 @@ use std::io::{Write, Read, BufReader, BufRead};
 use std::time::Duration;
 use std::path::Path;
 use std::fs::File;
-use util::{clean_0x, U256, Uint, Address, path, H256, CompactionProfile};
+use util::{clean_0x, U256, Uint, Address, path, CompactionProfile};
 use util::journaldb::Algorithm;
-use ethcore::client::{Mode, BlockID, Switch, VMType, DatabaseCompactionProfile, ClientConfig};
+use ethcore::client::{Mode, BlockID, VMType, DatabaseCompactionProfile, ClientConfig};
 use ethcore::miner::PendingSet;
 use cache::CacheConfig;
-use dir::Directories;
-use params::Pruning;
+use dir::DatabaseDirectories;
 use upgrade::upgrade;
 use migration::migrate;
 use ethsync::is_valid_node_url;
@@ -190,16 +189,13 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
 #[cfg_attr(feature = "dev", allow(too_many_arguments))]
 pub fn to_client_config(
 	cache_config: &CacheConfig,
-	dirs: &Directories,
-	genesis_hash: H256,
 	mode: Mode,
-	tracing: Switch,
-	pruning: Pruning,
+	tracing: bool,
 	compaction: DatabaseCompactionProfile,
 	wal: bool,
 	vm_type: VMType,
 	name: String,
-	fork_name: Option<&String>,
+	pruning: Algorithm,
 ) -> ClientConfig {
 	let mut client_config = ClientConfig::default();
@@ -221,7 +217,7 @@ pub fn to_client_config(
 	client_config.mode = mode;
 	client_config.tracing.enabled = tracing;
-	client_config.pruning = pruning.to_algorithm(dirs, genesis_hash, fork_name);
+	client_config.pruning = pruning;
 	client_config.db_compaction = compaction;
 	client_config.db_wal = wal;
 	client_config.vm_type = vm_type;
@@ -230,14 +226,12 @@ pub fn to_client_config(
 }
 
 pub fn execute_upgrades(
-	dirs: &Directories,
-	genesis_hash: H256,
-	fork_name: Option<&String>,
+	dirs: &DatabaseDirectories,
 	pruning: Algorithm,
 	compaction_profile: CompactionProfile
 ) -> Result<(), String> {
-	match upgrade(Some(&dirs.db)) {
+	match upgrade(Some(&dirs.path)) {
 		Ok(upgrades_applied) if upgrades_applied > 0 => {
 			debug!("Executed {} upgrade scripts - ok", upgrades_applied);
 		},
@@ -247,7 +241,7 @@ pub fn execute_upgrades(
 		_ => {},
 	}
 
-	let client_path = dirs.db_version_path(genesis_hash, fork_name, pruning);
+	let client_path = dirs.version_path(pruning);
 	migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
 }

parity/main.rs

@@ -39,6 +39,8 @@ extern crate semver;
 extern crate ethcore_io as io;
 extern crate ethcore_ipc as ipc;
 extern crate ethcore_ipc_nano as nanoipc;
+extern crate serde;
+extern crate serde_json;
 extern crate rlp;
 extern crate json_ipc_server as jsonipc;
@@ -106,6 +108,7 @@ mod run;
 mod sync;
 #[cfg(feature="ipc")]
 mod boot;
+mod user_defaults;
 #[cfg(feature="stratum")]
 mod stratum;

parity/params.rs

@@ -14,15 +14,14 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::str::FromStr;
-use std::fs;
+use std::{str, fs};
 use std::time::Duration;
-use util::{H256, Address, U256, version_data};
+use util::{Address, U256, version_data};
 use util::journaldb::Algorithm;
 use ethcore::spec::Spec;
 use ethcore::ethereum;
 use ethcore::miner::{GasPricer, GasPriceCalibratorOptions};
-use dir::Directories;
+use user_defaults::UserDefaults;
 
 #[derive(Debug, PartialEq)]
 pub enum SpecType {
@@ -39,7 +38,7 @@ impl Default for SpecType {
 	}
 }
 
-impl FromStr for SpecType {
+impl str::FromStr for SpecType {
 	type Err = String;
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -81,7 +80,7 @@ impl Default for Pruning {
 	}
 }
 
-impl FromStr for Pruning {
+impl str::FromStr for Pruning {
 	type Err = String;
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -93,24 +92,12 @@ impl FromStr for Pruning {
 }
 
 impl Pruning {
-	pub fn to_algorithm(&self, dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
+	pub fn to_algorithm(&self, user_defaults: &UserDefaults) -> Algorithm {
 		match *self {
 			Pruning::Specific(algo) => algo,
-			Pruning::Auto => Self::find_best_db(dirs, genesis_hash, fork_name),
+			Pruning::Auto => user_defaults.pruning,
 		}
 	}
-
-	fn find_best_db(dirs: &Directories, genesis_hash: H256, fork_name: Option<&String>) -> Algorithm {
-		let mut algo_types = Algorithm::all_types();
-		// if all dbs have the same modification time, the last element is the default one
-		algo_types.push(Algorithm::default());
-		algo_types.into_iter().max_by_key(|i| {
-			let mut client_path = dirs.client_path(genesis_hash, fork_name, *i);
-			client_path.push("CURRENT");
-			fs::metadata(&client_path).and_then(|m| m.modified()).ok()
-		}).unwrap()
-	}
 }
 
 #[derive(Debug, PartialEq)]
@@ -128,7 +115,7 @@ impl Default for ResealPolicy {
 	}
 }
 
-impl FromStr for ResealPolicy {
+impl str::FromStr for ResealPolicy {
 	type Err = String;
 
 	fn from_str(s: &str) -> Result<Self, Self::Err> {
@@ -223,10 +210,50 @@ impl Default for MinerExtras {
 	}
 }
 
+/// 3-value enum.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum Switch {
+	/// True.
+	On,
+	/// False.
+	Off,
+	/// Auto.
+	Auto,
+}
+
+impl Default for Switch {
+	fn default() -> Self {
+		Switch::Auto
+	}
+}
+
+impl str::FromStr for Switch {
+	type Err = String;
+
+	fn from_str(s: &str) -> Result<Self, Self::Err> {
+		match s {
+			"on" => Ok(Switch::On),
+			"off" => Ok(Switch::Off),
+			"auto" => Ok(Switch::Auto),
+			other => Err(format!("Invalid switch value: {}", other))
+		}
+	}
+}
+
+pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result<bool, String> {
+	match (user_defaults.is_first_launch, switch, user_defaults.tracing) {
+		(false, Switch::On, false) => Err("TraceDB resync required".into()),
+		(_, Switch::On, _) => Ok(true),
+		(_, Switch::Off, _) => Ok(false),
+		(_, Switch::Auto, def) => Ok(def),
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use util::journaldb::Algorithm;
-	use super::{SpecType, Pruning, ResealPolicy};
+	use user_defaults::UserDefaults;
+	use super::{SpecType, Pruning, ResealPolicy, Switch, tracing_switch_to_bool};
 
 	#[test]
 	fn test_spec_type_parsing() {
@@ -274,4 +301,36 @@ mod tests {
 		let all = ResealPolicy { own: true, external: true };
 		assert_eq!(all, ResealPolicy::default());
 	}
+
+	#[test]
+	fn test_switch_parsing() {
+		assert_eq!(Switch::On, "on".parse().unwrap());
+		assert_eq!(Switch::Off, "off".parse().unwrap());
+		assert_eq!(Switch::Auto, "auto".parse().unwrap());
+	}
+
+	#[test]
+	fn test_switch_default() {
+		assert_eq!(Switch::default(), Switch::Auto);
+	}
+
+	fn user_defaults_with_tracing(first_launch: bool, tracing: bool) -> UserDefaults {
+		let mut ud = UserDefaults::default();
+		ud.is_first_launch = first_launch;
+		ud.tracing = tracing;
+		ud
+	}
+
+	#[test]
+	fn test_switch_to_bool() {
+		assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, true)).unwrap());
+		assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(true, false)).unwrap());
+		assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, true)).unwrap());
+		assert!(!tracing_switch_to_bool(Switch::Off, &user_defaults_with_tracing(false, false)).unwrap());
+
+		assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, true)).unwrap());
+		assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(true, false)).unwrap());
+		assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, true)).unwrap());
+		assert!(tracing_switch_to_bool(Switch::On, &user_defaults_with_tracing(false, false)).is_err());
+	}
 }

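Editor's note: read as a truth table, tracing_switch_to_bool makes --tracing auto mean "whatever this database did before" (off on a first launch), and refuses --tracing on only when an existing database was previously synced without tracing, since the historical traces were never written; this replaces the Error::ResyncRequired path of the removed Switch::turn_to. A commented restatement of the match above:

	// (is_first_launch, switch,       user_defaults.tracing) -> result
	// (false,           Switch::On,   false) -> Err("TraceDB resync required")
	// (_,               Switch::On,   _    ) -> Ok(true)   // fresh db, or traces already present
	// (_,               Switch::Off,  _    ) -> Ok(false)
	// (_,               Switch::Auto, def  ) -> Ok(def)    // keep the previous behaviour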
parity/run.rs

@@ -23,7 +23,7 @@ use ethcore_rpc::NetworkSettings;
 use ethsync::NetworkConfiguration;
 use util::{Colour, version, U256};
 use io::{MayPanic, ForwardPanic, PanicHandler};
-use ethcore::client::{Mode, Switch, DatabaseCompactionProfile, VMType, ChainNotify};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, ChainNotify};
 use ethcore::service::ClientService;
 use ethcore::account_provider::AccountProvider;
 use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions};
@@ -35,10 +35,11 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
 use signer::SignerServer;
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras};
+use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades, passwords_from_files};
 use dir::Directories;
 use cache::CacheConfig;
+use user_defaults::UserDefaults;
 use dapps;
 use signer;
 use modules;
@@ -87,34 +88,45 @@ pub struct RunCmd {
 }
 
 pub fn execute(cmd: RunCmd) -> Result<(), String> {
-	// increase max number of open files
-	raise_fd_limit();
+	// set up panic handler
+	let panic_handler = PanicHandler::new_in_arc();
 
 	// set up logger
 	let logger = try!(setup_log(&cmd.logger_config));
 
-	// set up panic handler
-	let panic_handler = PanicHandler::new_in_arc();
+	// increase max number of open files
+	raise_fd_limit();
 
 	// create dirs used by parity
 	try!(cmd.dirs.create_dirs());
 
 	// load spec
 	let spec = try!(cmd.spec.spec());
-	let fork_name = spec.fork_name.clone();
 
 	// load genesis hash
 	let genesis_hash = spec.genesis_header().hash();
 
+	// database paths
+	let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
+
+	// user defaults path
+	let user_defaults_path = db_dirs.user_defaults_path();
+
+	// load user defaults
+	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
 	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&cmd.dirs, genesis_hash, fork_name.as_ref());
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
 	// prepare client and snapshot paths.
-	let client_path = cmd.dirs.client_path(genesis_hash, fork_name.as_ref(), algorithm);
-	let snapshot_path = cmd.dirs.snapshot_path(genesis_hash, fork_name.as_ref());
+	let client_path = db_dirs.client_path(algorithm);
+	let snapshot_path = db_dirs.snapshot_path();
 
 	// execute upgrades
-	try!(execute_upgrades(&cmd.dirs, genesis_hash, fork_name.as_ref(), algorithm, cmd.compaction.compaction_profile()));
+	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// run in daemon mode
 	if let Some(pid_file) = cmd.daemon {
@@ -152,16 +164,13 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	// create client config
 	let client_config = to_client_config(
 		&cmd.cache_config,
-		&cmd.dirs,
-		genesis_hash,
 		cmd.mode,
-		cmd.tracing,
-		cmd.pruning,
+		tracing,
 		cmd.compaction,
 		cmd.wal,
 		cmd.vm_type,
 		cmd.name,
-		fork_name.as_ref(),
+		algorithm,
 	);
 
 	// set up bootnodes
@@ -288,6 +297,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 		url::open(&format!("http://{}:{}/", cmd.dapps_conf.interface, cmd.dapps_conf.port));
 	}
 
+	// save user defaults
+	user_defaults.pruning = algorithm;
+	user_defaults.tracing = tracing;
+	try!(user_defaults.save(&user_defaults_path));
+
 	// Handle exit
 	wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server);

parity/snapshot.rs

@@ -25,14 +25,15 @@ use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService as SS};
 use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
 use ethcore::snapshot::service::Service as SnapshotService;
 use ethcore::service::ClientService;
-use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
+use ethcore::client::{Mode, DatabaseCompactionProfile, VMType};
 use ethcore::miner::Miner;
 use ethcore::ids::BlockID;
 use cache::CacheConfig;
-use params::{SpecType, Pruning};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
+use user_defaults::UserDefaults;
 use fdlimit;
 use io::PanicHandler;
@@ -129,23 +130,35 @@ impl SnapshotCommand {
 		// load genesis hash
 		let genesis_hash = spec.genesis_header().hash();
 
+		// database paths
+		let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone());
+
+		// user defaults path
+		let user_defaults_path = db_dirs.user_defaults_path();
+
+		// load user defaults
+		let user_defaults = try!(UserDefaults::load(&user_defaults_path));
+
+		// check if tracing is on
+		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
+
 		// Setup logging
 		let _logger = setup_log(&self.logger_config);
 
 		fdlimit::raise_fd_limit();
 
 		// select pruning algorithm
-		let algorithm = self.pruning.to_algorithm(&self.dirs, genesis_hash, spec.fork_name.as_ref());
+		let algorithm = self.pruning.to_algorithm(&user_defaults);
 
 		// prepare client and snapshot paths.
-		let client_path = self.dirs.client_path(genesis_hash, spec.fork_name.as_ref(), algorithm);
-		let snapshot_path = self.dirs.snapshot_path(genesis_hash, spec.fork_name.as_ref());
+		let client_path = db_dirs.client_path(algorithm);
+		let snapshot_path = db_dirs.snapshot_path();
 
 		// execute upgrades
-		try!(execute_upgrades(&self.dirs, genesis_hash, spec.fork_name.as_ref(), algorithm, self.compaction.compaction_profile()));
+		try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
 
 		// prepare client config
-		let client_config = to_client_config(&self.cache_config, &self.dirs, genesis_hash, self.mode, self.tracing, self.pruning, self.compaction, self.wal, VMType::default(), "".into(), spec.fork_name.as_ref());
+		let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
 
 		let service = try!(ClientService::start(
 			client_config,

parity/user_defaults.rs (new file, 98 lines)

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::fs::File;
use std::io::Write;
use std::path::Path;
use std::collections::BTreeMap;
use serde::{Serialize, Serializer, Error, Deserialize, Deserializer};
use serde::de::{Visitor, MapVisitor};
use serde::de::impls::BTreeMapVisitor;
use serde_json::Value;
use serde_json::de::from_reader;
use serde_json::ser::to_string;
use util::journaldb::Algorithm;

pub struct UserDefaults {
	pub is_first_launch: bool,
	pub pruning: Algorithm,
	pub tracing: bool,
}

impl Serialize for UserDefaults {
	fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
	where S: Serializer {
		let mut map: BTreeMap<String, Value> = BTreeMap::new();
		map.insert("pruning".into(), Value::String(self.pruning.as_str().into()));
		map.insert("tracing".into(), Value::Bool(self.tracing));

		map.serialize(serializer)
	}
}

struct UserDefaultsVisitor;

impl Deserialize for UserDefaults {
	fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
	where D: Deserializer {
		deserializer.deserialize(UserDefaultsVisitor)
	}
}

impl Visitor for UserDefaultsVisitor {
	type Value = UserDefaults;

	fn visit_map<V>(&mut self, visitor: V) -> Result<Self::Value, V::Error>
	where V: MapVisitor {
		let mut map: BTreeMap<String, Value> = try!(BTreeMapVisitor::new().visit_map(visitor));
		let pruning: Value = try!(map.remove("pruning".into()).ok_or_else(|| Error::custom("missing pruning")));
		let pruning = try!(pruning.as_str().ok_or_else(|| Error::custom("invalid pruning value")));
		let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method")));
		let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing")));
		let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value")));

		let user_defaults = UserDefaults {
			is_first_launch: false,
			pruning: pruning,
			tracing: tracing,
		};

		Ok(user_defaults)
	}
}

impl Default for UserDefaults {
	fn default() -> Self {
		UserDefaults {
			is_first_launch: true,
			pruning: Algorithm::default(),
			tracing: false,
		}
	}
}

impl UserDefaults {
	pub fn load<P>(path: P) -> Result<Self, String> where P: AsRef<Path> {
		match File::open(path) {
			Ok(file) => from_reader(file).map_err(|e| e.to_string()),
			_ => Ok(UserDefaults::default()),
		}
	}

	pub fn save<P>(self, path: P) -> Result<(), String> where P: AsRef<Path> {
		let mut file: File = try!(File::create(path).map_err(|_| "Cannot create user defaults file".to_owned()));
		file.write_all(to_string(&self).unwrap().as_bytes()).map_err(|_| "Failed to save user defaults".to_owned())
	}
}
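Editor's note: taken together with the call sites above, the intended lifecycle is load, resolve, save. A minimal usage sketch (illustrative values; assumes a surrounding function returning Result<(), String>): load falls back to Default::default() when the file does not exist yet, and save consumes the value and overwrites the file:

	let path = db_dirs.user_defaults_path();
	let mut user_defaults = try!(UserDefaults::load(&path));
	user_defaults.pruning = Algorithm::Archive;
	user_defaults.tracing = true;
	try!(user_defaults.save(&path)); // writes e.g. {"pruning":"archive","tracing":true}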