* Actually enable fat db, and do RPCs for it.
* Implement HashDB traits for AccountDB.
* user defaults
* finished user defaults
* user defaults are network-dependent
* added tests for newly added functions, logger is initialized first
* dir cleanup in progress
* user_file is placed next to snapshots
* fixing requested change
parent 0dcdaa7a2a
commit 06fe768ac2
@@ -121,6 +121,10 @@ impl<'db> HashDB for AccountDB<'db>{
 	fn remove(&mut self, _key: &H256) {
 		unimplemented!()
 	}
+
+	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
+		self.db.get_aux(hash)
+	}
 }
 
 /// DB backend wrapper for Account trie

@@ -193,6 +197,18 @@ impl<'db> HashDB for AccountDBMut<'db>{
 		let key = combine_key(&self.address_hash, key);
 		self.db.remove(&key)
 	}
+
+	fn insert_aux(&mut self, hash: Vec<u8>, value: Vec<u8>) {
+		self.db.insert_aux(hash, value);
+	}
+
+	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
+		self.db.get_aux(hash)
+	}
+
+	fn remove_aux(&mut self, hash: &[u8]) {
+		self.db.remove_aux(hash);
+	}
 }
 
 struct Wrapping<'db>(&'db HashDB);
@@ -23,9 +23,9 @@ use time::precise_time_ns;
 
 // util
 use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock};
-use util::journaldb;
-use util::{U256, H256, Address, H2048, Uint};
-use util::TrieFactory;
+use util::{journaldb, TrieFactory, Trie};
+use util::trie::TrieSpec;
+use util::{U256, H256, Address, H2048, Uint, FixedHash};
 use util::kvdb::*;
 
 // other

@@ -51,7 +51,7 @@ use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
 use client::{
 	BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient,
 	MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
-	ChainNotify
+	ChainNotify,
 };
 use client::Error as ClientError;
 use env_info::EnvInfo;

@@ -171,6 +171,11 @@ impl Client {
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
 		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
 
+		let trie_spec = match config.fat_db {
+			true => TrieSpec::Fat,
+			false => TrieSpec::Secure,
+		};
+
 		let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		let mut state_db = StateDB::new(journal_db);
 		if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {

@@ -193,7 +198,7 @@ impl Client {
 
 		let factories = Factories {
 			vm: EvmFactory::new(config.vm_type.clone()),
-			trie: TrieFactory::new(config.trie_spec.clone()),
+			trie: TrieFactory::new(trie_spec),
 			accountdb: Default::default(),
 		};
 

@@ -842,6 +847,38 @@ impl BlockChainClient for Client {
 		self.state_at(id).map(|s| s.storage_at(address, position))
 	}
 
+	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>> {
+		if !self.factories.trie.is_fat() {
+			trace!(target: "fatdb", "list_accounts: Not a fat DB");
+			return None;
+		}
+
+		let state = match self.state_at(id) {
+			Some(state) => state,
+			_ => return None,
+		};
+
+		let (root, db) = state.drop();
+		let trie = match self.factories.trie.readonly(db.as_hashdb(), &root) {
+			Ok(trie) => trie,
+			_ => {
+				trace!(target: "fatdb", "list_accounts: Couldn't open the DB");
+				return None;
+			}
+		};
+
+		let iter = match trie.iter() {
+			Ok(iter) => iter,
+			_ => return None,
+		};
+
+		let accounts = iter.filter_map(|item| {
+			item.ok().map(|(addr, _)| Address::from_slice(&addr))
+		}).collect();
+
+		Some(accounts)
+	}
+
 	fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction> {
 		self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address))
 	}
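
Note: a minimal caller-side sketch (not part of this commit) of the new `list_accounts` API, assuming the `BlockChainClient` trait and `BlockID` from this crate are in scope; the `dump_accounts` helper is hypothetical, and `None` simply means the node is not running with a fat DB rather than an error:

fn dump_accounts<C: BlockChainClient>(client: &C) {
	// Account enumeration only works when the state trie was built as a
	// fat trie (TrieSpec::Fat), i.e. the node was started with --fat-db.
	match client.list_accounts(BlockID::Latest) {
		Some(accounts) => for address in accounts {
			println!("{:?}", address);
		},
		None => println!("fat DB disabled; cannot enumerate accounts"),
	}
}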
@@ -22,7 +22,6 @@ pub use evm::VMType;
 
 use verification::{VerifierType, QueueConfig};
 use util::{journaldb, CompactionProfile};
-use util::trie::TrieSpec;
 
 /// Client state db compaction profile
 #[derive(Debug, PartialEq)]

@@ -91,8 +90,8 @@ pub struct ClientConfig {
 	pub tracing: TraceConfig,
 	/// VM type.
 	pub vm_type: VMType,
-	/// Trie type.
-	pub trie_spec: TrieSpec,
+	/// Fat DB enabled?
+	pub fat_db: bool,
 	/// The JournalDB ("pruning") algorithm to use.
 	pub pruning: journaldb::Algorithm,
 	/// The name of the client instance.
@@ -384,6 +384,10 @@ impl BlockChainClient for TestBlockChainClient {
 		}
 	}
 
+	fn list_accounts(&self, _id: BlockID) -> Option<Vec<Address>> {
+		None
+	}
+
 	fn transaction(&self, _id: TransactionID) -> Option<LocalizedTransaction> {
 		None // Simple default.
 	}

@@ -112,6 +112,9 @@ pub trait BlockChainClient : Sync + Send {
 			Therefore storage_at has returned Some; qed")
 	}
 
+	/// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`.
+	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>>;
+
 	/// Get transaction with given hash.
 	fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction>;
 
@@ -123,7 +123,6 @@ impl Args {
 	}
 }
 
-
 fn die(msg: &'static str) -> ! {
 	println!("{}", msg);
 	::std::process::exit(-1)
@@ -30,8 +30,8 @@ use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError,
 use ethcore::error::ImportError;
 use ethcore::miner::Miner;
 use cache::CacheConfig;
-use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use informant::{Informant, MillisecondDuration};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
 use io_handler::ImportIoHandler;
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;

@@ -81,6 +81,7 @@ pub struct ImportBlockchain {
 	pub wal: bool,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub vm_type: VMType,
 }
 

@@ -96,6 +97,7 @@ pub struct ExportBlockchain {
 	pub compaction: DatabaseCompactionProfile,
 	pub wal: bool,
 	pub mode: Mode,
+	pub fat_db: Switch,
 	pub tracing: Switch,
 	pub from_block: BlockID,
 	pub to_block: BlockID,

@@ -135,14 +137,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	// load user defaults
 	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-	// check if tracing is on
-	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
-
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
+
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
 	let snapshot_path = db_dirs.snapshot_path();

@@ -151,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
 
 	// build client
 	let service = try!(ClientService::start(

@@ -283,14 +288,17 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	// load user defaults
 	let user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-	// check if tracing is on
-	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
-
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
+
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
 	let snapshot_path = db_dirs.snapshot_path();

@@ -299,7 +307,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
 
 	let service = try!(ClientService::start(
 		client_config,
@@ -82,7 +82,7 @@ cache_size_queue = 50
 cache_size = 128 # Overrides above caches with total size
 fast_and_loose = false
 db_compaction = "ssd"
-fat_db = false
+fat_db = "auto"
 
 [snapshots]
 disable_periodic = false

@@ -49,7 +49,7 @@ cache_size_db = 128
 cache_size_blocks = 16
 cache_size_queue = 100
 db_compaction = "ssd"
-fat_db = true
+fat_db = "off"
 
 [snapshots]
 disable_periodic = true
@@ -217,7 +217,7 @@ usage! {
 			or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
 		flag_db_compaction: String = "ssd",
 			or |c: &Config| otry!(c.footprint).db_compaction.clone(),
-		flag_fat_db: bool = false,
+		flag_fat_db: String = "auto",
 			or |c: &Config| otry!(c.footprint).fat_db.clone(),
 
 	// -- Import/Export Options

@@ -362,7 +362,7 @@ struct Footprint {
 	cache_size_blocks: Option<u32>,
 	cache_size_queue: Option<u32>,
 	db_compaction: Option<String>,
-	fat_db: Option<bool>,
+	fat_db: Option<String>,
 }
 
 #[derive(Default, Debug, PartialEq, RustcDecodable)]

@@ -535,7 +535,7 @@ mod tests {
 			flag_cache_size: Some(128),
 			flag_fast_and_loose: false,
 			flag_db_compaction: "ssd".into(),
-			flag_fat_db: false,
+			flag_fat_db: "auto".into(),
 
 			// -- Import/Export Options
 			flag_from: "1".into(),

@@ -687,7 +687,7 @@ mod tests {
 				cache_size_blocks: Some(16),
 				cache_size_queue: Some(100),
 				db_compaction: Some("ssd".into()),
-				fat_db: Some(true),
+				fat_db: Some("off".into()),
 			}),
 			snapshots: Some(Snapshots {
 				disable_periodic: Some(true),
@@ -217,7 +217,10 @@ Footprint Options:
   --db-compaction TYPE     Database compaction type. TYPE may be one of:
                            ssd - suitable for SSDs and fast HDDs;
                            hdd - suitable for slow HDDs (default: {flag_db_compaction}).
-  --fat-db                 Fat database. (default: {flag_fat_db})
+  --fat-db BOOL            Build appropriate information to allow enumeration
+                           of all accounts and storage keys. Doubles the size
+                           of the state database. BOOL may be one of on, off
+                           or auto. (default: {flag_fat_db})
 
 Import/Export Options:
   --from BLOCK             Export from block BLOCK, which may be an index or
@@ -84,6 +84,7 @@ impl Configuration {
 		let cache_config = self.cache_config();
 		let spec = try!(self.chain().parse());
 		let tracing = try!(self.args.flag_tracing.parse());
+		let fat_db = try!(self.args.flag_fat_db.parse());
 		let compaction = try!(self.args.flag_db_compaction.parse());
 		let wal = !self.args.flag_fast_and_loose;
 		let enable_network = self.enable_network(&mode);

@@ -140,6 +141,7 @@ impl Configuration {
 				wal: wal,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				vm_type: vm_type,
 			};
 			Cmd::Blockchain(BlockchainCmd::Import(import_cmd))

@@ -156,6 +158,7 @@ impl Configuration {
 				wal: wal,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				from_block: try!(to_block_id(&self.args.flag_from)),
 				to_block: try!(to_block_id(&self.args.flag_to)),
 			};

@@ -169,6 +172,7 @@ impl Configuration {
 				logger_config: logger_config,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				file_path: self.args.arg_file.clone(),
 				wal: wal,

@@ -185,6 +189,7 @@ impl Configuration {
 				logger_config: logger_config,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				file_path: self.args.arg_file.clone(),
 				wal: wal,

@@ -216,6 +221,7 @@ impl Configuration {
 				miner_extras: try!(self.miner_extras()),
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				wal: wal,
 				vm_type: vm_type,

@@ -717,6 +723,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			vm_type: VMType::Interpreter,
 		})));
 	}

@@ -737,6 +744,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			from_block: BlockID::Number(1),
 			to_block: BlockID::Latest,
 		})));

@@ -758,6 +766,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			from_block: BlockID::Number(1),
 			to_block: BlockID::Latest,
 		})));

@@ -804,6 +813,7 @@ mod tests {
 			ui: false,
 			name: "".into(),
 			custom_bootnodes: false,
+			fat_db: Default::default(),
 			no_periodic_snapshot: false,
 		}));
 	}
@@ -191,6 +191,7 @@ pub fn to_client_config(
 	cache_config: &CacheConfig,
 	mode: Mode,
 	tracing: bool,
+	fat_db: bool,
 	compaction: DatabaseCompactionProfile,
 	wal: bool,
 	vm_type: VMType,

@@ -217,6 +218,7 @@ pub fn to_client_config(
 
 	client_config.mode = mode;
 	client_config.tracing.enabled = tracing;
+	client_config.fat_db = fat_db;
 	client_config.pruning = pruning;
 	client_config.db_compaction = compaction;
 	client_config.db_wal = wal;
@@ -252,6 +252,19 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> R
 	}
 }
 
+pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result<bool, String> {
+	if algorithm != Algorithm::Archive {
+		return Err("Fat DB is not supported with the chosen pruning option. Please rerun with `--pruning=archive`".into());
+	}
+
+	match (user_defaults.is_first_launch, switch, user_defaults.fat_db) {
+		(false, Switch::On, false) => Err("FatDB resync required".into()),
+		(_, Switch::On, _) => Ok(true),
+		(_, Switch::Off, _) => Ok(false),
+		(_, Switch::Auto, def) => Ok(def),
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use util::journaldb::Algorithm;
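
Note: a hedged sketch of a test (not one of the tests added by this commit) spelling out how `fatdb_switch_to_bool` resolves the CLI switch against the persisted defaults; it assumes `Switch`, `UserDefaults` and `util::journaldb::Algorithm` are in scope:

#[test]
fn fatdb_switch_resolution_sketch() {
	// Fresh defaults: first launch, fat_db persisted as false.
	let defaults = UserDefaults::default();
	// Any pruning mode other than archive rejects fat DB outright.
	assert!(fatdb_switch_to_bool(Switch::On, &defaults, Algorithm::OverlayRecent).is_err());
	// With archive pruning an explicit `on` enables it.
	assert_eq!(fatdb_switch_to_bool(Switch::On, &defaults, Algorithm::Archive), Ok(true));
	// `auto` falls back to whatever the user defaults file recorded.
	assert_eq!(fatdb_switch_to_bool(Switch::Auto, &defaults, Algorithm::Archive), Ok(false));
}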
@@ -35,7 +35,10 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
 use signer::SignerServer;
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool};
+use params::{
+	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
+	tracing_switch_to_bool, fatdb_switch_to_bool,
+};
 use helpers::{to_client_config, execute_upgrades, passwords_from_files};
 use dir::Directories;
 use cache::CacheConfig;

@@ -72,6 +75,7 @@ pub struct RunCmd {
 	pub miner_extras: MinerExtras,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub compaction: DatabaseCompactionProfile,
 	pub wal: bool,
 	pub vm_type: VMType,

@@ -115,11 +119,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	// load user defaults
 	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
-
 	// check if tracing is on
 	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
 
+	// select pruning algorithm
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
+
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
+
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);

@@ -135,7 +142,17 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 
 	// display info about used pruning algorithm
 	info!("Starting {}", Colour::White.bold().paint(version()));
-	info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str()));
+	info!("State DB configuation: {}{}{}",
+		Colour::White.bold().paint(algorithm.as_str()),
+		match fat_db {
+			true => Colour::White.bold().paint(" +Fat").to_string(),
+			false => "".to_owned(),
+		},
+		match tracing {
+			true => Colour::White.bold().paint(" +Trace").to_string(),
+			false => "".to_owned(),
+		}
+	);
 
 	// display warning about using experimental journaldb alorithm
 	if !algorithm.is_stable() {

@@ -171,6 +188,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 		&cmd.cache_config,
 		cmd.mode,
 		tracing,
+		fat_db,
 		cmd.compaction,
 		cmd.wal,
 		cmd.vm_type,
@@ -30,7 +30,7 @@ use ethcore::miner::Miner;
 use ethcore::ids::BlockID;
 
 use cache::CacheConfig;
-use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
 use user_defaults::UserDefaults;

@@ -57,6 +57,7 @@ pub struct SnapshotCommand {
 	pub logger_config: LogConfig,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub compaction: DatabaseCompactionProfile,
 	pub file_path: Option<String>,
 	pub wal: bool,

@@ -139,9 +140,6 @@ impl SnapshotCommand {
 		// load user defaults
 		let user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-		// check if tracing is on
-		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
-
 		// Setup logging
 		let _logger = setup_log(&self.logger_config);
 

@@ -150,6 +148,12 @@ impl SnapshotCommand {
 		// select pruning algorithm
 		let algorithm = self.pruning.to_algorithm(&user_defaults);
 
+		// check if tracing is on
+		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
+
+		// check if fatdb is on
+		let fat_db = try!(fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm));
+
 		// prepare client and snapshot paths.
 		let client_path = db_dirs.client_path(algorithm);
 		let snapshot_path = db_dirs.snapshot_path();

@@ -158,7 +162,7 @@ impl SnapshotCommand {
 		try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
 
 		// prepare client config
-		let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
+		let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
 
 		let service = try!(ClientService::start(
 			client_config,
@@ -30,6 +30,7 @@ pub struct UserDefaults {
 	pub is_first_launch: bool,
 	pub pruning: Algorithm,
 	pub tracing: bool,
+	pub fat_db: bool,
 }
 
 impl Serialize for UserDefaults {

@@ -38,6 +39,7 @@ impl Serialize for UserDefaults {
 		let mut map: BTreeMap<String, Value> = BTreeMap::new();
 		map.insert("pruning".into(), Value::String(self.pruning.as_str().into()));
 		map.insert("tracing".into(), Value::Bool(self.tracing));
+		map.insert("fat_db".into(), Value::Bool(self.fat_db));
 		map.serialize(serializer)
 	}
 }

@@ -62,11 +64,14 @@ impl Visitor for UserDefaultsVisitor {
 		let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method")));
 		let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing")));
 		let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value")));
+		let fat_db: Value = map.remove("fat_db".into()).unwrap_or_else(|| Value::Bool(false));
+		let fat_db = try!(fat_db.as_bool().ok_or_else(|| Error::custom("invalid fat_db value")));
 
 		let user_defaults = UserDefaults {
 			is_first_launch: false,
 			pruning: pruning,
 			tracing: tracing,
+			fat_db: fat_db,
 		};
 
 		Ok(user_defaults)

@@ -79,6 +84,7 @@ impl Default for UserDefaults {
 			is_first_launch: true,
 			pruning: Algorithm::default(),
 			tracing: false,
+			fat_db: false,
 		}
 	}
 }
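
Note: a small illustrative check (not part of this commit) of the backward-compatibility path above — a defaults file written before this change carries no "fat_db" entry, so the visitor falls back to `Value::Bool(false)`, which matches `UserDefaults::default()`:

#[test]
fn fat_db_starts_disabled() {
	// New installs and old defaults files both resolve to fat DB off.
	let defaults = UserDefaults::default();
	assert!(defaults.is_first_launch);
	assert!(!defaults.fat_db);
	assert!(!defaults.tracing);
}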
@@ -29,6 +29,7 @@ use ethstore::random_phrase;
 use ethsync::{SyncProvider, ManageNetwork};
 use ethcore::miner::MinerService;
 use ethcore::client::{MiningBlockChainClient};
+use ethcore::ids::BlockID;
 
 use jsonrpc_core::{from_params, to_value, Value, Error, Params, Ready};
 use v1::traits::Ethcore;

@@ -251,6 +252,24 @@ impl<C, M, S: ?Sized, F> Ethcore for EthcoreClient<C, M, S, F> where
 		)
 	}
 
+	fn list_accounts(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		try!(expect_no_params(params));
+
+		take_weak!(self.client)
+			.list_accounts(BlockID::Latest)
+			.map(|a| Ok(to_value(&a.into_iter().map(Into::into).collect::<Vec<H160>>())))
+			.unwrap_or(Ok(Value::Null))
+	}
+
+	fn list_storage_keys(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+
+		from_params::<(H160,)>(params).and_then(|(_addr,)|
+			Ok(Value::Null)
+		)
+	}
+
 	fn encrypt_message(&self, params: Params) -> Result<Value, Error> {
 		try!(self.active());
 		from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| {
@@ -76,6 +76,14 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 	/// Returns the value of the registrar for this network.
 	fn registry_address(&self, _: Params) -> Result<Value, Error>;
 
+	/// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not.
+	/// Takes no parameters.
+	fn list_accounts(&self, _: Params) -> Result<Value, Error>;
+
+	/// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`),
+	/// or null if not.
+	fn list_storage_keys(&self, _: Params) -> Result<Value, Error>;
+
 	/// Encrypt some data with a public key under ECIES.
 	/// First parameter is the 512-byte destination public key, second is the message.
 	fn encrypt_message(&self, _: Params) -> Result<Value, Error>;

@@ -108,6 +116,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 		delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase);
 		delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address);
 		delegate.add_method("ethcore_registryAddress", Ethcore::registry_address);
+		delegate.add_method("ethcore_listAccounts", Ethcore::list_accounts);
+		delegate.add_method("ethcore_listStorageKeys", Ethcore::list_storage_keys);
 		delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message);
 		delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions);
 		delegate.add_async_method("ethcore_hashContent", Ethcore::hash_content);
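
Note: the wire-level shape of the new endpoint, shown as an illustrative constant (not part of the commit); `ethcore_listAccounts` takes no parameters, and per the doc comment above the result is null whenever the node is not running with `--fat-db`:

// JSON-RPC 2.0 request body for the method registered above.
const LIST_ACCOUNTS_REQUEST: &'static str =
	r#"{"jsonrpc":"2.0","method":"ethcore_listAccounts","params":[],"id":1}"#;
// `ethcore_listStorageKeys` is registered the same way but, as implemented
// in this commit, still answers null for every address.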
@@ -233,4 +233,7 @@ impl TrieFactory {
 			TrieSpec::Fat => Ok(Box::new(try!(FatDBMut::from_existing(db, root)))),
 		}
 	}
+
+	/// Returns true iff the trie DB is a fat DB (allows enumeration of keys).
+	pub fn is_fat(&self) -> bool { self.spec == TrieSpec::Fat }
 }