diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs
index a02b670d0..2d00f8ed5 100644
--- a/ethcore/src/account_db.rs
+++ b/ethcore/src/account_db.rs
@@ -121,6 +121,10 @@ impl<'db> HashDB for AccountDB<'db>{
 	fn remove(&mut self, _key: &H256) {
 		unimplemented!()
 	}
+
+	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
+		self.db.get_aux(hash)
+	}
 }
 
 /// DB backend wrapper for Account trie
@@ -193,6 +197,18 @@ impl<'db> HashDB for AccountDBMut<'db>{
 		let key = combine_key(&self.address_hash, key);
 		self.db.remove(&key)
 	}
+
+	fn insert_aux(&mut self, hash: Vec<u8>, value: Vec<u8>) {
+		self.db.insert_aux(hash, value);
+	}
+
+	fn get_aux(&self, hash: &[u8]) -> Option<Vec<u8>> {
+		self.db.get_aux(hash)
+	}
+
+	fn remove_aux(&mut self, hash: &[u8]) {
+		self.db.remove_aux(hash);
+	}
 }
 
 struct Wrapping<'db>(&'db HashDB);
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 32a363210..6b1cc0c65 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -23,9 +23,9 @@ use time::precise_time_ns;
 
 // util
 use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock};
-use util::journaldb;
-use util::{U256, H256, Address, H2048, Uint};
-use util::TrieFactory;
+use util::{journaldb, TrieFactory, Trie};
+use util::trie::TrieSpec;
+use util::{U256, H256, Address, H2048, Uint, FixedHash};
 use util::kvdb::*;
 
 // other
@@ -51,7 +51,7 @@ use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
 use client::{
 	BlockID, TransactionID, UncleID, TraceId, ClientConfig, BlockChainClient,
 	MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
-	ChainNotify
+	ChainNotify,
 };
 use client::Error as ClientError;
 use env_info::EnvInfo;
@@ -171,6 +171,11 @@ impl Client {
 		let chain = Arc::new(BlockChain::new(config.blockchain.clone(), &gb, db.clone()));
 		let tracedb = RwLock::new(TraceDB::new(config.tracing.clone(), db.clone(), chain.clone()));
 
+		let trie_spec = match config.fat_db {
+			true => TrieSpec::Fat,
+			false => TrieSpec::Secure,
+		};
+
 		let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		let mut state_db = StateDB::new(journal_db);
 		if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {
@@ -193,7 +198,7 @@ impl Client {
 
 		let factories = Factories {
 			vm: EvmFactory::new(config.vm_type.clone()),
-			trie: TrieFactory::new(config.trie_spec.clone()),
+			trie: TrieFactory::new(trie_spec),
 			accountdb: Default::default(),
 		};
 
@@ -842,6 +847,38 @@ impl BlockChainClient for Client {
 		self.state_at(id).map(|s| s.storage_at(address, position))
 	}
 
+	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>> {
+		if !self.factories.trie.is_fat() {
+			trace!(target: "fatdb", "list_accounts: Not a fat DB");
+			return None;
+		}
+
+		let state = match self.state_at(id) {
+			Some(state) => state,
+			_ => return None,
+		};
+
+		let (root, db) = state.drop();
+		let trie = match self.factories.trie.readonly(db.as_hashdb(), &root) {
+			Ok(trie) => trie,
+			_ => {
+				trace!(target: "fatdb", "list_accounts: Couldn't open the DB");
+				return None;
+			}
+		};
+
+		let iter = match trie.iter() {
+			Ok(iter) => iter,
+			_ => return None,
+		};
+
+		let accounts = iter.filter_map(|item| {
+			item.ok().map(|(addr, _)| Address::from_slice(&addr))
+		}).collect();
+
+		Some(accounts)
+	}
+
 	fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction> {
 		self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address))
 	}
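As a usage illustration (not part of the patch): any holder of a `BlockChainClient` can enumerate accounts through the new method and gets `None` back when the node is not running a fat DB. The helper below is hypothetical and relies only on the trait method introduced by this change:

```rust
use ethcore::client::BlockChainClient;
use ethcore::ids::BlockID;

/// Hypothetical helper: print every account address in the latest state.
/// `list_accounts` yields `None` when the client is not backed by a fat DB.
fn dump_accounts<C: BlockChainClient>(client: &C) {
	match client.list_accounts(BlockID::Latest) {
		Some(accounts) => {
			for address in accounts {
				println!("{:?}", address);
			}
		}
		None => println!("fat DB disabled; account enumeration unavailable"),
	}
}
```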
diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs
index 399132108..8cf54387b 100644
--- a/ethcore/src/client/config.rs
+++ b/ethcore/src/client/config.rs
@@ -22,7 +22,6 @@ pub use evm::VMType;
 
 use verification::{VerifierType, QueueConfig};
 use util::{journaldb, CompactionProfile};
-use util::trie::TrieSpec;
 
 /// Client state db compaction profile
 #[derive(Debug, PartialEq)]
@@ -91,8 +90,8 @@ pub struct ClientConfig {
 	pub tracing: TraceConfig,
 	/// VM type.
 	pub vm_type: VMType,
-	/// Trie type.
-	pub trie_spec: TrieSpec,
+	/// Fat DB enabled?
+	pub fat_db: bool,
 	/// The JournalDB ("pruning") algorithm to use.
 	pub pruning: journaldb::Algorithm,
 	/// The name of the client instance.
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index 971f7c448..bd74eb958 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -384,6 +384,10 @@ impl BlockChainClient for TestBlockChainClient {
 		}
 	}
 
+	fn list_accounts(&self, _id: BlockID) -> Option<Vec<Address>> {
+		None
+	}
+
 	fn transaction(&self, _id: TransactionID) -> Option<LocalizedTransaction> {
 		None	// Simple default.
 	}
diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs
index 4da84bcbb..c2af744dd 100644
--- a/ethcore/src/client/traits.rs
+++ b/ethcore/src/client/traits.rs
@@ -112,6 +112,9 @@ pub trait BlockChainClient : Sync + Send {
 			Therefore storage_at has returned Some; qed")
 	}
 
+	/// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`.
+	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>>;
+
 	/// Get transaction with given hash.
 	fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction>;
 
diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs
index 94684129c..bc24afa1e 100644
--- a/evmbin/src/main.rs
+++ b/evmbin/src/main.rs
@@ -123,7 +123,6 @@ impl Args {
 	}
 }
 
-
 fn die(msg: &'static str) -> ! {
 	println!("{}", msg);
 	::std::process::exit(-1)
diff --git a/parity/blockchain.rs b/parity/blockchain.rs
index 3dfdac804..d4a4d8217 100644
--- a/parity/blockchain.rs
+++ b/parity/blockchain.rs
@@ -30,8 +30,8 @@ use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError,
 use ethcore::error::ImportError;
 use ethcore::miner::Miner;
 use cache::CacheConfig;
-use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
 use informant::{Informant, MillisecondDuration};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
 use io_handler::ImportIoHandler;
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
@@ -81,6 +81,7 @@ pub struct ImportBlockchain {
 	pub wal: bool,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub vm_type: VMType,
 }
 
@@ -96,6 +97,7 @@ pub struct ExportBlockchain {
 	pub compaction: DatabaseCompactionProfile,
 	pub wal: bool,
 	pub mode: Mode,
+	pub fat_db: Switch,
 	pub tracing: Switch,
 	pub from_block: BlockID,
 	pub to_block: BlockID,
@@ -135,14 +137,17 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	// load user defaults
 	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-	// check if tracing is on
-	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
-
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
+
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
 	let snapshot_path = db_dirs.snapshot_path();
@@ -151,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
 	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, cmd.vm_type, "".into(), algorithm);
 
 	// build client
 	let service = try!(ClientService::start(
@@ -283,14 +288,17 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	// load user defaults
 	let user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-	// check if tracing is on
-	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
-
 	fdlimit::raise_fd_limit();
 
 	// select pruning algorithm
 	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
 
+	// check if tracing is on
+	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
+
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
+
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
 	let snapshot_path = db_dirs.snapshot_path();
@@ -299,7 +307,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
 	try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile()));
 
 	// prepare client config
-	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
+	let client_config = to_client_config(&cmd.cache_config, cmd.mode, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm);
 
 	let service = try!(ClientService::start(
 		client_config,
diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml
index ec5dfbe35..a411e6767 100644
--- a/parity/cli/config.full.toml
+++ b/parity/cli/config.full.toml
@@ -82,7 +82,7 @@ cache_size_queue = 50
 cache_size = 128 # Overrides above caches with total size
 fast_and_loose = false
 db_compaction = "ssd"
-fat_db = false
+fat_db = "auto"
 
 [snapshots]
 disable_periodic = false
diff --git a/parity/cli/config.toml b/parity/cli/config.toml
index 11ec333aa..a5ad55d40 100644
--- a/parity/cli/config.toml
+++ b/parity/cli/config.toml
@@ -49,7 +49,7 @@ cache_size_db = 128
 cache_size_blocks = 16
 cache_size_queue = 100
 db_compaction = "ssd"
-fat_db = true
+fat_db = "off"
 
 [snapshots]
 disable_periodic = true
diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs
index b8b10ec1d..082dbe8e4 100644
--- a/parity/cli/mod.rs
+++ b/parity/cli/mod.rs
@@ -217,7 +217,7 @@ usage! {
 			or |c: &Config| otry!(c.footprint).fast_and_loose.clone(),
 		flag_db_compaction: String = "ssd",
 			or |c: &Config| otry!(c.footprint).db_compaction.clone(),
-		flag_fat_db: bool = false,
+		flag_fat_db: String = "auto",
 			or |c: &Config| otry!(c.footprint).fat_db.clone(),
 
 		// -- Import/Export Options
@@ -362,7 +362,7 @@ struct Footprint {
 	cache_size_blocks: Option<u32>,
 	cache_size_queue: Option<u32>,
 	db_compaction: Option<String>,
-	fat_db: Option<bool>,
+	fat_db: Option<String>,
 }
 
 #[derive(Default, Debug, PartialEq, RustcDecodable)]
@@ -535,7 +535,7 @@ mod tests {
 			flag_cache_size: Some(128),
 			flag_fast_and_loose: false,
 			flag_db_compaction: "ssd".into(),
-			flag_fat_db: false,
+			flag_fat_db: "auto".into(),
 
 			// -- Import/Export Options
 			flag_from: "1".into(),
@@ -687,7 +687,7 @@ mod tests {
 				cache_size_blocks: Some(16),
 				cache_size_queue: Some(100),
 				db_compaction: Some("ssd".into()),
-				fat_db: Some(true),
+				fat_db: Some("off".into()),
 			}),
 			snapshots: Some(Snapshots {
 				disable_periodic: Some(true),
diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt
index 4c5d3b94b..861b7dafc 100644
--- a/parity/cli/usage.txt
+++ b/parity/cli/usage.txt
@@ -217,7 +217,10 @@ Footprint Options:
   --db-compaction TYPE           Database compaction type. TYPE may be one of:
                                  ssd - suitable for SSDs and fast HDDs;
                                  hdd - suitable for slow HDDs (default: {flag_db_compaction}).
-  --fat-db                       Fat database. (default: {flag_fat_db})
+  --fat-db BOOL                  Build appropriate information to allow enumeration
+                                 of all accounts and storage keys. Doubles the size
+                                 of the state database. BOOL may be one of on, off
+                                 or auto. (default: {flag_fat_db})
 
 Import/Export Options:
   --from BLOCK                   Export from block BLOCK, which may be an index or
diff --git a/parity/configuration.rs b/parity/configuration.rs
index 1aa338c26..8eb617d2b 100644
--- a/parity/configuration.rs
+++ b/parity/configuration.rs
@@ -84,6 +84,7 @@ impl Configuration {
 		let cache_config = self.cache_config();
 		let spec = try!(self.chain().parse());
 		let tracing = try!(self.args.flag_tracing.parse());
+		let fat_db = try!(self.args.flag_fat_db.parse());
 		let compaction = try!(self.args.flag_db_compaction.parse());
 		let wal = !self.args.flag_fast_and_loose;
 		let enable_network = self.enable_network(&mode);
@@ -140,6 +141,7 @@ impl Configuration {
 				wal: wal,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				vm_type: vm_type,
 			};
 			Cmd::Blockchain(BlockchainCmd::Import(import_cmd))
@@ -156,6 +158,7 @@ impl Configuration {
 				wal: wal,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				from_block: try!(to_block_id(&self.args.flag_from)),
 				to_block: try!(to_block_id(&self.args.flag_to)),
 			};
@@ -169,6 +172,7 @@ impl Configuration {
 				logger_config: logger_config,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				file_path: self.args.arg_file.clone(),
 				wal: wal,
@@ -185,6 +189,7 @@ impl Configuration {
 				logger_config: logger_config,
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				file_path: self.args.arg_file.clone(),
 				wal: wal,
@@ -216,6 +221,7 @@ impl Configuration {
 				miner_extras: try!(self.miner_extras()),
 				mode: mode,
 				tracing: tracing,
+				fat_db: fat_db,
 				compaction: compaction,
 				wal: wal,
 				vm_type: vm_type,
@@ -717,6 +723,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			vm_type: VMType::Interpreter,
 		})));
 	}
@@ -737,6 +744,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			from_block: BlockID::Number(1),
 			to_block: BlockID::Latest,
 		})));
@@ -758,6 +766,7 @@ mod tests {
 			wal: true,
 			mode: Default::default(),
 			tracing: Default::default(),
+			fat_db: Default::default(),
 			from_block: BlockID::Number(1),
 			to_block: BlockID::Latest,
 		})));
@@ -804,6 +813,7 @@ mod tests {
 			ui: false,
 			name: "".into(),
 			custom_bootnodes: false,
+			fat_db: Default::default(),
 			no_periodic_snapshot: false,
 		}));
 	}
diff --git a/parity/helpers.rs b/parity/helpers.rs
index 0649e7fe9..abdd5daa5 100644
--- a/parity/helpers.rs
+++ b/parity/helpers.rs
@@ -191,6 +191,7 @@ pub fn to_client_config(
 	cache_config: &CacheConfig,
 	mode: Mode,
 	tracing: bool,
+	fat_db: bool,
 	compaction: DatabaseCompactionProfile,
 	wal: bool,
 	vm_type: VMType,
@@ -217,6 +218,7 @@ pub fn to_client_config(
 
 	client_config.mode = mode;
 	client_config.tracing.enabled = tracing;
+	client_config.fat_db = fat_db;
 	client_config.pruning = pruning;
 	client_config.db_compaction = compaction;
 	client_config.db_wal = wal;
diff --git a/parity/params.rs b/parity/params.rs
index 160b50866..df0730b59 100644
--- a/parity/params.rs
+++ b/parity/params.rs
@@ -252,6 +252,19 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> Result<bool, String> {
 	}
 }
 
+pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result<bool, String> {
+	if algorithm != Algorithm::Archive {
+		return Err("Fat DB is not supported with the chosen pruning option. Please rerun with `--pruning=archive`".into());
+	}
+
+	match (user_defaults.is_first_launch, switch, user_defaults.fat_db) {
+		(false, Switch::On, false) => Err("FatDB resync required".into()),
+		(_, Switch::On, _) => Ok(true),
+		(_, Switch::Off, _) => Ok(false),
+		(_, Switch::Auto, def) => Ok(def),
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use util::journaldb::Algorithm;
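To make the switch resolution above concrete, here is a test-style sketch of the expected outcomes (assuming `Switch`, `UserDefaults` and `Algorithm` are in scope as they are in `parity/params.rs`; illustrative only, not part of the patch):

```rust
#[test]
fn fatdb_switch_examples() {
	// With the code as written, any non-archive pruning algorithm is rejected up front.
	assert!(fatdb_switch_to_bool(Switch::On, &UserDefaults::default(), Algorithm::OverlayRecent).is_err());

	// On a first launch the switch resolves directly; `Auto` follows the stored default.
	let mut defaults = UserDefaults::default(); // is_first_launch: true, fat_db: false
	assert_eq!(fatdb_switch_to_bool(Switch::Auto, &defaults, Algorithm::Archive), Ok(false));
	assert_eq!(fatdb_switch_to_bool(Switch::On, &defaults, Algorithm::Archive), Ok(true));

	// Turning fat DB on over an existing non-fat database is refused ("FatDB resync required").
	defaults.is_first_launch = false;
	assert!(fatdb_switch_to_bool(Switch::On, &defaults, Algorithm::Archive).is_err());
}
```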
diff --git a/parity/run.rs b/parity/run.rs
index 4b458d4a6..d108ec53c 100644
--- a/parity/run.rs
+++ b/parity/run.rs
@@ -35,7 +35,10 @@ use rpc::{HttpServer, IpcServer, HttpConfiguration, IpcConfiguration};
 use signer::SignerServer;
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use params::{SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool};
+use params::{
+	SpecType, Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch,
+	tracing_switch_to_bool, fatdb_switch_to_bool,
+};
 use helpers::{to_client_config, execute_upgrades, passwords_from_files};
 use dir::Directories;
 use cache::CacheConfig;
@@ -72,6 +75,7 @@ pub struct RunCmd {
 	pub miner_extras: MinerExtras,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub compaction: DatabaseCompactionProfile,
 	pub wal: bool,
 	pub vm_type: VMType,
@@ -115,11 +119,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 	// load user defaults
 	let mut user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
+	// select pruning algorithm
+	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
+
 	// check if tracing is on
 	let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults));
 
-	// select pruning algorithm
-	let algorithm = cmd.pruning.to_algorithm(&user_defaults);
+	// check if fatdb is on
+	let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm));
 
 	// prepare client and snapshot paths.
 	let client_path = db_dirs.client_path(algorithm);
@@ -135,7 +142,17 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 
 	// display info about used pruning algorithm
 	info!("Starting {}", Colour::White.bold().paint(version()));
-	info!("Using state DB journalling strategy {}", Colour::White.bold().paint(algorithm.as_str()));
+	info!("State DB configuration: {}{}{}",
+		Colour::White.bold().paint(algorithm.as_str()),
+		match fat_db {
+			true => Colour::White.bold().paint(" +Fat").to_string(),
+			false => "".to_owned(),
+		},
+		match tracing {
+			true => Colour::White.bold().paint(" +Trace").to_string(),
+			false => "".to_owned(),
+		}
+	);
 
 	// display warning about using experimental journaldb algorithm
 	if !algorithm.is_stable() {
@@ -171,6 +188,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
 		&cmd.cache_config,
 		cmd.mode,
 		tracing,
+		fat_db,
 		cmd.compaction,
 		cmd.wal,
 		cmd.vm_type,
diff --git a/parity/snapshot.rs b/parity/snapshot.rs
index f3a8a45d3..6b2efeed5 100644
--- a/parity/snapshot.rs
+++ b/parity/snapshot.rs
@@ -30,7 +30,7 @@ use ethcore::miner::Miner;
 use ethcore::ids::BlockID;
 use cache::CacheConfig;
 
-use params::{SpecType, Pruning, Switch, tracing_switch_to_bool};
+use params::{SpecType, Pruning, Switch, tracing_switch_to_bool, fatdb_switch_to_bool};
 use helpers::{to_client_config, execute_upgrades};
 use dir::Directories;
 use user_defaults::UserDefaults;
@@ -57,6 +57,7 @@ pub struct SnapshotCommand {
 	pub logger_config: LogConfig,
 	pub mode: Mode,
 	pub tracing: Switch,
+	pub fat_db: Switch,
 	pub compaction: DatabaseCompactionProfile,
 	pub file_path: Option<String>,
 	pub wal: bool,
@@ -139,9 +140,6 @@ impl SnapshotCommand {
 		// load user defaults
 		let user_defaults = try!(UserDefaults::load(&user_defaults_path));
 
-		// check if tracing is on
-		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
-
 		// Setup logging
 		let _logger = setup_log(&self.logger_config);
 
@@ -150,6 +148,12 @@ impl SnapshotCommand {
 		// select pruning algorithm
 		let algorithm = self.pruning.to_algorithm(&user_defaults);
 
+		// check if tracing is on
+		let tracing = try!(tracing_switch_to_bool(self.tracing, &user_defaults));
+
+		// check if fatdb is on
+		let fat_db = try!(fatdb_switch_to_bool(self.fat_db, &user_defaults, algorithm));
+
 		// prepare client and snapshot paths.
 		let client_path = db_dirs.client_path(algorithm);
 		let snapshot_path = db_dirs.snapshot_path();
@@ -158,7 +162,7 @@ impl SnapshotCommand {
 		try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile()));
 
 		// prepare client config
-		let client_config = to_client_config(&self.cache_config, self.mode, tracing, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
+		let client_config = to_client_config(&self.cache_config, self.mode, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm);
 
 		let service = try!(ClientService::start(
 			client_config,
diff --git a/parity/user_defaults.rs b/parity/user_defaults.rs
index 8a1feebae..b7fc3d929 100644
--- a/parity/user_defaults.rs
+++ b/parity/user_defaults.rs
@@ -30,6 +30,7 @@ pub struct UserDefaults {
 	pub is_first_launch: bool,
 	pub pruning: Algorithm,
 	pub tracing: bool,
+	pub fat_db: bool,
 }
 
 impl Serialize for UserDefaults {
@@ -38,6 +39,7 @@ impl Serialize for UserDefaults {
 		let mut map: BTreeMap<String, Value> = BTreeMap::new();
 		map.insert("pruning".into(), Value::String(self.pruning.as_str().into()));
 		map.insert("tracing".into(), Value::Bool(self.tracing));
+		map.insert("fat_db".into(), Value::Bool(self.fat_db));
 
 		map.serialize(serializer)
 	}
@@ -62,11 +64,14 @@ impl Visitor for UserDefaultsVisitor {
 		let pruning = try!(pruning.parse().map_err(|_| Error::custom("invalid pruning method")));
 		let tracing: Value = try!(map.remove("tracing".into()).ok_or_else(|| Error::custom("missing tracing")));
 		let tracing = try!(tracing.as_bool().ok_or_else(|| Error::custom("invalid tracing value")));
+		let fat_db: Value = map.remove("fat_db".into()).unwrap_or_else(|| Value::Bool(false));
+		let fat_db = try!(fat_db.as_bool().ok_or_else(|| Error::custom("invalid fat_db value")));
 
 		let user_defaults = UserDefaults {
 			is_first_launch: false,
 			pruning: pruning,
 			tracing: tracing,
+			fat_db: fat_db,
 		};
 
 		Ok(user_defaults)
@@ -79,6 +84,7 @@ impl Default for UserDefaults {
 			is_first_launch: true,
 			pruning: Algorithm::default(),
 			tracing: false,
+			fat_db: false,
 		}
 	}
 }
diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs
index 684ce61a4..56f9f6fc4 100644
--- a/rpc/src/v1/impls/ethcore.rs
+++ b/rpc/src/v1/impls/ethcore.rs
@@ -29,6 +29,7 @@ use ethstore::random_phrase;
 use ethsync::{SyncProvider, ManageNetwork};
 use ethcore::miner::MinerService;
 use ethcore::client::{MiningBlockChainClient};
+use ethcore::ids::BlockID;
 use jsonrpc_core::{from_params, to_value, Value, Error, Params, Ready};
 
 use v1::traits::Ethcore;
@@ -251,6 +252,24 @@ impl<C, M, S: ?Sized> Ethcore for EthcoreClient<C, M, S> where
 		)
 	}
 
+	fn list_accounts(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+		try!(expect_no_params(params));
+
+		take_weak!(self.client)
+			.list_accounts(BlockID::Latest)
+			.map(|a| Ok(to_value(&a.into_iter().map(Into::into).collect::<Vec<H160>>())))
+			.unwrap_or(Ok(Value::Null))
+	}
+
+	fn list_storage_keys(&self, params: Params) -> Result<Value, Error> {
+		try!(self.active());
+
+		from_params::<(H160,)>(params).and_then(|(_addr,)|
+			Ok(Value::Null)
+		)
+	}
+
 	fn encrypt_message(&self, params: Params) -> Result<Value, Error> {
 		try!(self.active());
 		from_params::<(H512, Bytes)>(params).and_then(|(key, phrase)| {
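For reference, once a node runs with `--fat-db` enabled the new endpoint can be exercised over JSON-RPC. A sketch of a request (the address in the response is illustrative only):

```json
{"jsonrpc": "2.0", "method": "ethcore_listAccounts", "params": [], "id": 1}
```

and a possible reply, where `result` is `null` whenever fat DB is disabled:

```json
{"jsonrpc": "2.0", "result": ["0x407d73d8a49eeb85d32cf465507dd71d507100c1"], "id": 1}
```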
diff --git a/rpc/src/v1/traits/ethcore.rs b/rpc/src/v1/traits/ethcore.rs
index 0565da04a..011b78c8b 100644
--- a/rpc/src/v1/traits/ethcore.rs
+++ b/rpc/src/v1/traits/ethcore.rs
@@ -76,6 +76,14 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 	/// Returns the value of the registrar for this network.
 	fn registry_address(&self, _: Params) -> Result<Value, Error>;
 
+	/// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not.
+	/// Takes no parameters.
+	fn list_accounts(&self, _: Params) -> Result<Value, Error>;
+
+	/// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`),
+	/// or null if not.
+	fn list_storage_keys(&self, _: Params) -> Result<Value, Error>;
+
 	/// Encrypt some data with a public key under ECIES.
 	/// First parameter is the 512-byte destination public key, second is the message.
 	fn encrypt_message(&self, _: Params) -> Result<Value, Error>;
@@ -108,6 +116,8 @@ pub trait Ethcore: Sized + Send + Sync + 'static {
 		delegate.add_method("ethcore_generateSecretPhrase", Ethcore::generate_secret_phrase);
 		delegate.add_method("ethcore_phraseToAddress", Ethcore::phrase_to_address);
 		delegate.add_method("ethcore_registryAddress", Ethcore::registry_address);
+		delegate.add_method("ethcore_listAccounts", Ethcore::list_accounts);
+		delegate.add_method("ethcore_listStorageKeys", Ethcore::list_storage_keys);
 		delegate.add_method("ethcore_encryptMessage", Ethcore::encrypt_message);
 		delegate.add_method("ethcore_pendingTransactions", Ethcore::pending_transactions);
 		delegate.add_async_method("ethcore_hashContent", Ethcore::hash_content);
diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs
index 6eebd8f5d..952eb8894 100644
--- a/util/src/trie/mod.rs
+++ b/util/src/trie/mod.rs
@@ -233,4 +233,7 @@ impl TrieFactory {
 			TrieSpec::Fat => Ok(Box::new(try!(FatDBMut::from_existing(db, root)))),
 		}
 	}
+
+	/// Returns true iff the trie DB is a fat DB (allows enumeration of keys).
+	pub fn is_fat(&self) -> bool { self.spec == TrieSpec::Fat }
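}

Finally, a minimal sketch of how the resolved flag reaches the trie layer, mirroring the `Client::new` mapping and the new `is_fat` check above (`select_trie_factory` is a hypothetical helper, not part of the patch):

```rust
use util::TrieFactory;
use util::trie::TrieSpec;

// The same mapping `Client::new` performs on `config.fat_db`.
fn select_trie_factory(fat_db: bool) -> TrieFactory {
	let trie_spec = match fat_db {
		true => TrieSpec::Fat,
		false => TrieSpec::Secure,
	};
	TrieFactory::new(trie_spec)
}

fn main() {
	// `is_fat` is what `Client::list_accounts` consults before attempting enumeration:
	// only a fat trie keeps the key preimages needed to map hashed trie keys back to addresses.
	assert!(select_trie_factory(true).is_fat());
	assert!(!select_trie_factory(false).is_fat());
}
```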