diff --git a/Cargo.lock b/Cargo.lock index a97bb776e..9aba48e46 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -224,7 +224,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "elastic-array" version = "0.6.0" -source = "git+https://github.com/ethcore/elastic-array#70e4012e691b732c7c4cb04e9232799e6aa268bc" +source = "git+https://github.com/ethcore/elastic-array#346f1ba5982576dab9d0b8fa178b50e1db0a21cd" dependencies = [ "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index 0761b7fba..63524a442 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -121,10 +121,6 @@ impl<'db> HashDB for AccountDB<'db>{ fn remove(&mut self, _key: &H256) { unimplemented!() } - - fn get_aux(&self, hash: &[u8]) -> Option { - self.db.get_aux(hash) - } } /// DB backend wrapper for Account trie @@ -197,18 +193,6 @@ impl<'db> HashDB for AccountDBMut<'db>{ let key = combine_key(&self.address_hash, key); self.db.remove(&key) } - - fn insert_aux(&mut self, hash: Vec, value: Vec) { - self.db.insert_aux(hash, value); - } - - fn get_aux(&self, hash: &[u8]) -> Option { - self.db.get_aux(hash) - } - - fn remove_aux(&mut self, hash: &[u8]) { - self.db.remove_aux(hash); - } } struct Wrapping<'db>(&'db HashDB); diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 54c2a7a02..bcbceb9aa 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -594,9 +594,9 @@ mod tests { use factory::Factories; use state_db::StateDB; use views::BlockView; - use util::Address; + use util::{Address, TrieFactory}; use util::hash::FixedHash; - + use util::trie::TrieSpec; use std::sync::Arc; /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header @@ -637,7 +637,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); @@ -653,7 +653,7 @@ mod tests { let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap() .close_and_lock().seal(engine, vec![]).unwrap(); @@ -662,7 +662,7 @@ mod tests { let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); @@ -681,7 +681,7 @@ mod tests { let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let mut open_block = OpenBlock::new(engine, 
		Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let mut uncle1_header = Header::new();
@@ -697,7 +697,7 @@ mod tests {
 
 		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		spec.ensure_db_good(&mut db).unwrap();
+		spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap();
 		let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap();
 
 		let bytes = e.rlp_bytes();
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 0e0b292f9..11fff936f 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -22,7 +22,7 @@ use std::time::{Instant};
 use time::precise_time_ns;
 
 // util
-use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock};
+use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, Hashable};
 use util::{journaldb, TrieFactory, Trie};
 use util::trie::TrieSpec;
 use util::{U256, H256, Address, H2048, Uint, FixedHash};
@@ -172,9 +172,10 @@ impl Client {
 			false => TrieSpec::Secure,
 		};
 
+		let trie_factory = TrieFactory::new(trie_spec);
 		let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE);
 		let mut state_db = StateDB::new(journal_db, config.state_cache_size);
-		if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) {
+		if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db, &trie_factory)) {
 			let mut batch = DBTransaction::new(&db);
 			try!(state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash()));
 			try!(db.write(batch).map_err(ClientError::Database));
@@ -216,7 +217,7 @@ impl Client {
 
 		let factories = Factories {
 			vm: EvmFactory::new(config.vm_type.clone(), config.jump_table_size),
-			trie: TrieFactory::new(trie_spec),
+			trie: trie_factory,
 			accountdb: Default::default(),
 		};
 
@@ -952,6 +953,10 @@ impl BlockChainClient for Client {
 		self.state_at(id).map(|s| s.nonce(address))
 	}
 
+	fn storage_root(&self, address: &Address, id: BlockID) -> Option<H256> {
+		self.state_at(id).and_then(|s| s.storage_root(address))
+	}
+
 	fn block_hash(&self, id: BlockID) -> Option<H256> {
 		let chain = self.chain.read();
 		Self::block_hash(&chain, id)
@@ -969,7 +974,7 @@ impl BlockChainClient for Client {
 		self.state_at(id).map(|s| s.storage_at(address, position))
 	}
 
-	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>> {
+	fn list_accounts(&self, id: BlockID, after: Option<&Address>, count: u64) -> Option<Vec<Address>> {
 		if !self.factories.trie.is_fat() {
 			trace!(target: "fatdb", "list_accounts: Not a fat DB");
 			return None;
 		}
@@ -989,18 +994,68 @@ impl BlockChainClient for Client {
 			}
 		};
 
-		let iter = match trie.iter() {
+		let mut iter = match trie.iter() {
 			Ok(iter) => iter,
 			_ => return None,
 		};
 
+		if let Some(after) = after {
+			if let Err(e) = iter.seek(after) {
+				trace!(target: "fatdb", "list_accounts: Couldn't seek the DB: {:?}", e);
+			}
+		}
+
 		let accounts = iter.filter_map(|item| {
 			item.ok().map(|(addr, _)| Address::from_slice(&addr))
-		}).collect();
+		}).take(count as usize).collect();
 
 		Some(accounts)
 	}
 
+	fn list_storage(&self, id: BlockID, account: &Address, after: Option<&H256>, count: u64) -> Option<Vec<H256>> {
+		if !self.factories.trie.is_fat() {
+			trace!(target: "fatdb", "list_storage: Not a fat DB");
+			return None;
+		}
+
+		let state = match self.state_at(id) {
+			Some(state) => state,
+			_ => return None,
+		};
+
+		let root = match state.storage_root(account) {
+			Some(root) => root,
+			_ => return None,
+		};
+
+		let (_, db) = state.drop();
+		let account_db = self.factories.accountdb.readonly(db.as_hashdb(), account.sha3());
+		let trie = match self.factories.trie.readonly(account_db.as_hashdb(), &root) {
+			Ok(trie) => trie,
+			_ => {
+				trace!(target: "fatdb", "list_storage: Couldn't open the DB");
+				return None;
+			}
+		};
+
+		let mut iter = match trie.iter() {
+			Ok(iter) => iter,
+			_ => return None,
+		};
+
+		if let Some(after) = after {
+			if let Err(e) = iter.seek(after) {
+				trace!(target: "fatdb", "list_storage: Couldn't seek the DB: {:?}", e);
+			}
+		}
+
+		let keys = iter.filter_map(|item| {
+			item.ok().map(|(key, _)| H256::from_slice(&key))
+		}).take(count as usize).collect();
+
+		Some(keys)
+	}
+
 	fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction> {
 		self.transaction_address(id).and_then(|address| self.chain.read().transaction(&address))
 	}
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index 84ed25b37..c03b5920b 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -333,7 +333,7 @@ impl MiningBlockChainClient for TestBlockChainClient {
 		let genesis_header = self.spec.genesis_header();
 		let mut db_result = get_temp_state_db();
 		let mut db = db_result.take();
-		self.spec.ensure_db_good(&mut db).unwrap();
+		self.spec.ensure_db_good(&mut db, &TrieFactory::default()).unwrap();
 
 		let last_hashes = vec![genesis_header.hash()];
 		let mut open_block = OpenBlock::new(
@@ -385,6 +385,10 @@ impl BlockChainClient for TestBlockChainClient {
 		}
 	}
 
+	fn storage_root(&self, _address: &Address, _id: BlockID) -> Option<H256> {
+		None
+	}
+
 	fn latest_nonce(&self, address: &Address) -> U256 {
 		self.nonce(address, BlockID::Latest).unwrap()
 	}
@@ -416,10 +420,13 @@ impl BlockChainClient for TestBlockChainClient {
 		}
 	}
 
-	fn list_accounts(&self, _id: BlockID) -> Option<Vec<Address>> {
+	fn list_accounts(&self, _id: BlockID, _after: Option<&Address>, _count: u64) -> Option<Vec<Address>> {
 		None
 	}
 
+	fn list_storage(&self, _id: BlockID, _account: &Address, _after: Option<&H256>, _count: u64) -> Option<Vec<H256>> {
+		None
+	}
 	fn transaction(&self, _id: TransactionID) -> Option<LocalizedTransaction> {
 		None	// Simple default.
 	}
diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs
index 67092e986..6d774e250 100644
--- a/ethcore/src/client/traits.rs
+++ b/ethcore/src/client/traits.rs
@@ -68,6 +68,10 @@ pub trait BlockChainClient : Sync + Send {
 	/// May not fail on BlockID::Latest.
 	fn nonce(&self, address: &Address, id: BlockID) -> Option<U256>;
 
+	/// Attempt to get address storage root at given block.
+	/// May not fail on BlockID::Latest.
+	fn storage_root(&self, address: &Address, id: BlockID) -> Option<H256>;
+
 	/// Get address nonce at the latest block's state.
 	fn latest_nonce(&self, address: &Address) -> U256 {
 		self.nonce(address, BlockID::Latest)
@@ -114,7 +118,12 @@ pub trait BlockChainClient : Sync + Send {
 	}
 
 	/// Get a list of all accounts in the block `id`, if fat DB is in operation, otherwise `None`.
-	fn list_accounts(&self, id: BlockID) -> Option<Vec<Address>>;
+	/// If `after` is set the list starts with the following item.
+	fn list_accounts(&self, id: BlockID, after: Option<&Address>, count: u64) -> Option<Vec<Address>>;
+
+	/// Get a list of all storage keys in the block `id`, if fat DB is in operation, otherwise `None`.
+	/// If `after` is set the list starts with the following item.
+	fn list_storage(&self, id: BlockID, account: &Address, after: Option<&H256>, count: u64) -> Option<Vec<H256>>;
 
 	/// Get transaction with given hash.
fn transaction(&self, id: TransactionID) -> Option; diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 830fcf9c8..0c6ce0f9b 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -315,6 +315,7 @@ impl Engine for AuthorityRound { #[cfg(test)] mod tests { use util::*; + use util::trie::TrieSpec; use env_info::EnvInfo; use header::Header; use error::{Error, BlockError}; @@ -384,9 +385,9 @@ mod tests { let engine = &*spec.engine; let genesis_header = spec.genesis_header(); let mut db1 = get_temp_state_db().take(); - spec.ensure_db_good(&mut db1).unwrap(); + spec.ensure_db_good(&mut db1, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let mut db2 = get_temp_state_db().take(); - spec.ensure_db_good(&mut db2).unwrap(); + spec.ensure_db_good(&mut db2, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b1 = OpenBlock::new(engine, Default::default(), false, db1, &genesis_header, last_hashes.clone(), addr1, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b1 = b1.close_and_lock(); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 23a97967c..fb2f9bde6 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -184,6 +184,7 @@ impl Engine for BasicAuthority { #[cfg(test)] mod tests { use util::*; + use util::trie::TrieSpec; use block::*; use env_info::EnvInfo; use error::{BlockError, Error}; @@ -256,7 +257,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 3dc78d1a2..f50f7344b 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -68,6 +68,7 @@ impl Engine for InstantSeal { #[cfg(test)] mod tests { use util::*; + use util::trie::TrieSpec; use tests::helpers::*; use account_provider::AccountProvider; use spec::Spec; @@ -84,7 +85,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index de2a85942..38a1df525 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -422,6 +422,7 @@ impl Header { #[cfg(test)] mod tests { use util::*; + use util::trie::TrieSpec; use block::*; use tests::helpers::*; use env_info::EnvInfo; @@ -438,7 +439,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, 
&TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close(); @@ -452,7 +453,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle = Header::new(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index e236924ad..3916e5ccc 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -72,6 +72,7 @@ pub fn new_morden() -> Spec { load(include_bytes!("../../res/ethereum/morden.jso #[cfg(test)] mod tests { use util::*; + use util::trie::TrieSpec; use state::*; use super::*; use tests::helpers::*; @@ -84,7 +85,7 @@ mod tests { let genesis_header = spec.genesis_header(); let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - spec.ensure_db_good(&mut db).unwrap(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into()); diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 0882b688c..92a78cebd 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -64,13 +64,13 @@ impl PodAccount { } /// Place additional data into given hash DB. - pub fn insert_additional(&self, db: &mut AccountDBMut) { + pub fn insert_additional(&self, db: &mut AccountDBMut, factory: &TrieFactory) { match self.code { Some(ref c) if !c.is_empty() => { db.insert(c); } _ => {} } let mut r = H256::new(); - let mut t = SecTrieDBMut::new(db, &mut r); + let mut t = factory.create(db, &mut r); for (k, v) in &self.storage { if let Err(e) = t.insert(k, &rlp::encode(&U256::from(&**v))) { warn!("Encountered potential DB corruption: {}", e); diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index c8910bbdd..71c15bca2 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -244,13 +244,13 @@ impl Spec { } /// Ensure that the given state DB has the trie nodes in for the genesis state. - pub fn ensure_db_good(&self, db: &mut StateDB) -> Result> { + pub fn ensure_db_good(&self, db: &mut StateDB, factory: &TrieFactory) -> Result> { if !db.as_hashdb().contains(&self.state_root()) { trace!(target: "spec", "ensure_db_good: Fresh database? 
Cannot find state root {}", self.state_root());
 		let mut root = H256::new();
 
 		{
-			let mut t = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
+			let mut t = factory.create(db.as_hashdb_mut(), &mut root);
 			for (address, account) in self.genesis_state.get().iter() {
 				try!(t.insert(&**address, &account.rlp()));
 			}
@@ -258,7 +258,7 @@ impl Spec {
 			trace!(target: "spec", "ensure_db_good: Populated sec trie; root is {}", root);
 			for (address, account) in self.genesis_state.get().iter() {
 				db.note_non_null_account(address);
-				account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address));
+				account.insert_additional(&mut AccountDBMut::new(db.as_hashdb_mut(), address), factory);
 			}
 			assert!(db.as_hashdb().contains(&self.state_root()));
 			Ok(true)
diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs
index 76061f6a0..52ea884a0 100644
--- a/ethcore/src/state/account.rs
+++ b/ethcore/src/state/account.rs
@@ -314,11 +314,10 @@ impl Account {
 		self.code_hash == SHA3_EMPTY
 	}
 
-	#[cfg(test)]
-	/// return the storage root associated with this account or None if it has been altered via the overlay.
+	/// Return the storage root associated with this account or None if it has been altered via the overlay.
 	pub fn storage_root(&self) -> Option<&H256> { if self.storage_is_clean() {Some(&self.storage_root)} else {None} }
-	/// return the storage overlay.
+	/// Return the storage overlay.
 	pub fn storage_changes(&self) -> &HashMap<H256, H256> { &self.storage_changes }
 
 	/// Increment the nonce of the account by one.
diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs
index 01a7e3b15..9fa680eb8 100644
--- a/ethcore/src/state/mod.rs
+++ b/ethcore/src/state/mod.rs
@@ -369,6 +369,12 @@ impl State {
 			|a| a.as_ref().map_or(self.account_start_nonce, |account| *account.nonce()))
 	}
 
+	/// Get the storage root of account `a`.
+	pub fn storage_root(&self, a: &Address) -> Option<H256> {
+		self.ensure_cached(a, RequireCache::None, true,
+			|a| a.as_ref().and_then(|account| account.storage_root().cloned()))
+	}
+
 	/// Mutate storage of account `address` so that it is `value` for `key`.
pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { // Storage key search and update works like this: diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index adfb4f096..ea2ef378f 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -18,6 +18,7 @@ use ethkey::KeyPair; use io::*; use client::{BlockChainClient, Client, ClientConfig}; use util::*; +use util::trie::TrieSpec; use spec::*; use state_db::StateDB; use block::{OpenBlock, Drain}; @@ -157,7 +158,7 @@ pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_numbe let mut db_result = get_temp_state_db(); let mut db = db_result.take(); - test_spec.ensure_db_good(&mut db).unwrap(); + test_spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); let genesis_header = test_spec.genesis_header(); let mut rolling_timestamp = 40; diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 0baeb1354..02d3e39fb 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -22,7 +22,7 @@ use std::thread::sleep; use std::sync::Arc; use rustc_serialize::hex::FromHex; use io::{PanicHandler, ForwardPanic}; -use util::{ToPretty, Uint}; +use util::{ToPretty, Uint, U256, H256, Address, Hashable}; use rlp::PayloadInfo; use ethcore::service::ClientService; use ethcore::client::{Mode, DatabaseCompactionProfile, VMType, BlockImportError, BlockChainClient, BlockID}; @@ -65,6 +65,7 @@ impl FromStr for DataFormat { pub enum BlockchainCmd { Import(ImportBlockchain), Export(ExportBlockchain), + ExportState(ExportState), } #[derive(Debug, PartialEq)] @@ -103,10 +104,31 @@ pub struct ExportBlockchain { pub check_seal: bool, } +#[derive(Debug, PartialEq)] +pub struct ExportState { + pub spec: SpecType, + pub cache_config: CacheConfig, + pub dirs: Directories, + pub file_path: Option, + pub format: Option, + pub pruning: Pruning, + pub pruning_history: u64, + pub compaction: DatabaseCompactionProfile, + pub wal: bool, + pub fat_db: Switch, + pub tracing: Switch, + pub at: BlockID, + pub storage: bool, + pub code: bool, + pub min_balance: Option, + pub max_balance: Option, +} + pub fn execute(cmd: BlockchainCmd) -> Result { match cmd { BlockchainCmd::Import(import_cmd) => execute_import(import_cmd), BlockchainCmd::Export(export_cmd) => execute_export(export_cmd), + BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd), } } @@ -245,6 +267,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // save user defaults user_defaults.pruning = algorithm; user_defaults.tracing = tracing; + user_defaults.fat_db = fat_db; try!(user_defaults.save(&user_defaults_path)); let report = client.report(); @@ -261,23 +284,28 @@ fn execute_import(cmd: ImportBlockchain) -> Result { ).into()) } -fn execute_export(cmd: ExportBlockchain) -> Result { - // Setup panic handler - let panic_handler = PanicHandler::new_in_arc(); +fn start_client( + dirs: Directories, + spec: SpecType, + pruning: Pruning, + pruning_history: u64, + tracing: Switch, + fat_db: Switch, + compaction: DatabaseCompactionProfile, + wal: bool, + cache_config: CacheConfig) -> Result { // create dirs used by parity - try!(cmd.dirs.create_dirs(false, false)); - - let format = cmd.format.unwrap_or_default(); + try!(dirs.create_dirs(false, false)); // load spec file - let spec = try!(cmd.spec.spec()); + let spec = try!(spec.spec()); // load genesis hash let genesis_hash = spec.genesis_header().hash(); // database paths - let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + let db_dirs 
= dirs.database(genesis_hash, spec.fork_name.clone()); // user defaults path let user_defaults_path = db_dirs.user_defaults_path(); @@ -288,34 +316,42 @@ fn execute_export(cmd: ExportBlockchain) -> Result { fdlimit::raise_fd_limit(); // select pruning algorithm - let algorithm = cmd.pruning.to_algorithm(&user_defaults); + let algorithm = pruning.to_algorithm(&user_defaults); // check if tracing is on - let tracing = try!(tracing_switch_to_bool(cmd.tracing, &user_defaults)); + let tracing = try!(tracing_switch_to_bool(tracing, &user_defaults)); // check if fatdb is on - let fat_db = try!(fatdb_switch_to_bool(cmd.fat_db, &user_defaults, algorithm)); + let fat_db = try!(fatdb_switch_to_bool(fat_db, &user_defaults, algorithm)); // prepare client and snapshot paths. let client_path = db_dirs.client_path(algorithm); let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path()))); + try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.fork_path().as_path()))); // prepare client config - let client_config = to_client_config(&cmd.cache_config, Mode::Active, tracing, fat_db, cmd.compaction, cmd.wal, VMType::default(), "".into(), algorithm, cmd.pruning_history, cmd.check_seal); + let client_config = to_client_config(&cache_config, Mode::Active, tracing, fat_db, compaction, wal, VMType::default(), "".into(), algorithm, pruning_history, true); let service = try!(ClientService::start( client_config, &spec, &client_path, &snapshot_path, - &cmd.dirs.ipc_path(), + &dirs.ipc_path(), Arc::new(Miner::with_spec(&spec)), ).map_err(|e| format!("Client service error: {:?}", e))); drop(spec); + Ok(service) +} + +fn execute_export(cmd: ExportBlockchain) -> Result { + // Setup panic handler + let service = try!(start_client(cmd.dirs, cmd.spec, cmd.pruning, cmd.pruning_history, cmd.tracing, cmd.fat_db, cmd.compaction, cmd.wal, cmd.cache_config)); + let panic_handler = PanicHandler::new_in_arc(); + let format = cmd.format.unwrap_or_default(); panic_handler.forward_from(&service); let client = service.client(); @@ -329,6 +365,9 @@ fn execute_export(cmd: ExportBlockchain) -> Result { let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found")); for i in from..(to + 1) { + if i % 10000 == 0 { + info!("#{}", i); + } let b = try!(client.block(BlockID::Number(i)).ok_or("Error exporting incomplete chain")); match format { DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } @@ -339,6 +378,85 @@ fn execute_export(cmd: ExportBlockchain) -> Result { Ok("Export completed.".into()) } +fn execute_export_state(cmd: ExportState) -> Result { + // Setup panic handler + let service = try!(start_client(cmd.dirs, cmd.spec, cmd.pruning, cmd.pruning_history, cmd.tracing, cmd.fat_db, cmd.compaction, cmd.wal, cmd.cache_config)); + let panic_handler = PanicHandler::new_in_arc(); + + panic_handler.forward_from(&service); + let client = service.client(); + + let mut out: Box = match cmd.file_path { + Some(f) => Box::new(try!(fs::File::create(&f).map_err(|_| format!("Cannot write to file given: {}", f)))), + None => Box::new(io::stdout()), + }; + + let mut last: Option
= None; + let at = cmd.at; + let mut i = 0usize; + + out.write_fmt(format_args!("{{ \"state\": [", )).expect("Couldn't write to stream."); + loop { + let accounts = try!(client.list_accounts(at, last.as_ref(), 1000).ok_or("Specified block not found")); + if accounts.is_empty() { + break; + } + + for account in accounts.into_iter() { + let balance = client.balance(&account, at).unwrap_or_else(U256::zero); + if cmd.min_balance.map_or(false, |m| balance < m) || cmd.max_balance.map_or(false, |m| balance > m) { + last = Some(account); + continue; //filtered out + } + + if i != 0 { + out.write(b",").expect("Write error"); + } + out.write_fmt(format_args!("\n\"0x{}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account.hex(), balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error"); + let code = client.code(&account, at).unwrap_or(None).unwrap_or_else(Vec::new); + if !code.is_empty() { + out.write_fmt(format_args!(", \"code_hash\": \"0x{}\"", code.sha3().hex())).expect("Write error"); + if cmd.code { + out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())).expect("Write error"); + } + } + let storage_root = client.storage_root(&account, at).unwrap_or(::util::SHA3_NULL_RLP); + if storage_root != ::util::SHA3_NULL_RLP { + out.write_fmt(format_args!(", \"storage_root\": \"0x{}\"", storage_root.hex())).expect("Write error"); + if cmd.storage { + out.write_fmt(format_args!(", \"storage\": {{")).expect("Write error"); + let mut last_storage: Option = None; + loop { + let keys = try!(client.list_storage(at, &account, last_storage.as_ref(), 1000).ok_or("Specified block not found")); + if keys.is_empty() { + break; + } + + let mut si = 0; + for key in keys.into_iter() { + if si != 0 { + out.write(b",").expect("Write error"); + } + out.write_fmt(format_args!("\n\t\"0x{}\": \"0x{}\"", key.hex(), client.storage_at(&account, &key, at).unwrap_or_else(Default::default).hex())).expect("Write error"); + si += 1; + last_storage = Some(key); + } + } + out.write(b"\n}").expect("Write error"); + } + } + out.write(b"}").expect("Write error"); + i += 1; + if i % 10000 == 0 { + info!("Account #{}", i); + } + last = Some(account); + } + } + out.write_fmt(format_args!("\n]}}")).expect("Write error"); + Ok("Export completed.".into()) +} + #[cfg(test)] mod test { use super::DataFormat; diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 3f67cf1fa..d33c58d9d 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -26,6 +26,8 @@ usage! { cmd_new: bool, cmd_list: bool, cmd_export: bool, + cmd_blocks: bool, + cmd_state: bool, cmd_import: bool, cmd_signer: bool, cmd_new_token: bool, @@ -246,6 +248,10 @@ usage! 
{ flag_to: String = "latest", or |_| None, flag_format: Option = None, or |_| None, flag_no_seal_check: bool = false, or |_| None, + flag_no_storage: bool = false, or |_| None, + flag_no_code: bool = false, or |_| None, + flag_min_balance: Option = None, or |_| None, + flag_max_balance: Option = None, or |_| None, // -- Snapshot Optons flag_at: String = "latest", or |_| None, @@ -484,6 +490,8 @@ mod tests { cmd_new: false, cmd_list: false, cmd_export: false, + cmd_state: false, + cmd_blocks: false, cmd_import: false, cmd_signer: false, cmd_new_token: false, @@ -600,6 +608,10 @@ mod tests { flag_to: "latest".into(), flag_format: None, flag_no_seal_check: false, + flag_no_code: false, + flag_no_storage: false, + flag_min_balance: None, + flag_max_balance: None, // -- Snapshot Optons flag_at: "latest".into(), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index fe0824dfe..b67af6110 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -10,7 +10,7 @@ Usage: parity account import ... [options] parity wallet import --password FILE [options] parity import [ ] [options] - parity export [ ] [options] + parity export (blocks | state) [ ] [options] parity signer new-token [options] parity snapshot [options] parity restore [ ] [options] @@ -271,6 +271,16 @@ Import/Export Options: one of 'hex' and 'binary'. (default: {flag_format:?} = Import: auto, Export: binary) --no-seal-check Skip block seal check. (default: {flag_no_seal_check}) + --at BLOCK Export state at the given block, which may be an + index, hash, or 'latest'. Note that taking snapshots at + non-recent blocks will only work with --pruning archive + (default: {flag_at}) + --no-storage Don't export account storge. (default: {flag_no_storage}) + --no-code Don't export account code. (default: {flag_no_code}) + --min-balance WEI Don't export accounts with balance less than specified. + (default: {flag_min_balance:?}) + --max-balance WEI Don't export accounts with balance greater than specified. 
+ (default: {flag_max_balance:?}) Snapshot Options: --at BLOCK Take a snapshot at the given block, which may be an diff --git a/parity/configuration.rs b/parity/configuration.rs index ecf47ddad..c4a54f747 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -37,7 +37,7 @@ use dir::Directories; use dapps::Configuration as DappsConfiguration; use signer::{Configuration as SignerConfiguration}; use run::RunCmd; -use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat}; +use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState, DataFormat}; use presale::ImportWallet; use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts}; use snapshot::{self, SnapshotCommand}; @@ -161,23 +161,47 @@ impl Configuration { }; Cmd::Blockchain(BlockchainCmd::Import(import_cmd)) } else if self.args.cmd_export { - let export_cmd = ExportBlockchain { - spec: spec, - cache_config: cache_config, - dirs: dirs, - file_path: self.args.arg_file.clone(), - format: format, - pruning: pruning, - pruning_history: pruning_history, - compaction: compaction, - wal: wal, - tracing: tracing, - fat_db: fat_db, - from_block: try!(to_block_id(&self.args.flag_from)), - to_block: try!(to_block_id(&self.args.flag_to)), - check_seal: !self.args.flag_no_seal_check, - }; - Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) + if self.args.cmd_blocks { + let export_cmd = ExportBlockchain { + spec: spec, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: format, + pruning: pruning, + pruning_history: pruning_history, + compaction: compaction, + wal: wal, + tracing: tracing, + fat_db: fat_db, + from_block: try!(to_block_id(&self.args.flag_from)), + to_block: try!(to_block_id(&self.args.flag_to)), + check_seal: !self.args.flag_no_seal_check, + }; + Cmd::Blockchain(BlockchainCmd::Export(export_cmd)) + } else if self.args.cmd_state { + let export_cmd = ExportState { + spec: spec, + cache_config: cache_config, + dirs: dirs, + file_path: self.args.arg_file.clone(), + format: format, + pruning: pruning, + pruning_history: pruning_history, + compaction: compaction, + wal: wal, + tracing: tracing, + fat_db: fat_db, + at: try!(to_block_id(&self.args.flag_at)), + storage: !self.args.flag_no_storage, + code: !self.args.flag_no_code, + min_balance: self.args.flag_min_balance.and_then(|s| to_u256(&s).ok()), + max_balance: self.args.flag_max_balance.and_then(|s| to_u256(&s).ok()), + }; + Cmd::Blockchain(BlockchainCmd::ExportState(export_cmd)) + } else { + unreachable!(); + } } else if self.args.cmd_snapshot { let snapshot_cmd = SnapshotCommand { cache_config: cache_config, @@ -690,7 +714,7 @@ mod tests { use helpers::{replace_home, default_network_config}; use run::RunCmd; use signer::{Configuration as SignerConfiguration}; - use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat}; + use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat, ExportState}; use presale::ImportWallet; use account::{AccountCmd, NewAccount, ImportAccounts}; use devtools::{RandomTempPath}; @@ -779,7 +803,7 @@ mod tests { #[test] fn test_command_blockchain_export() { - let args = vec!["parity", "export", "blockchain.json"]; + let args = vec!["parity", "export", "blocks", "blockchain.json"]; let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { spec: Default::default(), @@ -799,9 +823,33 @@ mod tests { }))); } + #[test] + fn 
test_command_state_export() { + let args = vec!["parity", "export", "state", "state.json"]; + let conf = parse(&args); + assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::ExportState(ExportState { + spec: Default::default(), + cache_config: Default::default(), + dirs: Default::default(), + file_path: Some("state.json".into()), + pruning: Default::default(), + pruning_history: 64, + format: Default::default(), + compaction: Default::default(), + wal: true, + tracing: Default::default(), + fat_db: Default::default(), + at: BlockID::Latest, + storage: true, + code: true, + min_balance: None, + max_balance: None, + }))); + } + #[test] fn test_command_blockchain_export_with_custom_format() { - let args = vec!["parity", "export", "--format", "hex", "blockchain.json"]; + let args = vec!["parity", "export", "blocks", "--format", "hex", "blockchain.json"]; let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Blockchain(BlockchainCmd::Export(ExportBlockchain { spec: Default::default(), diff --git a/parity/params.rs b/parity/params.rs index 28233400e..3ce07e889 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -257,17 +257,13 @@ pub fn tracing_switch_to_bool(switch: Switch, user_defaults: &UserDefaults) -> R } } -pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, algorithm: Algorithm) -> Result { +pub fn fatdb_switch_to_bool(switch: Switch, user_defaults: &UserDefaults, _algorithm: Algorithm) -> Result { let result = match (user_defaults.is_first_launch, switch, user_defaults.fat_db) { (false, Switch::On, false) => Err("FatDB resync required".into()), (_, Switch::On, _) => Ok(true), (_, Switch::Off, _) => Ok(false), (_, Switch::Auto, def) => Ok(def), }; - - if result.clone().unwrap_or(false) && algorithm != Algorithm::Archive { - return Err("Fat DB is not supported with the chosen pruning option. Please rerun with `--pruning=archive`".into()); - } result } diff --git a/parity/run.rs b/parity/run.rs index f56ba5b92..fd3349af0 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -219,7 +219,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { // create client config let client_config = to_client_config( &cmd.cache_config, - mode, + mode.clone(), tracing, fat_db, cmd.compaction, @@ -354,6 +354,8 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { // save user defaults user_defaults.pruning = algorithm; user_defaults.tracing = tracing; + user_defaults.fat_db = fat_db; + user_defaults.mode = mode; try!(user_defaults.save(&user_defaults_path)); let on_mode_change = move |mode: &Mode| { diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 1fdcbdef8..74f467e5e 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -28,7 +28,6 @@ use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; use ethcore::miner::MinerService; use ethcore::client::{MiningBlockChainClient}; -use ethcore::ids::BlockID; use ethcore::mode::Mode; use ethcore::account_provider::AccountProvider; @@ -38,9 +37,11 @@ use v1::types::{ Bytes, U256, H160, H256, H512, Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, + BlockNumber, }; use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::dispatch::DEFAULT_MAC; +use v1::helpers::auto_args::Trailing; /// Parity implementation. 
pub struct ParityClient where
@@ -234,19 +235,20 @@ impl Parity for ParityClient where
 		Ok(Brain::new(phrase).generate().unwrap().address().into())
 	}
 
-	fn list_accounts(&self) -> Result<Option<Vec<H160>>, Error> {
+	fn list_accounts(&self, count: u64, after: Option<H160>, block_number: Trailing<BlockNumber>) -> Result<Option<Vec<H160>>, Error> {
 		try!(self.active());
 
 		Ok(take_weak!(self.client)
-			.list_accounts(BlockID::Latest)
+			.list_accounts(block_number.0.into(), after.map(Into::into).as_ref(), count)
 			.map(|a| a.into_iter().map(Into::into).collect()))
 	}
 
-	fn list_storage_keys(&self, _address: H160) -> Result<Option<Vec<H256>>, Error> {
+	fn list_storage_keys(&self, address: H160, count: u64, after: Option<H256>, block_number: Trailing<BlockNumber>) -> Result<Option<Vec<H256>>, Error> {
 		try!(self.active());
 
-		// TODO: implement this
-		Ok(None)
+		Ok(take_weak!(self.client)
+			.list_storage(block_number.0.into(), &address.into(), after.map(Into::into).as_ref(), count)
+			.map(|a| a.into_iter().map(Into::into).collect()))
 	}
 
 	fn encrypt_message(&self, key: H512, phrase: Bytes) -> Result<Bytes, Error> {
diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs
index b4df594e8..18440b654 100644
--- a/rpc/src/v1/traits/parity.rs
+++ b/rpc/src/v1/traits/parity.rs
@@ -18,11 +18,12 @@ use jsonrpc_core::Error;
 
 use std::collections::BTreeMap;
 
-use v1::helpers::auto_args::Wrap;
+use v1::helpers::auto_args::{Wrap, Trailing};
 use v1::types::{
 	H160, H256, H512, U256, Bytes,
 	Peers, Transaction, RpcSettings, Histogram,
 	TransactionStats, LocalTransactionStatus,
+	BlockNumber
 };
 
 build_rpc_trait! {
@@ -103,12 +104,12 @@ build_rpc_trait! {
 
 		/// Returns all addresses if Fat DB is enabled (`--fat-db`), or null if not.
 		#[rpc(name = "parity_listAccounts")]
-		fn list_accounts(&self) -> Result<Option<Vec<H160>>, Error>;
+		fn list_accounts(&self, u64, Option<H160>, Trailing<BlockNumber>) -> Result<Option<Vec<H160>>, Error>;
 
 		/// Returns all storage keys of the given address (first parameter) if Fat DB is enabled (`--fat-db`),
 		/// or null if not.
 		#[rpc(name = "parity_listStorageKeys")]
-		fn list_storage_keys(&self, H160) -> Result<Option<Vec<H256>>, Error>;
+		fn list_storage_keys(&self, H160, u64, Option<H256>, Trailing<BlockNumber>) -> Result<Option<Vec<H256>>, Error>;
 
 		/// Encrypt some data with a public key under ECIES.
 		/// First parameter is the 512-byte destination public key, second is the message.
diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs
index 671b32ed5..092d40d8a 100644
--- a/util/src/hashdb.rs
+++ b/util/src/hashdb.rs
@@ -107,21 +107,6 @@ pub trait HashDB: AsHashDB + Send + Sync {
 	/// }
 	/// ```
 	fn remove(&mut self, key: &H256);
-
-	/// Insert auxiliary data into hashdb.
-	fn insert_aux(&mut self, _hash: Vec<u8>, _value: Vec<u8>) {
-		unimplemented!();
-	}
-
-	/// Get auxiliary data from hashdb.
-	fn get_aux(&self, _hash: &[u8]) -> Option<DBValue> {
-		unimplemented!();
-	}
-
-	/// Removes auxiliary data from hashdb.
-	fn remove_aux(&mut self, _hash: &[u8]) {
-		unimplemented!();
-	}
 }
 
 /// Upcast trait.
diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs
index a8800045b..30a358bdb 100644
--- a/util/src/journaldb/archivedb.rs
+++ b/util/src/journaldb/archivedb.rs
@@ -26,10 +26,6 @@ use kvdb::{Database, DBTransaction};
 #[cfg(test)]
 use std::env;
 
-/// Suffix appended to auxiliary keys to distinguish them from normal keys.
-/// Would be nich to use rocksdb columns for this eventually.
-const AUX_FLAG: u8 = 255;
-
 /// Implementation of the `HashDB` trait for a disk-backed database with a memory overlay
 /// and latent-removal semantics.
/// @@ -108,26 +104,6 @@ impl HashDB for ArchiveDB { fn remove(&mut self, key: &H256) { self.overlay.remove(key); } - - fn insert_aux(&mut self, hash: Vec, value: Vec) { - self.overlay.insert_aux(hash, value); - } - - fn get_aux(&self, hash: &[u8]) -> Option { - if let Some(res) = self.overlay.get_aux(hash) { - return Some(res) - } - - let mut db_hash = hash.to_vec(); - db_hash.push(AUX_FLAG); - - self.backing.get(self.column, &db_hash) - .expect("Low-level database error. Some issue with your hard disk?") - } - - fn remove_aux(&mut self, hash: &[u8]) { - self.overlay.remove_aux(hash); - } } impl JournalDB for ArchiveDB { @@ -164,11 +140,6 @@ impl JournalDB for ArchiveDB { } } - for (mut key, value) in self.overlay.drain_aux() { - key.push(AUX_FLAG); - batch.put(self.column, &key, &value); - } - if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); @@ -204,11 +175,6 @@ impl JournalDB for ArchiveDB { } } - for (mut key, value) in self.overlay.drain_aux() { - key.push(AUX_FLAG); - batch.put(self.column, &key, &value); - } - Ok((inserts + deletes) as u32) } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 338f12b1e..20dd3a41f 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -17,7 +17,6 @@ //! Reference-counted memory-based `HashDB` implementation. use hash::*; -use bytes::*; use rlp::*; use sha3::*; use hashdb::*; @@ -72,7 +71,6 @@ use std::collections::hash_map::Entry; #[derive(Default, Clone, PartialEq)] pub struct MemoryDB { data: H256FastMap<(DBValue, i32)>, - aux: HashMap, } impl MemoryDB { @@ -80,7 +78,6 @@ impl MemoryDB { pub fn new() -> MemoryDB { MemoryDB { data: H256FastMap::default(), - aux: HashMap::new(), } } @@ -118,11 +115,6 @@ impl MemoryDB { mem::replace(&mut self.data, H256FastMap::default()) } - /// Return the internal map of auxiliary data, clearing the current state. - pub fn drain_aux(&mut self) -> HashMap { - mem::replace(&mut self.aux, HashMap::new()) - } - /// Grab the raw information associated with a key. Returns None if the key /// doesn't exist. /// @@ -138,7 +130,6 @@ impl MemoryDB { /// Returns the size of allocated heap memory pub fn mem_used(&self) -> usize { self.data.heap_size_of_children() - + self.aux.heap_size_of_children() } /// Remove an element and delete it from storage if reference count reaches zero. @@ -256,18 +247,6 @@ impl HashDB for MemoryDB { self.data.insert(key.clone(), (DBValue::new(), -1)); } } - - fn insert_aux(&mut self, hash: Vec, value: Vec) { - self.aux.insert(hash, DBValue::from_vec(value)); - } - - fn get_aux(&self, hash: &[u8]) -> Option { - self.aux.get(hash).cloned() - } - - fn remove_aux(&mut self, hash: &[u8]) { - self.aux.remove(hash); - } } #[test] diff --git a/util/src/trie/fatdb.rs b/util/src/trie/fatdb.rs index 700156429..ca3f4ca79 100644 --- a/util/src/trie/fatdb.rs +++ b/util/src/trie/fatdb.rs @@ -17,7 +17,7 @@ use hash::H256; use sha3::Hashable; use hashdb::{HashDB, DBValue}; -use super::{TrieDB, Trie, TrieDBIterator, TrieItem, Recorder}; +use super::{TrieDB, Trie, TrieDBIterator, TrieItem, Recorder, TrieIterator}; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// Additionaly it stores inserted hash-key mappings for later retrieval. 
@@ -46,7 +46,7 @@ impl<'db> FatDB<'db> { } impl<'db> Trie for FatDB<'db> { - fn iter<'a>(&'a self) -> super::Result + 'a>> { + fn iter<'a>(&'a self) -> super::Result + 'a>> { FatDBIterator::new(&self.raw).map(|iter| Box::new(iter) as Box<_>) } @@ -81,6 +81,12 @@ impl<'db> FatDBIterator<'db> { } } +impl<'db> TrieIterator for FatDBIterator<'db> { + fn seek(&mut self, key: &[u8]) -> super::Result<()> { + self.trie_iterator.seek(&key.sha3()) + } +} + impl<'db> Iterator for FatDBIterator<'db> { type Item = TrieItem<'db>; @@ -88,7 +94,8 @@ impl<'db> Iterator for FatDBIterator<'db> { self.trie_iterator.next() .map(|res| res.map(|(hash, value)| { - (self.trie.db().get_aux(&hash).expect("Missing fatdb hash").to_vec(), value) + let aux_hash = hash.sha3(); + (self.trie.db().get(&aux_hash).expect("Missing fatdb hash").to_vec(), value) }) ) } diff --git a/util/src/trie/fatdbmut.rs b/util/src/trie/fatdbmut.rs index fa1c168e8..c81c62f71 100644 --- a/util/src/trie/fatdbmut.rs +++ b/util/src/trie/fatdbmut.rs @@ -51,6 +51,10 @@ impl<'db> FatDBMut<'db> { pub fn db_mut(&mut self) -> &mut HashDB { self.raw.db_mut() } + + fn to_aux_key(key: &[u8]) -> H256 { + key.sha3() + } } impl<'db> TrieMut for FatDBMut<'db> { @@ -76,12 +80,14 @@ impl<'db> TrieMut for FatDBMut<'db> { let hash = key.sha3(); try!(self.raw.insert(&hash, value)); let db = self.raw.db_mut(); - db.insert_aux(hash.to_vec(), key.to_vec()); + db.emplace(Self::to_aux_key(&hash), DBValue::from_slice(key)); Ok(()) } fn remove(&mut self, key: &[u8]) -> super::Result<()> { - self.raw.remove(&key.sha3()) + let hash = key.sha3(); + self.raw.db_mut().remove(&Self::to_aux_key(&hash)); + self.raw.remove(&hash) } } diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index d4cc04962..9c4284b89 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -102,7 +102,7 @@ pub trait Trie { where 'a: 'b, R: Recorder; /// Returns an iterator over elements of trie. - fn iter<'a>(&'a self) -> Result + 'a>>; + fn iter<'a>(&'a self) -> Result + 'a>>; } /// A key-value datastore implemented as a database-backed modified Merkle tree. @@ -130,6 +130,12 @@ pub trait TrieMut { fn remove(&mut self, key: &[u8]) -> Result<()>; } +/// A trie iterator that also supports random access. +pub trait TrieIterator : Iterator { + /// Position the iterator on the first element with key > `key` + fn seek(&mut self, key: &[u8]) -> Result<()>; +} + /// Trie types #[derive(Debug, PartialEq, Clone)] pub enum TrieSpec { @@ -193,7 +199,7 @@ impl<'db> Trie for TrieKinds<'db> { wrapper!(self, get_recorded, key, r) } - fn iter<'a>(&'a self) -> Result + 'a>> { + fn iter<'a>(&'a self) -> Result + 'a>> { wrapper!(self, iter,) } } diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs index b1d7bbc0c..0861f53f3 100644 --- a/util/src/trie/sectriedb.rs +++ b/util/src/trie/sectriedb.rs @@ -18,7 +18,7 @@ use hash::H256; use sha3::Hashable; use hashdb::{HashDB, DBValue}; use super::triedb::TrieDB; -use super::{Trie, TrieItem, Recorder}; +use super::{Trie, TrieItem, Recorder, TrieIterator}; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. 
/// @@ -49,7 +49,7 @@ impl<'db> SecTrieDB<'db> { } impl<'db> Trie for SecTrieDB<'db> { - fn iter<'a>(&'a self) -> super::Result + 'a>> { + fn iter<'a>(&'a self) -> super::Result + 'a>> { TrieDB::iter(&self.raw) } diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index d929c9d68..ecd8bdded 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -20,7 +20,7 @@ use nibbleslice::*; use rlp::*; use super::node::Node; use super::recorder::{Recorder, NoOp}; -use super::{Trie, TrieItem, TrieError}; +use super::{Trie, TrieItem, TrieError, TrieIterator}; /// A `Trie` implementation using a generic `HashDB` backing database. /// @@ -295,6 +295,64 @@ impl<'a> TrieDBIterator<'a> { Ok(r) } + fn seek_descend<'key> ( &mut self, node: &[u8], key: &NibbleSlice<'key>, d: u32) -> super::Result<()> { + match Node::decoded(node) { + Node::Leaf(ref slice, _) => { + let slice = &NibbleSlice::from_encoded(slice).0; + if slice == key { + self.trail.push(Crumb { + status: Status::At, + node: Node::decoded(node), + }); + } else { + self.trail.push(Crumb { + status: Status::Exiting, + node: Node::decoded(node), + }); + } + self.key_nibbles.extend(slice.iter()); + Ok(()) + }, + Node::Extension(ref slice, ref item) => { + let slice = &NibbleSlice::from_encoded(slice).0; + if key.starts_with(slice) { + let mut r = NoOp; + self.trail.push(Crumb { + status: Status::At, + node: Node::decoded(node), + }); + self.key_nibbles.extend(slice.iter()); + let data = try!(self.db.get_raw_or_lookup(&*item, &mut r, d)); + self.seek_descend(&data, &key.mid(slice.len()), d + 1) + } else { + try!(self.descend(node)); + Ok(()) + } + }, + Node::Branch(ref nodes, _) => match key.is_empty() { + true => { + self.trail.push(Crumb { + status: Status::At, + node: Node::decoded(node), + }); + Ok(()) + }, + false => { + let mut r = NoOp; + let i = key.at(0); + self.trail.push(Crumb { + status: Status::AtChild(i as usize), + node: Node::decoded(node), + }); + self.key_nibbles.push(i); + let child = try!(self.db.get_raw_or_lookup(&*nodes[i as usize], &mut r, d)); + self.seek_descend(&child, &key.mid(1), d + 1) + } + }, + _ => Ok(()) + } + } + /// Descend into a payload. 
 	fn descend(&mut self, d: &[u8]) -> super::Result<()> {
 		self.trail.push(Crumb {
@@ -316,6 +374,17 @@ impl<'a> TrieDBIterator<'a> {
 	}
 }
 
+impl<'a> TrieIterator for TrieDBIterator<'a> {
+	/// Position the iterator on the first element with key >= `key`
+	fn seek(&mut self, key: &[u8]) -> super::Result<()> {
+		self.trail.clear();
+		self.key_nibbles.clear();
+		let mut r = NoOp;
+		let root_rlp = try!(self.db.root_data(&mut r));
+		self.seek_descend(&root_rlp, &NibbleSlice::new(key), 1)
+	}
+}
+
 impl<'a> Iterator for TrieDBIterator<'a> {
 	type Item = TrieItem<'a>;
 
@@ -372,7 +441,7 @@ impl<'a> Iterator for TrieDBIterator<'a> {
 }
 
 impl<'db> Trie for TrieDB<'db> {
-	fn iter<'a>(&'a self) -> super::Result<Box<Iterator<Item = TrieItem> + 'a>> {
+	fn iter<'a>(&'a self) -> super::Result<Box<TrieIterator<Item = TrieItem> + 'a>> {
 		TrieDBIterator::new(self).map(|iter| Box::new(iter) as Box<_>)
 	}
 
@@ -415,3 +484,48 @@ fn iterator() {
 	assert_eq!(d.iter().map(|i| i.clone().to_vec()).collect::<Vec<_>>(), t.iter().unwrap().map(|x| x.unwrap().0).collect::<Vec<_>>());
 	assert_eq!(d, t.iter().unwrap().map(|x| x.unwrap().1).collect::<Vec<_>>());
 }
+
+#[test]
+fn iterator_seek() {
+	use memorydb::*;
+	use super::TrieMut;
+	use super::triedbmut::*;
+
+	let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ];
+
+	let mut memdb = MemoryDB::new();
+	let mut root = H256::new();
+	{
+		let mut t = TrieDBMut::new(&mut memdb, &mut root);
+		for x in &d {
+			t.insert(x, x).unwrap();
+		}
+	}
+
+	let t = TrieDB::new(&memdb, &root).unwrap();
+	let mut iter = t.iter().unwrap();
+	assert_eq!(iter.next(), Some(Ok((b"A".to_vec(), DBValue::from_slice(b"A")))));
+	iter.seek(b"!").unwrap();
+	assert_eq!(d, iter.map(|x| x.unwrap().1).collect::<Vec<_>>());
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"A").unwrap();
+	assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"AA").unwrap();
+	assert_eq!(&d[2..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"A!").unwrap();
+	assert_eq!(&d[1..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"AB").unwrap();
+	assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"AB!").unwrap();
+	assert_eq!(&d[3..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"B").unwrap();
+	assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+	let mut iter = t.iter().unwrap();
+	iter.seek(b"C").unwrap();
+	assert_eq!(&d[4..], &iter.map(|x| x.unwrap().1).collect::<Vec<_>>()[..]);
+}
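
Reviewer note, not part of the patch: the sketch below illustrates how the new `TrieIterator::seek` supports the resume-after-last-key pagination that `Client::list_accounts`/`list_storage` and `execute_export_state` rely on. It is written in the same in-crate style as the `iterator_seek` test above; `paged_iteration` and `PAGE` are illustrative names only, and the batch size of 1000 used by the export code is shrunk to 2 so the loop is visible on the toy trie.

#[test]
fn paged_iteration() {
	use memorydb::*;
	use super::TrieMut;
	use super::triedbmut::*;

	// Toy page size; execute_export_state asks for batches of 1000 accounts/keys at a time.
	const PAGE: usize = 2;

	let d = vec![ DBValue::from_slice(b"A"), DBValue::from_slice(b"AA"), DBValue::from_slice(b"AB"), DBValue::from_slice(b"B") ];
	let mut memdb = MemoryDB::new();
	let mut root = H256::new();
	{
		let mut t = TrieDBMut::new(&mut memdb, &mut root);
		for x in &d {
			t.insert(x, x).unwrap();
		}
	}

	let t = TrieDB::new(&memdb, &root).unwrap();
	let mut seen = Vec::new();
	let mut last: Option<Vec<u8>> = None;
	loop {
		// A fresh iterator per page, seeked past the last key of the previous page,
		// mirrors the `after`/`last` handling in Client::list_accounts and list_storage.
		let mut iter = t.iter().unwrap();
		if let Some(ref last) = last {
			iter.seek(last).unwrap();
		}
		let page: Vec<Vec<u8>> = iter.filter_map(|item| item.ok().map(|(key, _)| key)).take(PAGE).collect();
		if page.is_empty() {
			break;
		}
		last = page.last().cloned();
		seen.extend(page);
	}
	// All keys are visited exactly once, in order, across the pages.
	assert_eq!(seen, d.iter().map(|i| i.clone().to_vec()).collect::<Vec<_>>());
}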