diff --git a/ethcore/res/ethereum/tests b/ethcore/res/ethereum/tests index c670b1d8c..3116f85a4 160000 --- a/ethcore/res/ethereum/tests +++ b/ethcore/res/ethereum/tests @@ -1 +1 @@ -Subproject commit c670b1d8c9f09593a6758ab2c099360e16c7c25b +Subproject commit 3116f85a499ceaf4dfdc46726060fc056e2d7829 diff --git a/ethcore/src/account.rs b/ethcore/src/account.rs index 63f86b171..a7013a70e 100644 --- a/ethcore/src/account.rs +++ b/ethcore/src/account.rs @@ -2,6 +2,7 @@ use util::*; use pod_account::*; +use account_db::*; /// Single account in the system. #[derive(Clone)] @@ -99,7 +100,7 @@ impl Account { } /// Get (and cache) the contents of the trie's storage at `key`. - pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { + pub fn storage_at(&self, db: &AccountDB, key: &H256) -> H256 { self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{ (Filth::Clean, H256::from(SecTrieDB::new(db, &self.storage_root).get(key.bytes()).map_or(U256::zero(), |v| -> U256 {decode(v)}))) }).1.clone() @@ -147,7 +148,7 @@ impl Account { } /// Provide a database to lookup `code_hash`. Should not be called if it is a contract without code. - pub fn cache_code(&mut self, db: &HashDB) -> bool { + pub fn cache_code(&mut self, db: &AccountDB) -> bool { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.is_cached() || @@ -184,7 +185,7 @@ impl Account { pub fn sub_balance(&mut self, x: &U256) { self.balance = self.balance - *x; } /// Commit the `storage_overlay` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, db: &mut HashDB) { + pub fn commit_storage(&mut self, db: &mut AccountDBMut) { let mut t = SecTrieDBMut::from_existing(db, &mut self.storage_root); for (k, &mut (ref mut f, ref mut v)) in self.storage_overlay.borrow_mut().iter_mut() { if f == &Filth::Dirty { @@ -200,7 +201,7 @@ impl Account { } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. 
- pub fn commit_code(&mut self, db: &mut HashDB) { + pub fn commit_code(&mut self, db: &mut AccountDBMut) { trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty()); match (self.code_hash.is_none(), self.code_cache.is_empty()) { (true, true) => self.code_hash = Some(SHA3_EMPTY), @@ -233,10 +234,12 @@ mod tests { use util::*; use super::*; + use account_db::*; #[test] fn storage_at() { let mut db = MemoryDB::new(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64))); @@ -248,13 +251,14 @@ mod tests { let a = Account::from_rlp(&rlp); assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2"); - assert_eq!(a.storage_at(&db, &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); - assert_eq!(a.storage_at(&db, &H256::from(&U256::from(0x01u64))), H256::new()); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))), H256::from(&U256::from(0x1234u64))); + assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))), H256::new()); } #[test] fn note_code() { let mut db = MemoryDB::new(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); let rlp = { let mut a = Account::new_contract(U256::from(69u8)); @@ -264,7 +268,7 @@ mod tests { }; let mut a = Account::from_rlp(&rlp); - assert!(a.cache_code(&db)); + assert!(a.cache_code(&db.immutable())); let mut a = Account::from_rlp(&rlp); assert_eq!(a.note_code(vec![0x55, 0x44, 0xffu8]), Ok(())); @@ -274,6 +278,7 @@ mod tests { fn commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); let mut db = MemoryDB::new(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); a.set_storage(x!(0), x!(0x1234)); assert_eq!(a.storage_root(), None); a.commit_storage(&mut db); @@ -284,6 +289,7 @@ mod tests { fn commit_remove_commit_storage() { let mut a = Account::new_contract(U256::from(69u8)); let mut db = MemoryDB::new(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); a.set_storage(x!(0), x!(0x1234)); a.commit_storage(&mut db); a.set_storage(x!(1), x!(0x1234)); @@ -297,6 +303,7 @@ mod tests { fn commit_code() { let mut a = Account::new_contract(U256::from(69u8)); let mut db = MemoryDB::new(); + let mut db = AccountDBMut::new(&mut db, &Address::new()); a.init_code(vec![0x55, 0x44, 0xffu8]); assert_eq!(a.code_hash(), SHA3_EMPTY); a.commit_code(&mut db); diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs new file mode 100644 index 000000000..e7f1b2bad --- /dev/null +++ b/ethcore/src/account_db.rs @@ -0,0 +1,120 @@ +//! DB backend wrapper for Account trie +use util::*; + +static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; + +// TODO: introduce HashDBMut? 
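Note on the account.rs hunks above: Account no longer touches the backing HashDB directly. Reads (storage_at, cache_code) now take an AccountDB and writes (commit_storage, commit_code) an AccountDBMut, so every trie node an account stores is re-keyed under that account's address before it reaches the shared database. A minimal sketch of the wiring the updated unit tests use, assuming it sits inside the ethcore crate where util and account_db are in scope; the assertion is mine, the types and constructors are the ones this diff introduces:

    #[cfg(test)]
    mod account_db_sketch {
        use util::*;          // MemoryDB, Address, HashDB
        use account_db::*;    // AccountDB, AccountDBMut

        #[test]
        fn wraps_backing_db() {
            let mut backing = MemoryDB::new();
            // writes made through the wrapper are re-keyed under this address
            let mut db = AccountDBMut::new(&mut backing, &Address::new());
            let key = db.insert(b"node");            // HashDB::insert, key re-mapped internally
            // reads (Account::storage_at, Account::cache_code) take the read-only view
            assert!(db.immutable().exists(&key));
        }
    }
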
+/// DB backend wrapper for Account trie +/// Transforms trie node keys for the database +pub struct AccountDB<'db> { + db: &'db HashDB, + address: H256, +} + +#[inline] +fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { + let mut addr_hash = address.sha3(); + // preserve 96 bits of original key for db lookup + addr_hash[0..12].clone_from_slice(&[0u8; 12]); + &addr_hash ^ key +} + +impl<'db> AccountDB<'db> { + pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { + AccountDB { + db: db, + address: x!(address.clone()), + } + } +} + +impl<'db> HashDB for AccountDB<'db>{ + fn keys(&self) -> HashMap { + unimplemented!() + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + if key == &SHA3_NULL_RLP { + return Some(&NULL_RLP_STATIC); + } + self.db.lookup(&combine_key(&self.address, key)) + } + + fn exists(&self, key: &H256) -> bool { + if key == &SHA3_NULL_RLP { + return true; + } + self.db.exists(&combine_key(&self.address, key)) + } + + fn insert(&mut self, _value: &[u8]) -> H256 { + unimplemented!() + } + + fn emplace(&mut self, _key: H256, _value: Bytes) { + unimplemented!() + } + + fn kill(&mut self, _key: &H256) { + unimplemented!() + } +} + +/// DB backend wrapper for Account trie +pub struct AccountDBMut<'db> { + db: &'db mut HashDB, + address: H256, +} + +impl<'db> AccountDBMut<'db> { + pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { + AccountDBMut { + db: db, + address: x!(address.clone()), + } + } + + #[allow(dead_code)] + pub fn immutable(&'db self) -> AccountDB<'db> { + AccountDB { db: self.db, address: self.address.clone() } + } +} + +impl<'db> HashDB for AccountDBMut<'db>{ + fn keys(&self) -> HashMap { + unimplemented!() + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + if key == &SHA3_NULL_RLP { + return Some(&NULL_RLP_STATIC); + } + self.db.lookup(&combine_key(&self.address, key)) + } + + fn exists(&self, key: &H256) -> bool { + if key == &SHA3_NULL_RLP { + return true; + } + self.db.exists(&combine_key(&self.address, key)) + } + + fn insert(&mut self, value: &[u8]) -> H256 { + let k = value.sha3(); + let ak = combine_key(&self.address, &k); + self.db.emplace(ak, value.to_vec()); + k + } + + fn emplace(&mut self, key: H256, value: Bytes) { + let key = combine_key(&self.address, &key); + self.db.emplace(key, value.to_vec()) + } + + fn kill(&mut self, key: &H256) { + let key = combine_key(&self.address, key); + self.db.kill(&key) + } +} + + diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index a2de89d13..2a8874bb3 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -62,7 +62,7 @@ impl Decodable for Block { /// Internal type for a block's common elements. 
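The piece of account_db.rs above worth pausing on is combine_key: it hashes the account address, zeroes the first 12 bytes of that hash, and XORs the remainder into the trie-node key, so the first 96 bits of the original key are preserved for db lookup (as the in-code comment says) while the other 160 bits become unique per account. Identical storage nodes in two different account tries therefore no longer collapse onto one backing entry. A standalone illustration of the same mixing rule over plain byte arrays, compilable outside the ethcore crate; the function and variable names here are mine:

    /// Same mixing rule as account_db::combine_key, written against [u8; 32].
    fn combine_key_demo(address_hash: &[u8; 32], node_key: &[u8; 32]) -> [u8; 32] {
        let mut out = *node_key;
        // bytes 0..12 of the node key pass through unchanged ("preserve 96 bits
        // of original key for db lookup"); the remaining 20 bytes are XORed with
        // the hash of the account address, so the same trie node stored by two
        // different accounts lands on two different backing-store keys.
        for i in 12..32 {
            out[i] ^= address_hash[i];
        }
        out
    }
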
// TODO: rename to ExecutedBlock // TODO: use BareBlock -#[derive(Debug, Clone)] +#[derive(Debug)] pub struct ExecutedBlock { base: Block, @@ -318,8 +318,10 @@ impl IsBlock for SealedBlock { /// Enact the block given by block header, transactions and uncles pub fn enact<'x, 'y>(header: &Header, transactions: &[Transaction], uncles: &[Header], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result, Error> { { - let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); - trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); + if ::log::max_log_level() >= ::log::LogLevel::Trace { + let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); + trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); + } } let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), header.extra_data().clone()); @@ -363,10 +365,10 @@ mod tests { let engine = Spec::new_test().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); - let db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; - let b = OpenBlock::new(engine.deref(), db.clone(), &genesis_header, &last_hashes, Address::zero(), vec![]); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); let b = b.close(); let _ = b.seal(vec![]); } @@ -378,16 +380,16 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); - let db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); - let b = OpenBlock::new(engine.deref(), db.clone(), &genesis_header, &vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap(); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, &vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); let mut db_result = get_temp_journal_db(); - let db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); - let e = enact_and_seal(&orig_bytes, engine.deref(), db.clone(), &genesis_header, &vec![genesis_header.hash()]).unwrap(); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); + let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, &vec![genesis_header.hash()]).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 03c03ab49..0016c51e6 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -139,10 +139,10 @@ impl ClientReport { pub struct Client { chain: Arc>, engine: Arc>, - state_db: JournalDB, + state_db: Arc, + state_journal: Mutex, block_queue: RwLock, report: RwLock, - uncommited_states: RwLock>, import_lock: Mutex<()> } @@ -180,16 +180,16 @@ impl Client { let engine = Arc::new(try!(spec.to_engine())); let mut state_db = JournalDB::new_with_arc(db.clone()); - if engine.spec().ensure_db_good(&mut state_db) { + if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting 
genesis state to state DB"); } Ok(Arc::new(Client { chain: chain, engine: engine.clone(), - state_db: state_db, + state_db: db.clone(), + state_journal: Mutex::new(JournalDB::new_with_arc(db)), block_queue: RwLock::new(BlockQueue::new(engine, message_channel)), report: RwLock::new(Default::default()), - uncommited_states: RwLock::new(HashMap::new()), import_lock: Mutex::new(()), })) } @@ -242,7 +242,7 @@ impl Client { } } - let db = self.state_db.clone(); + let db = self.state_journal.lock().unwrap().clone(); let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { @@ -277,14 +277,9 @@ impl Client { ret } - /// Clear cached state overlay - pub fn clear_state(&self, hash: &H256) { - self.uncommited_states.write().unwrap().remove(hash); - } - /// Get a copy of the best block's state. pub fn state(&self) -> State { - State::from_existing(self.state_db.clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) + State::from_existing(JournalDB::new_with_arc(self.state_db.clone()), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) } /// Get info on the cache. diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index e9aedc128..9cd287654 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -232,10 +232,10 @@ fn on_close_block() { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); - let mut db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; - let b = OpenBlock::new(engine.deref(), db.clone(), &genesis_header, &last_hashes, Address::zero(), vec![]); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); let b = b.close(); assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); } @@ -246,10 +246,10 @@ fn on_close_block_with_uncle() { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); - let mut db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); let last_hashes = vec![genesis_header.hash()]; - let mut b = OpenBlock::new(engine.deref(), db.clone(), &genesis_header, &last_hashes, Address::zero(), vec![]); + let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); let mut uncle = Header::new(); let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106"); uncle.author = uncle_author.clone(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index ec6cfe103..1e8cc2c9c 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -44,9 +44,9 @@ mod tests { let engine = new_morden().to_engine().unwrap(); let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); - let mut db = db_result.reference_mut(); - engine.spec().ensure_db_good(db); - let s = State::from_existing(db.clone(), genesis_header.state_root.clone(), engine.account_start_nonce()); + let mut db = db_result.take(); + engine.spec().ensure_db_good(&mut db); + let s = State::from_existing(db, 
genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64)); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000003")), U256::from(1u64)); diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index 6516d9946..6bec042cb 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -263,7 +263,7 @@ pub struct Interpreter; impl evm::Evm for Interpreter { fn exec(&self, params: ActionParams, ext: &mut evm::Ext) -> evm::Result { - let code = ¶ms.code.clone().unwrap(); + let code = ¶ms.code.as_ref().unwrap(); let valid_jump_destinations = self.find_jump_destinations(&code); let mut current_gas = params.gas.clone(); @@ -728,12 +728,15 @@ impl Interpreter { let big_id = stack.pop_back(); let id = big_id.low_u64() as usize; let max = id.wrapping_add(32); - let data = params.data.clone().unwrap_or_else(|| vec![]); - let bound = cmp::min(data.len(), max); - if id < bound && big_id < U256::from(data.len()) { - let mut v = data[id..bound].to_vec(); - v.resize(32, 0); - stack.push(U256::from(&v[..])) + if let Some(data) = params.data.as_ref() { + let bound = cmp::min(data.len(), max); + if id < bound && big_id < U256::from(data.len()) { + let mut v = [0u8; 32]; + v[0..bound-id].clone_from_slice(&data[id..bound]); + stack.push(U256::from(&v[..])) + } else { + stack.push(U256::zero()) + } } else { stack.push(U256::zero()) } diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index b67b71306..0a9cc37a3 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -194,7 +194,7 @@ impl<'a> Executive<'a> { /// Returns either gas_left or `evm::Error`. pub fn call(&mut self, params: ActionParams, substate: &mut Substate, mut output: BytesRef) -> evm::Result { // backup used in case of running out of gas - let backup = self.state.clone(); + self.state.snapshot(); // at first, transfer value to destination if let ActionValue::Transfer(val) = params.value { @@ -212,11 +212,12 @@ impl<'a> Executive<'a> { match cost <= params.gas { true => { self.engine.execute_builtin(¶ms.code_address, data, &mut output); + self.state.clear_snapshot(); Ok(params.gas - cost) }, // just drain the whole gas false => { - self.state.revert(backup); + self.state.revert_snapshot(); Err(evm::Error::OutOfGas) } } @@ -232,11 +233,12 @@ impl<'a> Executive<'a> { trace!("exec: sstore-clears={}\n", unconfirmed_substate.sstore_clears_count); trace!("exec: substate={:?}; unconfirmed_substate={:?}\n", substate, unconfirmed_substate); - self.enact_result(&res, substate, unconfirmed_substate, backup); + self.enact_result(&res, substate, unconfirmed_substate); trace!("exec: new substate={:?}\n", substate); res } else { // otherwise, nothing + self.state.clear_snapshot(); Ok(params.gas) } } @@ -246,7 +248,7 @@ impl<'a> Executive<'a> { /// Modifies the substate. 
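Note on the interpreter.rs hunk above: the CALLDATALOAD path stops cloning the entire calldata vector on every load (and exec() likewise switches from params.code.clone() to params.code.as_ref()), and a read that starts inside the data but runs past its end is now zero-padded into a fixed 32-byte buffer instead of resizing a Vec. The semantics it implements, as a std-only sketch with usize offsets instead of U256; the function name is mine:

    use std::cmp;

    /// Zero-padded 32-byte read, mirroring what the rewritten CALLDATALOAD does.
    fn read_calldata_word(data: &[u8], offset: usize) -> [u8; 32] {
        let mut word = [0u8; 32];
        if offset < data.len() {
            let end = cmp::min(data.len(), offset.saturating_add(32));
            word[..end - offset].copy_from_slice(&data[offset..end]);
        }
        word // offsets at or past the end of calldata read as 32 zero bytes
    }

    fn main() {
        assert_eq!(read_calldata_word(&[0xaa, 0xbb], 1)[..2], [0xbb, 0x00]);
        assert_eq!(read_calldata_word(&[], 0), [0u8; 32]);
    }
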
pub fn create(&mut self, params: ActionParams, substate: &mut Substate) -> evm::Result { // backup used in case of running out of gas - let backup = self.state.clone(); + self.state.snapshot(); // part of substate that may be reverted let mut unconfirmed_substate = Substate::new(); @@ -263,7 +265,7 @@ impl<'a> Executive<'a> { let res = { self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::InitContract) }; - self.enact_result(&res, substate, unconfirmed_substate, backup); + self.enact_result(&res, substate, unconfirmed_substate); res } @@ -324,16 +326,19 @@ impl<'a> Executive<'a> { } } - fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate, backup: State) { + fn enact_result(&mut self, result: &evm::Result, substate: &mut Substate, un_substate: Substate) { match *result { Err(evm::Error::OutOfGas) | Err(evm::Error::BadJumpDestination {..}) | Err(evm::Error::BadInstruction {.. }) | Err(evm::Error::StackUnderflow {..}) | Err(evm::Error::OutOfStack {..}) => { - self.state.revert(backup); + self.state.revert_snapshot(); }, - Ok(_) | Err(evm::Error::Internal) => substate.accrue(un_substate) + Ok(_) | Err(evm::Error::Internal) => { + self.state.clear_snapshot(); + substate.accrue(un_substate) + } } } } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 540b5ea40..bdf45f3f8 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -97,6 +97,7 @@ mod state_diff; mod engine; mod state; mod account; +mod account_db; mod action_params; mod transaction; mod null_engine; diff --git a/ethcore/src/pod_account.rs b/ethcore/src/pod_account.rs index 7bc886617..95ac7dd4f 100644 --- a/ethcore/src/pod_account.rs +++ b/ethcore/src/pod_account.rs @@ -1,5 +1,6 @@ use util::*; use account::*; +use account_db::*; #[derive(Debug,Clone,PartialEq,Eq)] /// An account, expressed as Plain-Old-Data (hence the name). @@ -44,7 +45,7 @@ impl PodAccount { } /// Place additional data into given hash DB. - pub fn insert_additional(&self, db: &mut HashDB) { + pub fn insert_additional(&self, db: &mut AccountDBMut) { if !self.code.is_empty() { db.insert(&self.code); } diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 50bbbb633..05fcfba99 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -4,6 +4,7 @@ use common::*; use engine::*; use pod_state::*; use null_engine::*; +use account_db::*; /// Convert JSON value to equivalent RLP representation. // TODO: handle container types. @@ -262,8 +263,8 @@ impl Spec { t.insert(address.as_slice(), &account.rlp()); } } - for (_, account) in self.genesis_state.get().iter() { - account.insert_additional(db); + for (address, account) in self.genesis_state.get().iter() { + account.insert_additional(&mut AccountDBMut::new(db, address)); } assert!(db.contains(&self.state_root())); true diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index ed7d29813..ca170ba91 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -1,6 +1,7 @@ use common::*; use engine::Engine; use executive::Executive; +use account_db::*; #[cfg(test)] #[cfg(feature = "json-tests")] use pod_account::*; @@ -13,12 +14,11 @@ use pod_state::PodState; pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. 
-#[derive(Clone)] pub struct State { db: JournalDB, root: H256, cache: RefCell>>, - + snapshots: RefCell>>>>, account_start_nonce: U256, } @@ -36,6 +36,7 @@ impl State { db: db, root: root, cache: RefCell::new(HashMap::new()), + snapshots: RefCell::new(Vec::new()), account_start_nonce: account_start_nonce, } } @@ -51,10 +52,63 @@ impl State { db: db, root: root, cache: RefCell::new(HashMap::new()), + snapshots: RefCell::new(Vec::new()), account_start_nonce: account_start_nonce, } } + /// Create a recoverable snaphot of this state + pub fn snapshot(&mut self) { + self.snapshots.borrow_mut().push(HashMap::new()); + } + + /// Merge last snapshot with previous + pub fn clear_snapshot(&mut self) { + // merge with previous snapshot + let last = self.snapshots.borrow_mut().pop(); + if let Some(mut snapshot) = last { + if let Some(ref mut prev) = self.snapshots.borrow_mut().last_mut() { + for (k, v) in snapshot.drain() { + prev.entry(k).or_insert(v); + } + } + } + } + + /// Revert to snapshot + pub fn revert_snapshot(&mut self) { + if let Some(mut snapshot) = self.snapshots.borrow_mut().pop() { + for (k, v) in snapshot.drain() { + match v { + Some(v) => { + self.cache.borrow_mut().insert(k, v); + }, + None => { + self.cache.borrow_mut().remove(&k); + } + } + } + } + } + + fn insert_cache(&self, address: &Address, account: Option) { + if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { + if !snapshot.contains_key(&address) { + snapshot.insert(address.clone(), self.cache.borrow_mut().insert(address.clone(), account)); + return; + } + } + self.cache.borrow_mut().insert(address.clone(), account); + } + + fn note_cache(&self, address: &Address) { + if let Some(ref mut snapshot) = self.snapshots.borrow_mut().last_mut() { + if !snapshot.contains_key(&address) { + snapshot.insert(address.clone(), self.cache.borrow().get(address).cloned()); + } + } + } + /// Destroy the current object and return root and database. pub fn drop(self) -> (H256, JournalDB) { (self.root, self.db) @@ -68,12 +122,12 @@ impl State { /// Create a new contract at address `contract`. If there is already an account at the address /// it will have its code reset, ready for `init_code()`. pub fn new_contract(&mut self, contract: &Address, balance: U256) { - self.cache.borrow_mut().insert(contract.clone(), Some(Account::new_contract(balance))); + self.insert_cache(&contract, Some(Account::new_contract(balance))); } /// Remove an existing account. pub fn kill_account(&mut self, account: &Address) { - self.cache.borrow_mut().insert(account.clone(), None); + self.insert_cache(account, None); } /// Determine whether an account exists. @@ -91,9 +145,9 @@ impl State { self.get(a, false).as_ref().map_or(U256::zero(), |account| account.nonce().clone()) } - /// Mutate storage of account `a` so that it is `value` for `key`. - pub fn storage_at(&self, a: &Address, key: &H256) -> H256 { - self.get(a, false).as_ref().map_or(H256::new(), |a|a.storage_at(&self.db, key)) + /// Mutate storage of account `address` so that it is `value` for `key`. + pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { + self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(&self.db, address), key)) } /// Mutate storage of account `a` so that it is `value` for `key`. @@ -152,22 +206,18 @@ impl State { Ok(receipt) } - /// Reverts uncommited changed. - pub fn revert(&mut self, backup: State) { - self.cache = backup.cache; - } - /// Commit accounts to SecTrieDBMut. 
This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. #[allow(match_ref_pats)] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? - for (_, ref mut a) in accounts.iter_mut() { + for (address, ref mut a) in accounts.iter_mut() { match a { &mut&mut Some(ref mut account) => { - account.commit_storage(db); - account.commit_code(db); + let mut account_db = AccountDBMut::new(db, address); + account.commit_storage(&mut account_db); + account.commit_code(&mut account_db); } &mut&mut None => {} } @@ -186,6 +236,7 @@ impl State { /// Commits our cached account changes into the trie. pub fn commit(&mut self) { + assert!(self.snapshots.borrow().is_empty()); Self::commit_into(&mut self.db, &mut self.root, self.cache.borrow_mut().deref_mut()); } @@ -193,6 +244,7 @@ impl State { #[cfg(feature = "json-tests")] /// Populate the state from `accounts`. pub fn populate_from(&mut self, accounts: PodState) { + assert!(self.snapshots.borrow().is_empty()); for (add, acc) in accounts.drain().into_iter() { self.cache.borrow_mut().insert(add, Some(Account::from_pod(acc))); } @@ -202,6 +254,7 @@ impl State { #[cfg(feature = "json-tests")] /// Populate a PodAccount map from this state. pub fn to_pod(&self) -> PodState { + assert!(self.snapshots.borrow().is_empty()); // TODO: handle database rather than just the cache. PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| { if let Some(ref acc) = *opt { @@ -214,12 +267,13 @@ impl State { /// Pull account `a` in our cache from the trie DB and return it. /// `require_code` requires that the code be cached, too. fn get(&self, a: &Address, require_code: bool) -> Ref> { - self.cache.borrow_mut().entry(a.clone()).or_insert_with(|| { - SecTrieDB::new(&self.db, &self.root).get(&a).map(|rlp| Account::from_rlp(rlp)) - }); + let have_key = self.cache.borrow().contains_key(a); + if !have_key { + self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + } if require_code { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { - account.cache_code(&self.db); + account.cache_code(&AccountDB::new(&self.db, a)); } } Ref::map(self.cache.borrow(), |m| m.get(a).unwrap()) @@ -233,8 +287,12 @@ impl State { /// Pull account `a` in our cache from the trie DB. `require_code` requires that the code be cached, too. /// If it doesn't exist, make account equal the evaluation of `default`. 
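The snapshot machinery added to State above works as an undo log: each open snapshot records, per address, the cache entry that was in place when that address was first touched (None meaning "not cached"). insert_cache and note_cache write that pre-image into the innermost snapshot only once per address; revert_snapshot replays it, and clear_snapshot folds it into the enclosing snapshot so an outer revert can still undo the accepted inner changes, which is exactly what the snapshot_nested test below exercises. A toy, std-only version of the same journaling idea over a plain HashMap; the type and method names here are mine, not the State API:

    use std::collections::HashMap;

    /// Minimal journaled map demonstrating the snapshot scheme State now uses.
    struct Journaled {
        cache: HashMap<String, i64>,
        // one undo map per open snapshot: key -> value before the first change
        snapshots: Vec<HashMap<String, Option<i64>>>,
    }

    impl Journaled {
        fn new() -> Self { Journaled { cache: HashMap::new(), snapshots: Vec::new() } }
        fn snapshot(&mut self) { self.snapshots.push(HashMap::new()); }

        fn insert(&mut self, k: &str, v: i64) {
            let old = self.cache.get(k).cloned();
            if let Some(snap) = self.snapshots.last_mut() {
                // record the pre-image only the first time this key is touched
                snap.entry(k.to_string()).or_insert(old);
            }
            self.cache.insert(k.to_string(), v);
        }

        fn revert_snapshot(&mut self) {
            if let Some(snap) = self.snapshots.pop() {
                for (k, old) in snap {
                    match old {
                        Some(v) => { self.cache.insert(k, v); }
                        None => { self.cache.remove(&k); }
                    }
                }
            }
        }

        fn clear_snapshot(&mut self) {
            // keep the changes, but merge the undo data into the outer snapshot
            if let Some(snap) = self.snapshots.pop() {
                if let Some(prev) = self.snapshots.last_mut() {
                    for (k, old) in snap {
                        prev.entry(k).or_insert(old);
                    }
                }
            }
        }
    }

    fn main() {
        let mut j = Journaled::new();
        j.snapshot();
        j.snapshot();
        j.insert("a", 69);
        j.clear_snapshot();      // inner snapshot accepted...
        j.revert_snapshot();     // ...but the outer one still rolls it back
        assert_eq!(j.cache.get("a"), None);
    }
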
fn require_or_from Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> RefMut { - self.cache.borrow_mut().entry(a.clone()).or_insert_with(|| - SecTrieDB::new(&self.db, &self.root).get(&a).map(|rlp| Account::from_rlp(rlp))); + let have_key = self.cache.borrow().contains_key(a); + if !have_key { + self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + } else { + self.note_cache(a); + } let preexists = self.cache.borrow().get(a).unwrap().is_none(); if preexists { self.cache.borrow_mut().insert(a.clone(), Some(default())); @@ -245,7 +303,7 @@ impl State { let b = self.cache.borrow_mut(); RefMut::map(b, |m| m.get_mut(a).unwrap().as_mut().map(|account| { if require_code { - account.cache_code(&self.db); + account.cache_code(&AccountDB::new(&self.db, a)); } account }).unwrap()) @@ -424,6 +482,38 @@ fn ensure_cached() { assert_eq!(state.root().hex(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785"); } +#[test] +fn snapshot_basic() { + let mut state_result = get_temp_state(); + let mut state = state_result.reference_mut(); + let a = Address::zero(); + state.snapshot(); + state.add_balance(&a, &U256::from(69u64)); + assert_eq!(state.balance(&a), U256::from(69u64)); + state.clear_snapshot(); + assert_eq!(state.balance(&a), U256::from(69u64)); + state.snapshot(); + state.add_balance(&a, &U256::from(1u64)); + assert_eq!(state.balance(&a), U256::from(70u64)); + state.revert_snapshot(); + assert_eq!(state.balance(&a), U256::from(69u64)); +} + +#[test] +fn snapshot_nested() { + let mut state_result = get_temp_state(); + let mut state = state_result.reference_mut(); + let a = Address::zero(); + state.snapshot(); + state.snapshot(); + state.add_balance(&a, &U256::from(69u64)); + assert_eq!(state.balance(&a), U256::from(69u64)); + state.clear_snapshot(); + assert_eq!(state.balance(&a), U256::from(69u64)); + state.revert_snapshot(); + assert_eq!(state.balance(&a), U256::from(0)); +} + #[test] fn create_empty() { let mut state_result = get_temp_state(); diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 04d47cbd3..a1d27442f 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -48,18 +48,22 @@ impl Drop for RandomTempPath { #[cfg(test)] pub struct GuardedTempResult { - result: T, + result: Option, _temp: RandomTempPath } impl GuardedTempResult { pub fn reference(&self) -> &T { - &self.result + self.result.as_ref().unwrap() } pub fn reference_mut(&mut self) -> &mut T { - &mut self.result + self.result.as_mut().unwrap() } + + pub fn take(&mut self) -> T { + self.result.take().unwrap() + } } pub fn get_test_spec() -> Spec { @@ -150,7 +154,7 @@ pub fn generate_dummy_client(block_number: u32) -> GuardedTempResult GuardedTempResult::> { _temp: dir, - result: client + result: Some(client) } } @@ -168,7 +172,7 @@ pub fn get_test_client_with_blocks(blocks: Vec) -> GuardedTempResult> { _temp: dir, - result: client + result: Some(client) } } @@ -181,7 +185,7 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult { _temp: temp, - result: bc + result: Some(bc) } } @@ -194,7 +198,7 @@ pub fn generate_dummy_blockchain_with_extra(block_number: u32) -> GuardedTempRes GuardedTempResult:: { _temp: temp, - result: bc + result: Some(bc) } } @@ -204,7 +208,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { GuardedTempResult:: { _temp: temp, - result: bc + result: Some(bc) } } @@ -214,7 +218,7 @@ pub fn get_temp_journal_db() -> 
GuardedTempResult { let journal_db = JournalDB::new(db); GuardedTempResult { _temp: temp, - result: journal_db + result: Some(journal_db) } } @@ -223,7 +227,7 @@ pub fn get_temp_state() -> GuardedTempResult { let journal_db = get_temp_journal_db_in(temp.as_path()); GuardedTempResult { _temp: temp, - result: State::new(journal_db, U256::from(0u8)) + result: Some(State::new(journal_db, U256::from(0u8))) } } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index e30b8947c..393154b48 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -3,12 +3,11 @@ use common::*; use rlp::*; use hashdb::*; -use overlaydb::*; -use rocksdb::{DB, Writable}; +use memorydb::*; +use rocksdb::{DB, Writable, WriteBatch, IteratorMode}; #[cfg(test)] use std::env; -#[derive(Clone)] /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// @@ -17,31 +16,48 @@ use std::env; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. pub struct JournalDB { - forward: OverlayDB, + overlay: MemoryDB, backing: Arc, - inserts: Vec, - removes: Vec, + counters: Arc>>, } +impl Clone for JournalDB { + fn clone(&self) -> JournalDB { + JournalDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + counters: self.counters.clone(), + } + } +} + +const LAST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ]; +const VERSION_KEY : [u8; 4] = [ b'j', b'v', b'e', b'r' ]; + +const DB_VERSION: u32 = 1; + impl JournalDB { /// Create a new instance given a `backing` database. pub fn new(backing: DB) -> JournalDB { let db = Arc::new(backing); - JournalDB { - forward: OverlayDB::new_with_arc(db.clone()), - backing: db, - inserts: vec![], - removes: vec![], - } + JournalDB::new_with_arc(db) } /// Create a new instance given a shared `backing` database. pub fn new_with_arc(backing: Arc) -> JournalDB { + if backing.iterator(IteratorMode::Start).next().is_some() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + let counters = JournalDB::read_counters(&backing); JournalDB { - forward: OverlayDB::new_with_arc(backing.clone()), + overlay: MemoryDB::new(), backing: backing, - inserts: vec![], - removes: vec![], + counters: Arc::new(RwLock::new(counters)), } } @@ -53,18 +69,21 @@ impl JournalDB { Self::new(DB::open_default(dir.to_str().unwrap()).unwrap()) } - /// Get a clone of the overlay db portion of this. - pub fn to_overlaydb(&self) -> OverlayDB { self.forward.clone() } + /// Check if this database has any commits + pub fn is_empty(&self) -> bool { + self.backing.get(&LAST_ERA_KEY).expect("Low level database error").is_none() + } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. + #[allow(cyclomatic_complexity)] pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] - // TODO: store last_era, reclaim_period. + // TODO: store reclaim_period. // when we make a new commit, we journal the inserts and removes. 
// for each end_era that we journaled that we are no passing by, @@ -72,6 +91,8 @@ impl JournalDB { // of its inserts otherwise. // record new commit's details. + let batch = WriteBatch::new(); + let mut counters = self.counters.write().unwrap(); { let mut index = 0usize; let mut last; @@ -87,12 +108,15 @@ impl JournalDB { } let mut r = RlpStream::new_list(3); + let inserts: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect(); + for i in &inserts { + *counters.entry(i.clone()).or_insert(0) += 1; + } + let removes: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect(); r.append(id); - r.append(&self.inserts); - r.append(&self.removes); - try!(self.backing.put(&last, r.as_raw())); - self.inserts.clear(); - self.removes.clear(); + r.append(&inserts); + r.append(&removes); + try!(batch.put(&last, r.as_raw())); } // apply old commits' details @@ -106,32 +130,137 @@ impl JournalDB { last = r.drain(); &last })) { + let to_add; let rlp = Rlp::new(&rlp_data); - let to_remove: Vec = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); - for i in &to_remove { - self.forward.remove(i); + { + to_add = rlp.val_at(1); + for i in &to_add { + let delete_counter = { + if let Some(mut cnt) = counters.get_mut(i) { + *cnt -= 1; + *cnt == 0 + } + else { false } + + }; + if delete_counter { + counters.remove(i); + } + } } - try!(self.backing.delete(&last)); + let to_remove: Vec = if canon_id == rlp.val_at(0) {rlp.val_at(2)} else {to_add}; + for i in &to_remove { + if !counters.contains_key(i) { + batch.delete(&i).expect("Low-level database error. Some issue with your hard disk?"); + } + } + + try!(batch.delete(&last)); trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); index += 1; } + + try!(batch.put(&LAST_ERA_KEY, &encode(&end_era))); } - self.forward.commit() + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc > 0 { + assert!(rc == 1); + batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); + ret += 1; + } + if rc < 0 { + assert!(rc == -1); + ret += 1; + deletes += 1; + } + } + + try!(self.backing.write(batch)); + trace!("JournalDB::commit() deleted {} nodes", deletes); + Ok(ret) } - /// Revert all operations on this object (i.e. `insert()`s and `removes()`s) since the - /// last `commit()`. - pub fn revert(&mut self) { self.forward.revert(); self.removes.clear(); } + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_counters(db: &DB) -> HashMap { + let mut res = HashMap::new(); + if let Some(val) = db.get(&LAST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val) + 1; + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(2); + r.append(&era); + r.append(&index); + &r.drain() + }).expect("Low-level database error.") { + let rlp = Rlp::new(&rlp_data); + let to_add: Vec = rlp.val_at(1); + for h in to_add { + *res.entry(h).or_insert(0) += 1; + } + index += 1; + }; + if index == 0 { + break; + } + era += 1; + } + } + trace!("Recovered {} counters", res.len()); + res + } } impl HashDB for JournalDB { - fn keys(&self) -> HashMap { self.forward.keys() } - fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } - fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } - fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } - fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } - fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iterator(IteratorMode::Start) { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } } #[cfg(test)] @@ -223,4 +352,23 @@ mod tests { assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = JournalDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 7ef0c3309..4a16a5717 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -12,7 +12,6 @@ use std::env; use std::collections::HashMap; use rocksdb::{DB, Writable, IteratorMode}; -#[derive(Clone)] /// Implementation of the HashDB trait for a disk-backed database with a memory overlay. 
/// /// The operations `insert()` and `remove()` take place on the memory overlay; batches of diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 9909c05a4..14b927149 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -196,7 +196,7 @@ impl<'db> TrieDB<'db> { // check if its sha3 + len let r = Rlp::new(node); match r.is_data() && r.size() == 32 { - true => self.db.lookup(&r.as_val::()).expect("Not found!"), + true => self.db.lookup(&r.as_val::()).unwrap_or_else(|| panic!("Not found! {:?}", r.as_val::())), false => node } }
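The util/src/journaldb.rs rewrite above is the largest single change: the OverlayDB forwarder is gone, pending writes sit in a MemoryDB overlay, and a shared map of reference counters (rebuilt from the journal on startup by read_counters) decides whether an aged-out removal may actually delete a backing entry. The new `overwrite` test pins the key property: removing a key and then re-inserting it in a later era must not let the old removal delete the live data. A toy, std-only model of that deletion rule; the struct and method names are mine, and the real code additionally journals per-era RLP records under (era, index) keys and distinguishes the canonical branch:

    use std::collections::HashMap;

    /// Toy version of the rule JournalDB applies when an old era is flushed:
    /// a key is physically deleted only if no journalled insert still references it.
    #[derive(Default)]
    struct Counted {
        backing: HashMap<String, Vec<u8>>,
        counters: HashMap<String, i32>, // live journalled inserts per key
    }

    impl Counted {
        fn insert(&mut self, key: &str, value: &[u8]) {
            self.backing.insert(key.to_string(), value.to_vec());
            *self.counters.entry(key.to_string()).or_insert(0) += 1;
        }
        /// Called when the era that inserted `key` ages out of the journal.
        fn unreference(&mut self, key: &str) {
            let dead = match self.counters.get_mut(key) {
                Some(c) => { *c -= 1; *c == 0 }
                None => false,
            };
            if dead { self.counters.remove(key); }
        }
        /// Called for a key whose removal (or abandoned insert) ages out.
        fn try_delete(&mut self, key: &str) {
            if !self.counters.contains_key(key) {
                self.backing.remove(key); // nothing still references it
            }
        }
    }

    fn main() {
        let mut db = Counted::default();
        db.insert("foo", b"foo");      // era 0 inserts foo
        db.unreference("foo");         // era 0 ages out of the journal
        db.insert("foo", b"foo");      // a later era re-inserts the same key
        db.try_delete("foo");          // the intervening removal ages out...
        assert!(db.backing.contains_key("foo")); // ...but the re-insert keeps it alive
    }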