diff --git a/ethcore/snapshot/src/lib.rs b/ethcore/snapshot/src/lib.rs
index c119917bc..091e1abe9 100644
--- a/ethcore/snapshot/src/lib.rs
+++ b/ethcore/snapshot/src/lib.rs
@@ -435,12 +435,8 @@ impl StateRebuilder {
 			}
 		}
 
-		let backing = self.db.backing().clone();
-		let mut batch = backing.transaction();
-		// Drain the transaction overlay and put the data into the batch.
-		self.db.inject(&mut batch)?;
-		backing.write_buffered(batch);
-
+		let batch = self.db.drain_transaction_overlay()?;
+		self.db.backing().write(batch)?;
 		Ok(())
 	}
 
diff --git a/util/journaldb/src/archivedb.rs b/util/journaldb/src/archivedb.rs
index 1b68e32f6..bf896e074 100644
--- a/util/journaldb/src/archivedb.rs
+++ b/util/journaldb/src/archivedb.rs
@@ -142,9 +142,8 @@ impl JournalDB for ArchiveDB {
 		Ok(0)
 	}
 
-	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
-		let mut inserts = 0usize;
-		let mut deletes = 0usize;
+	fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
+		let mut batch = DBTransaction::new();
 		for i in self.overlay.drain() {
 			let (key, (value, rc)) = i;
 
@@ -153,7 +152,6 @@ impl JournalDB for ArchiveDB {
 					return Err(error_key_already_exists(&key));
 				}
 				batch.put(self.column, key.as_bytes(), &value);
-				inserts += 1;
 			}
 			if rc < 0 {
 				assert!(rc == -1);
@@ -161,11 +159,10 @@ impl JournalDB for ArchiveDB {
 					return Err(error_negatively_reference_hash(&key));
 				}
 				batch.delete(self.column, key.as_bytes());
-				deletes += 1;
 			}
 		}
 
-		Ok((inserts + deletes) as u32)
+		Ok(batch)
 	}
 
 	fn latest_era(&self) -> Option<u64> { self.latest_era }
@@ -209,7 +206,7 @@ mod tests {
 	use hash_db::{HashDB, EMPTY_PREFIX};
 	use super::*;
 	use kvdb_memorydb;
-	use crate::{JournalDB, inject_batch, commit_batch};
+	use crate::{JournalDB, drain_overlay, commit_batch};
 
 	#[test]
 	fn insert_same_in_fork() {
@@ -463,11 +460,11 @@ fn inject() {
 		let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
 	}
 
diff --git a/util/journaldb/src/earlymergedb.rs b/util/journaldb/src/earlymergedb.rs
index 0f2a61306..a121865b4 100644
--- a/util/journaldb/src/earlymergedb.rs
+++ b/util/journaldb/src/earlymergedb.rs
@@ -474,11 +474,9 @@ impl JournalDB for EarlyMergeDB {
 		Ok(0)
 	}
 
-	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
-		let mut ops = 0;
+	fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
+		let mut batch = DBTransaction::new();
 		for (key, (value, rc)) in self.overlay.drain() {
-			if rc != 0 { ops += 1 }
-
 			match rc {
 				0 => {}
 				1 => {
@@ -497,7 +495,7 @@ impl JournalDB for EarlyMergeDB {
 			}
 		}
 
-		Ok(ops)
+		Ok(batch)
 	}
 
 	fn consolidate(&mut self, with: super::MemoryDB) {
@@ -529,7 +527,7 @@ mod tests {
 	use hash_db::{HashDB, EMPTY_PREFIX};
 	use super::*;
 	use kvdb_memorydb;
-	use crate::{inject_batch, commit_batch};
+	use crate::{drain_overlay, commit_batch};
 
 	#[test]
 	fn insert_same_in_fork() {
@@ -1050,11 +1048,11 @@ fn inject() {
 		let mut jdb = new_db();
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
 	}
 
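
Note on the caller-side change visible in the snapshot rebuilder above: with the old `inject`, the caller created a batch on the backing database, let the `JournalDB` fill it, and then wrote it back buffered; with `drain_transaction_overlay`, the `JournalDB` builds and returns the batch itself and the caller persists it explicitly. A minimal sketch of the two patterns, using only names that appear in this diff (`jdb` stands for any `JournalDB` implementation, inside a function that returns an `io::Result`):

    // Before: the caller owns the batch and inject() fills it,
    // returning the number of operations performed.
    let backing = jdb.backing().clone();
    let mut batch = backing.transaction();
    jdb.inject(&mut batch)?;
    backing.write_buffered(batch);

    // After: the overlay is drained into a fresh DBTransaction which the
    // caller writes synchronously; write() returns a Result, so I/O errors
    // surface at the call site.
    let batch = jdb.drain_transaction_overlay()?;
    jdb.backing().write(batch)?;
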
diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs
index e18e94646..1fdd122a8 100644
--- a/util/journaldb/src/lib.rs
+++ b/util/journaldb/src/lib.rs
@@ -72,11 +72,8 @@ pub trait JournalDB: HashDB<KeccakHasher, DBValue> {
 	/// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions
 	/// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated.
 	///
-	/// Any keys or values inserted or deleted must be completely independent of those affected
-	/// by any previous `commit` operations. Essentially, this means that `inject` can be used
-	/// either to restore a state to a fresh database, or to insert data which may only be journalled
-	/// from this point onwards.
-	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32>;
+	/// Returns a transaction to be committed.
+	fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction>;
 
 	/// State data query
 	fn state(&self, _id: &H256) -> Option<Bytes>;
@@ -213,12 +210,11 @@ pub fn new_memory_db() -> MemoryDB {
 	MemoryDB::from_null_node(&rlp::NULL_RLP, rlp::NULL_RLP.as_ref().into())
 }
 
-#[cfg(test)]
 /// Inject all changes in a single batch.
-pub fn inject_batch(jdb: &mut dyn JournalDB) -> io::Result<u32> {
-	let mut batch = jdb.backing().transaction();
-	let res = jdb.inject(&mut batch)?;
-	jdb.backing().write(batch).map(|_| res).map_err(Into::into)
+#[cfg(test)]
+pub fn drain_overlay(jdb: &mut dyn JournalDB) -> io::Result<()> {
+	let batch = jdb.drain_transaction_overlay()?;
+	jdb.backing().write(batch).map_err(Into::into)
 }
 
 /// Commit all changes in a single batch
diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs
index 62e7d43e8..a05d0ac20 100644
--- a/util/journaldb/src/overlayrecentdb.rs
+++ b/util/journaldb/src/overlayrecentdb.rs
@@ -397,11 +397,9 @@ impl JournalDB for OverlayRecentDB {
 		Ok(ops as u32)
 	}
 
-	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
-		let mut ops = 0;
+	fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
+		let mut batch = DBTransaction::new();
 		for (key, (value, rc)) in self.transaction_overlay.drain() {
-			if rc != 0 { ops += 1 }
-
 			match rc {
 				0 => {}
 				_ if rc > 0 => {
@@ -417,7 +415,7 @@ impl JournalDB for OverlayRecentDB {
 			}
 		}
 
-		Ok(ops)
+		Ok(batch)
 	}
 
 	fn state(&self, key: &H256) -> Option<Bytes> {
@@ -507,7 +505,7 @@ mod tests {
 	use super::*;
 	use hash_db::{HashDB, EMPTY_PREFIX};
 	use kvdb_memorydb;
-	use crate::{JournalDB, inject_batch, commit_batch};
+	use crate::{JournalDB, drain_overlay, commit_batch};
 
 	fn new_db() -> OverlayRecentDB {
 		let backing = Arc::new(kvdb_memorydb::create(1));
@@ -1026,11 +1024,11 @@ fn inject() {
 		let mut jdb = new_db();
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
 	}
 
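
The three overlay-based implementations above (ArchiveDB, EarlyMergeDB, OverlayRecentDB) now share the same shape: allocate a fresh `DBTransaction`, drain the in-memory overlay, turn positive reference counts into puts and negative ones into deletes, and return the batch instead of an operation count. A condensed sketch of that common pattern; the per-database checks against the backing store (such as `error_key_already_exists` and `error_negatively_reference_hash` in ArchiveDB) are omitted, and OverlayRecentDB drains `transaction_overlay` rather than `overlay`:

    fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
        let mut batch = DBTransaction::new();
        for (key, (value, rc)) in self.overlay.drain() {
            if rc > 0 {
                // Net insertion: schedule a put for the key/value pair.
                batch.put(self.column, key.as_bytes(), &value);
            } else if rc < 0 {
                // Net removal: schedule a delete for the key.
                batch.delete(self.column, key.as_bytes());
            }
            // rc == 0: the key was inserted and removed again, nothing to do.
        }
        Ok(batch)
    }
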
diff --git a/util/journaldb/src/refcounteddb.rs b/util/journaldb/src/refcounteddb.rs
index ea278a6ac..bb20ad07c 100644
--- a/util/journaldb/src/refcounteddb.rs
+++ b/util/journaldb/src/refcounteddb.rs
@@ -193,12 +193,13 @@ impl JournalDB for RefCountedDB {
 		Ok(r)
 	}
 
-	fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> {
+	fn drain_transaction_overlay(&mut self) -> io::Result<DBTransaction> {
 		self.inserts.clear();
 		for remove in self.removes.drain(..) {
 			self.forward.remove(&remove, EMPTY_PREFIX);
 		}
-		self.forward.commit_to_batch(batch)
+		let mut batch = DBTransaction::new();
+		self.forward.commit_to_batch(&mut batch).map(|_| batch)
 	}
 
 	fn consolidate(&mut self, mut with: super::MemoryDB) {
@@ -224,7 +225,7 @@ mod tests {
 	use hash_db::{HashDB, EMPTY_PREFIX};
 	use super::*;
 	use kvdb_memorydb;
-	use crate::{JournalDB, inject_batch, commit_batch};
+	use crate::{JournalDB, drain_overlay, commit_batch};
 
 	fn new_db() -> RefCountedDB {
 		let backing = Arc::new(kvdb_memorydb::create(1));
@@ -338,11 +339,11 @@ fn inject() {
 		let mut jdb = new_db();
 		let key = jdb.insert(EMPTY_PREFIX, b"dog");
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());
 		jdb.remove(&key, EMPTY_PREFIX);
-		inject_batch(&mut jdb).unwrap();
+		drain_overlay(&mut jdb).unwrap();
 
 		assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
 	}
 
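
The updated `inject` tests above all exercise the new flow through the `drain_overlay` test helper; the same round trip can also be written against the trait method directly. A sketch using only the constructor and calls that appear in the diffs above (ArchiveDB over an in-memory backing):

    let mut jdb = ArchiveDB::new(Arc::new(kvdb_memorydb::create(1)), 0);
    let key = jdb.insert(EMPTY_PREFIX, b"dog");

    // Drain the overlay into a batch and persist it explicitly.
    let batch = jdb.drain_transaction_overlay().unwrap();
    jdb.backing().write(batch).unwrap();
    assert_eq!(jdb.get(&key, EMPTY_PREFIX).unwrap(), b"dog".to_vec());

    jdb.remove(&key, EMPTY_PREFIX);
    let batch = jdb.drain_transaction_overlay().unwrap();
    jdb.backing().write(batch).unwrap();
    assert!(jdb.get(&key, EMPTY_PREFIX).is_none());
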