From d77d9ad9d84105f2c0b4cd53b90f835e3087f669 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 6 Mar 2016 17:28:50 +0100 Subject: [PATCH 01/13] JournalDB with history overlay --- ethcore/src/client.rs | 8 +- parity/main.rs | 3 +- util/src/journaldb.rs | 326 ++++++++++++++++++++++++++---------------- util/src/memorydb.rs | 6 + 4 files changed, 216 insertions(+), 127 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..2b5ec5ccb 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -190,6 +190,8 @@ pub struct ClientReport { pub transactions_applied: usize, /// How much gas has been processed so far. pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, } impl ClientReport { @@ -222,7 +224,7 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1000; -const CLIENT_DB_VER_STR: &'static str = "4.0"; +const CLIENT_DB_VER_STR: &'static str = "5.0"; impl Client { /// Create a new client with given spec and DB path. @@ -432,7 +434,9 @@ impl Client where V: Verifier { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.read().unwrap().clone() + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report } /// Tick the client. diff --git a/parity/main.rs b/parity/main.rs index 3f4243a0a..605fb315d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -395,7 +395,7 @@ impl Informant { let sync_info = sync.status(); if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]", + println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, @@ -408,6 +408,7 @@ impl Informant { queue_info.unverified_queue_size, queue_info.verified_queue_size, + Informant::format_bytes(report.state_db_mem), Informant::format_bytes(cache_info.total()), Informant::format_bytes(queue_info.mem_used), Informant::format_bytes(sync_info.mem_used), diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 01e53f819..48bd94d64 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -35,17 +35,36 @@ use std::env; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. 
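///
/// A minimal usage sketch (illustrative only; it mirrors the tests at the end of this file and
/// assumes the temporary-DB constructor used there). Removals only reach the backing database
/// once the era that journalled them becomes ancient:
///
/// ```ignore
/// let mut jdb = JournalDB::new_temp();
/// let h = jdb.insert(b"foo");
/// jdb.commit(0, &b"0".sha3(), None).unwrap();                   // journal era 0
/// jdb.remove(&h);
/// jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); // removal is only journalled
/// assert!(jdb.exists(&h));                                      // still readable
/// jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); // era 1 becomes ancient
/// assert!(!jdb.exists(&h));                                     // delete has reached the disk
/// ```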
pub struct JournalDB { - overlay: MemoryDB, + transaction_overlay: MemoryDB, backing: Arc, - counters: Option>>>, + journal_overlay: Option>>, +} + +struct JournalOverlay { + backing_overlay: MemoryDB, + journal: VecDeque +} + +struct JournalEntry { + id: H256, + index: usize, + era: u64, + insertions: Vec, + deletions: Vec, +} + +impl HeapSizeOf for JournalEntry { + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } } impl Clone for JournalDB { fn clone(&self) -> JournalDB { JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), - counters: self.counters.clone(), + journal_overlay: self.journal_overlay.clone(), } } } @@ -60,7 +79,6 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl JournalDB { - /// Create a new instance from file pub fn new(path: &str) -> JournalDB { Self::from_prefs(path, true) @@ -86,15 +104,16 @@ impl JournalDB { with_journal = prefer_journal; } - let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) + + let journal_overlay = if with_journal { + Some(Arc::new(RwLock::new(JournalDB::read_overlay(&backing)))) } else { None }; JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), - counters: counters, + journal_overlay: journal_overlay, } } @@ -113,71 +132,48 @@ impl JournalDB { /// Commit all recent insert operations. pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) + let have_journal_overlay = self.journal_overlay.is_some(); + if have_journal_overlay { + self.commit_with_overlay(now, id, end) } else { - self.commit_without_counters() + self.commit_without_overlay() } } /// Drain the overlay and place it into a batch for the DB. fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; + let mut insertions = 0usize; + let mut deletions = 0usize; for i in overlay.drain().into_iter() { let (key, (value, rc)) = i; if rc > 0 { assert!(rc == 1); batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - inserts += 1; + insertions += 1; } if rc < 0 { assert!(rc == -1); - deletes += 1; + deletions += 1; } } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes + trace!("commit: Inserted {}, Deleted {} nodes", insertions, deletions); + insertions + deletions } - /// Just commit the overlay into the backing DB. - fn commit_without_counters(&mut self) -> Result { + /// Just commit the transaction overlay into the backing DB. + fn commit_without_overlay(&mut self) -> Result { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); + let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); Ok(ret as u32) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: - // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, n] => [ ... ] - - // TODO: store reclaim_period. 
- - // when we make a new commit, we journal the inserts and removes. - // for each end_era that we journaled that we are no passing by, - // we remove all of its removes assuming it is canonical and all - // of its inserts otherwise. - // - // We also keep reference counters for each key inserted in the journal to handle - // the following cases where key K must not be deleted from the DB when processing removals : - // Given H is the journal size in eras, 0 <= C <= H. - // Key K is removed in era A(N) and re-inserted in canonical era B(N + C). - // Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C). - // Key K is added in non-canonical era A'(N) canonical B(N + C). - // - // The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions - // is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased - // and the key is safe to delete. - + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); let batch = DBTransaction::new(); { let mut index = 0usize; @@ -204,90 +200,83 @@ impl JournalDB { } let mut r = RlpStream::new_list(3); - let inserts: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect(); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); + let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); // Increase counter for each inserted key no matter if the block is canonical or not. 
- for i in &inserts { - *counters.entry(i.clone()).or_insert(0) += 1; - } - let removes: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect(); + let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); r.append(id); - r.append(&inserts); - r.append(&removes); + r.begin_list(inserted_keys.len()); + for (k, v) in insertions { + r.begin_list(2); + r.append(&k); + r.append(&v); + journal_overlay.backing_overlay.emplace(k, v); + } + r.append(&removed_keys); try!(batch.put(&last, r.as_raw())); try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.journal.push_back(JournalEntry { id: id.clone(), index: index, era: now, insertions: inserted_keys, deletions: removed_keys }); } // apply old commits' details + if let Some((end_era, canon_id)) = end { - let mut index = 0usize; - let mut last; - let mut to_remove: Vec = Vec::new(); - let mut canon_inserts: Vec = Vec::new(); - while let Some(rlp_data) = try!(self.backing.get({ + let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); + let mut canon_deletions: Vec = Vec::new(); + let mut overlay_deletions: Vec = Vec::new(); + while journal_overlay.journal.front().map_or(false, |e| e.era <= end_era) { + let mut journal = journal_overlay.journal.pop_front().unwrap(); + //delete the record from the db let mut r = RlpStream::new_list(3); - r.append(&end_era); - r.append(&index); + r.append(&journal.era); + r.append(&journal.index); r.append(&&PADDING[..]); - last = r.drain(); - &last - })) { - let rlp = Rlp::new(&rlp_data); - let mut inserts: Vec = rlp.val_at(1); - JournalDB::decrease_counters(&inserts, &mut counters); - // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical - if canon_id == rlp.val_at(0) { - let mut canon_deletes: Vec = rlp.val_at(2); - trace!("Purging nodes deleted from canon: {:?}", canon_deletes); - to_remove.append(&mut canon_deletes); - canon_inserts = inserts; + try!(batch.delete(&r.drain())); + trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, journal.index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); + { + if canon_id == journal.id { + for h in &journal.insertions { + match journal_overlay.backing_overlay.raw(&h) { + Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimizie this to avoid data copy + _ => () + } + } + canon_deletions = journal.deletions; + } + overlay_deletions.append(&mut journal.insertions); } - else { - trace!("Purging nodes inserted in non-canon: {:?}", inserts); - to_remove.append(&mut inserts); + if canon_id == journal.id { } - trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::(0), canon_id, to_remove.len()); - try!(batch.delete(&last)); - index += 1; } - - let canon_inserts = canon_inserts.drain(..).collect::>(); - // Purge removed keys if they are not referenced and not re-inserted in the canon commit - let mut deletes = 0; - trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::>()); - for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) { - try!(batch.delete(&h)); - deletes += 1; + // apply canon inserts first + for (k, v) in canon_insertions { + try!(batch.put(&k, &v)); } - trace!("Total nodes purged: {}", deletes); + // clean the overlay + for k in overlay_deletions { + 
journal_overlay.backing_overlay.kill(&k); + } + // apply removes + for k in canon_deletions { + if !journal_overlay.backing_overlay.exists(&k) { + try!(batch.delete(&k)); + } + } + journal_overlay.backing_overlay.purge(); } - - // Commit overlay insertions - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) - } - - - // Decrease counters for given keys. Deletes obsolete counters - fn decrease_counters(keys: &[H256], counters: &mut HashMap) { - for i in keys.iter() { - let delete_counter = { - let cnt = counters.get_mut(i).expect("Missing key counter"); - *cnt -= 1; - *cnt == 0 - }; - if delete_counter { - counters.remove(i); - } - } + Ok(0 as u32) } fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut res = HashMap::new(); + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = VecDeque::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); loop { @@ -300,10 +289,24 @@ impl JournalDB { &r.drain() }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); - let to_add: Vec = rlp.val_at(1); - for h in to_add { - *res.entry(h).or_insert(0) += 1; + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; } + journal.push_front(JournalEntry { + id: id, + index: index, + era: era, + insertions: inserted_keys, + deletions: deletions, + }); index += 1; }; if index == 0 || era == 0 { @@ -312,8 +315,19 @@ impl JournalDB { era -= 1; } } - trace!("Recovered {} counters", res.len()); - res + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { backing_overlay: overlay, journal: journal } + } + + /// Returns heap memory size used + pub fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + if let Some(ref overlay) = self.journal_overlay.as_ref() { + let overlay = overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + } + mem } } @@ -325,7 +339,7 @@ impl HashDB for JournalDB { ret.insert(h, 1); } - for (key, refs) in self.overlay.keys().into_iter() { + for (key, refs) in self.transaction_overlay.keys().into_iter() { let refs = *ret.get(&key).unwrap_or(&0) + refs; ret.insert(key, refs); } @@ -333,15 +347,23 @@ impl HashDB for JournalDB { } fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); + let k = self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None + let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec())); + match v { + Some(x) => { + Some(&self.transaction_overlay.denote(key, x).0) + } + _ => { + if let Some(x) = self.payload(key) { + Some(&self.transaction_overlay.denote(key, x).0) + } + else { + None + } + } } } } @@ -352,13 +374,13 @@ impl HashDB for JournalDB { } fn insert(&mut self, value: &[u8]) -> H256 
{ - self.overlay.insert(value) + self.transaction_overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.transaction_overlay.emplace(key, value); } fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + self.transaction_overlay.kill(key); } } @@ -492,11 +514,13 @@ mod tests { fn reopen() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); + let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); foo }; @@ -510,8 +534,62 @@ mod tests { { let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); } } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 680a6e1d0..9cd018935 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -21,6 +21,7 @@ use bytes::*; use rlp::*; use sha3::*; use hashdb::*; +use heapsize::*; use std::mem; use std::collections::HashMap; @@ -143,6 +144,11 @@ impl MemoryDB { } self.raw(key).unwrap() } + + /// Returns the size of allocated heap memory + pub fn mem_used(&self) -> usize { + self.data.heap_size_of_children() + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; From 744c4c7d8bba32c5c5e61eb8c9b32bd52498fd30 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 7 Mar 2016 07:06:55 +0100 Subject: [PATCH 02/13] JournalDB documentation --- util/src/journaldb.rs | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 48bd94d64..1c62a9960 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,13 +27,37 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. /// -/// If `counters` is `None`, then it behaves exactly like OverlayDB. 
If not it behaves
+/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves
 /// differently:
 ///
 /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
 /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
+///
+/// There are two memory overlays:
+/// - Transaction overlay contains current transaction data. It is merged with the history
+/// overlay on each `commit()`.
+/// - History overlay contains all data inserted during the history period. When a node
+/// in the overlay becomes ancient it is written to disk on `commit()`.
+///
+/// There is also a journal, maintained both in memory and on disk, which lists the insertions
+/// and removals for each commit during the history period. It is used to track
+/// data nodes that go out of history scope and must be written to disk.
+///
+/// Commit workflow:
+/// Create a new journal record from the transaction overlay.
+/// Insert each node from the transaction overlay into the history overlay, increasing the
+/// reference count if it is already there. Note that reference counting is managed by `MemoryDB`.
+/// Clear the transaction overlay.
+/// For a canonical journal record that becomes ancient, write its insertions into the disk DB.
+/// For each journal record that goes out of the history scope (becomes ancient), remove its
+/// insertions from the history overlay, decreasing the reference counter and removing the entry
+/// if it reaches zero.
+/// For a canonical journal record that becomes ancient, delete its removals from the disk only if
+/// the removed key is not present in the history overlay.
+/// Delete the ancient record from memory and disk.
+///
 pub struct JournalDB {
 	transaction_overlay: MemoryDB,
 	backing: Arc,
@@ -220,7 +244,6 @@ impl JournalDB {
 		}
 
 		// apply old commits' details
-
 		if let Some((end_era, canon_id)) = end {
 			let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new();
 			let mut canon_deletions: Vec = Vec::new();

From a069e890ba52cf5242cf4492266b1d638a138d74 Mon Sep 17 00:00:00 2001
From: arkpar
Date: Tue, 8 Mar 2016 19:14:43 +0100
Subject: [PATCH 03/13] Replaced --archive option with --pruning

---
 parity/main.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/parity/main.rs b/parity/main.rs
index ceb58e31e..2d7bd8bab 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -77,7 +77,7 @@ Protocol Options:
                            or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
   --testnet                Equivalent to --chain testnet (geth-compatible).
   --networkid INDEX        Override the network identifier from the chain we are on.
-  --archive                Client should not prune the state/storage trie.
+  --pruning                Enable state/storage trie pruning.
   -d --datadir PATH        Specify the database & configuration directory path [default: $HOME/.parity]
   --keys-path PATH         Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
   --identity NAME          Specify your node's name.
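With this change state pruning is opt-in rather than opt-out. An illustrative invocation enabling it (assuming the standard `parity` binary and the defaults listed above) would be:

    parity --pruning -d $HOME/.parity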
@@ -135,7 +135,7 @@ struct Args { flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_archive: bool, + flag_pruning: bool, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, @@ -362,7 +362,7 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; } } - client_config.prefer_journal = !self.args.flag_archive; + client_config.prefer_journal = self.args.flag_pruning; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); From c302fa9a4ebae4710ad98933539fbb4b6998b6f4 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 9 Mar 2016 18:37:44 +0100 Subject: [PATCH 04/13] Style --- util/src/journaldb.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 3309612c2..1d854924a 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -243,9 +243,10 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - match journal_overlay.backing_overlay.raw(&h) { - Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimizie this to avoid data copy - _ => () + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy + } } } canon_deletions = journal.deletions; @@ -352,7 +353,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -573,7 +574,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From 06a3abd01e0cd6f2435510c1133c34e87c48beff Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 21:15:43 +0100 Subject: [PATCH 05/13] Removed unused return type --- util/src/journaldb.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 97ae077b2..4f2cdeb31 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -153,7 +153,7 @@ impl JournalDB { } /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { let have_journal_overlay = self.journal_overlay.is_some(); if have_journal_overlay { self.commit_with_overlay(now, id, end) @@ -183,16 +183,16 @@ impl JournalDB { } /// Just commit the transaction overlay into the backing DB. - fn commit_without_overlay(&mut self) -> Result { + fn commit_without_overlay(&mut self) -> Result<(), UtilError> { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); + Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) + Ok(()) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. 
- fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); @@ -274,7 +274,7 @@ impl JournalDB { journal_overlay.journal.remove(&end_era); } try!(self.backing.write(batch)); - Ok(0 as u32) + Ok(()) } fn payload(&self, key: &H256) -> Option { From 3a4a7ac822289181229c16af58690e63e3b98b62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:35:26 +0100 Subject: [PATCH 06/13] Bumping clippy version --- Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- util/bigint/Cargo.toml | 1 - 6 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 22d0f9288..70e4cbc34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index fbfe175d7..b5c4a7636 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -21,7 +21,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2ce430e51..918160be9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,7 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 46baa8a83..8412d3700 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -14,7 +14,7 @@ rustc_version = "0.1" [dependencies] ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } log = "0.3" env_logger = "0.3" time = "0.1.34" diff --git a/util/Cargo.toml b/util/Cargo.toml index 0ce27ec2b..93b475538 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 377391eeb..1bd2b994e 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -15,7 +15,6 @@ rustc-serialize = "0.3" arrayvec = "0.3" rand = "0.3.12" serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } heapsize = "0.3" [features] From 8709dd28f884742756ae4bbba9bd628708f271bd Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:57:58 +0100 Subject: [PATCH 07/13] Fixing clippy warnings --- Cargo.lock | 30 ++++++++++++------------------ ethcore/src/extras.rs | 14 ++++++++++---- ethcore/src/lib.rs | 2 ++ ethcore/src/substate.rs | 14 ++++++++++---- sync/src/chain.rs | 2 +- sync/src/lib.rs | 4 +++- sync/src/transaction_queue.rs | 8 ++++++++ util/src/keys/store.rs | 10 ++++++++-- util/src/kvdb.rs | 7 +++++++ util/src/lib.rs | 2 ++ util/src/memorydb.rs | 11 +++++++++-- util/src/network/discovery.rs | 9 ++++++++- util/src/network/host.rs | 9 ++++++++- util/src/panics.rs | 11 +++++++++-- util/src/rlp/rlpstream.rs | 17 +++++++++++++++-- util/src/table.rs | 13 ++++++++++++- util/src/trie/journal.rs | 7 +++++++ 17 files changed, 131 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f583a8747..e33e0288c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +94,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.44" +version = "0.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +207,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -234,7 +234,7 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -258,7 +258,7 @@ dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -273,7 +273,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -290,7 +290,7 @@ dependencies = [ name = 
"ethsync" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -469,11 +469,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.2.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -597,11 +596,6 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "pkg-config" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "primal" version = "0.2.3" @@ -697,11 +691,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.4.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] diff --git a/ethcore/src/extras.rs b/ethcore/src/extras.rs index f4759b040..a7c82c37c 100644 --- a/ethcore/src/extras.rs +++ b/ethcore/src/extras.rs @@ -35,13 +35,13 @@ pub enum ExtrasIndex { BlocksBlooms = 4, /// Block receipts index BlockReceipts = 5, -} +} /// trait used to write Extras data to db pub trait ExtrasWritable { /// Write extra data to db fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable; } @@ -60,9 +60,9 @@ pub trait ExtrasReadable { impl ExtrasWritable for DBTransaction { fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable { - + self.put(&hash.to_extras_slice(T::extras_index()), &encode(value)).unwrap() } } @@ -215,6 +215,12 @@ pub struct BlocksBlooms { pub blooms: [H2048; 16], } +impl Default for BlocksBlooms { + fn default() -> Self { + BlocksBlooms::new() + } +} + impl BlocksBlooms { pub fn new() -> Self { BlocksBlooms { blooms: unsafe { ::std::mem::zeroed() }} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..2209df7dc 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -25,6 +25,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore library //! diff --git a/ethcore/src/substate.rs b/ethcore/src/substate.rs index 374397ca7..57e35ad2e 100644 --- a/ethcore/src/substate.rs +++ b/ethcore/src/substate.rs @@ -31,6 +31,12 @@ pub struct Substate { pub contracts_created: Vec
} +impl Default for Substate { + fn default() -> Self { + Substate::new() + } +} + impl Substate { /// Creates new substate. pub fn new() -> Self { @@ -67,8 +73,8 @@ mod tests { let mut sub_state = Substate::new(); sub_state.contracts_created.push(address_from_u64(1u64)); sub_state.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state.sstore_clears_count = x!(5); @@ -77,8 +83,8 @@ mod tests { let mut sub_state_2 = Substate::new(); sub_state_2.contracts_created.push(address_from_u64(2u64)); sub_state_2.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state_2.sstore_clears_count = x!(7); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index da3908a1e..a22f23eb8 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1307,7 +1307,7 @@ impl ChainSync { where T: Fn(&Address) -> U256 { let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce); + let _ = queue.add(transaction, fetch_nonce); } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..6cb98521e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -17,9 +17,10 @@ #![warn(missing_docs)] #![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] - // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: @@ -172,6 +173,7 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } + #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 243939a4c..45dc0e299 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -79,6 +79,7 @@ //! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. //! 
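//!
//! A rough call-site sketch (names here are assumed for illustration; the real call site is in
//! sync/src/chain.rs, shown earlier in this patch): `add` takes the signed transaction plus a
//! closure returning the current chain nonce for a sender, and the call site discards the
//! returned value with `let _ =`.
//!
//! ```ignore
//! let mut queue = TransactionQueue::new();
//! let _ = queue.add(signed_transaction, |sender: &Address| chain_nonce(sender));
//! ```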
+use std::default::Default; use std::cmp::{Ordering}; use std::collections::{HashMap, BTreeSet}; use util::numbers::{Uint, U256}; @@ -102,6 +103,7 @@ struct TransactionOrder { hash: H256, } + impl TransactionOrder { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { TransactionOrder { @@ -253,6 +255,12 @@ pub struct TransactionQueue { last_nonces: HashMap, } +impl Default for TransactionQueue { + fn default() -> Self { + TransactionQueue::new() + } +} + impl TransactionQueue { /// Creates new instance of this Queue pub fn new() -> Self { diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index dcc165259..7e12703ea 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -78,9 +78,15 @@ struct AccountUnlock { expires: DateTime, } +impl Default for SecretStore { + fn default() -> Self { + SecretStore::new() + } +} + impl SecretStore { /// new instance of Secret Store in default home directory - pub fn new() -> SecretStore { + pub fn new() -> Self { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); @@ -89,7 +95,7 @@ impl SecretStore { } /// new instance of Secret Store in specific directory - pub fn new_in(path: &Path) -> SecretStore { + pub fn new_in(path: &Path) -> Self { SecretStore { directory: KeyDirectory::new(path), unlocks: RwLock::new(HashMap::new()), diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index a2fa2215a..df5c2c448 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,6 +16,7 @@ //! Key-Value store abstraction with RocksDB backend. +use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; @@ -24,6 +25,12 @@ pub struct DBTransaction { batch: WriteBatch, } +impl Default for DBTransaction { + fn default() -> Self { + DBTransaction::new() + } +} + impl DBTransaction { /// Create new transaction. pub fn new() -> DBTransaction { diff --git a/util/src/lib.rs b/util/src/lib.rs index 59d66a325..bdb4be6e4 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -27,6 +27,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore-util library //! diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..66fa32055 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -24,6 +24,7 @@ use hashdb::*; use heapsize::*; use std::mem; use std::collections::HashMap; +use std::default::Default; #[derive(Debug,Clone)] /// Reference-counted memory-based HashDB implementation. @@ -32,7 +33,7 @@ use std::collections::HashMap; /// with `kill()`, check for existance with `exists()` and lookup a hash to derive /// the data with `lookup()`. Clear with `clear()` and purge the portions of the data /// that have no references with `purge()`. -/// +/// /// # Example /// ```rust /// extern crate ethcore_util; @@ -74,6 +75,12 @@ pub struct MemoryDB { static_null_rlp: (Bytes, i32), } +impl Default for MemoryDB { + fn default() -> Self { + MemoryDB::new() + } +} + impl MemoryDB { /// Create a new instance of the memory DB. pub fn new() -> MemoryDB { @@ -133,7 +140,7 @@ impl MemoryDB { /// Denote than an existing value has the given key. 
Used when a key gets removed without /// a prior insert and thus has a negative reference with no value. - /// + /// /// May safely be called even if the key's value is known, in which case it will be a no-op. pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { if self.raw(key) == None { diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 644af22af..2e7c51cb0 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -19,6 +19,7 @@ use std::net::SocketAddr; use std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; use std::cmp; +use std::default::Default; use mio::*; use mio::udp::*; use sha3::*; @@ -62,8 +63,14 @@ struct NodeBucket { nodes: VecDeque, //sorted by last active } +impl Default for NodeBucket { + fn default() -> Self { + NodeBucket::new() + } +} + impl NodeBucket { - fn new() -> NodeBucket { + fn new() -> Self { NodeBucket { nodes: VecDeque::new() } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 2d1af55ba..3db94131a 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -23,6 +23,7 @@ use std::ops::*; use std::cmp::min; use std::path::{Path, PathBuf}; use std::io::{Read, Write}; +use std::default::Default; use std::fs; use mio::*; use mio::tcp::*; @@ -75,9 +76,15 @@ pub struct NetworkConfiguration { pub ideal_peers: u32, } +impl Default for NetworkConfiguration { + fn default() -> Self { + NetworkConfiguration::new() + } +} + impl NetworkConfiguration { /// Create a new instance of default settings. - pub fn new() -> NetworkConfiguration { + pub fn new() -> Self { NetworkConfiguration { config_path: None, listen_address: None, diff --git a/util/src/panics.rs b/util/src/panics.rs index 70ce0bc33..ab25eae57 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -19,6 +19,7 @@ use std::thread; use std::ops::DerefMut; use std::sync::{Arc, Mutex}; +use std::default::Default; /// Thread-safe closure for handling possible panics pub trait OnPanicListener: Send + Sync + 'static { @@ -56,14 +57,20 @@ pub struct PanicHandler { listeners: Mutex>> } +impl Default for PanicHandler { + fn default() -> Self { + PanicHandler::new() + } +} + impl PanicHandler { /// Creates new `PanicHandler` wrapped in `Arc` - pub fn new_in_arc() -> Arc { + pub fn new_in_arc() -> Arc { Arc::new(Self::new()) } /// Creates new `PanicHandler` - pub fn new() -> PanicHandler { + pub fn new() -> Self { PanicHandler { listeners: Mutex::new(vec![]) } diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index ba70e7b2b..7bf3d3cdd 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
use std::ops::Deref; +use std::default::Default; use elastic_array::*; use rlp::bytes::{ToBytes, VecLike}; use rlp::{Stream, Encoder, Encodable}; @@ -44,6 +45,12 @@ pub struct RlpStream { finished_list: bool, } +impl Default for RlpStream { + fn default() -> Self { + RlpStream::new() + } +} + impl Stream for RlpStream { fn new() -> Self { RlpStream { @@ -190,8 +197,14 @@ struct BasicEncoder { bytes: ElasticArray1024, } +impl Default for BasicEncoder { + fn default() -> Self { + BasicEncoder::new() + } +} + impl BasicEncoder { - fn new() -> BasicEncoder { + fn new() -> Self { BasicEncoder { bytes: ElasticArray1024::new() } } @@ -222,7 +235,7 @@ impl Encoder for BasicEncoder { // just 0 0 => self.bytes.push(0x80u8), // byte is its own encoding if < 0x80 - 1 => { + 1 => { value.to_bytes(&mut self.bytes); let len = self.bytes.len(); let last_byte = self.bytes[len - 1]; diff --git a/util/src/table.rs b/util/src/table.rs index e41209608..5ba572289 100644 --- a/util/src/table.rs +++ b/util/src/table.rs @@ -16,6 +16,7 @@ //! A collection associating pair of keys (row and column) with a single value. +use std::default::Default; use std::hash::Hash; use std::collections::HashMap; @@ -30,11 +31,21 @@ pub struct Table map: HashMap>, } +impl Default for Table + where Row: Eq + Hash + Clone, + Col: Eq + Hash { + fn default() -> Self { + Table::new() + } +} + +// There is default but clippy does not detect it? +#[allow(new_without_default)] impl Table where Row: Eq + Hash + Clone, Col: Eq + Hash { /// Creates new Table - pub fn new() -> Table { + pub fn new() -> Self { Table { map: HashMap::new(), } diff --git a/util/src/trie/journal.rs b/util/src/trie/journal.rs index db16a313d..4ffd7cf5c 100644 --- a/util/src/trie/journal.rs +++ b/util/src/trie/journal.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::default::Default; use sha3::*; use hash::H256; use bytes::*; @@ -39,6 +40,12 @@ pub struct Score { #[derive(Debug)] pub struct Journal (Vec); +impl Default for Journal { + fn default() -> Self { + Journal::new() + } +} + impl Journal { /// Create a new, empty, object. 
pub fn new() -> Journal { Journal(vec![]) } From 8f54c24e47032a52bdbc9bd48cb2f16c7c9aa888 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 11:52:11 +0100 Subject: [PATCH 08/13] Merged changes from jdb_option1, keep LATEST_ERA from decreasing --- ethcore/src/account_db.rs | 9 +- util/src/journaldb.rs | 347 +++++++++++++++++++++++++++++++++++--- util/src/memorydb.rs | 1 + 3 files changed, 323 insertions(+), 34 deletions(-) diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index e7f1b2bad..026e813f5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -13,17 +13,14 @@ pub struct AccountDB<'db> { #[inline] fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { - let mut addr_hash = address.sha3(); - // preserve 96 bits of original key for db lookup - addr_hash[0..12].clone_from_slice(&[0u8; 12]); - &addr_hash ^ key + address ^ key } impl<'db> AccountDB<'db> { pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { AccountDB { db: db, - address: x!(address.clone()), + address: x!(address), } } } @@ -70,7 +67,7 @@ impl<'db> AccountDBMut<'db> { pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { AccountDBMut { db: db, - address: x!(address.clone()), + address: x!(address), } } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 4f2cdeb31..8bff08a77 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -64,11 +64,14 @@ pub struct JournalDB { journal_overlay: Option>>, } +#[derive(PartialEq)] struct JournalOverlay { backing_overlay: MemoryDB, - journal: HashMap> + journal: HashMap>, + latest_era: u64, } +#[derive(PartialEq)] struct JournalEntry { id: H256, insertions: Vec, @@ -220,7 +223,10 @@ impl JournalDB { k.append(&index); k.append(&&PADDING[..]); try!(batch.put(&k.drain(), r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + if now >= journal_overlay.latest_era { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.latest_era = now; + } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); } @@ -243,7 +249,7 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { if rc > 0 { canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy } @@ -253,17 +259,17 @@ impl JournalDB { } overlay_deletions.append(&mut journal.insertions); } - index +=1; + index += 1; } // apply canon inserts first for (k, v) in canon_insertions { try!(batch.put(&k, &v)); } - // clean the overlay + // update the overlay for k in overlay_deletions { journal_overlay.backing_overlay.kill(&k); } - // apply removes + // apply canon deletions for k in canon_deletions { if !journal_overlay.backing_overlay.exists(&k) { try!(batch.delete(&k)); @@ -277,6 +283,13 @@ impl JournalDB { Ok(()) } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.as_ref().unwrap().read().unwrap(); + *journal_overlay == reconstructed + } + fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) } @@ -285,8 +298,10 @@ impl JournalDB { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; + let mut latest_era = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + latest_era = decode::(&val); + let mut era = latest_era; loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -296,7 +311,7 @@ impl JournalDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); + trace!("read_overlay: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let id: H256 = rlp.val_at(0); let insertions = rlp.at(1); @@ -323,7 +338,7 @@ impl JournalDB { } } trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); - JournalOverlay { backing_overlay: overlay, journal: journal } + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } } /// Returns heap memory size used @@ -396,6 +411,7 @@ mod tests { use common::*; use super::*; use hashdb::*; + use log::init_log; #[test] fn insert_same_in_fork() { @@ -404,17 +420,25 @@ mod tests { let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&x)); } @@ -425,15 +449,20 @@ mod tests { let mut jdb = JournalDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.remove(&h); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&h)); } @@ -445,6 +474,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); @@ -452,6 +482,7 @@ mod tests { jdb.remove(&bar); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); @@ -459,17 +490,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); 
assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); @@ -483,21 +517,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); @@ -510,35 +548,113 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.exists(&foo)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = JournalDB::new_temp(); + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = 
::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } #[test] fn reopen() { @@ -552,6 +668,7 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); foo }; @@ -559,6 +676,7 @@ mod tests { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); } { @@ -566,41 +684,210 @@ mod tests { assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } #[test] - fn reopen_remove() { + fn insert_delete_insert_delete_insert_expunge() { + init_log(); let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // foo is ancient history. 
+ // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } - jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -611,18 +898,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..dd8de7fd8 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -69,6 +69,7 @@ use std::collections::HashMap; /// assert!(!m.exists(&k)); /// } /// ``` +#[derive(PartialEq)] pub struct MemoryDB { data: HashMap, static_null_rlp: (Bytes, i32), From 90e20cbcad54787eaf239bef016756a884b03f62 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 20:08:01 +0100 Subject: [PATCH 09/13] additional (failing) sstore test --- util/src/keys/store.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index ea97cc80e..cbef7b5f9 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -381,6 +381,7 @@ mod tests { use super::*; use devtools::*; use common::*; + use crypto::KeyPair; #[test] fn can_insert() { @@ -555,4 +556,15 @@ mod tests { let accounts = sstore.accounts().unwrap(); assert_eq!(30, accounts.len()); } + + #[test] + fn validate_generated_addresses() { + let temp = RandomTempPath::create_dir(); + let mut sstore = SecretStore::new_test(&temp); + let addr = sstore.new_account("test").unwrap(); + let _ok = sstore.unlock_account(&addr, "test").unwrap(); + let secret = sstore.account_secret(&addr).unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(Address::from(kp.public().sha3()), addr); + } } From b1327a045fa39e5e9552797a5f137de383206517 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 22:47:12 +0400 Subject: [PATCH 10/13] fixed new account generation 
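
The old new_account() stored a random H256 secret under the key file but
returned Address::random(), so the address it handed out was unrelated to
the stored key and the new validate_generated_addresses test fails. The
address has to be derived from the generated key pair instead. A minimal
sketch of the intended invariant, reusing only calls that already appear in
this patch series (variable names here are illustrative):

    // An account address is the sha3 (Keccak-256) hash of the public key,
    // truncated to 20 bytes by the Address::from(H256) conversion.
    let key_pair = crypto::KeyPair::create().unwrap();
    let address = Address::from(key_pair.public().sha3());
    // For any stored secret, reconstructing the pair must give back the same address:
    // Address::from(KeyPair::from_secret(secret).unwrap().public().sha3()) == address
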
--- util/src/keys/store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index cbef7b5f9..cd5fa8427 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -214,12 +214,12 @@ impl SecretStore { /// Creates new account pub fn new_account(&mut self, pass: &str) -> Result { - let secret = H256::random(); + let key_pair = crypto::KeyPair::create().expect("Error creating key-pair. Something wrong with crypto libraries?"); + let address = Address::from(key_pair.public().sha3()); let key_id = H128::random(); - self.insert(key_id.clone(), secret, pass); + self.insert(key_id.clone(), key_pair.secret().clone(), pass); let mut key_file = self.directory.get(&key_id).expect("the key was just inserted"); - let address = Address::random(); key_file.account = Some(address); try!(self.directory.save(key_file)); Ok(address) From 82a881005740f2e1ddc2f862b2f617b3e5773597 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:19:42 +0100 Subject: [PATCH 11/13] Rename into something that is a little more descriptive. --- .../{optiononedb.rs => earlymergedb.rs} | 48 +++++++------- util/src/journaldb/mod.rs | 8 +-- .../{overlay.rs => overlayrecentdb.rs} | 66 +++++++++---------- 3 files changed, 61 insertions(+), 61 deletions(-) rename util/src/journaldb/{optiononedb.rs => earlymergedb.rs} (94%) rename util/src/journaldb/{overlay.rs => overlayrecentdb.rs} (93%) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/earlymergedb.rs similarity index 94% rename from util/src/journaldb/optiononedb.rs rename to util/src/journaldb/earlymergedb.rs index dfa7c8ec1..48083f113 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -32,7 +32,7 @@ use std::env; /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. -pub struct OptionOneDB { +pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, counters: Option>>>, @@ -44,9 +44,9 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl OptionOneDB { +impl EarlyMergeDB { /// Create a new instance from file - pub fn new(path: &str) -> OptionOneDB { + pub fn new(path: &str) -> EarlyMergeDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -62,8 +62,8 @@ impl OptionOneDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); - OptionOneDB { + let counters = Some(Arc::new(RwLock::new(EarlyMergeDB::read_counters(&backing)))); + EarlyMergeDB { overlay: MemoryDB::new(), backing: Arc::new(backing), counters: counters, @@ -72,7 +72,7 @@ impl OptionOneDB { /// Create a new instance with an anonymous temporary database. 
#[cfg(test)] - fn new_temp() -> OptionOneDB { + fn new_temp() -> EarlyMergeDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -193,7 +193,7 @@ impl OptionOneDB { } } -impl HashDB for OptionOneDB { +impl HashDB for EarlyMergeDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -238,9 +238,9 @@ impl HashDB for OptionOneDB { } } -impl JournalDB for OptionOneDB { +impl JournalDB for EarlyMergeDB { fn spawn(&self) -> Box { - Box::new(OptionOneDB { + Box::new(EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), counters: self.counters.clone(), @@ -369,11 +369,11 @@ impl JournalDB for OptionOneDB { try!(batch.delete(&last)); index += 1; } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + trace!("EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); +// trace!("EarlyMergeDB::commit() deleted {} nodes", deletes); Ok(0) } } @@ -388,7 +388,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -410,7 +410,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.exists(&h)); @@ -428,7 +428,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -466,7 +466,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -494,7 +494,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -513,7 +513,7 @@ mod tests { #[test] fn fork_same_key() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); jdb.commit(0, &b"0".sha3(), None).unwrap(); let foo = jdb.insert(b"foo"); @@ -535,7 +535,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -544,13 +544,13 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -564,7 +564,7 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ 
-578,7 +578,7 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.exists(&foo)); @@ -593,7 +593,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -608,7 +608,7 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index cf8e7d392..724e61dfb 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -21,8 +21,8 @@ use common::*; /// Export the journaldb module. pub mod traits; mod archivedb; -mod optiononedb; -mod overlay; +mod earlymergedb; +mod overlayrecentdb; /// Export the JournalDB trait. pub use self::traits::JournalDB; @@ -73,8 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)), - Algorithm::OverlayRecent => Box::new(overlay::JournalOverlayDB::new(path)), + Algorithm::EarlyMerge => Box::new(optiononedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(optiononedb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } diff --git a/util/src/journaldb/overlay.rs b/util/src/journaldb/overlayrecentdb.rs similarity index 93% rename from util/src/journaldb/overlay.rs rename to util/src/journaldb/overlayrecentdb.rs index e91709041..8dd4d1752 100644 --- a/util/src/journaldb/overlay.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -56,7 +56,7 @@ use super::JournalDB; /// the removed key is not present in the history overlay. /// 7. Delete ancient record from memory and disk. 
/// -pub struct JournalOverlayDB { +pub struct OverlayRecentDB { transaction_overlay: MemoryDB, backing: Arc, journal_overlay: Arc>, @@ -82,9 +82,9 @@ impl HeapSizeOf for JournalEntry { } } -impl Clone for JournalOverlayDB { - fn clone(&self) -> JournalOverlayDB { - JournalOverlayDB { +impl Clone for OverlayRecentDB { + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), journal_overlay: self.journal_overlay.clone(), @@ -98,14 +98,14 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 0x200 + 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl JournalOverlayDB { +impl OverlayRecentDB { /// Create a new instance from file - pub fn new(path: &str) -> JournalOverlayDB { + pub fn new(path: &str) -> OverlayRecentDB { Self::from_prefs(path) } /// Create a new instance from file - pub fn from_prefs(path: &str) -> JournalOverlayDB { + pub fn from_prefs(path: &str) -> OverlayRecentDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -121,8 +121,8 @@ impl JournalOverlayDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let journal_overlay = Arc::new(RwLock::new(JournalOverlayDB::read_overlay(&backing))); - JournalOverlayDB { + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), journal_overlay: journal_overlay, @@ -131,7 +131,7 @@ impl JournalOverlayDB { /// Create a new instance with an anonymous temporary database. #[cfg(test)] - pub fn new_temp() -> JournalOverlayDB { + pub fn new_temp() -> OverlayRecentDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -196,7 +196,7 @@ impl JournalOverlayDB { } } -impl JournalDB for JournalOverlayDB { +impl JournalDB for OverlayRecentDB { fn spawn(&self) -> Box { Box::new(self.clone()) } @@ -303,7 +303,7 @@ impl JournalDB for JournalOverlayDB { } -impl HashDB for JournalOverlayDB { +impl HashDB for OverlayRecentDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -367,7 +367,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -397,7 +397,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -420,7 +420,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -463,7 +463,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -495,7 +495,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -520,7 +520,7 @@ mod tests { let mut dir = 
::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -548,7 +548,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -576,7 +576,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -614,7 +614,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -624,14 +624,14 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -646,7 +646,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -675,7 +675,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -724,7 +724,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -755,7 +755,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -795,7 +795,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -816,7 +816,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -824,14 +824,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = 
JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -844,7 +844,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -862,7 +862,7 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); From b03679e1a6e8f29dee36b0bfe3a4c701a7f154e4 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:22:02 +0100 Subject: [PATCH 12/13] Fix typos. --- util/src/journaldb/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 724e61dfb..cf5278368 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -73,8 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::EarlyMergeDB::new(path)), - Algorithm::OverlayRecent => Box::new(optiononedb::OverlayRecentDB::new(path)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } From 98bae098bea3b5a6db3fd891eba4e54ce8e3645e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 12:10:55 +0100 Subject: [PATCH 13/13] Update cargo lock. 
--- Cargo.lock | 88 +++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f26ac923..8e3a4b168 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -43,14 +43,14 @@ name = "aster" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bigint" version = "0.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -117,7 +117,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -136,7 +136,7 @@ version = "1.1.1" source = "git+https://github.com/tomusdrw/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -145,7 +145,7 @@ name = "daemonize" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -161,7 +161,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -177,7 +177,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -185,7 +185,7 @@ name = "eth-secp256k1" version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#283a0677d8327536be58a87e0494d7e0e7b1d1d8" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -253,9 +253,9 @@ dependencies = [ name = "ethcore-util" version = "0.9.99" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -264,10 +264,10 @@ dependencies = [ "ethcore-devtools 0.9.99", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "json-tests 0.1.0", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -304,7 +304,7 @@ dependencies = [ name = "fdlimit" version = "0.1.0" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -314,7 +314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "glob" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -328,7 +328,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -381,7 +381,7 @@ dependencies = [ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -391,21 +391,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "json-tests" version = "0.1.0" dependencies = [ - "glob 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -461,7 +461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -469,7 +469,7 @@ name = "librocksdb-sys" version = "0.2.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -477,7 +477,7 @@ name = "log" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -490,7 +490,7 @@ name = "memchr" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -536,7 +536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -560,7 +560,7 @@ dependencies = [ [[package]] name = "nom" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -577,7 +577,7 @@ name = "num_cpus" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -639,7 +639,7 @@ name = "quasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -649,7 +649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aster 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -657,7 +657,7 @@ name = "rand" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -672,7 +672,7 @@ dependencies = [ [[package]] name = "regex" -version = "0.1.54" +version = "0.1.55" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -691,7 +691,7 @@ name = "rocksdb" version = "0.4.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] @@ -701,7 +701,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -741,7 +741,7 @@ name = "semver" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -766,7 +766,7 @@ dependencies = [ "quasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -809,16 +809,16 @@ name = "syntex" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntex_syntax" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -844,7 +844,7 @@ name = "termios" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -853,7 +853,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "url" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -975,7 +975,7 @@ name = "xml-rs" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]]