From d77d9ad9d84105f2c0b4cd53b90f835e3087f669 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 6 Mar 2016 17:28:50 +0100 Subject: [PATCH 01/20] JournalDB with history overlay --- ethcore/src/client.rs | 8 +- parity/main.rs | 3 +- util/src/journaldb.rs | 326 ++++++++++++++++++++++++++---------------- util/src/memorydb.rs | 6 + 4 files changed, 216 insertions(+), 127 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..2b5ec5ccb 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -190,6 +190,8 @@ pub struct ClientReport { pub transactions_applied: usize, /// How much gas has been processed so far. pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, } impl ClientReport { @@ -222,7 +224,7 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1000; -const CLIENT_DB_VER_STR: &'static str = "4.0"; +const CLIENT_DB_VER_STR: &'static str = "5.0"; impl Client { /// Create a new client with given spec and DB path. @@ -432,7 +434,9 @@ impl Client where V: Verifier { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.read().unwrap().clone() + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report } /// Tick the client. 
diff --git a/parity/main.rs b/parity/main.rs index 3f4243a0a..605fb315d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -395,7 +395,7 @@ impl Informant { let sync_info = sync.status(); if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]", + println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, @@ -408,6 +408,7 @@ impl Informant { queue_info.unverified_queue_size, queue_info.verified_queue_size, + Informant::format_bytes(report.state_db_mem), Informant::format_bytes(cache_info.total()), Informant::format_bytes(queue_info.mem_used), Informant::format_bytes(sync_info.mem_used), diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 01e53f819..48bd94d64 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -35,17 +35,36 @@ use std::env; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. 
pub struct JournalDB { - overlay: MemoryDB, + transaction_overlay: MemoryDB, backing: Arc, - counters: Option>>>, + journal_overlay: Option>>, +} + +struct JournalOverlay { + backing_overlay: MemoryDB, + journal: VecDeque +} + +struct JournalEntry { + id: H256, + index: usize, + era: u64, + insertions: Vec, + deletions: Vec, +} + +impl HeapSizeOf for JournalEntry { + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } } impl Clone for JournalDB { fn clone(&self) -> JournalDB { JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), - counters: self.counters.clone(), + journal_overlay: self.journal_overlay.clone(), } } } @@ -60,7 +79,6 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl JournalDB { - /// Create a new instance from file pub fn new(path: &str) -> JournalDB { Self::from_prefs(path, true) @@ -86,15 +104,16 @@ impl JournalDB { with_journal = prefer_journal; } - let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) + + let journal_overlay = if with_journal { + Some(Arc::new(RwLock::new(JournalDB::read_overlay(&backing)))) } else { None }; JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), - counters: counters, + journal_overlay: journal_overlay, } } @@ -113,71 +132,48 @@ impl JournalDB { /// Commit all recent insert operations. pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) + let have_journal_overlay = self.journal_overlay.is_some(); + if have_journal_overlay { + self.commit_with_overlay(now, id, end) } else { - self.commit_without_counters() + self.commit_without_overlay() } } /// Drain the overlay and place it into a batch for the DB. 
fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; + let mut insertions = 0usize; + let mut deletions = 0usize; for i in overlay.drain().into_iter() { let (key, (value, rc)) = i; if rc > 0 { assert!(rc == 1); batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - inserts += 1; + insertions += 1; } if rc < 0 { assert!(rc == -1); - deletes += 1; + deletions += 1; } } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes + trace!("commit: Inserted {}, Deleted {} nodes", insertions, deletions); + insertions + deletions } - /// Just commit the overlay into the backing DB. - fn commit_without_counters(&mut self) -> Result { + /// Just commit the transaction overlay into the backing DB. + fn commit_without_overlay(&mut self) -> Result { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); + let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); Ok(ret as u32) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: - // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, n] => [ ... ] - - // TODO: store reclaim_period. - - // when we make a new commit, we journal the inserts and removes. - // for each end_era that we journaled that we are no passing by, - // we remove all of its removes assuming it is canonical and all - // of its inserts otherwise. 
- // - // We also keep reference counters for each key inserted in the journal to handle - // the following cases where key K must not be deleted from the DB when processing removals : - // Given H is the journal size in eras, 0 <= C <= H. - // Key K is removed in era A(N) and re-inserted in canonical era B(N + C). - // Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C). - // Key K is added in non-canonical era A'(N) canonical B(N + C). - // - // The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions - // is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased - // and the key is safe to delete. - + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); let batch = DBTransaction::new(); { let mut index = 0usize; @@ -204,90 +200,83 @@ impl JournalDB { } let mut r = RlpStream::new_list(3); - let inserts: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect(); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); + let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); // Increase counter for each inserted key no matter if the block is canonical or not. 
- for i in &inserts { - *counters.entry(i.clone()).or_insert(0) += 1; - } - let removes: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect(); + let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); r.append(id); - r.append(&inserts); - r.append(&removes); + r.begin_list(inserted_keys.len()); + for (k, v) in insertions { + r.begin_list(2); + r.append(&k); + r.append(&v); + journal_overlay.backing_overlay.emplace(k, v); + } + r.append(&removed_keys); try!(batch.put(&last, r.as_raw())); try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.journal.push_back(JournalEntry { id: id.clone(), index: index, era: now, insertions: inserted_keys, deletions: removed_keys }); } // apply old commits' details + if let Some((end_era, canon_id)) = end { - let mut index = 0usize; - let mut last; - let mut to_remove: Vec = Vec::new(); - let mut canon_inserts: Vec = Vec::new(); - while let Some(rlp_data) = try!(self.backing.get({ + let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); + let mut canon_deletions: Vec = Vec::new(); + let mut overlay_deletions: Vec = Vec::new(); + while journal_overlay.journal.front().map_or(false, |e| e.era <= end_era) { + let mut journal = journal_overlay.journal.pop_front().unwrap(); + //delete the record from the db let mut r = RlpStream::new_list(3); - r.append(&end_era); - r.append(&index); + r.append(&journal.era); + r.append(&journal.index); r.append(&&PADDING[..]); - last = r.drain(); - &last - })) { - let rlp = Rlp::new(&rlp_data); - let mut inserts: Vec = rlp.val_at(1); - JournalDB::decrease_counters(&inserts, &mut counters); - // Collect keys to be removed. 
These are removed keys for canonical block, inserted for non-canonical - if canon_id == rlp.val_at(0) { - let mut canon_deletes: Vec = rlp.val_at(2); - trace!("Purging nodes deleted from canon: {:?}", canon_deletes); - to_remove.append(&mut canon_deletes); - canon_inserts = inserts; + try!(batch.delete(&r.drain())); + trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, journal.index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len()); + { + if canon_id == journal.id { + for h in &journal.insertions { + match journal_overlay.backing_overlay.raw(&h) { + Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimizie this to avoid data copy + _ => () + } + } + canon_deletions = journal.deletions; + } + overlay_deletions.append(&mut journal.insertions); } - else { - trace!("Purging nodes inserted in non-canon: {:?}", inserts); - to_remove.append(&mut inserts); + if canon_id == journal.id { } - trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::(0), canon_id, to_remove.len()); - try!(batch.delete(&last)); - index += 1; } - - let canon_inserts = canon_inserts.drain(..).collect::>(); - // Purge removed keys if they are not referenced and not re-inserted in the canon commit - let mut deletes = 0; - trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::>()); - for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) { - try!(batch.delete(&h)); - deletes += 1; + // apply canon inserts first + for (k, v) in canon_insertions { + try!(batch.put(&k, &v)); } - trace!("Total nodes purged: {}", deletes); + // clean the overlay + for k in overlay_deletions { + journal_overlay.backing_overlay.kill(&k); + } + // apply removes + for k in canon_deletions { + if !journal_overlay.backing_overlay.exists(&k) { + 
try!(batch.delete(&k)); + } + } + journal_overlay.backing_overlay.purge(); } - - // Commit overlay insertions - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) - } - - - // Decrease counters for given keys. Deletes obsolete counters - fn decrease_counters(keys: &[H256], counters: &mut HashMap) { - for i in keys.iter() { - let delete_counter = { - let cnt = counters.get_mut(i).expect("Missing key counter"); - *cnt -= 1; - *cnt == 0 - }; - if delete_counter { - counters.remove(i); - } - } + Ok(0 as u32) } fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut res = HashMap::new(); + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = VecDeque::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); loop { @@ -300,10 +289,24 @@ impl JournalDB { &r.drain() }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); - let to_add: Vec = rlp.val_at(1); - for h in to_add { - *res.entry(h).or_insert(0) += 1; + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; } + journal.push_front(JournalEntry { + id: id, + index: index, + era: era, + insertions: inserted_keys, + deletions: deletions, + }); index += 1; }; if index == 0 || era == 0 { @@ -312,8 +315,19 @@ impl JournalDB { era -= 1; } } - trace!("Recovered {} counters", res.len()); - res + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { 
backing_overlay: overlay, journal: journal } + } + + /// Returns heap memory size used + pub fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + if let Some(ref overlay) = self.journal_overlay.as_ref() { + let overlay = overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + } + mem } } @@ -325,7 +339,7 @@ impl HashDB for JournalDB { ret.insert(h, 1); } - for (key, refs) in self.overlay.keys().into_iter() { + for (key, refs) in self.transaction_overlay.keys().into_iter() { let refs = *ret.get(&key).unwrap_or(&0) + refs; ret.insert(key, refs); } @@ -333,15 +347,23 @@ impl HashDB for JournalDB { } fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); + let k = self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None + let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec())); + match v { + Some(x) => { + Some(&self.transaction_overlay.denote(key, x).0) + } + _ => { + if let Some(x) = self.payload(key) { + Some(&self.transaction_overlay.denote(key, x).0) + } + else { + None + } + } } } } @@ -352,13 +374,13 @@ impl HashDB for JournalDB { } fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) + self.transaction_overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.transaction_overlay.emplace(key, value); } fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + self.transaction_overlay.kill(key); } } @@ -492,11 +514,13 @@ mod tests { fn reopen() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); + let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); + 
jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); foo }; @@ -510,8 +534,62 @@ mod tests { { let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); } } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 680a6e1d0..9cd018935 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -21,6 +21,7 @@ use bytes::*; use rlp::*; use sha3::*; use hashdb::*; +use heapsize::*; use 
std::mem; use std::collections::HashMap; @@ -143,6 +144,11 @@ impl MemoryDB { } self.raw(key).unwrap() } + + /// Returns the size of allocated heap memory + pub fn mem_used(&self) -> usize { + self.data.heap_size_of_children() + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; From 744c4c7d8bba32c5c5e61eb8c9b32bd52498fd30 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 7 Mar 2016 07:06:55 +0100 Subject: [PATCH 02/20] JournalDB documentation --- util/src/journaldb.rs | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 48bd94d64..1c62a9960 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,13 +27,37 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. /// -/// If `counters` is `None`, then it behaves exactly like OverlayDB. If not it behaves +/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves /// differently: /// /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. +/// +/// There are two memory overlays: +/// - Transaction overlay contains current transaction data. It is merged with history +/// overlay on each `commit()` +/// - History overlay contains all data inserted during the history period. When the node +/// in the overlay becomes ancient it is written to disk on `commit()` +/// +/// There is also a journal maintained in memory and on the disk as well which lists insertions +/// and removals for each commit during the history period. This is used to track
+/// +/// Commit workflow: +/// Create a new journal record from the transaction overlay. +/// Insert each node from the transaction overlay into the History overlay increasing reference +/// count if it is already there. Note that the reference counting is managed by `MemoryDB` +/// Clear the transaction overlay. +/// For a canonical journal record that becomes ancient inserts its insertions into the disk DB +/// For each journal record that goes out of the history scope (becomes ancient) remove its +/// insertions from the history overlay, decreasing the reference counter and removing entry if +/// it reaches zero. +/// For a canonical journal record that becomes ancient delete its removals from the disk only if +/// the removed key is not present in the history overlay. +/// Delete ancient record from memory and disk. +/// pub struct JournalDB { transaction_overlay: MemoryDB, backing: Arc, @@ -220,7 +244,6 @@ impl JournalDB { } // apply old commits' details - if let Some((end_era, canon_id)) = end { let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); let mut canon_deletions: Vec = Vec::new(); From a069e890ba52cf5242cf4492266b1d638a138d74 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 8 Mar 2016 19:14:43 +0100 Subject: [PATCH 03/20] Replaced --archive option with --pruning --- parity/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index ceb58e31e..2d7bd8bab 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -77,7 +77,7 @@ Protocol Options: or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. - --archive Client should not prune the state/storage trie. + --pruning Enable state/storage trie pruning.
-d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. @@ -135,7 +135,7 @@ struct Args { flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_archive: bool, + flag_pruning: bool, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, @@ -362,7 +362,7 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; } } - client_config.prefer_journal = !self.args.flag_archive; + client_config.prefer_journal = self.args.flag_pruning; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); From c302fa9a4ebae4710ad98933539fbb4b6998b6f4 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 9 Mar 2016 18:37:44 +0100 Subject: [PATCH 04/20] Style --- util/src/journaldb.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 3309612c2..1d854924a 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -243,9 +243,10 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - match journal_overlay.backing_overlay.raw(&h) { - Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimizie this to avoid data copy - _ => () + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy + } } } canon_deletions = journal.deletions; @@ -352,7 +353,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = 
self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -573,7 +574,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From 06a3abd01e0cd6f2435510c1133c34e87c48beff Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 21:15:43 +0100 Subject: [PATCH 05/20] Removed unused return type --- util/src/journaldb.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 97ae077b2..4f2cdeb31 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -153,7 +153,7 @@ impl JournalDB { } /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { let have_journal_overlay = self.journal_overlay.is_some(); if have_journal_overlay { self.commit_with_overlay(now, id, end) @@ -183,16 +183,16 @@ impl JournalDB { } /// Just commit the transaction overlay into the backing DB. - fn commit_without_overlay(&mut self) -> Result { + fn commit_without_overlay(&mut self) -> Result<(), UtilError> { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); + Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) + Ok(()) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { // record new commit's details. 
trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); @@ -274,7 +274,7 @@ impl JournalDB { journal_overlay.journal.remove(&end_era); } try!(self.backing.write(batch)); - Ok(0 as u32) + Ok(()) } fn payload(&self, key: &H256) -> Option { From 3a4a7ac822289181229c16af58690e63e3b98b62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:35:26 +0100 Subject: [PATCH 06/20] Bumping clippy version --- Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- util/bigint/Cargo.toml | 1 - 6 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 22d0f9288..70e4cbc34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index fbfe175d7..b5c4a7636 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -21,7 +21,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2ce430e51..918160be9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,7 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", 
optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 46baa8a83..8412d3700 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -14,7 +14,7 @@ rustc_version = "0.1" [dependencies] ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } log = "0.3" env_logger = "0.3" time = "0.1.34" diff --git a/util/Cargo.toml b/util/Cargo.toml index 0ce27ec2b..93b475538 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 377391eeb..1bd2b994e 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -15,7 +15,6 @@ rustc-serialize = "0.3" arrayvec = "0.3" rand = "0.3.12" serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } heapsize = "0.3" [features] From 8709dd28f884742756ae4bbba9bd628708f271bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:57:58 +0100 Subject: [PATCH 07/20] Fixing clippy warnings --- Cargo.lock | 30 ++++++++++++------------------ ethcore/src/extras.rs | 14 ++++++++++---- ethcore/src/lib.rs | 2 ++ ethcore/src/substate.rs | 14 ++++++++++---- sync/src/chain.rs | 2 +- sync/src/lib.rs | 4 +++- sync/src/transaction_queue.rs | 8 ++++++++ util/src/keys/store.rs | 10 ++++++++-- util/src/kvdb.rs | 7 +++++++ util/src/lib.rs | 2 ++ util/src/memorydb.rs | 11 +++++++++-- util/src/network/discovery.rs | 9 ++++++++- util/src/network/host.rs | 9 ++++++++- util/src/panics.rs | 11 +++++++++-- util/src/rlp/rlpstream.rs | 17 +++++++++++++++-- util/src/table.rs | 
13 ++++++++++++- util/src/trie/journal.rs | 7 +++++++ 17 files changed, 131 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f583a8747..e33e0288c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +94,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.44" +version = "0.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +207,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -234,7 +234,7 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -258,7 +258,7 @@ dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -273,7 +273,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -290,7 +290,7 @@ dependencies = [ name = "ethsync" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -469,11 +469,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.2.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -597,11 +596,6 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "pkg-config" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "primal" 
version = "0.2.3" @@ -697,11 +691,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.4.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] diff --git a/ethcore/src/extras.rs b/ethcore/src/extras.rs index f4759b040..a7c82c37c 100644 --- a/ethcore/src/extras.rs +++ b/ethcore/src/extras.rs @@ -35,13 +35,13 @@ pub enum ExtrasIndex { BlocksBlooms = 4, /// Block receipts index BlockReceipts = 5, -} +} /// trait used to write Extras data to db pub trait ExtrasWritable { /// Write extra data to db fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable; } @@ -60,9 +60,9 @@ pub trait ExtrasReadable { impl ExtrasWritable for DBTransaction { fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable { - + self.put(&hash.to_extras_slice(T::extras_index()), &encode(value)).unwrap() } } @@ -215,6 +215,12 @@ pub struct BlocksBlooms { pub blooms: [H2048; 16], } +impl Default for BlocksBlooms { + fn default() -> Self { + BlocksBlooms::new() + } +} + impl BlocksBlooms { pub fn new() -> Self { BlocksBlooms { blooms: unsafe { ::std::mem::zeroed() }} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..2209df7dc 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -25,6 +25,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to 
non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore library //! diff --git a/ethcore/src/substate.rs b/ethcore/src/substate.rs index 374397ca7..57e35ad2e 100644 --- a/ethcore/src/substate.rs +++ b/ethcore/src/substate.rs @@ -31,6 +31,12 @@ pub struct Substate { pub contracts_created: Vec
} +impl Default for Substate { + fn default() -> Self { + Substate::new() + } +} + impl Substate { /// Creates new substate. pub fn new() -> Self { @@ -67,8 +73,8 @@ mod tests { let mut sub_state = Substate::new(); sub_state.contracts_created.push(address_from_u64(1u64)); sub_state.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state.sstore_clears_count = x!(5); @@ -77,8 +83,8 @@ mod tests { let mut sub_state_2 = Substate::new(); sub_state_2.contracts_created.push(address_from_u64(2u64)); sub_state_2.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state_2.sstore_clears_count = x!(7); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index da3908a1e..a22f23eb8 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1307,7 +1307,7 @@ impl ChainSync { where T: Fn(&Address) -> U256 { let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce); + let _ = queue.add(transaction, fetch_nonce); } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..6cb98521e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -17,9 +17,10 @@ #![warn(missing_docs)] #![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] - // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Blockchain sync module //! 
Implements ethereum protocol version 63 as specified here: @@ -172,6 +173,7 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } + #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 243939a4c..45dc0e299 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -79,6 +79,7 @@ //! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. //! +use std::default::Default; use std::cmp::{Ordering}; use std::collections::{HashMap, BTreeSet}; use util::numbers::{Uint, U256}; @@ -102,6 +103,7 @@ struct TransactionOrder { hash: H256, } + impl TransactionOrder { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { TransactionOrder { @@ -253,6 +255,12 @@ pub struct TransactionQueue { last_nonces: HashMap, } +impl Default for TransactionQueue { + fn default() -> Self { + TransactionQueue::new() + } +} + impl TransactionQueue { /// Creates new instance of this Queue pub fn new() -> Self { diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index dcc165259..7e12703ea 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -78,9 +78,15 @@ struct AccountUnlock { expires: DateTime, } +impl Default for SecretStore { + fn default() -> Self { + SecretStore::new() + } +} + impl SecretStore { /// new instance of Secret Store in default home directory - pub fn new() -> SecretStore { + pub fn new() -> Self { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); @@ -89,7 +95,7 @@ impl SecretStore { } /// new instance of Secret Store in specific directory - pub fn new_in(path: &Path) -> SecretStore { + pub fn new_in(path: &Path) -> Self 
{ SecretStore { directory: KeyDirectory::new(path), unlocks: RwLock::new(HashMap::new()), diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index a2fa2215a..df5c2c448 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,6 +16,7 @@ //! Key-Value store abstraction with RocksDB backend. +use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; @@ -24,6 +25,12 @@ pub struct DBTransaction { batch: WriteBatch, } +impl Default for DBTransaction { + fn default() -> Self { + DBTransaction::new() + } +} + impl DBTransaction { /// Create new transaction. pub fn new() -> DBTransaction { diff --git a/util/src/lib.rs b/util/src/lib.rs index 59d66a325..bdb4be6e4 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -27,6 +27,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore-util library //! diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..66fa32055 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -24,6 +24,7 @@ use hashdb::*; use heapsize::*; use std::mem; use std::collections::HashMap; +use std::default::Default; #[derive(Debug,Clone)] /// Reference-counted memory-based HashDB implementation. @@ -32,7 +33,7 @@ use std::collections::HashMap; /// with `kill()`, check for existance with `exists()` and lookup a hash to derive /// the data with `lookup()`. Clear with `clear()` and purge the portions of the data /// that have no references with `purge()`. 
-/// +/// /// # Example /// ```rust /// extern crate ethcore_util; @@ -74,6 +75,12 @@ pub struct MemoryDB { static_null_rlp: (Bytes, i32), } +impl Default for MemoryDB { + fn default() -> Self { + MemoryDB::new() + } +} + impl MemoryDB { /// Create a new instance of the memory DB. pub fn new() -> MemoryDB { @@ -133,7 +140,7 @@ impl MemoryDB { /// Denote than an existing value has the given key. Used when a key gets removed without /// a prior insert and thus has a negative reference with no value. - /// + /// /// May safely be called even if the key's value is known, in which case it will be a no-op. pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { if self.raw(key) == None { diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 644af22af..2e7c51cb0 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -19,6 +19,7 @@ use std::net::SocketAddr; use std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; use std::cmp; +use std::default::Default; use mio::*; use mio::udp::*; use sha3::*; @@ -62,8 +63,14 @@ struct NodeBucket { nodes: VecDeque, //sorted by last active } +impl Default for NodeBucket { + fn default() -> Self { + NodeBucket::new() + } +} + impl NodeBucket { - fn new() -> NodeBucket { + fn new() -> Self { NodeBucket { nodes: VecDeque::new() } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 2d1af55ba..3db94131a 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -23,6 +23,7 @@ use std::ops::*; use std::cmp::min; use std::path::{Path, PathBuf}; use std::io::{Read, Write}; +use std::default::Default; use std::fs; use mio::*; use mio::tcp::*; @@ -75,9 +76,15 @@ pub struct NetworkConfiguration { pub ideal_peers: u32, } +impl Default for NetworkConfiguration { + fn default() -> Self { + NetworkConfiguration::new() + } +} + impl NetworkConfiguration { /// Create a new instance of default settings. 
- pub fn new() -> NetworkConfiguration { + pub fn new() -> Self { NetworkConfiguration { config_path: None, listen_address: None, diff --git a/util/src/panics.rs b/util/src/panics.rs index 70ce0bc33..ab25eae57 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -19,6 +19,7 @@ use std::thread; use std::ops::DerefMut; use std::sync::{Arc, Mutex}; +use std::default::Default; /// Thread-safe closure for handling possible panics pub trait OnPanicListener: Send + Sync + 'static { @@ -56,14 +57,20 @@ pub struct PanicHandler { listeners: Mutex>> } +impl Default for PanicHandler { + fn default() -> Self { + PanicHandler::new() + } +} + impl PanicHandler { /// Creates new `PanicHandler` wrapped in `Arc` - pub fn new_in_arc() -> Arc { + pub fn new_in_arc() -> Arc { Arc::new(Self::new()) } /// Creates new `PanicHandler` - pub fn new() -> PanicHandler { + pub fn new() -> Self { PanicHandler { listeners: Mutex::new(vec![]) } diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index ba70e7b2b..7bf3d3cdd 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
use std::ops::Deref; +use std::default::Default; use elastic_array::*; use rlp::bytes::{ToBytes, VecLike}; use rlp::{Stream, Encoder, Encodable}; @@ -44,6 +45,12 @@ pub struct RlpStream { finished_list: bool, } +impl Default for RlpStream { + fn default() -> Self { + RlpStream::new() + } +} + impl Stream for RlpStream { fn new() -> Self { RlpStream { @@ -190,8 +197,14 @@ struct BasicEncoder { bytes: ElasticArray1024, } +impl Default for BasicEncoder { + fn default() -> Self { + BasicEncoder::new() + } +} + impl BasicEncoder { - fn new() -> BasicEncoder { + fn new() -> Self { BasicEncoder { bytes: ElasticArray1024::new() } } @@ -222,7 +235,7 @@ impl Encoder for BasicEncoder { // just 0 0 => self.bytes.push(0x80u8), // byte is its own encoding if < 0x80 - 1 => { + 1 => { value.to_bytes(&mut self.bytes); let len = self.bytes.len(); let last_byte = self.bytes[len - 1]; diff --git a/util/src/table.rs b/util/src/table.rs index e41209608..5ba572289 100644 --- a/util/src/table.rs +++ b/util/src/table.rs @@ -16,6 +16,7 @@ //! A collection associating pair of keys (row and column) with a single value. +use std::default::Default; use std::hash::Hash; use std::collections::HashMap; @@ -30,11 +31,21 @@ pub struct Table map: HashMap>, } +impl Default for Table + where Row: Eq + Hash + Clone, + Col: Eq + Hash { + fn default() -> Self { + Table::new() + } +} + +// There is default but clippy does not detect it? +#[allow(new_without_default)] impl Table where Row: Eq + Hash + Clone, Col: Eq + Hash { /// Creates new Table - pub fn new() -> Table { + pub fn new() -> Self { Table { map: HashMap::new(), } diff --git a/util/src/trie/journal.rs b/util/src/trie/journal.rs index db16a313d..4ffd7cf5c 100644 --- a/util/src/trie/journal.rs +++ b/util/src/trie/journal.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::default::Default; use sha3::*; use hash::H256; use bytes::*; @@ -39,6 +40,12 @@ pub struct Score { #[derive(Debug)] pub struct Journal (Vec); +impl Default for Journal { + fn default() -> Self { + Journal::new() + } +} + impl Journal { /// Create a new, empty, object. pub fn new() -> Journal { Journal(vec![]) } From 8f54c24e47032a52bdbc9bd48cb2f16c7c9aa888 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 11:52:11 +0100 Subject: [PATCH 08/20] Merged changes from jdb_option1, keep LATEST_ERA from decreasing --- ethcore/src/account_db.rs | 9 +- util/src/journaldb.rs | 347 +++++++++++++++++++++++++++++++++++--- util/src/memorydb.rs | 1 + 3 files changed, 323 insertions(+), 34 deletions(-) diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index e7f1b2bad..026e813f5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -13,17 +13,14 @@ pub struct AccountDB<'db> { #[inline] fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { - let mut addr_hash = address.sha3(); - // preserve 96 bits of original key for db lookup - addr_hash[0..12].clone_from_slice(&[0u8; 12]); - &addr_hash ^ key + address ^ key } impl<'db> AccountDB<'db> { pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { AccountDB { db: db, - address: x!(address.clone()), + address: x!(address), } } } @@ -70,7 +67,7 @@ impl<'db> AccountDBMut<'db> { pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { AccountDBMut { db: db, - address: x!(address.clone()), + address: x!(address), } } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 4f2cdeb31..8bff08a77 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -64,11 +64,14 @@ pub struct JournalDB { journal_overlay: Option>>, } +#[derive(PartialEq)] struct JournalOverlay { backing_overlay: MemoryDB, - journal: HashMap> + journal: HashMap>, + latest_era: u64, } +#[derive(PartialEq)] struct JournalEntry { id: H256, insertions: Vec, @@ -220,7 
+223,10 @@ impl JournalDB { k.append(&index); k.append(&&PADDING[..]); try!(batch.put(&k.drain(), r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + if now >= journal_overlay.latest_era { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.latest_era = now; + } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); } @@ -243,7 +249,7 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { if rc > 0 { canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy } @@ -253,17 +259,17 @@ impl JournalDB { } overlay_deletions.append(&mut journal.insertions); } - index +=1; + index += 1; } // apply canon inserts first for (k, v) in canon_insertions { try!(batch.put(&k, &v)); } - // clean the overlay + // update the overlay for k in overlay_deletions { journal_overlay.backing_overlay.kill(&k); } - // apply removes + // apply canon deletions for k in canon_deletions { if !journal_overlay.backing_overlay.exists(&k) { try!(batch.delete(&k)); @@ -277,6 +283,13 @@ impl JournalDB { Ok(()) } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.as_ref().unwrap().read().unwrap(); + *journal_overlay == reconstructed + } + fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) } @@ -285,8 +298,10 @@ impl JournalDB { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; + let mut latest_era = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + latest_era = decode::(&val); + let mut era = latest_era; loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -296,7 +311,7 @@ impl JournalDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); + trace!("read_overlay: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let id: H256 = rlp.val_at(0); let insertions = rlp.at(1); @@ -323,7 +338,7 @@ impl JournalDB { } } trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); - JournalOverlay { backing_overlay: overlay, journal: journal } + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } } /// Returns heap memory size used @@ -396,6 +411,7 @@ mod tests { use common::*; use super::*; use hashdb::*; + use log::init_log; #[test] fn insert_same_in_fork() { @@ -404,17 +420,25 @@ mod tests { let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&x)); } @@ -425,15 +449,20 @@ mod tests { let mut jdb = JournalDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.remove(&h); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&h)); } @@ -445,6 +474,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); @@ -452,6 +482,7 @@ mod tests { jdb.remove(&bar); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); @@ -459,17 +490,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); 
assert!(!jdb.exists(&baz)); @@ -483,21 +517,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); @@ -510,35 +548,113 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.exists(&foo)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = JournalDB::new_temp(); + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } #[test] fn reopen() { @@ -552,6 +668,7 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); foo }; @@ -559,6 +676,7 @@ mod tests { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); } { @@ -566,41 +684,210 @@ mod tests { assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } #[test] - fn reopen_remove() { + fn insert_delete_insert_delete_insert_expunge() { + init_log(); let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // foo is ancient history. 
+ // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } - jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, 
b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(6, 
&b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -611,18 +898,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..dd8de7fd8 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -69,6 +69,7 @@ use std::collections::HashMap; /// assert!(!m.exists(&k)); /// } /// ``` +#[derive(PartialEq)] pub struct MemoryDB { data: HashMap, static_null_rlp: (Bytes, i32), From ed0047725cbe4a027bd6c75bf592728cf1292efb Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 15:49:49 +0400 Subject: [PATCH 09/20] adding cli extension --- parity/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index b6ed5cba3..5f6d50027 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -101,7 +101,7 @@ API and Console Options: --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible name are web3, eth and net. [default: web3,eth,net]. 
+ list of API name. Possible name are web3, eth and net. [default: web3,eth,net,personal]. --rpc Equivalent to --jsonrpc (geth-compatible). --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). @@ -208,6 +208,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), _ => { die!("{}: Invalid API name to be enabled.", api); } From 70ee6aa94219bac3cc22dbc9dac037b76e535c15 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 15:50:13 +0400 Subject: [PATCH 10/20] refactoring to use generic account provider as web3 svc --- rpc/src/v1/impls/personal.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 7b79ceae7..ce200244c 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -22,20 +22,20 @@ use util::keys::store::*; use util::Address; /// Account management (personal) rpc implementation. 
-pub struct PersonalClient { - accounts: Weak, +pub struct PersonalClient where A: AccountProvider { + accounts: Weak, } -impl PersonalClient { +impl PersonalClient where A: AccountProvider { /// Creates new PersonalClient - pub fn new(store: &Arc) -> Self { + pub fn new(store: &Arc) -> Self { PersonalClient { accounts: Arc::downgrade(store), } } } -impl Personal for PersonalClient { +impl Personal for PersonalClient where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result { let store = take_weak!(self.accounts); match store.accounts() { From c6ba378b6b4c4e3117cac3d758e09732c81c131b Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 16:17:09 +0100 Subject: [PATCH 11/20] rpc web3 tests --- rpc/src/v1/tests/mod.rs | 1 + rpc/src/v1/tests/net.rs | 6 +++--- rpc/src/v1/tests/web3.rs | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 rpc/src/v1/tests/web3.rs diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 5ef74987c..3a38ced15 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -17,4 +17,5 @@ //!TODO: load custom blockchain state and test mod net; +mod web3; mod helpers; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs index 792e469d8..e24045ca6 100644 --- a/rpc/src/v1/tests/net.rs +++ b/rpc/src/v1/tests/net.rs @@ -36,7 +36,7 @@ fn rpc_net_version() { let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -49,7 +49,7 @@ fn rpc_net_peer_count() { let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + 
assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -62,5 +62,5 @@ fn rpc_net_listening() { let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/web3.rs b/rpc/src/v1/tests/web3.rs new file mode 100644 index 000000000..c717d361a --- /dev/null +++ b/rpc/src/v1/tests/web3.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use jsonrpc_core::IoHandler; +use util::version; +use v1::{Web3, Web3Client}; + +#[test] +fn rpc_web3_version() { + let web3 = Web3Client::new().to_delegate(); + let io = IoHandler::new(); + io.add_delegate(web3); + + let v = version().to_owned().replace("Parity/", "Parity//"); + + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + + assert_eq!(io.handle_request(request), Some(response)); +} From 90e20cbcad54787eaf239bef016756a884b03f62 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 20:08:01 +0100 Subject: [PATCH 12/20] additional (failing) sstore test --- util/src/keys/store.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index ea97cc80e..cbef7b5f9 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -381,6 +381,7 @@ mod tests { use super::*; use devtools::*; use common::*; + use crypto::KeyPair; #[test] fn can_insert() { @@ -555,4 +556,15 @@ mod tests { let accounts = sstore.accounts().unwrap(); assert_eq!(30, accounts.len()); } + + #[test] + fn validate_generated_addresses() { + let temp = RandomTempPath::create_dir(); + let mut sstore = SecretStore::new_test(&temp); + let addr = sstore.new_account("test").unwrap(); + let _ok = sstore.unlock_account(&addr, "test").unwrap(); + let secret = sstore.account_secret(&addr).unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(Address::from(kp.public().sha3()), addr); + } } From b1327a045fa39e5e9552797a5f137de383206517 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 22:47:12 +0400 Subject: [PATCH 13/20] fixed new account generation --- util/src/keys/store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index cbef7b5f9..cd5fa8427 100644 --- a/util/src/keys/store.rs +++ 
b/util/src/keys/store.rs @@ -214,12 +214,12 @@ impl SecretStore { /// Creates new account pub fn new_account(&mut self, pass: &str) -> Result { - let secret = H256::random(); + let key_pair = crypto::KeyPair::create().expect("Error creating key-pair. Something wrong with crypto libraries?"); + let address = Address::from(key_pair.public().sha3()); let key_id = H128::random(); - self.insert(key_id.clone(), secret, pass); + self.insert(key_id.clone(), key_pair.secret().clone(), pass); let mut key_file = self.directory.get(&key_id).expect("the key was just inserted"); - let address = Address::random(); key_file.account = Some(address); try!(self.directory.save(key_file)); Ok(address) From 12e1abdfb70e3cd2a662cbcbb4b44bbbe3c56f41 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:51:17 +0100 Subject: [PATCH 14/20] Port fixes to new infrastrtcutre. --- util/src/journaldb/optiononedb.rs | 572 +++++++++++++++++++++++++----- 1 file changed, 492 insertions(+), 80 deletions(-) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index dfa7c8ec1..b51d0819d 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -25,6 +25,34 @@ use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; +#[derive(Clone, PartialEq, Eq)] +struct RefInfo { + queue_refs: usize, + in_archive: bool, +} + +impl HeapSizeOf for RefInfo { + fn heap_size_of_children(&self) -> usize { 0 } +} + +impl fmt::Display for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +impl fmt::Debug for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +#[derive(Clone, PartialEq, Eq)] +enum RemoveFrom { + Queue, + Archive, +} + /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and 
latent-removal semantics. /// @@ -35,7 +63,7 @@ use std::env; pub struct OptionOneDB { overlay: MemoryDB, backing: Arc, - counters: Option>>>, + refs: Option>>>, } // all keys must be at least 12 bytes @@ -62,11 +90,11 @@ impl OptionOneDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); + let refs = Some(Arc::new(RwLock::new(OptionOneDB::read_refs(&backing)))); OptionOneDB { overlay: MemoryDB::new(), backing: Arc::new(backing), - counters: counters, + refs: refs, } } @@ -91,11 +119,14 @@ impl OptionOneDB { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap, batch: &DBTransaction, trace: bool) { for &(ref h, ref d) in inserts { - if let Some(c) = counters.get_mut(h) { + if let Some(c) = refs.get_mut(h) { // already counting. increment. - *c += 1; + c.queue_refs += 1; + if trace { + trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, c.queue_refs); + } continue; } @@ -103,7 +134,10 @@ impl OptionOneDB { if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { // already in the backing DB. start counting, and remember it was already in. Self::set_already_in(batch, &h); - counters.insert(h.clone(), 1); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); + } continue; } @@ -111,60 +145,104 @@ impl OptionOneDB { //Self::reset_already_in(&h); assert!(!Self::is_already_in(backing, &h)); batch.put(&h.bytes(), d).expect("Low-level database error. 
Some issue with your hard disk?"); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); + } } } - fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { - trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); + fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap) { + trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); for h in inserts { - if let Some(c) = counters.get_mut(h) { + if let Some(c) = refs.get_mut(h) { // already counting. increment. - *c += 1; + c.queue_refs += 1; continue; } // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in. - if Self::is_already_in(backing, h) { - trace!("replace_keys: Key {} was already in!", h); - counters.insert(h.clone(), 1); - } + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)}); } - trace!("replay_keys: (end) counters={:?}", counters); + trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { - for h in deletes.into_iter() { - let mut n: Option = None; - if let Some(c) = counters.get_mut(&h) { - if *c > 1 { - *c -= 1; + fn kill_keys(deletes: &Vec, refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + // with a kill on {queue_refs: 1, in_archive: true}, we have two options: + // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) + // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) + // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) + // both are valid, but we switch between them depending on context. + // All inserts in queue (i.e. 
those which may yet be reverted) have an entry in refs. + for h in deletes.iter() { + let mut n: Option = None; + if let Some(c) = refs.get_mut(h) { + if c.in_archive && from == RemoveFrom::Archive { + c.in_archive = false; + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Reducing to queue only and recording", h); + } + continue; + } else if c.queue_refs > 1 { + c.queue_refs -= 1; + if trace { + trace!(target: "jdb.fine", " kill({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs); + } continue; } else { - n = Some(*c); + n = Some(c.clone()); } } match n { - Some(i) if i == 1 => { - counters.remove(&h); - Self::reset_already_in(batch, &h); + Some(RefInfo{queue_refs: 1, in_archive: true}) => { + refs.remove(h); + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); + } + } + Some(RefInfo{queue_refs: 1, in_archive: false}) => { + refs.remove(h); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); + } } None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(&h.bytes()).expect("Low-level database error. 
Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); + } } - _ => panic!("Invalid value in counters: {:?}", n), + _ => panic!("Invalid value in refs: {:?}", n), } } } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_refs(&self.backing); + let refs = self.refs.as_ref().unwrap().write().unwrap(); + if *refs != reconstructed { + let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); + false + } else { + true + } + } + fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut counters = HashMap::new(); + fn read_refs(db: &Database) -> HashMap { + let mut refs = HashMap::new(); if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); loop { @@ -176,10 +254,9 @@ impl OptionOneDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut counters); + Self::replay_keys(&inserts, db, &mut refs); index += 1; }; if index == 0 || era == 0 { @@ -188,10 +265,9 @@ impl OptionOneDB { era -= 1; } } - trace!("Recovered {} counters", counters.len()); - counters + refs } -} + } impl HashDB for OptionOneDB { fn keys(&self) -> HashMap { @@ -243,23 +319,23 @@ impl JournalDB for OptionOneDB { Box::new(OptionOneDB { overlay: MemoryDB::new(), backing: 
self.backing.clone(), - counters: self.counters.clone(), + refs: self.refs.clone(), }) } - fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.counters { - Some(ref c) => c.read().unwrap().heap_size_of_children(), - None => 0 - } - } - fn is_empty(&self) -> bool { self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() } + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.refs { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -304,9 +380,9 @@ impl JournalDB for OptionOneDB { // // record new commit's details. - trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let mut refs = self.refs.as_ref().unwrap().write().unwrap(); let batch = DBTransaction::new(); + let trace = false; { let mut index = 0usize; let mut last; @@ -323,6 +399,11 @@ impl JournalDB for OptionOneDB { } let drained = self.overlay.drain(); + + if trace { + trace!(target: "jdb", "commit: #{} ({}), end era: {:?}", now, id, end); + } + let removes: Vec = drained .iter() .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) @@ -332,6 +413,9 @@ impl JournalDB for OptionOneDB { .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) .collect(); + + // TODO: check all removes are in the db. 
+ let mut r = RlpStream::new_list(3); r.append(id); @@ -344,7 +428,12 @@ impl JournalDB for OptionOneDB { r.begin_list(inserts.len()); inserts.iter().foreach(|&(k, _)| {r.append(&k);}); r.append(&removes); - Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); + Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace); + if trace { + let ins = inserts.iter().map(|&(k, _)| k).collect::>(); + trace!(target: "jdb.ops", " Inserts: {:?}", ins); + trace!(target: "jdb.ops", " Deletes: {:?}", removes); + } try!(batch.put(&last, r.as_raw())); try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); } @@ -363,17 +452,64 @@ impl JournalDB for OptionOneDB { })) { let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); - let deletes: Vec = rlp.val_at(2); - // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical - Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); + + if canon_id == rlp.val_at(0) { + // Collect keys to be removed. Canon block - remove the (enacted) deletes. + let deletes: Vec = rlp.val_at(2); + if trace { + trace!(target: "jdb.ops", " Expunging: {:?}", deletes); + } + Self::kill_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace); + + if trace { + trace!(target: "jdb.ops", " Finalising: {:?}", inserts); + } + for k in inserts.iter() { + match refs.get(k).cloned() { + None => { + // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert + // already expunged from the queue (which is allowed since the key is in the archive). + // leave well alone. + } + Some( RefInfo{queue_refs: 1, in_archive: false} ) => { + // just delete the refs entry. 
+ refs.remove(k); + } + Some( RefInfo{queue_refs: x, in_archive: false} ) => { + // must set already in; , + Self::set_already_in(&batch, k); + refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); + } + Some( RefInfo{queue_refs: _, in_archive: true} ) => { + // Invalid! Reinserted the same key twice. + warn!("Key {} inserted twice into same fork.", k); + } + } + } + } else { + // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. + if trace { + trace!(target: "jdb.ops", " Reverting: {:?}", inserts); + } + Self::kill_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace); + } + try!(batch.delete(&last)); index += 1; } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + if trace { + trace!(target: "jdb", "delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + } } try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); + + // Comment out for now. TODO: automatically enable in tests. 
+ + if trace { + trace!(target: "jdb", "OK: {:?}", refs.clone()); + } + Ok(0) } } @@ -383,26 +519,34 @@ mod tests { use common::*; use super::*; use hashdb::*; - use journaldb::traits::JournalDB; + use log::init_log; #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&x)); } @@ -410,29 +554,35 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.remove(&h); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); 
assert!(!jdb.exists(&h)); } #[test] fn complex() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); @@ -440,6 +590,7 @@ mod tests { jdb.remove(&bar); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); @@ -447,17 +598,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); @@ -466,26 +620,30 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.commit(2, &b"2b".sha3(), Some((1, 
b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); @@ -494,39 +652,117 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.exists(&foo)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } #[test] fn reopen() { @@ -535,81 +771,257 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); 
+ assert!(jdb.can_reconstruct_refs()); foo }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } #[test] - fn reopen_remove() { + fn insert_delete_insert_delete_insert_expunge() { + init_log(); let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 - let foo = jdb.insert(b"foo"); + jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
- jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); From e6a273f3a796f3353985fdf34a32d26838e3c5a9 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:53:54 +0100 Subject: [PATCH 15/20] Fix tests. --- util/src/journaldb/optiononedb.rs | 42 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index b51d0819d..567f620b1 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -524,7 +524,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -554,7 +554,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -577,7 +577,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -620,7 +620,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -652,7 +652,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -677,7 +677,7 @@ 
mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -705,7 +705,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -733,7 +733,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -771,7 +771,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -781,14 +781,14 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -803,7 +803,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -832,7 +832,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let 
mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -881,7 +881,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -912,7 +912,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -952,7 +952,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -973,7 +973,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -981,14 +981,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -1001,7 +1001,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); 
dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -1019,7 +1019,7 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); From 874393ba06dbce3c91ab59afb3b110c37c1b1896 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:57:57 +0100 Subject: [PATCH 16/20] Fix tests, --- util/src/journaldb/archivedb.rs | 2 +- util/src/journaldb/optiononedb.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index e7da1b737..a8b9c1f74 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -364,7 +364,7 @@ mod tests { fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let (foo, bar, baz) = { + let (foo, _, _) = { let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index 567f620b1..e44c337db 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -518,6 +518,7 @@ impl JournalDB for OptionOneDB { mod tests { use common::*; use super::*; + use super::super::traits::JournalDB; use hashdb::*; use log::init_log; From d7039b72e237fb739dc98e344aa06ebf8cd49028 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 10:48:28 +0100 Subject: [PATCH 17/20] Update archivedb.rs --- util/src/journaldb/archivedb.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index c8c29e765..8db239cff 
100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -305,7 +305,6 @@ mod tests { assert!(jdb.exists(&foo)); } - #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); @@ -364,6 +363,7 @@ mod tests { jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); } } + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -406,6 +406,5 @@ mod tests { let state = jdb.state(&key); assert!(state.is_some()); } - } } From 82a881005740f2e1ddc2f862b2f617b3e5773597 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:19:42 +0100 Subject: [PATCH 18/20] Rename into something that is a little more descriptive. --- .../{optiononedb.rs => earlymergedb.rs} | 48 +++++++------- util/src/journaldb/mod.rs | 8 +-- .../{overlay.rs => overlayrecentdb.rs} | 66 +++++++++---------- 3 files changed, 61 insertions(+), 61 deletions(-) rename util/src/journaldb/{optiononedb.rs => earlymergedb.rs} (94%) rename util/src/journaldb/{overlay.rs => overlayrecentdb.rs} (93%) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/earlymergedb.rs similarity index 94% rename from util/src/journaldb/optiononedb.rs rename to util/src/journaldb/earlymergedb.rs index dfa7c8ec1..48083f113 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -32,7 +32,7 @@ use std::env; /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. 
-pub struct OptionOneDB { +pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, counters: Option>>>, @@ -44,9 +44,9 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl OptionOneDB { +impl EarlyMergeDB { /// Create a new instance from file - pub fn new(path: &str) -> OptionOneDB { + pub fn new(path: &str) -> EarlyMergeDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -62,8 +62,8 @@ impl OptionOneDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); - OptionOneDB { + let counters = Some(Arc::new(RwLock::new(EarlyMergeDB::read_counters(&backing)))); + EarlyMergeDB { overlay: MemoryDB::new(), backing: Arc::new(backing), counters: counters, @@ -72,7 +72,7 @@ impl OptionOneDB { /// Create a new instance with an anonymous temporary database. 
#[cfg(test)] - fn new_temp() -> OptionOneDB { + fn new_temp() -> EarlyMergeDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -193,7 +193,7 @@ impl OptionOneDB { } } -impl HashDB for OptionOneDB { +impl HashDB for EarlyMergeDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -238,9 +238,9 @@ impl HashDB for OptionOneDB { } } -impl JournalDB for OptionOneDB { +impl JournalDB for EarlyMergeDB { fn spawn(&self) -> Box { - Box::new(OptionOneDB { + Box::new(EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), counters: self.counters.clone(), @@ -369,11 +369,11 @@ impl JournalDB for OptionOneDB { try!(batch.delete(&last)); index += 1; } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + trace!("EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); +// trace!("EarlyMergeDB::commit() deleted {} nodes", deletes); Ok(0) } } @@ -388,7 +388,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -410,7 +410,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.exists(&h)); @@ -428,7 +428,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -466,7 +466,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); 
let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -494,7 +494,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -513,7 +513,7 @@ mod tests { #[test] fn fork_same_key() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); jdb.commit(0, &b"0".sha3(), None).unwrap(); let foo = jdb.insert(b"foo"); @@ -535,7 +535,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -544,13 +544,13 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -564,7 +564,7 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -578,7 +578,7 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.exists(&foo)); @@ -593,7 +593,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut 
jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -608,7 +608,7 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index cf8e7d392..724e61dfb 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -21,8 +21,8 @@ use common::*; /// Export the journaldb module. pub mod traits; mod archivedb; -mod optiononedb; -mod overlay; +mod earlymergedb; +mod overlayrecentdb; /// Export the JournalDB trait. pub use self::traits::JournalDB; @@ -73,8 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)), - Algorithm::OverlayRecent => Box::new(overlay::JournalOverlayDB::new(path)), + Algorithm::EarlyMerge => Box::new(optiononedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(optiononedb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } diff --git a/util/src/journaldb/overlay.rs b/util/src/journaldb/overlayrecentdb.rs similarity index 93% rename from util/src/journaldb/overlay.rs rename to util/src/journaldb/overlayrecentdb.rs index e91709041..8dd4d1752 100644 --- a/util/src/journaldb/overlay.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -56,7 +56,7 @@ use super::JournalDB; /// the removed key is not present in the history overlay. /// 7. Delete ancient record from memory and disk. 
/// -pub struct JournalOverlayDB { +pub struct OverlayRecentDB { transaction_overlay: MemoryDB, backing: Arc, journal_overlay: Arc>, @@ -82,9 +82,9 @@ impl HeapSizeOf for JournalEntry { } } -impl Clone for JournalOverlayDB { - fn clone(&self) -> JournalOverlayDB { - JournalOverlayDB { +impl Clone for OverlayRecentDB { + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), journal_overlay: self.journal_overlay.clone(), @@ -98,14 +98,14 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 0x200 + 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl JournalOverlayDB { +impl OverlayRecentDB { /// Create a new instance from file - pub fn new(path: &str) -> JournalOverlayDB { + pub fn new(path: &str) -> OverlayRecentDB { Self::from_prefs(path) } /// Create a new instance from file - pub fn from_prefs(path: &str) -> JournalOverlayDB { + pub fn from_prefs(path: &str) -> OverlayRecentDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -121,8 +121,8 @@ impl JournalOverlayDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let journal_overlay = Arc::new(RwLock::new(JournalOverlayDB::read_overlay(&backing))); - JournalOverlayDB { + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), journal_overlay: journal_overlay, @@ -131,7 +131,7 @@ impl JournalOverlayDB { /// Create a new instance with an anonymous temporary database. 
#[cfg(test)] - pub fn new_temp() -> JournalOverlayDB { + pub fn new_temp() -> OverlayRecentDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -196,7 +196,7 @@ impl JournalOverlayDB { } } -impl JournalDB for JournalOverlayDB { +impl JournalDB for OverlayRecentDB { fn spawn(&self) -> Box { Box::new(self.clone()) } @@ -303,7 +303,7 @@ impl JournalDB for JournalOverlayDB { } -impl HashDB for JournalOverlayDB { +impl HashDB for OverlayRecentDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -367,7 +367,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -397,7 +397,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -420,7 +420,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -463,7 +463,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -495,7 +495,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -520,7 +520,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = 
OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -548,7 +548,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -576,7 +576,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -614,7 +614,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -624,14 +624,14 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -646,7 +646,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -675,7 +675,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = 
OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -724,7 +724,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -755,7 +755,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -795,7 +795,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -816,7 +816,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -824,14 +824,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -844,7 +844,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); 
dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -862,7 +862,7 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); From b03679e1a6e8f29dee36b0bfe3a4c701a7f154e4 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:22:02 +0100 Subject: [PATCH 19/20] Fix typos. --- util/src/journaldb/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 724e61dfb..cf5278368 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -73,8 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::EarlyMergeDB::new(path)), - Algorithm::OverlayRecent => Box::new(optiononedb::OverlayRecentDB::new(path)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } From 98bae098bea3b5a6db3fd891eba4e54ce8e3645e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 12:10:55 +0100 Subject: [PATCH 20/20] Update cargo lock. 
--- Cargo.lock | 88 +++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f26ac923..8e3a4b168 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -43,14 +43,14 @@ name = "aster" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bigint" version = "0.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -117,7 +117,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 
(registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -136,7 +136,7 @@ version = "1.1.1" source = "git+https://github.com/tomusdrw/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -145,7 +145,7 @@ name = "daemonize" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -161,7 +161,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -177,7 +177,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -185,7 +185,7 @@ name = "eth-secp256k1" version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#283a0677d8327536be58a87e0494d7e0e7b1d1d8" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 
(registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -253,9 +253,9 @@ dependencies = [ name = "ethcore-util" version = "0.9.99" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -264,10 +264,10 @@ dependencies = [ "ethcore-devtools 0.9.99", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "json-tests 0.1.0", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -304,7 +304,7 @@ dependencies = [ name = "fdlimit" version = "0.1.0" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -314,7 +314,7 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "glob" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -328,7 +328,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -381,7 +381,7 @@ dependencies = [ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -391,21 +391,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "json-tests" version = "0.1.0" dependencies = [ - "glob 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -461,7 +461,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -469,7 +469,7 @@ name = "librocksdb-sys" version = "0.2.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -477,7 +477,7 @@ name = "log" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -490,7 +490,7 @@ name = "memchr" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -536,7 +536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -560,7 +560,7 @@ dependencies = [ [[package]] name = "nom" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -577,7 +577,7 @@ name = "num_cpus" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 
(registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -639,7 +639,7 @@ name = "quasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -649,7 +649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aster 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -657,7 +657,7 @@ name = "rand" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -672,7 +672,7 @@ dependencies = [ [[package]] name = "regex" -version = "0.1.54" +version = "0.1.55" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -691,7 +691,7 @@ name = "rocksdb" version = "0.4.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] @@ -701,7 +701,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 
(registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -741,7 +741,7 @@ name = "semver" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -766,7 +766,7 @@ dependencies = [ "quasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -809,16 +809,16 @@ name = "syntex" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntex_syntax" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -844,7 +844,7 
@@ name = "termios" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -853,7 +853,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "url" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -975,7 +975,7 @@ name = "xml-rs" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]]