Merge branch 'master' of github.com:ethcore/parity into jsonrpc

debris 2016-02-08 11:59:03 +01:00
commit 90f965cf53
5 changed files with 78 additions and 38 deletions

.gitmodules

@@ -1,4 +1,4 @@
 [submodule "ethcore/res/ethereum/tests"]
 	path = ethcore/res/ethereum/tests
-	url = git@github.com:ethereum/tests
+	url = https://github.com/ethereum/tests.git
 	branch = develop
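Since git copies submodule URLs into .git/config on init, existing checkouts only pick up the switch from SSH to HTTPS after a `git submodule sync` followed by `git submodule update`.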

README.md

@@ -2,20 +2,22 @@
 [![Build Status][travis-image]][travis-url] [![Coverage Status][coveralls-image]][coveralls-url] [![Join the chat at https://gitter.im/trogdoro/xiki][gitter-image]][gitter-url]
 
-[travis-image]: https://travis-ci.com/ethcore/parity.svg?token=DMFvZu71iaTbUYx9UypX&branch=master
-[travis-url]: https://travis-ci.com/ethcore/parity
+[travis-image]: https://travis-ci.org/ethcore/parity.svg?branch=master
+[travis-url]: https://travis-ci.org/ethcore/parity
 [coveralls-image]: https://coveralls.io/repos/github/ethcore/parity/badge.svg?branch=master&t=Fk0OuQ
 [coveralls-url]: https://coveralls.io/r/ethcore/parity?branch=master
 [gitter-image]: https://badges.gitter.im/Join%20Chat.svg
 [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
 
+[Documentation](http://ethcore.github.io/parity/ethcore/index.html)
+
 ### Building from source
 
-##### Ubuntu 14.04
+##### Ubuntu 14.04, 15.04, 15.10
 
 ```bash
 # install rocksdb
-add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main"
+add-apt-repository ppa:ethcore/ethcore
 apt-get update
 apt-get install -y --force-yes librocksdb
@@ -32,7 +34,7 @@ cd parity
 cargo build --release
 ```
 
-##### Linux
+##### Other Linux
 
 ```bash
 # install rocksdb

block_queue.rs

@@ -205,6 +205,8 @@ impl BlockQueue {
 		let mut verification = self.verification.lock().unwrap();
 		verification.unverified.clear();
 		verification.verifying.clear();
+		verification.verified.clear();
+		self.processing.write().unwrap().clear();
 	}
 
 	/// Wait for queue to be empty
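Before this change, `clear()` emptied only the `unverified` and `verifying` sets; hashes already in `verified` and in the shared `processing` set survived a reset, so, presumably, a block re-imported after clearing the queue could still be treated as queued. Clearing all four collections makes the reset complete.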

client.rs

@@ -67,7 +67,6 @@ impl fmt::Display for BlockChainInfo {
 	}
 }
 
-
 /// Blockchain database client. Owns and manages a blockchain and a block queue.
 pub trait BlockChainClient : Sync + Send {
 	/// Get raw block header data by block header hash.
@@ -158,8 +157,7 @@ impl ClientReport {
 pub struct Client {
 	chain: Arc<RwLock<BlockChain>>,
 	engine: Arc<Box<Engine>>,
-	state_db: Arc<DB>,
-	state_journal: Mutex<JournalDB>,
+	state_db: Mutex<JournalDB>,
 	block_queue: RwLock<BlockQueue>,
 	report: RwLock<ClientReport>,
 	import_lock: Mutex<()>
@@ -212,8 +210,7 @@ impl Client {
 		Ok(Arc::new(Client {
 			chain: chain,
 			engine: engine.clone(),
-			state_db: db.clone(),
-			state_journal: Mutex::new(JournalDB::new_with_arc(db)),
+			state_db: Mutex::new(state_db),
 			block_queue: RwLock::new(BlockQueue::new(engine, message_channel)),
 			report: RwLock::new(Default::default()),
 			import_lock: Mutex::new(()),
@@ -268,7 +265,7 @@ impl Client {
 			}
 		}
 
-		let db = self.state_journal.lock().unwrap().clone();
+		let db = self.state_db.lock().unwrap().clone();
 		let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) {
 			Ok(b) => b,
 			Err(e) => {
@@ -305,7 +302,7 @@ impl Client {
 	/// Get a copy of the best block's state.
 	pub fn state(&self) -> State {
-		State::from_existing(JournalDB::new_with_arc(self.state_db.clone()), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
+		State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
 	}
 
 	/// Get info on the cache.
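With `state_db: Arc<DB>` and `state_journal: Mutex<JournalDB>` folded into a single `Mutex<JournalDB>`, both call sites above obtain state the same way: lock, clone the `JournalDB` handle, release. A minimal sketch of that clone-under-lock pattern, with stand-in types and a hypothetical `state_handle` method rather than Parity's own definitions:

```rust
use std::sync::Mutex;

// Stand-in for JournalDB: cloning yields an independent handle
// over the same backing store, so a clone is a cheap snapshot.
#[derive(Clone, Default)]
struct Journal;

struct Client {
    state_db: Mutex<Journal>,
}

impl Client {
    // One way to get a state database: lock briefly, clone, release.
    // No separate Arc<DB> and state_journal fields to keep in sync.
    fn state_handle(&self) -> Journal {
        self.state_db.lock().unwrap().clone()
    }
}

fn main() {
    let client = Client { state_db: Mutex::new(Journal::default()) };
    let _state = client.state_handle(); // the lock is already released here
}
```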

journaldb.rs

@@ -92,7 +92,6 @@ impl JournalDB {
 	/// Commit all recent insert operations and historical removals from the old era
 	/// to the backing database.
-	#[allow(cyclomatic_complexity)]
 	pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
 		// journal format:
 		// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
@@ -105,6 +104,17 @@ impl JournalDB {
 		// for each end_era that we journaled that we are now passing by,
 		// we remove all of its removes assuming it is canonical and all
 		// of its inserts otherwise.
+		//
+		// We also keep reference counters for each key inserted in the journal to handle
+		// the following cases where key K must not be deleted from the DB when processing removals:
+		// Given H is the journal size in eras, 0 <= C <= H.
+		// Key K is removed in era A(N) and re-inserted in canonical era B(N + C).
+		// Key K is removed in era A(N) and re-inserted in non-canonical era B'(N + C).
+		// Key K is added in non-canonical era A'(N) and in canonical era B(N + C).
+		//
+		// The counter is increased each time a key is inserted in the journal during a commit. The list of insertions
+		// is saved with the era record. When the era becomes end_era and goes out of the journal the counter is decreased
+		// and the key is safe to delete.
 
 		// record new commit's details.
 		let batch = WriteBatch::new();
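The rule all three cases reduce to: a removed key may be purged from the backing store only when no journalled era record still holds a reference to it and the expiring era's canonical record did not re-insert it. A self-contained sketch of that bookkeeping, with an illustrative `Journal` type using `String` keys and a `HashSet` standing in for `H256` and the RocksDB write batch:

```rust
use std::collections::{HashMap, HashSet};

// Illustrative model of the journal's reference counting; the names and
// types here are stand-ins, not Parity's actual JournalDB.
struct Journal {
    counters: HashMap<String, i32>,
    backing: HashSet<String>,
}

impl Journal {
    // Commit time: every key inserted in an era record bumps its counter,
    // whether or not the block turns out to be canonical.
    fn journal_inserts(&mut self, inserts: &[String]) {
        for k in inserts {
            self.backing.insert(k.clone());
            *self.counters.entry(k.clone()).or_insert(0) += 1;
        }
    }

    // An era record leaving the journal: decrement its inserts, then purge
    // removal candidates that no record still references and that the
    // canonical record of this era did not re-insert.
    fn expire_record(&mut self, inserts: &[String], to_remove: &[String],
                     canon_inserts: &HashSet<String>) {
        for k in inserts {
            let dead = {
                let c = self.counters.get_mut(k).expect("missing key counter");
                *c -= 1;
                *c == 0
            };
            if dead {
                self.counters.remove(k);
            }
        }
        for k in to_remove {
            if !self.counters.contains_key(k) && !canon_inserts.contains(k) {
                self.backing.remove(k);
            }
        }
    }
}

fn main() {
    // The fork_same_key scenario from the new test below: "foo" is inserted
    // by two sibling era-1 records, 1a (canonical) and 1b.
    let mut j = Journal { counters: HashMap::new(), backing: HashSet::new() };
    j.journal_inserts(&["foo".to_string()]); // record 1a
    j.journal_inserts(&["foo".to_string()]); // record 1b

    // Era 1 expires with 1a canonical: 1b's inserts become removal
    // candidates, 1a's inserts form the canon set.
    let canon: HashSet<String> = ["foo".to_string()].into_iter().collect();
    j.expire_record(&["foo".to_string()], &[], &canon);                    // 1a: counter 2 -> 1
    j.expire_record(&["foo".to_string()], &["foo".to_string()], &canon);   // 1b: counter 1 -> 0
    assert!(j.backing.contains("foo")); // still protected by canon_inserts
}
```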
@@ -125,6 +135,7 @@ impl JournalDB {
 			let mut r = RlpStream::new_list(3);
 			let inserts: Vec<H256> = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect();
+			// Increase counter for each inserted key no matter if the block is canonical or not.
 			for i in &inserts {
 				*counters.entry(i.clone()).or_insert(0) += 1;
 			}
@@ -139,6 +150,8 @@ impl JournalDB {
 		if let Some((end_era, canon_id)) = end {
 			let mut index = 0usize;
 			let mut last;
+			let mut to_remove: Vec<H256> = Vec::new();
+			let mut canon_inserts: Vec<H256> = Vec::new();
 			while let Some(rlp_data) = try!(self.backing.get({
 				let mut r = RlpStream::new_list(2);
 				r.append(&end_era);
@@ -146,39 +159,33 @@
 				last = r.drain();
 				&last
 			})) {
-				let to_add;
 				let rlp = Rlp::new(&rlp_data);
-				{
-					to_add = rlp.val_at(1);
-					for i in &to_add {
-						let delete_counter = {
-							if let Some(mut cnt) = counters.get_mut(i) {
-								*cnt -= 1;
-								*cnt == 0
-							}
-							else { false }
-						};
-						if delete_counter {
-							counters.remove(i);
-						}
-					}
-				}
-				let to_remove: Vec<H256> = if canon_id == rlp.val_at(0) {rlp.val_at(2)} else {to_add};
-				for i in &to_remove {
-					if !counters.contains_key(i) {
-						batch.delete(&i).expect("Low-level database error. Some issue with your hard disk?");
-					}
-				}
+				let inserts: Vec<H256> = rlp.val_at(1);
+				JournalDB::decrease_counters(&inserts, &mut counters);
+				// Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
+				if canon_id == rlp.val_at(0) {
+					to_remove.extend(rlp.at(2).iter().map(|r| r.as_val::<H256>()));
+					canon_inserts = inserts;
+				}
+				else {
+					to_remove.extend(inserts);
+				}
 				try!(batch.delete(&last));
-				trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len());
 				index += 1;
 			}
+
+			let canon_inserts = canon_inserts.drain(..).collect::<HashSet<_>>();
+			// Purge removed keys if they are not referenced and not re-inserted in the canon commit
+			let mut deletes = 0;
+			for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
+				try!(batch.delete(&h));
+				deletes += 1;
+			}
 			try!(batch.put(&LAST_ERA_KEY, &encode(&end_era)));
+			trace!("JournalDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, deletes);
 		}
 
+		// Commit overlay insertions
 		let mut ret = 0u32;
 		let mut deletes = 0usize;
 		for i in self.overlay.drain().into_iter() {
@@ -200,6 +207,21 @@ impl JournalDB {
 		Ok(ret)
 	}
 
+	// Decrease counters for given keys. Deletes obsolete counters.
+	fn decrease_counters(keys: &[H256], counters: &mut HashMap<H256, i32>) {
+		for i in keys.iter() {
+			let delete_counter = {
+				let cnt = counters.get_mut(i).expect("Missing key counter");
+				*cnt -= 1;
+				*cnt == 0
+			};
+			if delete_counter {
+				counters.remove(i);
+			}
+		}
+	}
+
 	fn payload(&self, key: &H256) -> Option<Bytes> {
 		self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
 	}
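Note that `decrease_counters` panics via `expect("Missing key counter")` where the old inline loop silently tolerated absent entries: every key it is handed was counted when its era record was journalled, so a missing counter would indicate broken bookkeeping rather than a recoverable state.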
@@ -387,4 +409,21 @@ mod tests {
 		jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
 		assert!(jdb.exists(&foo));
 	}
+
+	#[test]
+	fn fork_same_key() {
+		// history is 1
+		let mut jdb = JournalDB::new_temp();
+		jdb.commit(0, &b"0".sha3(), None).unwrap();
+
+		let foo = jdb.insert(b"foo");
+		jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
+
+		jdb.insert(b"foo");
+		jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
+		assert!(jdb.exists(&foo));
+
+		jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
+		assert!(jdb.exists(&foo));
+	}
 }