Merge branch 'master' into merge_ethminer

Conflicts:
	parity/main.rs
Tomasz Drwięga 2016-03-13 20:42:33 +01:00
commit 54ff64c0e0
5 changed files with 324 additions and 36 deletions

View File

@ -83,7 +83,7 @@ Protocol Options:
--testnet Equivalent to --chain testnet (geth-compatible).
--networkid INDEX Override the network identifier from the chain we are on.
--pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive,
-light (experimental), fast (experimental) [default: archive].
+basic (experimental), light (experimental), fast (experimental) [default: archive].
-d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity]
--db-path PATH Specify the database & configuration directory path [default: $HOME/.parity]
--keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
@ -398,10 +398,10 @@ impl Configuration {
}
}
client_config.pruning = match self.args.flag_pruning.as_str() {
"archive" => journaldb::Algorithm::Archive,
"" | "archive" => journaldb::Algorithm::Archive,
"pruned" => journaldb::Algorithm::EarlyMerge,
"fast" => journaldb::Algorithm::OverlayRecent,
// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged.
"slow" => journaldb::Algorithm::RefCounted,
_ => { die!("Invalid pruning method given."); }
};
client_config.name = self.args.flag_identity.clone();
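A note on the wiring above: the help text now advertises `archive`, `basic`, `light` and `fast`, while the match arms accept `archive`, `pruned`, `fast` and `slow`, so the two lists do not yet agree at this point in the branch. Below is a minimal sketch of the mapping as the match arms actually implement it; the standalone `pruning_algorithm` helper is hypothetical, only illustrating the flag-to-algorithm translation with `die!` swapped for a plain `Result`:

    // Hypothetical helper mirroring the match above: translate the --pruning
    // string into a journaldb::Algorithm, treating an empty flag as the
    // archive default and rejecting anything unrecognised.
    fn pruning_algorithm(flag: &str) -> Result<journaldb::Algorithm, String> {
        match flag {
            "" | "archive" => Ok(journaldb::Algorithm::Archive),
            "pruned" => Ok(journaldb::Algorithm::EarlyMerge),
            "fast" => Ok(journaldb::Algorithm::OverlayRecent),
            "slow" => Ok(journaldb::Algorithm::RefCounted),
            other => Err(format!("Invalid pruning method given: {}", other)),
        }
    }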

View File

@ -35,6 +35,7 @@ use std::env;
pub struct ArchiveDB {
overlay: MemoryDB,
backing: Arc<Database>,
+latest_era: Option<u64>,
}
// all keys must be at least 12 bytes
@ -60,9 +61,11 @@ impl ArchiveDB {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
+let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
ArchiveDB {
overlay: MemoryDB::new(),
backing: Arc::new(backing),
+latest_era: latest_era,
}
}
@ -129,6 +132,7 @@ impl JournalDB for ArchiveDB {
Box::new(ArchiveDB {
overlay: MemoryDB::new(),
backing: self.backing.clone(),
+latest_era: None,
})
}
@ -137,7 +141,7 @@ impl JournalDB for ArchiveDB {
}
fn is_empty(&self) -> bool {
-self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
+self.latest_era.is_none()
}
fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result<u32, UtilError> {
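The new `latest_era` field caches the decoded `LATEST_ERA_KEY` value at open time, so `is_empty()` becomes an in-memory check instead of a backing-store read on every call. One consequence visible in the hunks above is that a `spawn()`ed copy starts with `latest_era: None` and reports itself empty until its first commit. A standalone sketch of the caching pattern, with deliberately simplified types (this is not the actual ArchiveDB):

    // Sketch only: cache the era marker once at open, answer is_empty()
    // from the field, and keep the cache coherent on commit.
    struct EraCache {
        latest_era: Option<u64>,
    }

    impl EraCache {
        fn open(stored: Option<u64>) -> EraCache {
            // `stored` stands in for decode::<u64> of LATEST_ERA_KEY from disk
            EraCache { latest_era: stored }
        }
        fn is_empty(&self) -> bool {
            self.latest_era.is_none()
        }
        fn on_commit(&mut self, now: u64) {
            // mirror what commit() writes back to LATEST_ERA_KEY
            if self.latest_era.map_or(true, |e| now > e) {
                self.latest_era = Some(now);
            }
        }
    }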

View File

@ -23,6 +23,7 @@ pub mod traits;
mod archivedb;
mod earlymergedb;
mod overlayrecentdb;
+mod refcounteddb;
/// Export the JournalDB trait.
pub use self::traits::JournalDB;
@ -75,6 +76,6 @@ pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)),
Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)),
-_ => unimplemented!(),
+Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path)),
}
}
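With `refcounteddb` registered as a module, the factory covers all four `Algorithm` variants and the catch-all `unimplemented!()` arm can go. A hedged usage sketch (the path is a placeholder; in parity it comes from the configured database directory):

    // Illustrative only: build a ref-counted journal DB through the factory.
    let db: Box<JournalDB> = journaldb::new("/tmp/state-db", Algorithm::RefCounted);
    assert!(db.is_empty()); // fresh path, nothing committed yet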

View File

@ -0,0 +1,275 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed, ref-counted JournalDB implementation.
use common::*;
use rlp::*;
use hashdb::*;
use overlaydb::*;
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather, some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
pub struct RefCountedDB {
forward: OverlayDB,
backing: Arc<Database>,
latest_era: Option<u64>,
inserts: Vec<H256>,
removes: Vec<H256>,
}
const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
const DB_VERSION : u32 = 512;
impl RefCountedDB {
/// Create a new instance given a `backing` database.
pub fn new(path: &str) -> RefCountedDB {
let opts = DatabaseConfig {
prefix_size: Some(12) // use 12 bytes as prefix; this must match the account_db prefix
};
let backing = Database::open(&opts, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let backing = Arc::new(backing);
let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::<u64>(&val));
RefCountedDB {
forward: OverlayDB::new_with_arc(backing.clone()),
backing: backing,
inserts: vec![],
removes: vec![],
latest_era: latest_era,
}
}
/// Create a new instance with an anonymous temporary database.
#[cfg(test)]
fn new_temp() -> RefCountedDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap())
}
}
impl HashDB for RefCountedDB {
fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() }
fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) }
fn exists(&self, key: &H256) -> bool { self.forward.exists(key) }
fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r }
fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); }
fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); }
}
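Every `HashDB` call is forwarded to the inner `OverlayDB`, but `insert()`/`emplace()` additionally journal the touched key into `inserts`, and `kill()` only journals into `removes`; the actual reference-count decrement is deferred until a later `commit()` finalizes the era. That deferral is what gives the database its latent-removal semantics. A short sketch of the observable behaviour (`new_temp()` is the test-only constructor above):

    // Sketch: removals are latent. After kill(), the value stays readable
    // until the journal entry for its era is finalized by a later commit().
    let mut jdb = RefCountedDB::new_temp();
    let key = jdb.insert(b"value");
    jdb.kill(&key);            // journaled in `removes`, not yet applied
    assert!(jdb.exists(&key)); // still served by the forward OverlayDB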
impl JournalDB for RefCountedDB {
fn spawn(&self) -> Box<JournalDB> {
Box::new(RefCountedDB {
forward: self.forward.clone(),
backing: self.backing.clone(),
latest_era: self.latest_era,
inserts: self.inserts.clone(),
removes: self.removes.clone(),
})
}
fn mem_used(&self) -> usize {
self.inserts.heap_size_of_children() + self.removes.heap_size_of_children()
}
fn is_empty(&self) -> bool {
self.latest_era.is_none()
}
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, n] => [ ... ]
// TODO: store last_era, reclaim_period.
// when we make a new commit, we journal the inserts and removes.
// for each end_era that we journaled and are now passing by,
// we apply all of its removes if it was canonical, and
// unwind all of its inserts otherwise.
// record new commit's details.
let batch = DBTransaction::new();
{
let mut index = 0usize;
let mut last;
while try!(self.backing.get({
let mut r = RlpStream::new_list(2);
r.append(&now);
r.append(&index);
last = r.drain();
&last
})).is_some() {
index += 1;
}
let mut r = RlpStream::new_list(3);
r.append(id);
r.append(&self.inserts);
r.append(&self.removes);
try!(self.backing.put(&last, r.as_raw()));
self.inserts.clear();
self.removes.clear();
if self.latest_era.map_or(true, |e| now > e) {
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
self.latest_era = Some(now);
}
}
// apply old commits' details
if let Some((end_era, canon_id)) = end {
let mut index = 0usize;
let mut last;
while let Some(rlp_data) = try!(self.backing.get({
let mut r = RlpStream::new_list(2);
r.append(&end_era);
r.append(&index);
last = r.drain();
&last
})) {
let rlp = Rlp::new(&rlp_data);
let to_remove: Vec<H256> = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1});
for i in &to_remove {
self.forward.remove(i);
}
try!(self.backing.delete(&last));
trace!("RefCountedDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len());
index += 1;
}
}
let r = try!(self.forward.commit_to_batch(&batch));
try!(self.backing.write(batch));
Ok(r)
}
}
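`commit()` does two jobs: it journals the pending inserts/removes under an RLP-encoded `[era, index]` key, scanning `index` upwards so that several blocks can share one era (forks), and it replays the journal for the era being finalized, applying the removes of the canonical block while unwinding the inserts of its non-canonical siblings. A worked timeline with a history of 1, as a hedged illustration mirroring the tests below:

    let mut jdb = RefCountedDB::new_temp();
    let h = jdb.insert(b"foo");
    jdb.commit(0, &b"0".sha3(), None).unwrap();  // journal era 0: inserts [foo]
    jdb.remove(&h);
    jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
    // era 1 journals removes [foo]; finalizing era 0 (canonical, no removes)
    // purges nothing, so foo is still readable here.
    assert!(jdb.exists(&h));
    jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
    // finalizing era 1 applies its journaled remove: foo's refcount hits zero.
    assert!(!jdb.exists(&h));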
#[cfg(test)]
mod tests {
use common::*;
use super::*;
use super::super::traits::JournalDB;
use hashdb::*;
#[test]
fn long_history() {
// history is 3
let mut jdb = RefCountedDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(!jdb.exists(&h));
}
#[test]
fn complex() {
// history is 1
let mut jdb = RefCountedDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(jdb.exists(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(!jdb.exists(&baz));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(!jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(!jdb.exists(&baz));
}
#[test]
fn fork() {
// history is 1
let mut jdb = RefCountedDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&baz));
assert!(!jdb.exists(&bar));
}
}
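The `fork` test is worth tracing through `commit()`'s replay loop, since era 1 holds two journal entries:

    // Finalizing (1, "1b") walks era 1's journal entries in index order:
    //   "1a" (non-canonical): unwind its inserts -> baz is deleted
    //   "1b" (canonical):     apply its removes  -> bar is deleted
    // "1a"'s journaled removal of foo is dropped entirely, so foo survives.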

View File

@ -26,7 +26,7 @@ use std::ops::*;
use std::sync::*;
use std::env;
use std::collections::HashMap;
-use kvdb::{Database};
+use kvdb::{Database, DBTransaction};
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
///
@ -36,7 +36,7 @@ use kvdb::{Database};
///
/// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()`
/// queries have an immediate effect in terms of these functions.
-//#[derive(Clone)]
+#[derive(Clone)]
pub struct OverlayDB {
overlay: MemoryDB,
backing: Arc<Database>,
@ -58,6 +58,36 @@ impl OverlayDB {
Self::new(Database::open_default(dir.to_str().unwrap()).unwrap())
}
+/// Commit all operations to given batch.
+pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result<u32, UtilError> {
+let mut ret = 0u32;
+let mut deletes = 0usize;
+for i in self.overlay.drain().into_iter() {
+let (key, (value, rc)) = i;
+if rc != 0 {
+match self.payload(&key) {
+Some(x) => {
+let (back_value, back_rc) = x;
+let total_rc: i32 = back_rc as i32 + rc;
+if total_rc < 0 {
+return Err(From::from(BaseDataError::NegativelyReferencedHash));
+}
+deletes += if self.put_payload(batch, &key, (back_value, total_rc as u32)) {1} else {0};
+}
+None => {
+if rc < 0 {
+return Err(From::from(BaseDataError::NegativelyReferencedHash));
+}
+self.put_payload(batch, &key, (value, rc as u32));
+}
+};
+ret += 1;
+}
+}
+trace!("OverlayDB::commit() deleted {} nodes", deletes);
+Ok(ret)
+}
/// Commit all memory operations to the backing database.
///
/// Returns either an error or the number of items changed in the backing database.
@ -86,32 +116,10 @@ impl OverlayDB {
/// }
/// ```
pub fn commit(&mut self) -> Result<u32, UtilError> {
-let mut ret = 0u32;
-let mut deletes = 0usize;
-for i in self.overlay.drain().into_iter() {
-let (key, (value, rc)) = i;
-if rc != 0 {
-match self.payload(&key) {
-Some(x) => {
-let (back_value, back_rc) = x;
-let total_rc: i32 = back_rc as i32 + rc;
-if total_rc < 0 {
-return Err(From::from(BaseDataError::NegativelyReferencedHash));
-}
-deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0};
-}
-None => {
-if rc < 0 {
-return Err(From::from(BaseDataError::NegativelyReferencedHash));
-}
-self.put_payload(&key, (value, rc as u32));
-}
-};
-ret += 1;
-}
-}
-trace!("OverlayDB::commit() deleted {} nodes", deletes);
-Ok(ret)
+let batch = DBTransaction::new();
+let r = try!(self.commit_to_batch(&batch));
+try!(self.backing.write(batch));
+Ok(r)
}
/// Revert all operations on this object (i.e. `insert()`s and `kill()`s) since the
@ -148,15 +156,15 @@ impl OverlayDB {
}
/// Put the refs and value of the given key, possibly deleting it from the db.
-fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
+fn put_payload(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool {
if payload.1 > 0 {
let mut s = RlpStream::new_list(2);
s.append(&payload.1);
s.append(&payload.0);
-self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
+batch.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
false
} else {
-self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?");
+batch.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?");
true
}
}
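Factoring `commit()` into `commit_to_batch()` lets OverlayDB's payload writes share a `DBTransaction` with keys the caller stages itself, so a whole state transition reaches disk in one atomic write; `commit()` now simply wraps `commit_to_batch()` around a private batch. A hedged sketch of the combined use, echoing what `RefCountedDB::commit` does above (`overlay`, `backing` and `now` are assumed to be in scope):

    // Sketch: one transaction carrying both the overlay's staged payload
    // changes and a caller-owned marker write, committed atomically.
    let batch = DBTransaction::new();
    let _changed = try!(overlay.commit_to_batch(&batch)); // stage ref-counted payloads
    try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));      // stage the caller's own key
    try!(backing.write(batch));                           // single write() => atomic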