First JournalDB implementation.

This commit is contained in:
Gav Wood 2016-01-18 12:41:31 +01:00
parent d09a5c8b19
commit 193d615f9a
4 changed files with 139 additions and 7 deletions


@@ -22,6 +22,7 @@ pub enum UtilError {
BaseData(BaseDataError),
Network(NetworkError),
Decoder(DecoderError),
SimpleString(String),
BadSize,
}
@@ -73,6 +74,12 @@ impl From<::rlp::DecoderError> for UtilError {
}
}
impl From<String> for UtilError {
fn from(err: String) -> UtilError {
UtilError::SimpleString(err)
}
}
// TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted.
/*#![feature(concat_idents)]
macro_rules! assimilate {
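
As a quick illustration of the new `From<String>` conversion (the helper functions below are hypothetical, not part of this commit), any `Result<_, String>` can now be propagated with `try!` inside a function returning `Result<_, UtilError>`:

fn parse_size(s: &str) -> Result<usize, String> {
    s.parse::<usize>().map_err(|_| format!("bad size: {}", s))
}

fn checked_size(s: &str) -> Result<usize, UtilError> {
    // try! converts the String error via the new UtilError::SimpleString variant.
    let n = try!(parse_size(s));
    if n == 0 { return Err(UtilError::BadSize); }
    Ok(n)
}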

util/src/journaldb.rs Normal file

@@ -0,0 +1,123 @@
//! Disk-backed HashDB implementation.
use std::env;
use common::*;
use rlp::*;
use hashdb::*;
use overlaydb::*;
use rocksdb::{DB, Writable};
#[derive(Clone)]
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather, some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
pub struct JournalDB {
forward: OverlayDB,
backing: Arc<DB>,
inserts: Vec<H256>,
removes: Vec<H256>,
}
impl JournalDB {
/// Create a new instance given a `backing` database.
pub fn new(backing: DB) -> JournalDB {
let db = Arc::new(backing);
// TODO: check it doesn't overwrite anything before.
// TODO: proper handling of errors (return )
JournalDB {
forward: OverlayDB::new_with_arc(db.clone()),
backing: db,
inserts: vec![],
removes: vec![],
}
}
/// Create a new instance with an anonymous temporary database.
pub fn new_temp() -> JournalDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(DB::open_default(dir.to_str().unwrap()).unwrap())
}
/// Get a clone of the overlay db portion of this.
pub fn to_overlaydb(&self) -> OverlayDB { self.forward.clone() }
/// Commit all recent insert operations and historical removals from the old era
/// to the backing database.
pub fn commit(&mut self, now: u64, id: &H256, end_era: u64, canon_id: &H256) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, n] => [ ... ]
// TODO: store last_era, reclaim_period.
// when we make a new commit, we journal the inserts and removes.
// for each end_era that we journaled that we are now passing by,
// we remove all of its removes assuming it is canonical and all
// of its inserts otherwise.
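// as an illustration (era and ids arbitrary): committing era 5 with id `h` after
// inserting A and removing B journals [5, 0] => [ h, [A], [B] ]. when era 5 is later
// passed as end_era, B's removal is applied if `h` is the canonical id; otherwise
// A's insert is rolled back.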
// record new commit's details.
{
let mut index = 0usize;
let mut last;
while try!(self.backing.get({
let mut r = RlpStream::new_list(2);
r.append(&now);
r.append(&index);
last = r.out();
&last
})).is_some() {
index += 1;
}
let mut r = RlpStream::new_list(3);
r.append(id);
r.append(&self.inserts);
r.append(&self.removes);
try!(self.backing.put(&last, &r.out()));
}
// apply old commits' details
let mut index = 0usize;
let mut last;
while let Some(rlp_data) = try!(self.backing.get({
let mut r = RlpStream::new_list(2);
r.append(&end_era);
r.append(&index);
last = r.out();
&last
})) {
let rlp = Rlp::new(&rlp_data);
let to_remove: Vec<H256> = rlp.val_at(if *canon_id == rlp.val_at(0) {2} else {1});
for i in to_remove.iter() {
self.forward.remove(i);
}
try!(self.backing.delete(&last));
index += 1;
}
self.inserts.clear();
self.removes.clear();
self.forward.commit()
}
/// Revert all operations on this object (i.e. `insert()`s and `remove()`s) since the
/// last `commit()`.
pub fn revert(&mut self) { self.forward.revert(); self.inserts.clear(); self.removes.clear(); }
}
impl HashDB for JournalDB {
fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() }
fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) }
fn exists(&self, key: &H256) -> bool { self.forward.exists(key) }
fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r }
fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); }
fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); }
}
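
A minimal usage sketch of the latent-removal flow (the era numbers and ids below are arbitrary placeholders, and the final assertion assumes the backing OverlayDB treats a zero reference count as removed):

let mut db = JournalDB::new_temp();

// Era 1: insert a node; commit journals the insert under id_1 and flushes the overlay.
// No era-0 journal entries exist, so the canonical id passed here is just a placeholder.
let id_1 = H256::random();
let key = db.insert(b"node data");
db.commit(1, &id_1, 0, &H256::random()).unwrap();

// Era 2: kill() only journals the removal; the node is still readable.
let id_2 = H256::random();
db.kill(&key);
db.commit(2, &id_2, 1, &id_1).unwrap();
assert!(db.exists(&key));

// Era 3: era 2 is now the end era and id_2 is canonical, so the journaled
// removal is finally applied to the backing database.
db.commit(3, &H256::random(), 2, &id_2).unwrap();
assert!(!db.exists(&key));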


@@ -69,6 +69,7 @@ pub mod sha3;
pub mod hashdb;
pub mod memorydb;
pub mod overlaydb;
pub mod journaldb;
pub mod math;
pub mod chainfilter;
pub mod crypto;


@@ -15,11 +15,11 @@ use rocksdb::{DB, Writable, IteratorMode};
#[derive(Clone)]
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay.
///
/// The operations `insert()` and `kill()` take place on the memory overlay; batches of
/// The operations `insert()` and `remove()` take place on the memory overlay; batches of
/// such operations may be flushed to the disk-backed DB with `commit()` or discarded with
/// `revert()`.
///
/// `lookup()` and `exists()` maintain normal behaviour - all `insert()` and `kill()`
/// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()`
/// queries have an immediate effect in terms of these functions.
pub struct OverlayDB {
overlay: MemoryDB,
@@ -28,8 +28,11 @@ pub struct OverlayDB {
impl OverlayDB {
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new(backing: DB) -> OverlayDB {
OverlayDB{ overlay: MemoryDB::new(), backing: Arc::new(backing) }
pub fn new(backing: DB) -> OverlayDB { Self::new_with_arc(Arc::new(backing)) }
/// Create a new instance of OverlayDB given a `backing` database.
pub fn new_with_arc(backing: Arc<DB>) -> OverlayDB {
OverlayDB{ overlay: MemoryDB::new(), backing: backing }
}
/// Create a new instance of OverlayDB with an anonymous temporary database.
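
A small sketch (the path is arbitrary) of what the new `new_with_arc` constructor enables: several consumers can share one rocksdb handle, which is exactly what `JournalDB::new` relies on for its forward OverlayDB:

let backing = Arc::new(DB::open_default("/tmp/overlaydb-example").unwrap());
// The overlay keeps a clone of the Arc, so the caller can hand the same
// handle to other components (as JournalDB does internally).
let overlay = OverlayDB::new_with_arc(backing.clone());
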
@@ -70,9 +73,7 @@ impl OverlayDB {
let mut ret = 0u32;
for i in self.overlay.drain().into_iter() {
let (key, (value, rc)) = i;
// until we figure out state trie pruning, only commit stuff when it has a strictly positive delta of RCs -
// this prevents RCs being reduced to 0 where the DB would pretend that the node had been removed.
if rc > 0 {
if rc != 0 {
match self.payload(&key) {
Some(x) => {
let (back_value, back_rc) = x;