Merge pull request #705 from ethcore/refcounteddb

Fixes and traces for refcountdb.
Gav Wood 2016-03-13 23:07:58 +01:00
commit eb651b6462
5 changed files with 81 additions and 27 deletions

View File

@@ -398,10 +398,10 @@ impl Configuration {
 			}
 		}
 		client_config.pruning = match self.args.flag_pruning.as_str() {
-			"" | "archive" => journaldb::Algorithm::Archive,
-			"pruned" => journaldb::Algorithm::EarlyMerge,
+			"archive" => journaldb::Algorithm::Archive,
+			"light" => journaldb::Algorithm::EarlyMerge,
 			"fast" => journaldb::Algorithm::OverlayRecent,
-			"slow" => journaldb::Algorithm::RefCounted,
+			"basic" => journaldb::Algorithm::RefCounted,
			_ => { die!("Invalid pruning method given."); }
 		};
 		client_config.name = self.args.flag_identity.clone();
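Note: the accepted --pruning values change here ("pruned" becomes "light", "slow" becomes "basic", and the empty string no longer aliases "archive"). A minimal standalone sketch of the new mapping; the Algorithm enum and parse_pruning helper below are illustrative stand-ins, not the actual parity types.

// Illustrative stand-in for journaldb::Algorithm; the real enum lives in parity's util crate.
#[derive(Debug, PartialEq)]
enum Algorithm { Archive, EarlyMerge, OverlayRecent, RefCounted }

// Hypothetical helper mirroring the renamed CLI values.
fn parse_pruning(flag: &str) -> Result<Algorithm, String> {
    match flag {
        "archive" => Ok(Algorithm::Archive),
        "light" => Ok(Algorithm::EarlyMerge),
        "fast" => Ok(Algorithm::OverlayRecent),
        "basic" => Ok(Algorithm::RefCounted),
        other => Err(format!("Invalid pruning method given: {}", other)),
    }
}

fn main() {
    assert_eq!(parse_pruning("basic").unwrap(), Algorithm::RefCounted);
    assert!(parse_pruning("slow").is_err()); // old name no longer recognised
    println!("pruning flags parse as expected");
}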

View File

@@ -21,12 +21,13 @@ use network::NetworkError;
 use rlp::DecoderError;
 use io;
 use std::fmt;
+use hash::H256;

 #[derive(Debug)]
 /// Error in database subsystem.
 pub enum BaseDataError {
 	/// An entry was removed more times than inserted.
-	NegativelyReferencedHash,
+	NegativelyReferencedHash(H256),
 }

 #[derive(Debug)]
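Carrying the offending hash inside the variant means a failure can name the exact key whose reference count went negative, rather than only reporting that some key did. A minimal standalone sketch of the idea, with a plain 32-byte array standing in for parity's H256:

use std::fmt;

// Stand-in for H256 so the example compiles on its own.
type Hash = [u8; 32];

#[derive(Debug)]
enum BaseDataError {
    // An entry was removed more times than inserted; the payload says which one.
    NegativelyReferencedHash(Hash),
}

impl fmt::Display for BaseDataError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            BaseDataError::NegativelyReferencedHash(h) => {
                write!(f, "negatively referenced hash: {:02x?}...", &h[..4])
            }
        }
    }
}

fn main() {
    let err = BaseDataError::NegativelyReferencedHash([0xab; 32]);
    println!("{}", err); // names the key that went negative, not just the failure kind
}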

View File

@@ -132,7 +132,7 @@ impl JournalDB for ArchiveDB {
 		Box::new(ArchiveDB {
 			overlay: MemoryDB::new(),
 			backing: self.backing.clone(),
-			latest_era: None,
+			latest_era: self.latest_era,
 		})
 	}
@@ -144,7 +144,7 @@ impl JournalDB for ArchiveDB {
 		self.latest_era.is_none()
 	}

-	fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result<u32, UtilError> {
+	fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result<u32, UtilError> {
 		let batch = DBTransaction::new();
 		let mut inserts = 0usize;
 		let mut deletes = 0usize;
@@ -160,6 +160,10 @@ impl JournalDB for ArchiveDB {
 				deletes += 1;
 			}
 		}
+		if self.latest_era.map_or(true, |e| now > e) {
+			try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
+			self.latest_era = Some(now);
+		}
 		try!(self.backing.write(batch));
 		Ok((inserts + deletes) as u32)
 	}
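commit now persists the latest era whenever the era being committed moves forward, and boxed_clone copies latest_era instead of resetting it to None. A minimal standalone sketch of just that bookkeeping rule; the ArchiveEra struct is an illustrative stand-in, not parity's ArchiveDB:

// Illustrative stand-in tracking only the era bookkeeping from ArchiveDB::commit.
#[derive(Clone)]
struct ArchiveEra {
    latest_era: Option<u64>,
}

impl ArchiveEra {
    // Mirrors `if self.latest_era.map_or(true, |e| now > e) { ... }`:
    // record the era when none is stored yet or when `now` moves past it.
    fn note_commit(&mut self, now: u64) -> bool {
        if self.latest_era.map_or(true, |e| now > e) {
            self.latest_era = Some(now);
            return true; // in the real code this is where LATEST_ERA_KEY goes into the batch
        }
        false
    }
}

fn main() {
    let mut db = ArchiveEra { latest_era: None };
    assert!(db.note_commit(1));      // first commit records era 1
    assert!(!db.note_commit(1));     // same era again: nothing to persist
    assert!(db.note_commit(2));      // later era: persisted once more
    let copy = db.clone();           // clones keep latest_era instead of resetting it
    assert_eq!(copy.latest_era, Some(2));
    println!("era tracking behaves as expected");
}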

View File

@@ -43,6 +43,7 @@ pub struct RefCountedDB {
 const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
 const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
 const DB_VERSION : u32 = 512;
+const PADDING : [u8; 10] = [ 0u8; 10 ];

 impl RefCountedDB {
 	/// Create a new instance given a `backing` database.
@@ -131,9 +132,10 @@ impl JournalDB for RefCountedDB {
 			let mut last;

 			while try!(self.backing.get({
-				let mut r = RlpStream::new_list(2);
+				let mut r = RlpStream::new_list(3);
 				r.append(&now);
 				r.append(&index);
+				r.append(&&PADDING[..]);
 				last = r.drain();
 				&last
 			})).is_some() {
@@ -144,7 +146,10 @@ impl JournalDB for RefCountedDB {
 			r.append(id);
 			r.append(&self.inserts);
 			r.append(&self.removes);
-			try!(self.backing.put(&last, r.as_raw()));
+			try!(batch.put(&last, r.as_raw()));
+
+			trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes);
+
 			self.inserts.clear();
 			self.removes.clear();
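The journal record now goes through batch.put rather than straight to self.backing, so it lands in the same atomic write as the rest of the commit. A minimal standalone sketch of that batching pattern; the WriteBatch and Store types are stand-ins for DBTransaction and the backing key-value store:

use std::collections::HashMap;

// Stand-ins for DBTransaction and the backing store.
#[derive(Default)]
struct WriteBatch { puts: Vec<(Vec<u8>, Vec<u8>)>, deletes: Vec<Vec<u8>> }

#[derive(Default)]
struct Store { data: HashMap<Vec<u8>, Vec<u8>> }

impl Store {
    // Apply every queued operation in one step, as backing.write(batch) does.
    fn write(&mut self, batch: WriteBatch) {
        for (k, v) in batch.puts { self.data.insert(k, v); }
        for k in batch.deletes { self.data.remove(&k); }
    }
}

fn main() {
    let mut store = Store::default();
    let mut batch = WriteBatch::default();
    // Queue the journal record alongside the rest of the commit...
    batch.puts.push((b"journal/5.0".to_vec(), b"record".to_vec()));
    batch.puts.push((b"node/abc".to_vec(), b"value".to_vec()));
    // ...nothing is visible until the whole batch is written.
    assert!(store.data.is_empty());
    store.write(batch);
    assert_eq!(store.data.len(), 2);
    println!("journal record and node data land in one write");
}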
@@ -158,20 +163,25 @@ impl JournalDB for RefCountedDB {
 		if let Some((end_era, canon_id)) = end {
 			let mut index = 0usize;
 			let mut last;
-			while let Some(rlp_data) = try!(self.backing.get({
-				let mut r = RlpStream::new_list(2);
-				r.append(&end_era);
-				r.append(&index);
-				last = r.drain();
-				&last
-			})) {
+			while let Some(rlp_data) = {
+				// trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index);
+				try!(self.backing.get({
+					let mut r = RlpStream::new_list(3);
+					r.append(&end_era);
+					r.append(&index);
+					r.append(&&PADDING[..]);
+					last = r.drain();
+					&last
+				}))
+			} {
 				let rlp = Rlp::new(&rlp_data);
-				let to_remove: Vec<H256> = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1});
+				let our_id: H256 = rlp.val_at(0);
+				let to_remove: Vec<H256> = rlp.val_at(if canon_id == our_id {2} else {1});
+				trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, index, our_id, canon_id, to_remove);
 				for i in &to_remove {
 					self.forward.remove(i);
 				}
-				try!(self.backing.delete(&last));
-				trace!("RefCountedDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len());
+				try!(batch.delete(&last));
 				index += 1;
 			}
 		}
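The pruning pass enumerates every journal record for end_era by bumping index until a key lookup misses, then deletes the record's removes list if it belonged to the canonical block and its inserts list otherwise. A minimal standalone sketch of that walk, with a HashMap journal and plain tuples standing in for the RLP-encoded records:

use std::collections::HashMap;

// (block id, inserted keys, removed keys) stands in for the RLP journal record.
type Record = (u64, Vec<&'static str>, Vec<&'static str>);

// Walk journal entries for `end_era` and collect the keys that should lose a reference,
// mirroring `rlp.val_at(if canon_id == our_id {2} else {1})`.
fn keys_to_unreference(journal: &HashMap<(u64, usize), Record>, end_era: u64, canon_id: u64) -> Vec<&'static str> {
    let mut out = Vec::new();
    let mut index = 0usize;
    while let Some((our_id, inserts, removes)) = journal.get(&(end_era, index)) {
        let doomed = if *our_id == canon_id { removes } else { inserts };
        out.extend(doomed.iter().copied());
        index += 1; // same era, next journal slot
    }
    out
}

fn main() {
    let mut journal = HashMap::new();
    journal.insert((7, 0), (100, vec!["a", "b"], vec!["x"])); // canonical block 100
    journal.insert((7, 1), (200, vec!["c"], vec!["y"]));      // non-canonical sibling
    let doomed = keys_to_unreference(&journal, 7, 100);
    // canonical record: drop its removes ("x"); non-canonical: drop its inserts ("c").
    assert_eq!(doomed, vec!["x", "c"]);
    println!("pruning picks removes for canon, inserts otherwise");
}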

View File

@@ -70,15 +70,15 @@ impl OverlayDB {
 						let (back_value, back_rc) = x;
 						let total_rc: i32 = back_rc as i32 + rc;
 						if total_rc < 0 {
-							return Err(From::from(BaseDataError::NegativelyReferencedHash));
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 						}
-						deletes += if self.put_payload(batch, &key, (back_value, total_rc as u32)) {1} else {0};
+						deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0};
 					}
 					None => {
 						if rc < 0 {
-							return Err(From::from(BaseDataError::NegativelyReferencedHash));
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
 						}
-						self.put_payload(batch, &key, (value, rc as u32));
+						self.put_payload_in_batch(batch, &key, (value, rc as u32));
 					}
 				};
 				ret += 1;
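Both commit paths apply the same reference-count merge: the overlay's signed delta is added to the count already in the backing store, a negative total is an error (now reported with the offending key), zero deletes the payload, and anything positive is written back. A minimal standalone sketch of that rule; the names below are illustrative, not the OverlayDB API:

// Outcome of merging an overlay delta into the backing store's refcount.
#[derive(Debug, PartialEq)]
enum Merge {
    Keep(u32),      // write the payload back with this many references
    Delete,         // refcount hit zero: remove the payload
    NegativeRef,    // removed more times than inserted: report an error
}

// back_rc: references already in the backing DB (None if the key is absent there).
// delta:   net inserts minus kills accumulated in the overlay.
fn merge_refs(back_rc: Option<u32>, delta: i32) -> Merge {
    let total: i32 = back_rc.unwrap_or(0) as i32 + delta;
    if total < 0 {
        Merge::NegativeRef
    } else if total == 0 {
        Merge::Delete
    } else {
        Merge::Keep(total as u32)
    }
}

fn main() {
    assert_eq!(merge_refs(Some(2), -1), Merge::Keep(1));
    assert_eq!(merge_refs(Some(1), -1), Merge::Delete);
    assert_eq!(merge_refs(None, -1), Merge::NegativeRef); // surfaces as NegativelyReferencedHash(key)
    assert_eq!(merge_refs(None, 3), Merge::Keep(3));
    println!("negative is an error, zero deletes, positive keeps");
}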
@@ -116,10 +116,32 @@ impl OverlayDB {
 	/// }
 	/// ```
 	pub fn commit(&mut self) -> Result<u32, UtilError> {
-		let batch = DBTransaction::new();
-		let r = try!(self.commit_to_batch(&batch));
-		try!(self.backing.write(batch));
-		Ok(r)
+		let mut ret = 0u32;
+		let mut deletes = 0usize;
+		for i in self.overlay.drain().into_iter() {
+			let (key, (value, rc)) = i;
+			if rc != 0 {
+				match self.payload(&key) {
+					Some(x) => {
+						let (back_value, back_rc) = x;
+						let total_rc: i32 = back_rc as i32 + rc;
+						if total_rc < 0 {
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+						}
+						deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0};
+					}
+					None => {
+						if rc < 0 {
+							return Err(From::from(BaseDataError::NegativelyReferencedHash(key)));
+						}
+						self.put_payload(&key, (value, rc as u32));
+					}
+				};
+				ret += 1;
+			}
+		}
+		trace!("OverlayDB::commit() deleted {} nodes", deletes);
+		Ok(ret)
 	}

 	/// Revert all operations on this object (i.e. `insert()`s and `kill()`s) since the
@@ -145,6 +167,9 @@ impl OverlayDB {
 	/// ```
 	pub fn revert(&mut self) { self.overlay.clear(); }

+	/// Get the number of references that would be committed.
+	pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) }
+
 	/// Get the refs and value of the given key.
 	fn payload(&self, key: &H256) -> Option<(Bytes, u32)> {
 		self.backing.get(&key.bytes())
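The new commit_refs helper reads the signed reference delta still sitting in the overlay for a key, without committing anything. A minimal standalone sketch of the idea, with a HashMap of pending deltas standing in for MemoryDB's overlay:

use std::collections::HashMap;

// Pending (uncommitted) reference deltas, keyed by a simplified stand-in for H256.
#[derive(Default)]
struct Overlay {
    pending: HashMap<&'static str, i32>,
}

impl Overlay {
    fn insert(&mut self, key: &'static str) { *self.pending.entry(key).or_insert(0) += 1; }
    fn kill(&mut self, key: &'static str) { *self.pending.entry(key).or_insert(0) -= 1; }
    // Analogue of commit_refs: how many references this key would gain (or lose) on commit.
    fn commit_refs(&self, key: &'static str) -> i32 { self.pending.get(key).copied().unwrap_or(0) }
}

fn main() {
    let mut overlay = Overlay::default();
    overlay.insert("node-a");
    overlay.insert("node-a");
    overlay.kill("node-a");
    assert_eq!(overlay.commit_refs("node-a"), 1); // net +1 pending
    assert_eq!(overlay.commit_refs("node-b"), 0); // untouched keys report zero
    println!("pending reference deltas queried without committing");
}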
@@ -156,7 +181,7 @@ impl OverlayDB {
 	}

 	/// Put the refs and value of the given key, possibly deleting it from the db.
-	fn put_payload(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool {
+	fn put_payload_in_batch(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool {
 		if payload.1 > 0 {
 			let mut s = RlpStream::new_list(2);
 			s.append(&payload.1);
@@ -168,6 +193,20 @@ impl OverlayDB {
 			true
 		}
 	}
+
+	/// Put the refs and value of the given key, possibly deleting it from the db.
+	fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool {
+		if payload.1 > 0 {
+			let mut s = RlpStream::new_list(2);
+			s.append(&payload.1);
+			s.append(&payload.0);
+			self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?");
+			false
+		} else {
+			self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?");
+			true
+		}
+	}
 }

 impl HashDB for OverlayDB {
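put_payload (and its batch twin put_payload_in_batch) store the count together with the value in the two-item record written via RlpStream::new_list(2), treat a zero count as a deletion, and return true when the key was removed so the caller can tally deletes. A minimal standalone sketch of that rule over a HashMap backing store; the names are illustrative, not the OverlayDB API:

use std::collections::HashMap;

// Stand-in backing store mapping a key to (refcount, value).
#[derive(Default)]
struct Backing {
    data: HashMap<&'static str, (u32, Vec<u8>)>,
}

impl Backing {
    // Write the payload while it still has references, delete it otherwise;
    // return true when a deletion happened (the caller counts these).
    fn put_payload(&mut self, key: &'static str, payload: (Vec<u8>, u32)) -> bool {
        if payload.1 > 0 {
            self.data.insert(key, (payload.1, payload.0));
            false
        } else {
            self.data.remove(key);
            true
        }
    }
}

fn main() {
    let mut backing = Backing::default();
    assert!(!backing.put_payload("node-a", (b"value".to_vec(), 2))); // stored with rc = 2
    assert!(backing.put_payload("node-a", (b"value".to_vec(), 0)));  // rc hit zero: deleted
    assert!(backing.data.is_empty());
    println!("zero-refcount payloads are deleted, and reported as such");
}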