arkpar 2016-02-05 01:49:06 +01:00
parent 048c27237a
commit 75ebac36a3
8 changed files with 30 additions and 113 deletions


@@ -2,113 +2,7 @@
use util::*;
use pod_account::*;
pub struct AccountDB<'db> {
db: &'db HashDB,
address: H256,
}
impl<'db> AccountDB<'db> {
pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> {
AccountDB {
db: db,
address: x!(address.clone()),
}
}
#[inline]
fn key(&self, k: &H256) -> H256 {
k.clone() ^ self.address.clone()
}
}
impl<'db> HashDB for AccountDB<'db>{
fn keys(&self) -> HashMap<H256, i32> {
unimplemented!()
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
if key == &SHA3_NULL_RLP {
return self.db.lookup(key);
}
self.db.lookup(&self.key(key))
}
fn exists(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
self.db.exists(&self.key(key))
}
fn insert(&mut self, _value: &[u8]) -> H256 {
unimplemented!()
}
fn emplace(&mut self, _key: H256, _value: Bytes) {
unimplemented!()
}
fn kill(&mut self, _key: &H256) {
unimplemented!()
}
}
pub struct AccountDBMut<'db> {
db: &'db mut HashDB,
address: H256,
}
impl<'db> AccountDBMut<'db> {
pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> {
AccountDBMut {
db: db,
address: x!(address.clone()),
}
}
#[allow(dead_code)]
pub fn immutable(&'db self) -> AccountDB<'db> {
AccountDB { db: self.db, address: self.address.clone() }
}
#[inline]
fn key(&self, k: &H256) -> H256 {
k.clone() ^ self.address.clone()
}
}
impl<'db> HashDB for AccountDBMut<'db>{
fn keys(&self) -> HashMap<H256, i32> {
unimplemented!()
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
if key == &SHA3_NULL_RLP {
return self.db.lookup(key);
}
self.db.lookup(&self.key(key))
}
fn exists(&self, key: &H256) -> bool {
if key == &SHA3_NULL_RLP {
return true;
}
self.db.exists(&self.key(key))
}
fn insert(&mut self, value: &[u8]) -> H256 {
let k = value.sha3();
let ak = self.key(&k);
self.db.emplace(ak, value.to_vec());
k
}
fn emplace(&mut self, key: H256, value: Bytes) {
let key = self.key(&key);
self.db.emplace(key, value.to_vec())
}
fn kill(&mut self, key: &H256) {
let key = self.key(&key);
self.db.kill(&key)
}
}
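The wrapper above is being lifted out of this file (the new "mod account_db;" and "use account_db::*;" lines elsewhere in the commit suggest it now lives in its own module). Its trick is to XOR every trie-node key with the account's address hash, so many accounts can share one backing HashDB without their nodes colliding, while the empty-trie hash SHA3_NULL_RLP is passed through untouched. A minimal, self-contained sketch of the same keying idea over a plain HashMap (the 4-byte Key type and the names are illustrative, not the project's API):

use std::collections::HashMap;

// Toy stand-ins for H256 and HashDB, only to show the keying scheme;
// these are not the project's types.
type Key = [u8; 4];

fn account_key(node_key: &Key, address_hash: &Key) -> Key {
    // Same idea as k ^ address: fold the account's address hash into every
    // node key so accounts sharing one backing DB can never collide.
    let mut out = [0u8; 4];
    for i in 0..4 {
        out[i] = node_key[i] ^ address_hash[i];
    }
    out
}

fn main() {
    let mut backing: HashMap<Key, Vec<u8>> = HashMap::new();

    let address_a = [0xaa; 4];
    let address_b = [0xbb; 4];
    let node_key = [0x01, 0x02, 0x03, 0x04];

    // The same logical node key lands in two distinct slots of the shared DB.
    backing.insert(account_key(&node_key, &address_a), b"node of account A".to_vec());
    backing.insert(account_key(&node_key, &address_b), b"node of account B".to_vec());

    // Reads apply the same mapping, so each account only ever sees its own data.
    assert_eq!(backing[&account_key(&node_key, &address_a)], b"node of account A".to_vec());
    assert_eq!(backing[&account_key(&node_key, &address_b)], b"node of account B".to_vec());
}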
use account_db::*;
/// Single account in the system.
#[derive(Clone)]
@@ -340,6 +234,7 @@ mod tests {
use util::*;
use super::*;
use account_db::*;
#[test]
fn storage_at() {


@@ -318,8 +318,10 @@ impl IsBlock for SealedBlock {
/// Enact the block given by block header, transactions and uncles
pub fn enact<'x, 'y>(header: &Header, transactions: &[Transaction], uncles: &[Header], engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: &'y LastHashes) -> Result<ClosedBlock<'x, 'y>, Error> {
{
//let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
//trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author()));
if ::log::max_log_level() >= ::log::LogLevel::Trace {
let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author()));
}
}
let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), header.extra_data().clone());
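The change above replaces the commented-out trace with one gated on the runtime log level, so the throwaway State (built only to print the root and author balance) is constructed solely when trace logging is actually enabled. A small sketch of the same pattern, assuming the log 0.3-era API that the diff itself calls; expensive_summary is a hypothetical stand-in for the costly work:

#[macro_use]
extern crate log;

// Stand-in for the costly work (here: building a throwaway State just to
// report its root and balances) that should only run for trace output.
fn expensive_summary() -> String {
    "root=..., author=..., author_balance=...".to_string()
}

fn enact_step() {
    // Gate the expensive call on the runtime log level, as in the diff above,
    // so runs at info/warn level never pay for it.
    if log::max_log_level() >= log::LogLevel::Trace {
        trace!("enact(): {}", expensive_summary());
    }
}

fn main() {
    enact_step();
}

The log crate's log_enabled! macro can express the same guard in a single call.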


@@ -180,7 +180,8 @@ impl Client {
let engine = Arc::new(try!(spec.to_engine()));
let mut state_db = JournalDB::new_with_arc(db.clone());
if engine.spec().ensure_db_good(&mut state_db) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) {
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
}
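With the new condition, the genesis state is only seeded and committed when the state database has never seen a commit, so reopening an existing database no longer rewrites era 0. A toy sketch of that guard; ToyStateDb and its methods are illustrative stand-ins, not the project's JournalDB:

// Toy stand-in for the JournalDB-backed state database.
struct ToyStateDb {
    committed_eras: Vec<u64>,
}

impl ToyStateDb {
    // Mirrors the new is_empty: "has anything ever been committed?"
    fn is_empty(&self) -> bool {
        self.committed_eras.is_empty()
    }
    fn commit(&mut self, era: u64) {
        self.committed_eras.push(era);
    }
}

fn ensure_genesis(db: &mut ToyStateDb) {
    // Only a brand-new database gets the genesis state written at era 0;
    // an already-populated one is left untouched.
    if db.is_empty() {
        // ...seed genesis accounts here...
        db.commit(0);
    }
}

fn main() {
    let mut db = ToyStateDb { committed_eras: vec![] };
    ensure_genesis(&mut db); // first open: commits era 0
    ensure_genesis(&mut db); // reopen: database is not empty, nothing happens
    assert_eq!(db.committed_eras, vec![0]);
}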
Ok(Arc::new(Client {
chain: chain,


@@ -96,6 +96,7 @@ mod state_diff;
mod engine;
mod state;
mod account;
mod account_db;
mod action_params;
mod transaction;
mod receipt;


@@ -1,5 +1,6 @@
use util::*;
use account::*;
use account_db::*;
#[derive(Debug,Clone,PartialEq,Eq)]
/// An account, expressed as Plain-Old-Data (hence the name).


@@ -4,6 +4,7 @@ use common::*;
use engine::*;
use pod_state::*;
use null_engine::*;
use account_db::*;
/// Convert JSON value to equivalent RLP representation.
// TODO: handle container types.


@@ -1,6 +1,7 @@
use common::*;
use engine::Engine;
use executive::Executive;
use account_db::*;
#[cfg(test)]
#[cfg(feature = "json-tests")]
use pod_account::*;


@@ -4,7 +4,7 @@ use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use rocksdb::{DB, Writable, IteratorMode, WriteBatch};
use rocksdb::{DB, Writable, WriteBatch, IteratorMode};
#[cfg(test)]
use std::env;
@@ -32,6 +32,9 @@ impl Clone for JournalDB {
}
const LAST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ];
const VERSION_KEY : [u8; 4] = [ b'j', b'v', b'e', b'r' ];
const DB_VERSION: u32 = 1;
impl JournalDB {
/// Create a new instance given a `backing` database.
@@ -42,6 +45,14 @@ impl JournalDB {
/// Create a new instance given a shared `backing` database.
pub fn new_with_arc(backing: Arc<DB>) -> JournalDB {
if backing.iterator(IteratorMode::Start).next().is_some() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
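The block above stamps a fresh database with a version number and refuses to open a non-empty database whose stored version does not decode to DB_VERSION. A self-contained sketch of the same open-time check, with a HashMap standing in for RocksDB and plain little-endian bytes standing in for the RLP encoding used here; the names mirror the diff but the storage details are simplified:

use std::collections::HashMap;

const VERSION_KEY: &[u8] = b"jver";
const DB_VERSION: u32 = 1;

// Toy key-value store standing in for the RocksDB backing database.
fn open(backing: &mut HashMap<Vec<u8>, Vec<u8>>) {
    if !backing.is_empty() {
        // Existing database: refuse anything written with an incompatible
        // (or missing) version stamp.
        match backing.get(VERSION_KEY).map(|v| {
            let mut bytes = [0u8; 4];
            bytes.copy_from_slice(v);
            u32::from_le_bytes(bytes)
        }) {
            Some(DB_VERSION) => {}
            v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v),
        }
    } else {
        // Fresh database: write the current version up front.
        backing.insert(VERSION_KEY.to_vec(), DB_VERSION.to_le_bytes().to_vec());
    }
}

fn main() {
    let mut db = HashMap::new();
    open(&mut db); // stamps version 1
    open(&mut db); // passes the check on reopen
}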
let counters = JournalDB::read_counters(&backing);
JournalDB {
overlay: MemoryDB::new(),
@@ -58,6 +69,10 @@ impl JournalDB {
Self::new(DB::open_default(dir.to_str().unwrap()).unwrap())
}
/// Check if this database has any commits
pub fn is_empty(&self) -> bool {
self.backing.get(&LAST_ERA_KEY).expect("Low level database error").is_none()
}
/// Commit all recent insert operations and historical removals from the old era
/// to the backing database.
@@ -68,7 +83,7 @@ impl JournalDB {
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, n] => [ ... ]
// TODO: store last_era, reclaim_period.
// TODO: store reclaim_period.
// when we make a new commit, we journal the inserts and removes.
// for each end_era that we journaled that we are no passing by,
@@ -197,7 +212,7 @@ impl JournalDB {
era += 1;
}
}
info!("Recovered {} counters", res.len());
trace!("Recovered {} counters", res.len());
res
}
}
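For reference, the commit() comment earlier in this file describes the journal as rows keyed by era and index, each holding an id plus the era's inserts and removes. A minimal in-memory sketch of that record shape, with plain Rust collections in place of the RLP-encoded RocksDB rows; the field names are illustrative:

use std::collections::HashMap;

type Hash = [u8; 4]; // toy stand-in for H256

#[derive(Debug)]
struct JournalRecord {
    id: Hash,           // block hash the changes were journalled under
    inserts: Vec<Hash>, // keys written during the era
    removes: Vec<Hash>, // keys scheduled for deletion once the era expires
}

fn main() {
    // Keyed by (era, index) so several competing blocks can journal the same
    // era, mirroring the [era, 0], [era, 1], ... rows sketched in the comment.
    let mut journal: HashMap<(u64, u32), JournalRecord> = HashMap::new();

    journal.insert(
        (7, 0),
        JournalRecord {
            id: [0xab; 4],
            inserts: vec![[0x01; 4], [0x02; 4]],
            removes: vec![[0x03; 4]],
        },
    );

    // Once era 7 drops out of the history window, its journalled inserts and
    // removes are reconciled against the backing database and the rows deleted.
    println!("{:?}", journal.get(&(7, 0)));
}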