Merge branch 'blockchaintests' of github.com:ethcore/parity into rlp

arkpar 2016-01-25 18:57:05 +01:00
commit 1dbae06a83
10 changed files with 133 additions and 64 deletions

View File

@@ -21,6 +21,11 @@ pub struct BlockQueueInfo {
pub verified_queue_size: usize,
}
impl BlockQueueInfo {
/// The total size of the queues.
pub fn total_queue_size(&self) -> usize { self.unverified_queue_size + self.verified_queue_size }
}
/// A queue of blocks. Sits between network or other I/O and the BlockChain.
/// Sorts them ready for blockchain insertion.
pub struct BlockQueue {
@@ -99,6 +104,7 @@ impl BlockQueue {
fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>) {
while !deleting.load(AtomicOrdering::Relaxed) {
{
let mut lock = verification.lock().unwrap();
while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Relaxed) {
@@ -139,6 +145,7 @@ impl BlockQueue {
},
Err(err) => {
let mut v = verification.lock().unwrap();
flushln!("Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
v.bad.insert(block_hash.clone());
v.verifying.retain(|e| e.hash != block_hash);

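The `verify` worker above parks on a condition variable until unverified blocks arrive or the queue is being torn down (the `deleting` flag). A minimal, self-contained sketch of that wait-and-drain pattern, using plain standard-library types rather than the real `BlockQueue` internals:

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

// Stand-in for an unverified block; the real queue holds header/body data.
type Unverified = u64;

fn worker(queue: Arc<Mutex<VecDeque<Unverified>>>, wait: Arc<Condvar>, deleting: Arc<AtomicBool>) {
	// Same shape as BlockQueue::verify: loop until shutdown is requested,
	// sleeping on the condvar while there is nothing to verify.
	while !deleting.load(Ordering::Relaxed) {
		let next = {
			let mut lock = queue.lock().unwrap();
			while lock.is_empty() && !deleting.load(Ordering::Relaxed) {
				lock = wait.wait(lock).unwrap();
			}
			if deleting.load(Ordering::Relaxed) {
				return;
			}
			lock.pop_front()
		};
		if let Some(block) = next {
			// Stage 2 verification of `block` would happen here.
			let _ = block;
		}
	}
}

fn main() {
	let queue = Arc::new(Mutex::new(VecDeque::new()));
	let wait = Arc::new(Condvar::new());
	let deleting = Arc::new(AtomicBool::new(false));
	let handle = {
		let (q, w, d) = (queue.clone(), wait.clone(), deleting.clone());
		thread::spawn(move || worker(q, w, d))
	};
	queue.lock().unwrap().push_back(1);
	wait.notify_one();
	// Shutdown: set the flag first, then wake any sleeping worker.
	deleting.store(true, Ordering::Relaxed);
	wait.notify_all();
	handle.join().unwrap();
}
```

The shutdown path mirrors the original: the flag is set before the notify, so a worker re-checks `deleting` as soon as it wakes and exits cleanly.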
View File

@@ -283,13 +283,6 @@ impl BlockChain {
bc
}
/// Ensure that the best block does indeed have a state_root in the state DB.
/// If it doesn't, then rewind down until we find one that does and delete data to ensure that
/// later blocks will be reimported.
pub fn ensure_good(&mut self, _state: &JournalDB) {
unimplemented!();
}
/// Returns a tree route between `from` and `to`, which is a tuple of:
///
/// - a vector of hashes of all blocks, ordered from `from` to `to`.
@@ -392,7 +385,6 @@ impl BlockChain {
}
}
/// Inserts the block into backing cache database.
/// Expects the block to be valid and already verified.
/// If the block is already known, does nothing.

View File

@@ -1,3 +1,5 @@
use std::thread;
use std::time;
use util::*;
use rocksdb::{Options, DB};
use blockchain::{BlockChain, BlockProvider, CacheSize};
@@ -121,6 +123,7 @@ impl ClientReport {
}
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
chain: Arc<RwLock<BlockChain>>,
engine: Arc<Box<Engine>>,
@@ -140,7 +143,8 @@ impl Client {
let mut opts = Options::new();
opts.set_max_open_files(256);
opts.create_if_missing(true);
/*opts.set_use_fsync(false);
opts.set_use_fsync(false);
/*
opts.set_bytes_per_sync(8388608);
opts.set_disable_data_sync(false);
opts.set_block_cache_size_mb(1024);
@@ -177,16 +181,22 @@ impl Client {
}))
}
/// Flush the block import queue.
pub fn flush_queue(&self) {
flushln!("Flushing queue {:?}", self.block_queue.read().unwrap().queue_info());
while self.block_queue.read().unwrap().queue_info().unverified_queue_size > 0 {
thread::sleep(time::Duration::from_millis(20));
flushln!("Flushing queue [waited] {:?}", self.block_queue.read().unwrap().queue_info());
}
}
/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) {
let mut bad = HashSet::new();
let _import_lock = self.import_lock.lock();
let blocks = self.block_queue.write().unwrap().drain(128);
if blocks.is_empty() {
return;
}
for block in blocks {
for block in self.block_queue.write().unwrap().drain(128) {
flushln!("Importing block...");
if bad.contains(&block.header.parent_hash) {
self.block_queue.write().unwrap().mark_as_bad(&block.header.hash());
bad.insert(block.header.hash());

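Taken together, `import_block`, `flush_queue` and `import_verified_blocks` form the import pipeline that the new chain test later in this commit exercises. A hedged sketch of that call sequence, assuming a `Spec`, a scratch directory and raw block RLP are already prepared (module paths are approximate):

```rust
use util::*;
use client::{BlockChainClient, Client};
use spec::Spec;

// `spec`, `dir` and `blocks` are assumed to be prepared by the caller,
// much as the new src/tests/chain.rs below prepares them.
fn import_all(spec: Spec, dir: &Path, blocks: Vec<Bytes>) {
	let client = Client::new(spec, dir, IoChannel::disconnected()).unwrap();
	for b in blocks {
		// Queue the raw block RLP for asynchronous verification.
		client.import_block(b).unwrap();
	}
	// Block until the verifier threads have emptied the unverified queue.
	client.flush_queue();
	// Insert whatever has been verified into the chain.
	client.import_verified_blocks(&IoChannel::disconnected());
	println!("best block: {}", client.chain_info().best_block_hash);
}
```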
View File

@@ -1,17 +1,25 @@
use util::*;
use pod_account::*;
#[derive(Debug,Clone,PartialEq,Eq)]
#[derive(Debug,Clone,PartialEq,Eq,Default)]
/// TODO [Gav Wood] Please document me
pub struct PodState (BTreeMap<Address, PodAccount>);
impl PodState {
/// Construct a new object from the map `m`.
pub fn new(m: BTreeMap<Address, PodAccount>) -> PodState { PodState(m) }
pub fn new() -> PodState { Default::default() }
/// Construct a new object from the map `m`.
pub fn from(m: BTreeMap<Address, PodAccount>) -> PodState { PodState(m) }
/// Get the underlying map.
pub fn get(&self) -> &BTreeMap<Address, PodAccount> { &self.0 }
/// Get the root hash of the secure trie built from the RLP of these accounts.
pub fn root(&self) -> H256 {
sec_trie_root(self.0.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect())
}
/// Drain object to get the underlying map.
pub fn drain(self) -> BTreeMap<Address, PodAccount> { self.0 }
}

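The reshaped `PodState` surface splits construction into `new()` (empty, via `Default`) and `from(map)`, with `get()`, `root()` and `drain()` for access. A small usage sketch, borrowing the `x!`/`map!` helpers and the `PodAccount::new(balance, nonce, code, storage)` argument order used by the state-diff tests further down:

```rust
use util::*;
use pod_state::PodState;
use pod_account::PodAccount;

fn pod_state_demo() {
	// Empty state via Default.
	let empty = PodState::new();
	assert_eq!(empty.get().len(), 0);

	// One account: PodAccount::new(balance, nonce, code, storage).
	let addr: Address = x!(1);
	let state = PodState::from(map![ addr.clone() => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);

	// Root of the secure trie built over each account's RLP.
	let root: H256 = state.root();

	// `drain` consumes the wrapper and returns the underlying BTreeMap.
	let accounts = state.drain();
	assert!(accounts.contains_key(&addr));
	let _ = root;
}
```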
View File

@@ -1,6 +1,7 @@
use common::*;
use flate2::read::GzDecoder;
use engine::*;
use pod_state::*;
use null_engine::*;
/// Converts file from base64 gzipped bytes to json
@@ -40,28 +41,6 @@ fn json_to_rlp_map(json: &Json) -> HashMap<String, Bytes> {
})
}
//TODO: add code and data
#[derive(Debug)]
/// Genesis account data. Does not have a DB overlay cache.
pub struct GenesisAccount {
// Balance of the account.
balance: U256,
// Nonce of the account.
nonce: U256,
}
impl GenesisAccount {
/// TODO [arkpar] Please document me
pub fn rlp(&self) -> Bytes {
let mut stream = RlpStream::new_list(4);
stream.append(&self.nonce);
stream.append(&self.balance);
stream.append(&SHA3_NULL_RLP);
stream.append(&SHA3_EMPTY);
stream.out()
}
}
/// Parameters for a block chain; includes both those intrinsic to the design of the
/// chain and those to be interpreted by the active chain engine.
#[derive(Debug)]
@@ -83,7 +62,7 @@ pub struct Spec {
// Builtin-contracts are here for now but would like to abstract into Engine API eventually.
/// TODO [Gav Wood] Please document me
pub builtins: HashMap<Address, Builtin>,
pub builtins: BTreeMap<Address, Builtin>,
// Genesis params.
/// TODO [Gav Wood] Please document me
@@ -101,7 +80,7 @@ pub struct Spec {
/// TODO [arkpar] Please document me
pub extra_data: Bytes,
/// TODO [Gav Wood] Please document me
pub genesis_state: HashMap<Address, GenesisAccount>,
pub genesis_state: PodState,
/// TODO [Gav Wood] Please document me
pub seal_fields: usize,
/// TODO [Gav Wood] Please document me
@@ -126,7 +105,7 @@ impl Spec {
/// Return the state root for the genesis state, memoising accordingly.
pub fn state_root(&self) -> H256 {
if self.state_root_memo.read().unwrap().is_none() {
*self.state_root_memo.write().unwrap() = Some(sec_trie_root(self.genesis_state.iter().map(|(k, v)| (k.to_vec(), v.rlp())).collect()));
*self.state_root_memo.write().unwrap() = Some(self.genesis_state.root());
}
self.state_root_memo.read().unwrap().as_ref().unwrap().clone()
}
@@ -174,6 +153,35 @@ impl Spec {
ret.append_raw(&empty_list, 1);
ret.out()
}
/// Overwrite the genesis components with the given JSON, assuming standard Ethereum test format.
pub fn overwrite_genesis(&mut self, genesis: &Json) {
let (seal_fields, seal_rlp) = {
if genesis.find("mixHash").is_some() && genesis.find("nonce").is_some() {
let mut s = RlpStream::new();
s.append(&H256::from_json(&genesis["mixHash"]));
s.append(&H64::from_json(&genesis["nonce"]));
(2, s.out())
} else {
// backup algo that will work with sealFields/sealRlp (and without).
(
u64::from_json(&genesis["sealFields"]) as usize,
Bytes::from_json(&genesis["sealRlp"])
)
}
};
self.parent_hash = H256::from_json(&genesis["parentHash"]);
self.author = Address::from_json(&genesis["coinbase"]);
self.difficulty = U256::from_json(&genesis["difficulty"]);
self.gas_limit = U256::from_json(&genesis["gasLimit"]);
self.gas_used = U256::from_json(&genesis["gasUsed"]);
self.timestamp = u64::from_json(&genesis["timestamp"]);
self.extra_data = Bytes::from_json(&genesis["extraData"]);
self.seal_fields = seal_fields;
self.seal_rlp = seal_rlp;
self.state_root_memo = RwLock::new(genesis.find("stateRoot").and_then(|_| Some(H256::from_json(&genesis["stateRoot"]))));
}
}
impl FromJson for Spec {
@@ -181,8 +189,8 @@ impl FromJson for Spec {
fn from_json(json: &Json) -> Spec {
// once we commit ourselves to some json parsing library (serde?)
// move it to proper data structure
let mut state = HashMap::new();
let mut builtins = HashMap::new();
let mut builtins = BTreeMap::new();
let mut state = PodState::new();
if let Some(&Json::Object(ref accounts)) = json.find("accounts") {
for (address, acc) in accounts.iter() {
@@ -192,15 +200,8 @@ impl FromJson for Spec {
builtins.insert(addr.clone(), builtin);
}
}
let balance = acc.find("balance").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None });
let nonce = acc.find("nonce").and_then(|x| match *x { Json::String(ref b) => U256::from_dec_str(b).ok(), _ => None });
// let balance = if let Some(&Json::String(ref b)) = acc.find("balance") {U256::from_dec_str(b).unwrap_or(U256::from(0))} else {U256::from(0)};
// let nonce = if let Some(&Json::String(ref n)) = acc.find("nonce") {U256::from_dec_str(n).unwrap_or(U256::from(0))} else {U256::from(0)};
// TODO: handle code & data if they exist.
if balance.is_some() || nonce.is_some() {
state.insert(addr, GenesisAccount { balance: balance.unwrap_or_else(U256::zero), nonce: nonce.unwrap_or_else(U256::zero) });
}
}
state = xjson!(&json["accounts"]);
}
let nodes = if let Some(&Json::Array(ref ns)) = json.find("nodes") {
@@ -253,7 +254,7 @@ impl Spec {
let mut root = H256::new();
{
let mut t = SecTrieDBMut::new(db, &mut root);
for (address, account) in &self.genesis_state {
for (address, account) in self.genesis_state.get().iter() {
t.insert(address.as_slice(), &account.rlp());
}
}

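`overwrite_genesis` above reads header fields straight out of a `genesisBlockHeader` object in the standard Ethereum test format, which is how the new chain test feeds it (`test.find("genesisBlockHeader")`). A hedged sketch of calling it with a minimal hand-written header; the key names come from the code above, the values are placeholders, and the `Json` import path is an assumption:

```rust
use rustc_serialize::json::Json;
use ethereum;

fn genesis_overwrite_demo() {
	// Minimal genesisBlockHeader-shaped JSON; only the key names matter here,
	// the values are placeholders in the hex-string form the test format uses.
	let genesis = Json::from_str(r#"{
		"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
		"coinbase":   "0x0000000000000000000000000000000000000000",
		"difficulty": "0x020000",
		"gasLimit":   "0x2fefd8",
		"gasUsed":    "0x00",
		"timestamp":  "0x54c98c81",
		"extraData":  "0x42",
		"mixHash":    "0x0000000000000000000000000000000000000000000000000000000000000000",
		"nonce":      "0x0102030405060708"
	}"#).unwrap();

	let mut spec = ethereum::new_frontier_like_test();
	// mixHash and nonce are present, so the seal is encoded as two RLP fields;
	// otherwise the sealFields/sealRlp fallback path would be taken.
	spec.overwrite_genesis(&genesis);
	assert_eq!(spec.seal_fields, 2);
}
```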
View File

@@ -221,7 +221,7 @@ impl State {
/// Populate a PodAccount map from this state.
pub fn to_pod(&self) -> PodState {
// TODO: handle database rather than just the cache.
PodState::new(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
PodState::from(self.cache.borrow().iter().fold(BTreeMap::new(), |mut m, (add, opt)| {
if let Some(ref acc) = *opt {
m.insert(add.clone(), PodAccount::from_account(acc));
}

View File

@@ -32,8 +32,8 @@ mod test {
#[test]
fn create_delete() {
let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
assert_eq!(StateDiff::diff_pod(&a, &PodState::new(map![])), StateDiff(map![
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
assert_eq!(StateDiff::diff_pod(&a, &PodState::new()), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Died(x!(69)),
nonce: Diff::Died(x!(0)),
@@ -41,7 +41,7 @@ mod test {
storage: map![],
}
]));
assert_eq!(StateDiff::diff_pod(&PodState::new(map![]), &a), StateDiff(map![
assert_eq!(StateDiff::diff_pod(&PodState::new(), &a), StateDiff(map![
x!(1) => AccountDiff{
balance: Diff::Born(x!(69)),
nonce: Diff::Born(x!(0)),
@@ -53,8 +53,8 @@ mod test {
#[test]
fn create_delete_with_unchanged() {
let a = PodState::new(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let b = PodState::new(map![
let a = PodState::from(map![ x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]) ]);
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);
@@ -78,11 +78,11 @@ mod test {
#[test]
fn change_with_unchanged() {
let a = PodState::new(map![
let a = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(0), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);
let b = PodState::new(map![
let b = PodState::from(map![
x!(1) => PodAccount::new(x!(69), x!(1), vec![], map![]),
x!(2) => PodAccount::new(x!(69), x!(0), vec![], map![])
]);

src/tests/chain.rs (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
use std::env;
use super::test_common::*;
use client::{BlockChainClient,Client};
use pod_state::*;
use ethereum;
fn do_json_test(json_data: &[u8]) -> Vec<String> {
let json = Json::from_str(::std::str::from_utf8(json_data).unwrap()).expect("Json is invalid");
let mut failed = Vec::new();
for (name, test) in json.as_object().unwrap() {
let mut fail = false;
{
let mut fail_unless = |cond: bool| if !cond && !fail {
failed.push(name.clone());
flush(format!("FAIL\n"));
fail = true;
true
} else {false};
flush(format!(" - {}...", name));
let blocks: Vec<Bytes> = test["blocks"].as_array().unwrap().iter().map(|e| xjson!(&e["rlp"])).collect();
let mut spec = ethereum::new_frontier_like_test();
spec.overwrite_genesis(test.find("genesisBlockHeader").unwrap());
spec.genesis_state = PodState::from_json(test.find("pre").unwrap());
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
{
let client = Client::new(spec, &dir, IoChannel::disconnected()).unwrap();
blocks.into_iter().foreach(|b| {
client.import_block(b).unwrap();
});
client.flush_queue();
client.import_verified_blocks(&IoChannel::disconnected());
flushln!("Best hash: {}", client.chain_info().best_block_hash);
}
fs::remove_dir_all(&dir).unwrap();
}
if !fail {
flush(format!("ok\n"));
}
}
println!("!!! {:?} tests from failed.", failed.len());
failed
}
declare_test!{BlockchainTests_bcStateTest, "BlockchainTests/bcStateTest"}

View File

@@ -4,3 +4,4 @@ mod test_common;
mod transaction;
mod executive;
mod state;
mod chain;

View File

@@ -1,13 +1,14 @@
pub use std::io;
pub use std::fs;
pub use std::str;
pub use std::fmt;
pub use std::slice;
pub use std::cmp;
pub use std::ptr;
pub use std::result;
pub use std::option;
pub use std::mem;
pub use std::ops;
pub use std::slice;
pub use std::result;
pub use std::option;
pub use std::path::Path;
pub use std::str::{FromStr};
@@ -15,9 +16,9 @@ pub use std::io::{Read,Write};
pub use std::hash::{Hash, Hasher};
pub use std::error::Error as StdError;
pub use std::sync::*;
pub use std::ops::*;
pub use std::cmp::*;
pub use std::sync::*;
pub use std::cell::*;
pub use std::collections::*;