Merge branch 'master' of github.com:ethcore/parity into jdb_option2

arkpar committed on 2016-03-11 22:43:59 +01:00
commit 896ba57555
68 changed files with 2671 additions and 1042 deletions

Cargo.lock (generated)

@@ -219,7 +219,6 @@ dependencies = [
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -243,7 +242,6 @@ dependencies = [
  "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -299,7 +297,6 @@ dependencies = [
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]


@@ -27,20 +27,17 @@ ethcore-devtools = { path = "devtools" }
 ethcore-rpc = { path = "rpc", optional = true }
 rpassword = "0.1"
-[dev-dependencies]
-ethcore = { path = "ethcore", features = ["dev"] }
-ethcore-util = { path = "util", features = ["dev"] }
-ethsync = { path = "sync", features = ["dev"] }
-ethcore-rpc = { path = "rpc", features = ["dev"] }
 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
-dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
-dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
 travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"]
+travis-nightly = ["ethcore/json-tests", "dev"]
 [[bin]]
 path = "parity/main.rs"
 name = "parity"
+[profile.release]
+debug = false
+lto = false


@@ -34,9 +34,6 @@ Then, download and build Parity:
 git clone https://github.com/ethcore/parity
 cd parity
-# parity should be built with rust beta
-multirust override beta
 # build in release mode
 cargo build --release
 ```


@@ -1,2 +0,0 @@
-#!/bin/sh
-cargo "$@" --features dev-clippy


@@ -5,10 +5,6 @@ license = "GPL-3.0"
 name = "ethcore"
 version = "0.9.99"
 authors = ["Ethcore <admin@ethcore.io>"]
-build = "build.rs"
-[build-dependencies]
-rustc_version = "0.1"
 [dependencies]
 log = "0.3"
@@ -31,5 +27,5 @@ jit = ["evmjit"]
 evm-debug = []
 json-tests = []
 test-heavy = []
-dev = []
+dev = ["clippy"]
 default = []


@@ -24,7 +24,7 @@ pub type LogBloom = H2048;
 /// Constant 2048-bit datum for 0. Often used as a default.
 pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
-#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
 /// Semantic boolean for when a seal/signature is included.
 pub enum Seal {
     /// The seal/signature is included.
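The pattern repeated across these files: the old `all(nightly, feature="dev")` gate needed a build script (the `build.rs` plus `rustc_version` dependency deleted above) to inject a `nightly` cfg flag, while the new form is a plain Cargo feature test, since clippy is now pulled in by the `dev` feature itself. A minimal, self-contained sketch of such a feature-gated lint attribute (the function and lint name are illustrative, not taken from the diff):

```rust
// Compiles with or without the feature; the allow() is only attached
// when building with `cargo build --features dev`.
#[cfg_attr(feature = "dev", allow(needless_range_loop))]
fn sum(xs: &[u32]) -> u32 {
    let mut total = 0;
    for i in 0..xs.len() {
        total += xs[i];
    }
    total
}

fn main() {
    assert_eq!(sum(&[1, 2, 3]), 6);
}
```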


@@ -16,7 +16,7 @@
 //! Blockchain block.
-#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
+#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
 use common::*;
 use engine::*;
@@ -171,7 +171,7 @@ pub struct SealedBlock {
 impl<'x> OpenBlock<'x> {
     /// Create a new OpenBlock ready for transaction pushing.
-    pub fn new(engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
+    pub fn new(engine: &'x Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
         let mut r = OpenBlock {
             block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())),
             engine: engine,
@@ -317,7 +317,7 @@ impl ClosedBlock {
     }
     /// Drop this object and return the underlying database.
-    pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
+    pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 impl SealedBlock {
@@ -331,7 +331,7 @@ impl SealedBlock {
     }
     /// Drop this object and return the underlying database.
-    pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
+    pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 impl IsBlock for SealedBlock {
@@ -339,10 +339,10 @@ impl IsBlock for SealedBlock {
 }
 /// Enact the block given by block header, transactions and uncles
-pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
     {
         if ::log::max_log_level() >= ::log::LogLevel::Trace {
-            let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
+            let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce());
             trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author()));
         }
     }
@@ -357,20 +357,20 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head
 }
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
-pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
     let block = BlockView::new(block_bytes);
     let header = block.header();
     enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes)
 }
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
-pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
     let view = BlockView::new(&block.bytes);
     enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes)
 }
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block afterwards
-pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
+pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
     let header = BlockView::new(block_bytes).header_view();
     Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal())))
 }
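Throughout this file `JournalDB` stops being a concrete, clonable value and becomes a boxed trait object: `clone()` turns into `spawn()`, which hands back a fresh `Box<JournalDB>` over the same backing store, and `as_hashdb_mut()` (used in the tests below) exposes the underlying hash database. A rough sketch of that shape, with simplified signatures that are assumptions rather than the crate's real API:

```rust
// Sketch: callers own Box<JournalDB> and duplicate handles via spawn()
// instead of Clone, so each pruning strategy can implement it differently.
trait JournalDB {
    /// Return a new boxed handle over the same backing store.
    fn spawn(&self) -> Box<dyn JournalDB>;
    fn is_empty(&self) -> bool;
}

struct ArchiveDB; // stand-in for one concrete implementation

impl JournalDB for ArchiveDB {
    fn spawn(&self) -> Box<dyn JournalDB> { Box::new(ArchiveDB) }
    fn is_empty(&self) -> bool { true }
}

fn main() {
    let db: Box<dyn JournalDB> = Box::new(ArchiveDB);
    let handle = db.spawn(); // second handle, same conceptual store
    assert!(handle.is_empty());
}
```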
@@ -389,7 +389,7 @@ mod tests {
     let genesis_header = engine.spec().genesis_header();
     let mut db_result = get_temp_journal_db();
     let mut db = db_result.take();
-    engine.spec().ensure_db_good(&mut db);
+    engine.spec().ensure_db_good(db.as_hashdb_mut());
     let last_hashes = vec![genesis_header.hash()];
     let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
     let b = b.close();
@@ -404,14 +404,14 @@ mod tests {
     let mut db_result = get_temp_journal_db();
     let mut db = db_result.take();
-    engine.spec().ensure_db_good(&mut db);
+    engine.spec().ensure_db_good(db.as_hashdb_mut());
     let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap();
     let orig_bytes = b.rlp_bytes();
     let orig_db = b.drain();
     let mut db_result = get_temp_journal_db();
     let mut db = db_result.take();
-    engine.spec().ensure_db_good(&mut db);
+    engine.spec().ensure_db_good(db.as_hashdb_mut());
     let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, vec![genesis_header.hash()]).unwrap();
     assert_eq!(e.rlp_bytes(), orig_bytes);


@@ -95,7 +95,7 @@ pub struct BlockQueue {
     panic_handler: Arc<PanicHandler>,
     engine: Arc<Box<Engine>>,
     more_to_verify: Arc<Condvar>,
-    verification: Arc<Mutex<Verification>>,
+    verification: Arc<Verification>,
     verifiers: Vec<JoinHandle<()>>,
     deleting: Arc<AtomicBool>,
     ready_signal: Arc<QueueSignal>,
@@ -121,7 +121,7 @@ struct QueueSignal {
 }
 impl QueueSignal {
-    #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))]
+    #[cfg_attr(feature="dev", allow(bool_comparison))]
     fn set(&self) {
         if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
             self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
@@ -132,18 +132,23 @@ impl QueueSignal {
     }
 }
-#[derive(Default)]
 struct Verification {
-    unverified: VecDeque<UnverifiedBlock>,
-    verified: VecDeque<PreverifiedBlock>,
-    verifying: VecDeque<VerifyingBlock>,
-    bad: HashSet<H256>,
+    // All locks must be captured in the order declared here.
+    unverified: Mutex<VecDeque<UnverifiedBlock>>,
+    verified: Mutex<VecDeque<PreverifiedBlock>>,
+    verifying: Mutex<VecDeque<VerifyingBlock>>,
+    bad: Mutex<HashSet<H256>>,
 }
 impl BlockQueue {
     /// Creates a new queue instance.
     pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
-        let verification = Arc::new(Mutex::new(Verification::default()));
+        let verification = Arc::new(Verification {
+            unverified: Mutex::new(VecDeque::new()),
+            verified: Mutex::new(VecDeque::new()),
+            verifying: Mutex::new(VecDeque::new()),
+            bad: Mutex::new(HashSet::new()),
+        });
         let more_to_verify = Arc::new(Condvar::new());
         let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
         let deleting = Arc::new(AtomicBool::new(false));
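The single coarse `Mutex<Verification>` becomes four independent mutexes, and the new comment pins a global acquisition order. With multiple locks, a fixed order is what rules out the classic deadlock where one thread holds A waiting for B while another holds B waiting for A. A small sketch of the discipline, with hypothetical types:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

// Two independent queues guarded separately; every thread agrees to
// lock `first` before `second`, so no cyclic wait can form.
struct Queues {
    first: Mutex<Vec<u32>>,  // always locked first
    second: Mutex<Vec<u32>>, // always locked second
}

fn main() {
    let q = Arc::new(Queues { first: Mutex::new(vec![]), second: Mutex::new(vec![]) });
    let workers: Vec<_> = (0..2).map(|i| {
        let q = Arc::clone(&q);
        thread::spawn(move || {
            let mut a = q.first.lock().unwrap();
            let mut b = q.second.lock().unwrap();
            a.push(i);
            b.push(i);
        })
    }).collect();
    for w in workers { w.join().unwrap(); }
}
```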
@@ -186,17 +191,17 @@ impl BlockQueue {
         }
     }
-    fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
+    fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
         while !deleting.load(AtomicOrdering::Acquire) {
             {
-                let mut lock = verification.lock().unwrap();
-                if lock.unverified.is_empty() && lock.verifying.is_empty() {
+                let mut unverified = verification.unverified.lock().unwrap();
+                if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() {
                     empty.notify_all();
                 }
-                while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
-                    lock = wait.wait(lock).unwrap();
+                while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
+                    unverified = wait.wait(unverified).unwrap();
                 }
                 if deleting.load(AtomicOrdering::Acquire) {
@@ -205,39 +210,42 @@ impl BlockQueue {
             }
             let block = {
-                let mut v = verification.lock().unwrap();
-                if v.unverified.is_empty() {
+                let mut unverified = verification.unverified.lock().unwrap();
+                if unverified.is_empty() {
                     continue;
                 }
-                let block = v.unverified.pop_front().unwrap();
-                v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
+                let mut verifying = verification.verifying.lock().unwrap();
+                let block = unverified.pop_front().unwrap();
+                verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
                 block
             };
             let block_hash = block.header.hash();
             match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) {
                 Ok(verified) => {
-                    let mut v = verification.lock().unwrap();
-                    for e in &mut v.verifying {
+                    let mut verifying = verification.verifying.lock().unwrap();
+                    for e in verifying.iter_mut() {
                         if e.hash == block_hash {
                             e.block = Some(verified);
                             break;
                         }
                     }
-                    if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash {
+                    if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash {
                         // we're next!
-                        let mut vref = v.deref_mut();
-                        BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+                        let mut verified = verification.verified.lock().unwrap();
+                        let mut bad = verification.bad.lock().unwrap();
+                        BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
                         ready.set();
                     }
                 },
                 Err(err) => {
-                    let mut v = verification.lock().unwrap();
+                    let mut verifying = verification.verifying.lock().unwrap();
+                    let mut verified = verification.verified.lock().unwrap();
+                    let mut bad = verification.bad.lock().unwrap();
                     warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
-                    v.bad.insert(block_hash.clone());
-                    v.verifying.retain(|e| e.hash != block_hash);
-                    let mut vref = v.deref_mut();
-                    BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+                    bad.insert(block_hash.clone());
+                    verifying.retain(|e| e.hash != block_hash);
+                    BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
                     ready.set();
                 }
             }
@@ -257,19 +265,21 @@ impl BlockQueue {
     }
     /// Clear the queue and stop verification activity.
-    pub fn clear(&mut self) {
-        let mut verification = self.verification.lock().unwrap();
-        verification.unverified.clear();
-        verification.verifying.clear();
-        verification.verified.clear();
+    pub fn clear(&self) {
+        let mut unverified = self.verification.unverified.lock().unwrap();
+        let mut verifying = self.verification.verifying.lock().unwrap();
+        let mut verified = self.verification.verified.lock().unwrap();
+        unverified.clear();
+        verifying.clear();
+        verified.clear();
         self.processing.write().unwrap().clear();
     }
-    /// Wait for queue to be empty
-    pub fn flush(&mut self) {
-        let mut verification = self.verification.lock().unwrap();
-        while !verification.unverified.is_empty() || !verification.verifying.is_empty() {
-            verification = self.empty.wait(verification).unwrap();
+    /// Wait for unverified queue to be empty
+    pub fn flush(&self) {
+        let mut unverified = self.verification.unverified.lock().unwrap();
+        while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() {
+            unverified = self.empty.wait(unverified).unwrap();
        }
     }
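`flush` now parks on the `empty` condvar while holding only the `unverified` guard: `Condvar::wait` atomically releases the mutex while the thread sleeps and re-acquires it before returning, and the surrounding `while` re-checks the condition to tolerate spurious wakeups. A minimal sketch of the same wait/notify shape:

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

fn main() {
    // (queue, "queue became empty" signal), mirroring flush()/verify().
    let shared = Arc::new((Mutex::new(VecDeque::from(vec![1, 2, 3])), Condvar::new()));

    let worker = {
        let shared = Arc::clone(&shared);
        thread::spawn(move || {
            let (lock, empty) = &*shared;
            let mut q = lock.lock().unwrap();
            while q.pop_front().is_some() {} // drain the queue
            empty.notify_all();
        })
    };

    let (lock, empty) = &*shared;
    let mut q = lock.lock().unwrap();
    while !q.is_empty() {
        q = empty.wait(q).unwrap(); // lock released while parked
    }
    worker.join().unwrap();
}
```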
@@ -278,27 +288,28 @@ impl BlockQueue {
         if self.processing.read().unwrap().contains(&hash) {
             return BlockStatus::Queued;
         }
-        if self.verification.lock().unwrap().bad.contains(&hash) {
+        if self.verification.bad.lock().unwrap().contains(&hash) {
             return BlockStatus::Bad;
         }
         BlockStatus::Unknown
     }
     /// Add a block to the queue.
-    pub fn import_block(&mut self, bytes: Bytes) -> ImportResult {
+    pub fn import_block(&self, bytes: Bytes) -> ImportResult {
         let header = BlockView::new(&bytes).header();
         let h = header.hash();
-        if self.processing.read().unwrap().contains(&h) {
-            return Err(x!(ImportError::AlreadyQueued));
-        }
         {
-            let mut verification = self.verification.lock().unwrap();
-            if verification.bad.contains(&h) {
+            if self.processing.read().unwrap().contains(&h) {
+                return Err(x!(ImportError::AlreadyQueued));
+            }
+            let mut bad = self.verification.bad.lock().unwrap();
+            if bad.contains(&h) {
                 return Err(x!(ImportError::KnownBad));
             }
-            if verification.bad.contains(&header.parent_hash) {
-                verification.bad.insert(h.clone());
+            if bad.contains(&header.parent_hash) {
+                bad.insert(h.clone());
                 return Err(x!(ImportError::KnownBad));
             }
         }
@@ -306,48 +317,47 @@ impl BlockQueue {
         match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
             Ok(()) => {
                 self.processing.write().unwrap().insert(h.clone());
-                self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes });
+                self.verification.unverified.lock().unwrap().push_back(UnverifiedBlock { header: header, bytes: bytes });
                 self.more_to_verify.notify_all();
                 Ok(h)
             },
             Err(err) => {
                 warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
-                self.verification.lock().unwrap().bad.insert(h.clone());
+                self.verification.bad.lock().unwrap().insert(h.clone());
                 Err(err)
             }
         }
     }
     /// Mark given block and all its children as bad. Stops verification.
-    pub fn mark_as_bad(&mut self, block_hashes: &[H256]) {
+    pub fn mark_as_bad(&self, block_hashes: &[H256]) {
         if block_hashes.is_empty() {
             return;
         }
-        let mut verification_lock = self.verification.lock().unwrap();
+        let mut verified_lock = self.verification.verified.lock().unwrap();
+        let mut verified = verified_lock.deref_mut();
+        let mut bad = self.verification.bad.lock().unwrap();
         let mut processing = self.processing.write().unwrap();
-        let mut verification = verification_lock.deref_mut();
-        verification.bad.reserve(block_hashes.len());
+        bad.reserve(block_hashes.len());
         for hash in block_hashes {
-            verification.bad.insert(hash.clone());
+            bad.insert(hash.clone());
             processing.remove(&hash);
         }
         let mut new_verified = VecDeque::new();
-        for block in verification.verified.drain(..) {
-            if verification.bad.contains(&block.header.parent_hash) {
-                verification.bad.insert(block.header.hash());
+        for block in verified.drain(..) {
+            if bad.contains(&block.header.parent_hash) {
+                bad.insert(block.header.hash());
                 processing.remove(&block.header.hash());
             } else {
                 new_verified.push_back(block);
             }
         }
-        verification.verified = new_verified;
+        *verified = new_verified;
     }
     /// Mark given block as processed
-    pub fn mark_as_good(&mut self, block_hashes: &[H256]) {
+    pub fn mark_as_good(&self, block_hashes: &[H256]) {
         if block_hashes.is_empty() {
             return;
         }
@@ -358,16 +368,16 @@ impl BlockQueue {
     }
     /// Removes up to `max` verified blocks from the queue
-    pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> {
-        let mut verification = self.verification.lock().unwrap();
-        let count = min(max, verification.verified.len());
+    pub fn drain(&self, max: usize) -> Vec<PreverifiedBlock> {
+        let mut verified = self.verification.verified.lock().unwrap();
+        let count = min(max, verified.len());
         let mut result = Vec::with_capacity(count);
         for _ in 0..count {
-            let block = verification.verified.pop_front().unwrap();
+            let block = verified.pop_front().unwrap();
             result.push(block);
         }
         self.ready_signal.reset();
-        if !verification.verified.is_empty() {
+        if !verified.is_empty() {
             self.ready_signal.set();
         }
         result
@@ -375,28 +385,39 @@ impl BlockQueue {
     /// Get queue status.
     pub fn queue_info(&self) -> BlockQueueInfo {
-        let verification = self.verification.lock().unwrap();
+        let (unverified_len, unverified_bytes) = {
+            let v = self.verification.unverified.lock().unwrap();
+            (v.len(), v.heap_size_of_children())
+        };
+        let (verifying_len, verifying_bytes) = {
+            let v = self.verification.verifying.lock().unwrap();
+            (v.len(), v.heap_size_of_children())
+        };
+        let (verified_len, verified_bytes) = {
+            let v = self.verification.verified.lock().unwrap();
+            (v.len(), v.heap_size_of_children())
+        };
         BlockQueueInfo {
-            verified_queue_size: verification.verified.len(),
-            unverified_queue_size: verification.unverified.len(),
-            verifying_queue_size: verification.verifying.len(),
+            unverified_queue_size: unverified_len,
+            verifying_queue_size: verifying_len,
+            verified_queue_size: verified_len,
             max_queue_size: self.max_queue_size,
             max_mem_use: self.max_mem_use,
             mem_used:
-                verification.unverified.heap_size_of_children()
-                + verification.verifying.heap_size_of_children()
-                + verification.verified.heap_size_of_children(),
+                unverified_bytes
+                + verifying_bytes
+                + verified_bytes
             // TODO: https://github.com/servo/heapsize/pull/50
             //+ self.processing.read().unwrap().heap_size_of_children(),
         }
     }
+    /// Optimise memory footprint of the heap fields.
     pub fn collect_garbage(&self) {
         {
-            let mut verification = self.verification.lock().unwrap();
-            verification.unverified.shrink_to_fit();
-            verification.verifying.shrink_to_fit();
-            verification.verified.shrink_to_fit();
+            self.verification.unverified.lock().unwrap().shrink_to_fit();
+            self.verification.verifying.lock().unwrap().shrink_to_fit();
+            self.verification.verified.lock().unwrap().shrink_to_fit();
         }
         self.processing.write().unwrap().shrink_to_fit();
     }
@@ -444,7 +465,7 @@ mod tests {
     #[test]
     fn can_import_blocks() {
-        let mut queue = get_test_queue();
+        let queue = get_test_queue();
         if let Err(e) = queue.import_block(get_good_dummy_block()) {
             panic!("error importing block that is valid by definition({:?})", e);
         }
@@ -452,7 +473,7 @@ mod tests {
     #[test]
     fn returns_error_for_duplicates() {
-        let mut queue = get_test_queue();
+        let queue = get_test_queue();
         if let Err(e) = queue.import_block(get_good_dummy_block()) {
             panic!("error importing block that is valid by definition({:?})", e);
         }
@@ -471,7 +492,7 @@ mod tests {
     #[test]
     fn returns_ok_for_drained_duplicates() {
-        let mut queue = get_test_queue();
+        let queue = get_test_queue();
         let block = get_good_dummy_block();
         let hash = BlockView::new(&block).header().hash().clone();
         if let Err(e) = queue.import_block(block) {
@@ -488,7 +509,7 @@ mod tests {
     #[test]
     fn returns_empty_once_finished() {
-        let mut queue = get_test_queue();
+        let queue = get_test_queue();
         queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
         queue.flush();
         queue.drain(1);


@@ -16,6 +16,7 @@
 //! Blockchain database.
+use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};
 use util::*;
 use header::*;
 use extras::*;
@@ -134,8 +135,9 @@ struct CacheManager {
 ///
 /// **Does not do input data verification.**
 pub struct BlockChain {
-    pref_cache_size: usize,
-    max_cache_size: usize,
+    // All locks must be captured in the order declared here.
+    pref_cache_size: AtomicUsize,
+    max_cache_size: AtomicUsize,
     best_block: RwLock<BestBlock>,
@@ -157,6 +159,8 @@ pub struct BlockChain {
     // blooms indexing
     bloom_indexer: BloomIndexer,
+    insert_lock: Mutex<()>
 }
 impl FilterDataSource for BlockChain {
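Making the two cache limits `AtomicUsize` is what lets `configure_cache` (in a later hunk) take `&self` instead of `&mut self`: the sizes can be retuned through a shared `Arc<BlockChain>` without a lock. A sketch of the idea, with the field names from the diff and the rest hypothetical:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

struct CacheLimits {
    pref_cache_size: AtomicUsize,
    max_cache_size: AtomicUsize,
}

impl CacheLimits {
    // &self, not &mut self: safe to call through a shared Arc.
    fn configure_cache(&self, pref: usize, max: usize) {
        // Relaxed suffices; these are advisory tuning knobs, not
        // synchronization points for other data.
        self.pref_cache_size.store(pref, Ordering::Relaxed);
        self.max_cache_size.store(max, Ordering::Relaxed);
    }
    fn over_pref(&self, used: usize) -> bool {
        used >= self.pref_cache_size.load(Ordering::Relaxed)
    }
}

fn main() {
    let limits = CacheLimits {
        pref_cache_size: AtomicUsize::new(1 << 20),
        max_cache_size: AtomicUsize::new(1 << 22),
    };
    limits.configure_cache(2 << 20, 2 << 22);
    assert!(!limits.over_pref(1024));
}
```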
@@ -262,8 +266,8 @@ impl BlockChain {
         (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
         let bc = BlockChain {
-            pref_cache_size: config.pref_cache_size,
-            max_cache_size: config.max_cache_size,
+            pref_cache_size: AtomicUsize::new(config.pref_cache_size),
+            max_cache_size: AtomicUsize::new(config.max_cache_size),
             best_block: RwLock::new(BestBlock::default()),
             blocks: RwLock::new(HashMap::new()),
             block_details: RwLock::new(HashMap::new()),
@@ -275,7 +279,8 @@ impl BlockChain {
             extras_db: extras_db,
             blocks_db: blocks_db,
             cache_man: RwLock::new(cache_man),
-            bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS)
+            bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS),
+            insert_lock: Mutex::new(()),
         };
         // load best block
@@ -318,9 +323,9 @@ impl BlockChain {
     }
     /// Set the cache configuration.
-    pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) {
-        self.pref_cache_size = pref_cache_size;
-        self.max_cache_size = max_cache_size;
+    pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
+        self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed);
+        self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed);
     }
     /// Returns a tree route between `from` and `to`, which is a tuple of:
@@ -424,6 +429,7 @@ impl BlockChain {
             return ImportRoute::none();
         }
+        let _lock = self.insert_lock.lock();
         // store block in db
         self.blocks_db.put(&hash, &bytes).unwrap();
@@ -446,48 +452,58 @@ impl BlockChain {
         let batch = DBTransaction::new();
         batch.put(b"best", &update.info.hash).unwrap();
-        // update best block
-        let mut best_block = self.best_block.write().unwrap();
-        match update.info.location {
-            BlockLocation::Branch => (),
-            _ => {
-                *best_block = BestBlock {
-                    hash: update.info.hash,
-                    number: update.info.number,
-                    total_difficulty: update.info.total_difficulty
-                };
-            }
-        }
-        let mut write_hashes = self.block_hashes.write().unwrap();
-        for (number, hash) in &update.block_hashes {
-            batch.put_extras(number, hash);
-            write_hashes.remove(number);
-        }
-        let mut write_details = self.block_details.write().unwrap();
-        for (hash, details) in update.block_details.into_iter() {
-            batch.put_extras(&hash, &details);
-            write_details.insert(hash.clone(), details);
-            self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash));
-        }
-        let mut write_receipts = self.block_receipts.write().unwrap();
-        for (hash, receipt) in &update.block_receipts {
-            batch.put_extras(hash, receipt);
-            write_receipts.remove(hash);
-        }
-        let mut write_txs = self.transaction_addresses.write().unwrap();
-        for (hash, tx_address) in &update.transactions_addresses {
-            batch.put_extras(hash, tx_address);
-            write_txs.remove(hash);
-        }
-        let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
-        for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
-            batch.put_extras(bloom_hash, blocks_bloom);
-            write_blocks_blooms.remove(bloom_hash);
-        }
+        {
+            let mut write_details = self.block_details.write().unwrap();
+            for (hash, details) in update.block_details.into_iter() {
+                batch.put_extras(&hash, &details);
+                self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone()));
+                write_details.insert(hash, details);
+            }
+        }
+        {
+            let mut write_receipts = self.block_receipts.write().unwrap();
+            for (hash, receipt) in &update.block_receipts {
+                batch.put_extras(hash, receipt);
+                write_receipts.remove(hash);
+            }
+        }
+        {
+            let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
+            for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
+                batch.put_extras(bloom_hash, blocks_bloom);
+                write_blocks_blooms.remove(bloom_hash);
+            }
+        }
+        // These cached values must be updated last and together
+        {
+            let mut best_block = self.best_block.write().unwrap();
+            let mut write_hashes = self.block_hashes.write().unwrap();
+            let mut write_txs = self.transaction_addresses.write().unwrap();
+            // update best block
+            match update.info.location {
+                BlockLocation::Branch => (),
+                _ => {
+                    *best_block = BestBlock {
+                        hash: update.info.hash,
+                        number: update.info.number,
+                        total_difficulty: update.info.total_difficulty
+                    };
+                }
+            }
+            for (number, hash) in &update.block_hashes {
+                batch.put_extras(number, hash);
+                write_hashes.remove(number);
+            }
+            for (hash, tx_address) in &update.transactions_addresses {
+                batch.put_extras(hash, tx_address);
+                write_txs.remove(hash);
+            }
+        }
         // update extras database
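The rewrite above batches the independent extras tables in their own scopes, dropping each write lock before the next is taken, and only then updates best block, block hashes and transaction addresses while holding those locks together, so a reader can never observe a new best block whose hash or transactions are not yet visible. A condensed sketch of the scoped-then-grouped shape, with hypothetical types:

```rust
use std::sync::RwLock;

struct Chain {
    details: RwLock<Vec<u64>>, // independent cache
    best: RwLock<u64>,         // interdependent head state...
    hashes: RwLock<Vec<u64>>,  // ...updated together with `best`
}

impl Chain {
    fn insert(&self, block: u64) {
        {
            // independent table: lock, update, release immediately.
            self.details.write().unwrap().push(block);
        }
        {
            // head state: hold both write locks for the whole update so
            // readers see `best` and `hashes` change atomically.
            let mut best = self.best.write().unwrap();
            let mut hashes = self.hashes.write().unwrap();
            *best = block;
            hashes.push(block);
        }
    }
}

fn main() {
    let c = Chain { details: RwLock::new(vec![]), best: RwLock::new(0), hashes: RwLock::new(vec![]) };
    c.insert(1);
    assert_eq!(*c.best.read().unwrap(), 1);
}
```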
@@ -781,11 +797,10 @@ impl BlockChain {
     /// Ticks our cache system and throws out any old data.
     pub fn collect_garbage(&self) {
-        if self.cache_size().total() < self.pref_cache_size { return; }
+        if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; }
         for _ in 0..COLLECTION_QUEUE_SIZE {
             {
-                let mut cache_man = self.cache_man.write().unwrap();
                 let mut blocks = self.blocks.write().unwrap();
                 let mut block_details = self.block_details.write().unwrap();
                 let mut block_hashes = self.block_hashes.write().unwrap();
@@ -793,6 +808,7 @@ impl BlockChain {
                 let mut block_logs = self.block_logs.write().unwrap();
                 let mut blocks_blooms = self.blocks_blooms.write().unwrap();
                 let mut block_receipts = self.block_receipts.write().unwrap();
+                let mut cache_man = self.cache_man.write().unwrap();
                 for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
                     cache_man.in_use.remove(&id);
@@ -819,7 +835,7 @@ impl BlockChain {
                 blocks_blooms.shrink_to_fit();
                 block_receipts.shrink_to_fit();
             }
-            if self.cache_size().total() < self.max_cache_size { break; }
+            if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
         }
         // TODO: m_lastCollection = chrono::system_clock::now();
@@ -891,7 +907,7 @@ mod tests {
     }
     #[test]
-    #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+    #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
     fn test_find_uncles() {
         let mut canon_chain = ChainGenerator::default();
         let mut finalizer = BlockFinalizer::default();
@@ -929,7 +945,7 @@ mod tests {
     }
     #[test]
-    #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+    #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
     fn test_small_fork() {
         let mut canon_chain = ChainGenerator::default();
         let mut finalizer = BlockFinalizer::default();


@@ -23,9 +23,9 @@ mod bloom_indexer;
 mod cache;
 mod tree_route;
 mod update;
+mod import_route;
 #[cfg(test)]
 mod generator;
-mod import_route;
 pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig};
 pub use self::cache::CacheSize;


@@ -20,7 +20,6 @@ use std::marker::PhantomData;
 use std::sync::atomic::AtomicBool;
 use util::*;
 use util::panics::*;
-use blockchain::{BlockChain, BlockProvider};
 use views::BlockView;
 use error::*;
 use header::{BlockNumber};
@@ -28,7 +27,6 @@ use state::State;
 use spec::Spec;
 use engine::Engine;
 use views::HeaderView;
-use block_queue::BlockQueue;
 use service::{NetSyncMessage, SyncMessage};
 use env_info::LastHashes;
 use verification::*;
@@ -37,33 +35,10 @@ use transaction::LocalizedTransaction;
 use extras::TransactionAddress;
 use filter::Filter;
 use log_entry::LocalizedLogEntry;
-use util::keys::store::SecretStore;
-pub use block_queue::{BlockQueueConfig, BlockQueueInfo};
-pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize};
+use block_queue::{BlockQueue, BlockQueueInfo};
+use blockchain::{BlockChain, BlockProvider, TreeRoute};
+use client::{BlockId, TransactionId, ClientConfig, BlockChainClient};
+pub use blockchain::CacheSize as BlockChainCacheSize;
-/// Uniquely identifies block.
-#[derive(Debug, PartialEq, Clone)]
-pub enum BlockId {
-    /// Block's sha3.
-    /// Querying by hash is always faster.
-    Hash(H256),
-    /// Block number within canon blockchain.
-    Number(BlockNumber),
-    /// Earliest block (genesis).
-    Earliest,
-    /// Latest mined block.
-    Latest
-}
-/// Uniquely identifies transaction.
-#[derive(Debug, PartialEq, Clone)]
-pub enum TransactionId {
-    /// Transaction's sha3.
-    Hash(H256),
-    /// Block id and transaction index within this block.
-    /// Querying by block position is always faster.
-    Location(BlockId, usize)
-}
 /// General block status
 #[derive(Debug, Eq, PartialEq)]
@@ -78,30 +53,6 @@ pub enum BlockStatus {
     Unknown,
 }
-/// Client configuration. Includes configs for all sub-systems.
-#[derive(Debug)]
-pub struct ClientConfig {
-    /// Block queue configuration.
-    pub queue: BlockQueueConfig,
-    /// Blockchain configuration.
-    pub blockchain: BlockChainConfig,
-    /// Prefer journal rather than archive.
-    pub prefer_journal: bool,
-    /// The name of the client instance.
-    pub name: String,
-}
-impl Default for ClientConfig {
-    fn default() -> ClientConfig {
-        ClientConfig {
-            queue: Default::default(),
-            blockchain: Default::default(),
-            prefer_journal: false,
-            name: Default::default(),
-        }
-    }
-}
 /// Information about the blockchain gathered together.
 #[derive(Debug)]
 pub struct BlockChainInfo {
@@ -123,79 +74,8 @@ impl fmt::Display for BlockChainInfo {
     }
 }
-/// Blockchain database client. Owns and manages a blockchain and a block queue.
-pub trait BlockChainClient : Sync + Send {
-    /// Get raw block header data by block id.
-    fn block_header(&self, id: BlockId) -> Option<Bytes>;
-    /// Get raw block body data by block id.
-    /// Block body is an RLP list of two items: uncles and transactions.
-    fn block_body(&self, id: BlockId) -> Option<Bytes>;
-    /// Get raw block data by block header hash.
-    fn block(&self, id: BlockId) -> Option<Bytes>;
-    /// Get block status by block header hash.
-    fn block_status(&self, id: BlockId) -> BlockStatus;
-    /// Get block total difficulty.
-    fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
-    /// Get address nonce.
-    fn nonce(&self, address: &Address) -> U256;
-    /// Get block hash.
-    fn block_hash(&self, id: BlockId) -> Option<H256>;
-    /// Get address code.
-    fn code(&self, address: &Address) -> Option<Bytes>;
-    /// Get transaction with given hash.
-    fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
-    /// Get a tree route between `from` and `to`.
-    /// See `BlockChain::tree_route`.
-    fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
-    /// Get latest state node
-    fn state_data(&self, hash: &H256) -> Option<Bytes>;
-    /// Get raw block receipts data by block header hash.
-    fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
-    /// Import a block into the blockchain.
-    fn import_block(&self, bytes: Bytes) -> ImportResult;
-    /// Get block queue information.
-    fn queue_info(&self) -> BlockQueueInfo;
-    /// Clear block queue and abort all import activity.
-    fn clear_queue(&self);
-    /// Get blockchain information.
-    fn chain_info(&self) -> BlockChainInfo;
-    /// Get the best block header.
-    fn best_block_header(&self) -> Bytes {
-        self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
-    }
-    /// Returns numbers of blocks containing given bloom.
-    fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
-    /// Returns logs matching given filter.
-    fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
-    /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
-    fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
-    /// Submit `seal` as a valid solution for the header of `pow_hash`.
-    /// Will check the seal, but not actually insert the block into the chain.
-    fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
-}
-#[derive(Default, Clone, Debug, Eq, PartialEq)]
 /// Report on the status of a client.
+#[derive(Default, Clone, Debug, Eq, PartialEq)]
 pub struct ClientReport {
     /// How many blocks have been imported so far.
     pub blocks_imported: usize,
@@ -219,10 +99,10 @@ impl ClientReport {
 /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
 /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
 pub struct Client<V = CanonVerifier> where V: Verifier {
-    chain: Arc<RwLock<BlockChain>>,
+    chain: Arc<BlockChain>,
     engine: Arc<Box<Engine>>,
-    state_db: Mutex<JournalDB>,
-    block_queue: RwLock<BlockQueue>,
+    state_db: Mutex<Box<JournalDB>>,
+    block_queue: BlockQueue,
     report: RwLock<ClientReport>,
     import_lock: Mutex<()>,
     panic_handler: Arc<PanicHandler>,
@@ -233,7 +113,6 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
     author: RwLock<Address>,
     extra_data: RwLock<Bytes>,
     verifier: PhantomData<V>,
-    secret_store: Arc<RwLock<SecretStore>>,
 }
 const HISTORY: u64 = 1000;
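Because `BlockChain` and `BlockQueue` now lock internally (per field, as in the earlier hunks), the client drops the outer wrappers: `Arc<RwLock<BlockChain>>` becomes `Arc<BlockChain>` and `RwLock<BlockQueue>` becomes a plain field, which removes the `self.chain.read().unwrap()` boilerplate in every method below. A minimal sketch of pushing the lock inside the type, with hypothetical names:

```rust
use std::sync::{Arc, RwLock};

// Instead of Arc<RwLock<Chain>>, the lock lives inside Chain and all
// methods take &self, so shared handles call them directly.
struct Chain {
    best_block: RwLock<u64>,
}

impl Chain {
    fn best_block_number(&self) -> u64 {
        *self.best_block.read().unwrap()
    }
    fn set_best(&self, n: u64) {
        *self.best_block.write().unwrap() = n;
    }
}

fn main() {
    let chain = Arc::new(Chain { best_block: RwLock::new(0) });
    let handle = Arc::clone(&chain);
    chain.set_best(42); // no outer lock needed
    assert_eq!(handle.best_block_number(), 42);
}
```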
@@ -252,16 +131,19 @@ impl<V> Client<V> where V: Verifier {
         let mut dir = path.to_path_buf();
         dir.push(H64::from(spec.genesis_header().hash()).hex());
         //TODO: sec/fat: pruned/full versioning
-        dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" }));
+        // version here is a bit useless now, since it's controlled only by the pruning algo.
+        dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning));
         let path = dir.as_path();
         let gb = spec.genesis_block();
-        let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
+        let chain = Arc::new(BlockChain::new(config.blockchain, &gb, path));
         let mut state_path = path.to_path_buf();
         state_path.push("state");
         let engine = Arc::new(try!(spec.to_engine()));
-        let mut state_db = JournalDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal);
-        if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) {
+        let state_path_str = state_path.to_str().unwrap();
+        let mut state_db = journaldb::new(state_path_str, config.pruning);
+        if state_db.is_empty() && engine.spec().ensure_db_good(state_db.as_hashdb_mut()) {
             state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error committing genesis state to state DB");
         }
@@ -269,14 +151,11 @@ impl<V> Client<V> where V: Verifier {
         let panic_handler = PanicHandler::new_in_arc();
         panic_handler.forward_from(&block_queue);
-        let secret_store = Arc::new(RwLock::new(SecretStore::new()));
-        secret_store.write().unwrap().try_import_existing();
         Ok(Arc::new(Client {
             chain: chain,
             engine: engine,
             state_db: Mutex::new(state_db),
-            block_queue: RwLock::new(block_queue),
+            block_queue: block_queue,
             report: RwLock::new(Default::default()),
             import_lock: Mutex::new(()),
             panic_handler: panic_handler,
@@ -285,22 +164,20 @@ impl<V> Client<V> where V: Verifier {
             author: RwLock::new(Address::new()),
             extra_data: RwLock::new(Vec::new()),
             verifier: PhantomData,
-            secret_store: secret_store,
         }))
     }
     /// Flush the block import queue.
     pub fn flush_queue(&self) {
-        self.block_queue.write().unwrap().flush();
+        self.block_queue.flush();
     }
     fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
         let mut last_hashes = LastHashes::new();
         last_hashes.resize(256, H256::new());
         last_hashes[0] = parent_hash;
-        let chain = self.chain.read().unwrap();
         for i in 0..255 {
-            match chain.block_details(&last_hashes[i]) {
+            match self.chain.block_details(&last_hashes[i]) {
                 Some(details) => {
                     last_hashes[i + 1] = details.parent.clone();
                 },
@@ -310,31 +187,26 @@ impl<V> Client<V> where V: Verifier {
         last_hashes
     }
-    /// Secret store (key manager)
-    pub fn secret_store(&self) -> &Arc<RwLock<SecretStore>> {
-        &self.secret_store
-    }
     fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> {
         let engine = self.engine.deref().deref();
         let header = &block.header;
         // Check the block isn't so old we won't be able to enact it.
-        let best_block_number = self.chain.read().unwrap().best_block_number();
+        let best_block_number = self.chain.best_block_number();
         if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY {
             warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
             return Err(());
         }
         // Verify Block Family
-        let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
+        let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.deref());
         if let Err(e) = verify_family_result {
             warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
             return Err(());
         };
         // Check if Parent is in chain
-        let chain_has_parent = self.chain.read().unwrap().block_header(&header.parent_hash);
+        let chain_has_parent = self.chain.block_header(&header.parent_hash);
         if let None = chain_has_parent {
             warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
             return Err(());
@@ -343,7 +215,7 @@ impl<V> Client<V> where V: Verifier {
         // Enact Verified Block
         let parent = chain_has_parent.unwrap();
         let last_hashes = self.build_last_hashes(header.parent_hash.clone());
-        let db = self.state_db.lock().unwrap().clone();
+        let db = self.state_db.lock().unwrap().spawn();
         let enact_result = enact_verified(&block, engine, db, &parent, last_hashes);
         if let Err(e) = enact_result {
@@ -369,7 +241,7 @@ impl<V> Client<V> where V: Verifier {
         let mut bad_blocks = HashSet::new();
         let _import_lock = self.import_lock.lock();
-        let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);
+        let blocks = self.block_queue.drain(max_blocks_to_import);
         let original_best = self.chain_info().best_block_hash;
@@ -390,8 +262,7 @@ impl<V> Client<V> where V: Verifier {
             // Are we committing an era?
             let ancient = if header.number() >= HISTORY {
                 let n = header.number() - HISTORY;
-                let chain = self.chain.read().unwrap();
-                Some((n, chain.block_hash(n).unwrap()))
+                Some((n, self.chain.block_hash(n).unwrap()))
             } else {
                 None
             };
@@ -405,8 +276,7 @@ impl<V> Client<V> where V: Verifier {
             // And update the chain after commit to prevent race conditions
             // (when something is in chain but you are not able to fetch details)
-            self.chain.write().unwrap()
-                .insert_block(&block.bytes, receipts);
+            self.chain.insert_block(&block.bytes, receipts);
             self.report.write().unwrap().accrue_block(&block);
             trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
@@ -416,18 +286,16 @@ impl<V> Client<V> where V: Verifier {
         let bad_blocks = bad_blocks.into_iter().collect::<Vec<H256>>();
         {
-            let mut block_queue = self.block_queue.write().unwrap();
             if !bad_blocks.is_empty() {
-                block_queue.mark_as_bad(&bad_blocks);
+                self.block_queue.mark_as_bad(&bad_blocks);
             }
             if !good_blocks.is_empty() {
-                block_queue.mark_as_good(&good_blocks);
+                self.block_queue.mark_as_good(&good_blocks);
             }
         }
         {
-            let block_queue = self.block_queue.read().unwrap();
-            if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
+            if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
                 io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
                     good: good_blocks,
                     bad: bad_blocks,
@@ -446,12 +314,12 @@ impl<V> Client<V> where V: Verifier {
     /// Get a copy of the best block's state.
     pub fn state(&self) -> State {
-        State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
+        State::from_existing(self.state_db.lock().unwrap().spawn(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
     }
     /// Get info on the cache.
     pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
-        self.chain.read().unwrap().cache_size()
+        self.chain.cache_size()
     }
     /// Get the report.
@@ -463,13 +331,13 @@ impl<V> Client<V> where V: Verifier {
     /// Tick the client.
     pub fn tick(&self) {
-        self.chain.read().unwrap().collect_garbage();
-        self.block_queue.read().unwrap().collect_garbage();
+        self.chain.collect_garbage();
+        self.block_queue.collect_garbage();
     }
     /// Set up the cache behaviour.
     pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
-        self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
+        self.chain.configure_cache(pref_cache_size, max_cache_size);
     }
     fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
@ -484,9 +352,9 @@ impl<V> Client<V> where V: Verifier {
fn block_number(&self, id: BlockId) -> Option<BlockNumber> { fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
match id { match id {
BlockId::Number(number) => Some(number), BlockId::Number(number) => Some(number),
BlockId::Hash(ref hash) => self.chain.read().unwrap().block_number(hash), BlockId::Hash(ref hash) => self.chain.block_number(hash),
BlockId::Earliest => Some(0), BlockId::Earliest => Some(0),
BlockId::Latest => Some(self.chain.read().unwrap().best_block_number()) BlockId::Latest => Some(self.chain.best_block_number())
} }
} }
@@ -512,17 +380,17 @@ impl<V> Client<V> where V: Verifier {
 	/// New chain head event. Restart mining operation.
 	pub fn prepare_sealing(&self) {
-		let h = self.chain.read().unwrap().best_block_hash();
+		let h = self.chain.best_block_hash();
 		let mut b = OpenBlock::new(
 			self.engine.deref().deref(),
-			self.state_db.lock().unwrap().clone(),
+			self.state_db.lock().unwrap().spawn(),
-			match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} },
+			match self.chain.block_header(&h) { Some(ref x) => x, None => {return;} },
 			self.build_last_hashes(h.clone()),
 			self.author(),
 			self.extra_data()
 		);

-		self.chain.read().unwrap().find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); });
+		self.chain.find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); });

 		// TODO: push transactions.
@@ -536,14 +404,12 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 impl<V> BlockChainClient for Client<V> where V: Verifier {
 	fn block_header(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
+		Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
 	}

 	fn block_body(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| {
-			chain.block(&hash).map(|bytes| {
+		Self::block_hash(&self.chain, id).and_then(|hash| {
+			self.chain.block(&hash).map(|bytes| {
 				let rlp = Rlp::new(&bytes);
 				let mut body = RlpStream::new_list(2);
 				body.append_raw(rlp.at(1).as_raw(), 1);
@@ -554,24 +420,21 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}

 	fn block(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| {
-			chain.block(&hash)
+		Self::block_hash(&self.chain, id).and_then(|hash| {
+			self.chain.block(&hash)
 		})
 	}

 	fn block_status(&self, id: BlockId) -> BlockStatus {
-		let chain = self.chain.read().unwrap();
-		match Self::block_hash(&chain, id) {
-			Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
-			Some(hash) => self.block_queue.read().unwrap().block_status(&hash),
+		match Self::block_hash(&self.chain, id) {
+			Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain,
+			Some(hash) => self.block_queue.block_status(&hash),
 			None => BlockStatus::Unknown
 		}
 	}

 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
+		Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}
 	fn nonce(&self, address: &Address) -> U256 {

@@ -579,8 +442,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}

 	fn block_hash(&self, id: BlockId) -> Option<H256> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id)
+		Self::block_hash(&self.chain, id)
 	}
 	fn code(&self, address: &Address) -> Option<Bytes> {

@@ -588,20 +450,18 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}

 	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
-		let chain = self.chain.read().unwrap();
 		match id {
-			TransactionId::Hash(ref hash) => chain.transaction_address(hash),
-			TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress {
+			TransactionId::Hash(ref hash) => self.chain.transaction_address(hash),
+			TransactionId::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress {
 				block_hash: hash,
 				index: index
 			})
-		}.and_then(|address| chain.transaction(&address))
+		}.and_then(|address| self.chain.transaction(&address))
 	}

 	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
-		let chain = self.chain.read().unwrap();
-		match chain.is_known(from) && chain.is_known(to) {
-			true => Some(chain.tree_route(from.clone(), to.clone())),
+		match self.chain.is_known(from) && self.chain.is_known(to) {
+			true => Some(self.chain.tree_route(from.clone(), to.clone())),
 			false => None
 		}
 	}
@@ -617,43 +477,44 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	fn import_block(&self, bytes: Bytes) -> ImportResult {
 		{
 			let header = BlockView::new(&bytes).header_view();
-			if self.chain.read().unwrap().is_known(&header.sha3()) {
+			if self.chain.is_known(&header.sha3()) {
 				return Err(x!(ImportError::AlreadyInChain));
 			}
 			if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
 				return Err(x!(BlockError::UnknownParent(header.parent_hash())));
 			}
 		}
-		self.block_queue.write().unwrap().import_block(bytes)
+		self.block_queue.import_block(bytes)
 	}

 	fn queue_info(&self) -> BlockQueueInfo {
-		self.block_queue.read().unwrap().queue_info()
+		self.block_queue.queue_info()
 	}

 	fn clear_queue(&self) {
-		self.block_queue.write().unwrap().clear();
+		self.block_queue.clear();
 	}

 	fn chain_info(&self) -> BlockChainInfo {
-		let chain = self.chain.read().unwrap();
 		BlockChainInfo {
-			total_difficulty: chain.best_block_total_difficulty(),
-			pending_total_difficulty: chain.best_block_total_difficulty(),
-			genesis_hash: chain.genesis_hash(),
-			best_block_hash: chain.best_block_hash(),
-			best_block_number: From::from(chain.best_block_number())
+			total_difficulty: self.chain.best_block_total_difficulty(),
+			pending_total_difficulty: self.chain.best_block_total_difficulty(),
+			genesis_hash: self.chain.genesis_hash(),
+			best_block_hash: self.chain.best_block_hash(),
+			best_block_number: From::from(self.chain.best_block_number())
 		}
 	}

 	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
 		match (self.block_number(from_block), self.block_number(to_block)) {
-			(Some(from), Some(to)) => Some(self.chain.read().unwrap().blocks_with_bloom(bloom, from, to)),
+			(Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)),
 			_ => None
 		}
 	}

 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
-		// TODO: lock blockchain only once
 		let mut blocks = filter.bloom_possibilities().iter()
 			.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
 			.flat_map(|m| m)

@@ -665,9 +526,9 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 		blocks.sort();

 		blocks.into_iter()
-			.filter_map(|number| self.chain.read().unwrap().block_hash(number).map(|hash| (number, hash)))
-			.filter_map(|(number, hash)| self.chain.read().unwrap().block_receipts(&hash).map(|r| (number, hash, r.receipts)))
-			.filter_map(|(number, hash, receipts)| self.chain.read().unwrap().block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
+			.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
+			.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
+			.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
 			.flat_map(|(number, hash, receipts, hashes)| {
 				let mut log_index = 0;
 				receipts.into_iter()
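For context on the pattern running through this file: `chain` and `block_queue` are no longer wrapped in an outer `RwLock` at the call sites, so the `.read().unwrap()` / `.write().unwrap()` ceremony disappears and synchronization moves inside the components themselves. A minimal sketch of the before/after field layout; the type declarations are hypothetical stand-ins, not the actual `Client` definition from this diff:

use std::sync::{Arc, Mutex, RwLock};

// Hypothetical stand-ins for the real ethcore types.
struct BlockChain;   // now handles its own interior locking
struct BlockQueue;   // likewise
struct JournalDB;

// Before: every caller locks the whole component for each access.
struct ClientBefore {
	chain: Arc<RwLock<BlockChain>>,
	block_queue: RwLock<BlockQueue>,
	state_db: Mutex<JournalDB>,
}

// After: BlockChain and BlockQueue synchronize internally, so call
// sites read as plain method calls (self.chain.best_block_hash(), ...).
struct ClientAfter {
	chain: Arc<BlockChain>,
	block_queue: BlockQueue,
	state_db: Mutex<JournalDB>,
}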


@@ -14,12 +14,19 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-extern crate rustc_version;
+pub use block_queue::BlockQueueConfig;
+pub use blockchain::BlockChainConfig;
+use util::journaldb;

-use rustc_version::{version_meta, Channel};
-
-fn main() {
-	if let Channel::Nightly = version_meta().channel {
-		println!("cargo:rustc-cfg=nightly");
-	}
+/// Client configuration. Includes configs for all sub-systems.
+#[derive(Debug, Default)]
+pub struct ClientConfig {
+	/// Block queue configuration.
+	pub queue: BlockQueueConfig,
+	/// Blockchain configuration.
+	pub blockchain: BlockChainConfig,
+	/// The JournalDB ("pruning") algorithm to use.
+	pub pruning: journaldb::Algorithm,
+	/// The name of the client instance.
+	pub name: String,
 }
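Because `ClientConfig` derives `Default`, callers can start from defaults and override individual fields. A minimal sketch, assuming the sub-config types and `journaldb::Algorithm` provide the `Default` implementations the derive requires; the function and instance name are illustrative:

use util::journaldb;

fn example_config() -> ClientConfig {
	let mut config = ClientConfig::default();
	// Pick a pruning algorithm and an instance name; the queue and
	// blockchain sub-configs keep their defaults.
	config.pruning = journaldb::Algorithm::OverlayRecent;
	config.name = "my-node".to_owned();
	config
}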

ethcore/src/client/ids.rs (new file)

@@ -0,0 +1,44 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Unique identifiers.
use util::hash::H256;
use header::BlockNumber;
/// Uniquely identifies block.
#[derive(Debug, PartialEq, Clone)]
pub enum BlockId {
/// Block's sha3.
/// Querying by hash is always faster.
Hash(H256),
/// Block number within canon blockchain.
Number(BlockNumber),
/// Earliest block (genesis).
Earliest,
/// Latest mined block.
Latest
}
/// Uniquely identifies transaction.
#[derive(Debug, PartialEq, Clone)]
pub enum TransactionId {
/// Transaction's sha3.
Hash(H256),
/// Block id and transaction index within this block.
/// Querying by block position is always faster.
Location(BlockId, usize)
}
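To show how the two identifier types compose: a number, `Latest`, and a block-relative transaction position all drive the same lookup machinery. A sketch, assuming `client` implements the `BlockChainClient` trait introduced in `client/mod.rs` below:

fn lookups(client: &BlockChainClient) {
	// Same accessor, two ways of naming a block.
	let _by_number = client.block(BlockId::Number(1));
	let _latest = client.block(BlockId::Latest);
	// Second transaction of block #1, addressed by position rather than hash.
	let _tx = client.transaction(TransactionId::Location(BlockId::Number(1), 1));
}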

ethcore/src/client/mod.rs (new file)

@@ -0,0 +1,113 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Blockchain database client.
mod client;
mod config;
mod ids;
mod test_client;
pub use self::client::*;
pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig};
pub use self::ids::{BlockId, TransactionId};
pub use self::test_client::{TestBlockChainClient, EachBlockWith};
use std::sync::Mutex;
use util::bytes::Bytes;
use util::hash::{Address, H256, H2048};
use util::numbers::U256;
use blockchain::TreeRoute;
use block_queue::BlockQueueInfo;
use block::ClosedBlock;
use header::BlockNumber;
use transaction::LocalizedTransaction;
use log_entry::LocalizedLogEntry;
use filter::Filter;
use error::{ImportResult, Error};
/// Blockchain database client. Owns and manages a blockchain and a block queue.
pub trait BlockChainClient : Sync + Send {
/// Get raw block header data by block id.
fn block_header(&self, id: BlockId) -> Option<Bytes>;
/// Get raw block body data by block id.
/// Block body is an RLP list of two items: uncles and transactions.
fn block_body(&self, id: BlockId) -> Option<Bytes>;
/// Get raw block data by block header hash.
fn block(&self, id: BlockId) -> Option<Bytes>;
/// Get block status by block header hash.
fn block_status(&self, id: BlockId) -> BlockStatus;
/// Get block total difficulty.
fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
/// Get address nonce.
fn nonce(&self, address: &Address) -> U256;
/// Get block hash.
fn block_hash(&self, id: BlockId) -> Option<H256>;
/// Get address code.
fn code(&self, address: &Address) -> Option<Bytes>;
/// Get transaction with given hash.
fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
/// Get a tree route between `from` and `to`.
/// See `BlockChain::tree_route`.
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
/// Get latest state node
fn state_data(&self, hash: &H256) -> Option<Bytes>;
/// Get raw block receipts data by block header hash.
fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
/// Import a block into the blockchain.
fn import_block(&self, bytes: Bytes) -> ImportResult;
/// Get block queue information.
fn queue_info(&self) -> BlockQueueInfo;
/// Clear block queue and abort all import activity.
fn clear_queue(&self);
/// Get blockchain information.
fn chain_info(&self) -> BlockChainInfo;
/// Get the best block header.
fn best_block_header(&self) -> Bytes {
// TODO: lock blockchain only once
self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
}
/// Returns numbers of blocks containing given bloom.
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
/// Returns logs matching given filter.
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
/// Submit `seal` as a valid solution for the header of `pow_hash`.
/// Will check the seal, but not actually insert the block into the chain.
fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
}
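Since the trait is object-safe, helpers can be written once against `BlockChainClient` and exercised with either the real `Client` or the `TestBlockChainClient` below. An illustrative sketch; the helper itself is not part of this diff, and `BlockStatus` is assumed to be re-exported via `pub use self::client::*`:

/// Illustrative only: true if the block is already on the chain.
fn is_in_chain(client: &BlockChainClient, hash: &H256) -> bool {
	match client.block_status(BlockId::Hash(hash.clone())) {
		BlockStatus::InChain => true,
		_ => false,
	}
}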

@@ -0,0 +1,336 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Test client.
use util::*;
use transaction::{Transaction, LocalizedTransaction, Action};
use blockchain::TreeRoute;
use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId};
use header::{Header as BlockHeader, BlockNumber};
use filter::Filter;
use log_entry::LocalizedLogEntry;
use receipt::Receipt;
use error::{ImportResult, Error};
use block_queue::BlockQueueInfo;
use block::ClosedBlock;
/// Test client.
pub struct TestBlockChainClient {
/// Blocks.
pub blocks: RwLock<HashMap<H256, Bytes>>,
/// Mapping of numbers to hashes.
pub numbers: RwLock<HashMap<usize, H256>>,
/// Genesis block hash.
pub genesis_hash: H256,
/// Last block hash.
pub last_hash: RwLock<H256>,
/// Difficulty.
pub difficulty: RwLock<U256>,
}
#[derive(Clone)]
/// Used for generating test client blocks.
pub enum EachBlockWith {
/// Plain block.
Nothing,
/// Block with an uncle.
Uncle,
/// Block with a transaction.
Transaction,
/// Block with an uncle and transaction.
UncleAndTransaction
}
impl TestBlockChainClient {
/// Creates new test client.
pub fn new() -> TestBlockChainClient {
let mut client = TestBlockChainClient {
blocks: RwLock::new(HashMap::new()),
numbers: RwLock::new(HashMap::new()),
genesis_hash: H256::new(),
last_hash: RwLock::new(H256::new()),
difficulty: RwLock::new(From::from(0)),
};
client.add_blocks(1, EachBlockWith::Nothing); // add genesis block
client.genesis_hash = client.last_hash.read().unwrap().clone();
client
}
/// Add blocks to test client.
pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) {
let len = self.numbers.read().unwrap().len();
for n in len..(len + count) {
let mut header = BlockHeader::new();
header.difficulty = From::from(n);
header.parent_hash = self.last_hash.read().unwrap().clone();
header.number = n as BlockNumber;
let uncles = match with {
EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
let mut uncles = RlpStream::new_list(1);
let mut uncle_header = BlockHeader::new();
uncle_header.difficulty = From::from(n);
uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
uncle_header.number = n as BlockNumber;
uncles.append(&uncle_header);
header.uncles_hash = uncles.as_raw().sha3();
uncles
},
_ => RlpStream::new_list(0)
};
let txs = match with {
EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
let mut txs = RlpStream::new_list(1);
let keypair = KeyPair::create().unwrap();
let tx = Transaction {
action: Action::Create,
value: U256::from(100),
data: "3331600055".from_hex().unwrap(),
gas: U256::from(100_000),
gas_price: U256::one(),
nonce: U256::zero()
};
let signed_tx = tx.sign(&keypair.secret());
txs.append(&signed_tx);
txs.out()
},
_ => rlp::NULL_RLP.to_vec()
};
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&txs, 1);
rlp.append_raw(uncles.as_raw(), 1);
self.import_block(rlp.as_raw().to_vec()).unwrap();
}
}
/// TODO:
pub fn corrupt_block(&mut self, n: BlockNumber) {
let hash = self.block_hash(BlockId::Number(n)).unwrap();
let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap());
header.parent_hash = H256::new();
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&rlp::NULL_RLP, 1);
rlp.append_raw(&rlp::NULL_RLP, 1);
self.blocks.write().unwrap().insert(hash, rlp.out());
}
/// TODO:
pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 {
let blocks_read = self.numbers.read().unwrap();
let index = blocks_read.len() - delta;
blocks_read[&index].clone()
}
fn block_hash(&self, id: BlockId) -> Option<H256> {
match id {
BlockId::Hash(hash) => Some(hash),
BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(),
BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(),
BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned()
}
}
}
impl BlockChainClient for TestBlockChainClient {
fn block_total_difficulty(&self, _id: BlockId) -> Option<U256> {
Some(U256::zero())
}
fn block_hash(&self, _id: BlockId) -> Option<H256> {
unimplemented!();
}
fn nonce(&self, _address: &Address) -> U256 {
U256::zero()
}
fn code(&self, _address: &Address) -> Option<Bytes> {
unimplemented!();
}
fn transaction(&self, _id: TransactionId) -> Option<LocalizedTransaction> {
unimplemented!();
}
fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option<Vec<BlockNumber>> {
unimplemented!();
}
fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> {
unimplemented!();
}
fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
unimplemented!();
}
fn submit_seal(&self, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
unimplemented!();
}
fn block_header(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
}
fn block_body(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| {
let mut stream = RlpStream::new_list(2);
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
stream.out()
}))
}
fn block(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned())
}
fn block_status(&self, id: BlockId) -> BlockStatus {
match id {
BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain,
BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain,
_ => BlockStatus::Unknown
}
}
// works only if blocks are one after another 1 -> 2 -> 3
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
Some(TreeRoute {
ancestor: H256::new(),
index: 0,
blocks: {
let numbers_read = self.numbers.read().unwrap();
let mut adding = false;
let mut blocks = Vec::new();
for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) {
if hash == to {
if adding {
blocks.push(hash.clone());
}
adding = false;
break;
}
if hash == from {
adding = true;
}
if adding {
blocks.push(hash.clone());
}
}
if adding { Vec::new() } else { blocks }
}
})
}
// TODO: returns just hashes instead of node state rlp(?)
fn state_data(&self, hash: &H256) -> Option<Bytes> {
// starts with 'f' ?
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
let mut rlp = RlpStream::new();
rlp.append(&hash.clone());
return Some(rlp.out());
}
None
}
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
// starts with 'f' ?
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
let receipt = Receipt::new(
H256::zero(),
U256::zero(),
vec![]);
let mut rlp = RlpStream::new();
rlp.append(&receipt);
return Some(rlp.out());
}
None
}
fn import_block(&self, b: Bytes) -> ImportResult {
let header = Rlp::new(&b).val_at::<BlockHeader>(0);
let h = header.hash();
let number: usize = header.number as usize;
if number > self.blocks.read().unwrap().len() {
panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
}
if number > 0 {
match self.blocks.read().unwrap().get(&header.parent_hash) {
Some(parent) => {
let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
if parent.number != (header.number - 1) {
panic!("Unexpected block parent");
}
},
None => {
panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
}
}
}
let len = self.numbers.read().unwrap().len();
if number == len {
{
let mut difficulty = self.difficulty.write().unwrap();
*difficulty.deref_mut() = *difficulty.deref() + header.difficulty;
}
mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone());
self.blocks.write().unwrap().insert(h.clone(), b);
self.numbers.write().unwrap().insert(number, h.clone());
let mut parent_hash = header.parent_hash;
if number > 0 {
let mut n = number - 1;
while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
*self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
n -= 1;
parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
}
}
}
else {
self.blocks.write().unwrap().insert(h.clone(), b.to_vec());
}
Ok(h)
}
fn queue_info(&self) -> BlockQueueInfo {
BlockQueueInfo {
verified_queue_size: 0,
unverified_queue_size: 0,
verifying_queue_size: 0,
max_queue_size: 0,
max_mem_use: 0,
mem_used: 0,
}
}
fn clear_queue(&self) {
}
fn chain_info(&self) -> BlockChainInfo {
BlockChainInfo {
total_difficulty: *self.difficulty.read().unwrap(),
pending_total_difficulty: *self.difficulty.read().unwrap(),
genesis_hash: self.genesis_hash.clone(),
best_block_hash: self.last_hash.read().unwrap().clone(),
best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
}
}
}
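A typical use of the test client, following the behaviour implemented above (genesis is created by `new()`, so adding three plain blocks yields a best block number of 3); a sketch of a caller, not part of this diff:

fn example() {
	let mut client = TestBlockChainClient::new();
	client.add_blocks(3, EachBlockWith::Nothing);
	let info = client.chain_info();
	assert_eq!(info.best_block_number, 3);
	// Every imported block is retrievable by number.
	assert!(client.block(BlockId::Number(3)).is_some());
}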

@@ -202,7 +202,7 @@ impl Engine for Ethash {
 	}
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self
+#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self
 impl Ethash {
 	fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
 		const EXP_DIFF_PERIOD: u64 = 100000;

@@ -298,7 +298,7 @@ mod tests {
 	let genesis_header = engine.spec().genesis_header();
 	let mut db_result = get_temp_journal_db();
 	let mut db = db_result.take();
-	engine.spec().ensure_db_good(&mut db);
+	engine.spec().ensure_db_good(db.as_hashdb_mut());
 	let last_hashes = vec![genesis_header.hash()];
 	let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
 	let b = b.close();

@@ -311,7 +311,7 @@ mod tests {
 	let genesis_header = engine.spec().genesis_header();
 	let mut db_result = get_temp_journal_db();
 	let mut db = db_result.take();
-	engine.spec().ensure_db_good(&mut db);
+	engine.spec().ensure_db_good(db.as_hashdb_mut());
 	let last_hashes = vec![genesis_header.hash()];
 	let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
 	let mut uncle = Header::new();

@@ -61,7 +61,7 @@ mod tests {
 	let genesis_header = engine.spec().genesis_header();
 	let mut db_result = get_temp_journal_db();
 	let mut db = db_result.take();
-	engine.spec().ensure_db_good(&mut db);
+	engine.spec().ensure_db_good(db.as_hashdb_mut());
 	let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce());
 	assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64));
 	assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64));

@@ -243,7 +243,7 @@ struct CodeReader<'a> {
 	code: &'a Bytes
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))]
+#[cfg_attr(feature="dev", allow(len_without_is_empty))]
 impl<'a> CodeReader<'a> {
 	/// Get `no_of_bytes` from code and convert to U256. Move PC
 	fn read(&mut self, no_of_bytes: usize) -> U256 {

@@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> {
 	}
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
 enum InstructionCost {
 	Gas(U256),
 	GasMem(U256, U256),

@@ -347,7 +347,7 @@ impl evm::Evm for Interpreter {
 }

 impl Interpreter {
-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	fn get_gas_cost_mem(&self,
 		ext: &evm::Ext,
 		instruction: Instruction,

@@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> {
 		self.state.code(address).unwrap_or_else(|| vec![])
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
+	#[cfg_attr(feature="dev", allow(match_ref_pats))]
 	fn ret(&mut self, gas: &U256, data: &[u8]) -> Result<U256, evm::Error> {
 		match &mut self.output {
 			&mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe {

@@ -15,16 +15,16 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)] #![warn(missing_docs)]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] #![cfg_attr(feature="dev", plugin(clippy))]
// Clippy config // Clippy config
// TODO [todr] not really sure // TODO [todr] not really sure
#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] #![cfg_attr(feature="dev", allow(needless_range_loop))]
// Shorter than if-else // Shorter than if-else
#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] #![cfg_attr(feature="dev", allow(match_bool))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] #![cfg_attr(feature="dev", allow(clone_on_copy))]
//! Ethcore library //! Ethcore library
//! //!
@ -86,6 +86,7 @@ extern crate crossbeam;
#[cfg(feature = "jit" )] extern crate evmjit; #[cfg(feature = "jit" )] extern crate evmjit;
pub mod block; pub mod block;
pub mod block_queue;
pub mod client; pub mod client;
pub mod error; pub mod error;
pub mod ethereum; pub mod ethereum;
@ -119,7 +120,6 @@ mod substate;
mod executive; mod executive;
mod externalities; mod externalities;
mod verification; mod verification;
mod block_queue;
mod blockchain; mod blockchain;
#[cfg(test)] #[cfg(test)]

@@ -117,7 +117,7 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {
 	}
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
+#[cfg_attr(feature="dev", allow(single_match))]
 fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
 	if let UserMessage(ref message) = *net_message {
 		match *message {

@@ -99,7 +99,7 @@ pub struct Spec {
 	genesis_state: PodState,
 }

-#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
+#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self)
 impl Spec {
 	/// Convert this object into a boxed Engine of the right underlying type.
 	// TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead.

@@ -31,7 +31,7 @@ pub type ApplyResult = Result<Receipt, Error>;
 /// Representation of the entire state of all accounts in the system.
 pub struct State {
-	db: JournalDB,
+	db: Box<JournalDB>,
 	root: H256,
 	cache: RefCell<HashMap<Address, Option<Account>>>,
 	snapshots: RefCell<Vec<HashMap<Address, Option<Option<Account>>>>>,

@@ -41,11 +41,11 @@ pub struct State {
 impl State {
 	/// Creates new state with empty state root
 	#[cfg(test)]
-	pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State {
+	pub fn new(mut db: Box<JournalDB>, account_start_nonce: U256) -> State {
 		let mut root = H256::new();
 		{
 			// init trie and reset root too null
-			let _ = SecTrieDBMut::new(&mut db, &mut root);
+			let _ = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root);
 		}
 		State {

@@ -58,10 +58,10 @@ impl State {
 	}

 	/// Creates new state with existing state root
-	pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State {
+	pub fn from_existing(db: Box<JournalDB>, root: H256, account_start_nonce: U256) -> State {
 		{
 			// trie should panic! if root does not exist
-			let _ = SecTrieDB::new(&db, &root);
+			let _ = SecTrieDB::new(db.as_hashdb(), &root);
 		}
 		State {

@@ -126,7 +126,7 @@ impl State {
 	}

 	/// Destroy the current object and return root and database.
-	pub fn drop(self) -> (H256, JournalDB) {
+	pub fn drop(self) -> (H256, Box<JournalDB>) {
 		(self.root, self.db)
 	}

@@ -148,7 +148,7 @@ impl State {
 	/// Determine whether an account exists.
 	pub fn exists(&self, a: &Address) -> bool {
-		self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(&self.db, &self.root).contains(&a)
+		self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(self.db.as_hashdb(), &self.root).contains(&a)
 	}

 	/// Get the balance of account `a`.

@@ -163,7 +163,7 @@ impl State {
 	/// Mutate storage of account `address` so that it is `value` for `key`.
 	pub fn storage_at(&self, address: &Address, key: &H256) -> H256 {
-		self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(&self.db, address), key))
+		self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key))
 	}

 	/// Mutate storage of account `a` so that it is `value` for `key`.

@@ -224,7 +224,7 @@ impl State {
 	/// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit.
 	/// `accounts` is mutable because we may need to commit the code or storage and record that.
-	#[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))]
+	#[cfg_attr(feature="dev", allow(match_ref_pats))]
 	pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap<Address, Option<Account>>) {
 		// first, commit the sub trees.
 		// TODO: is this necessary or can we dispense with the `ref mut a` for just `a`?

@@ -253,7 +253,7 @@ impl State {
 	/// Commits our cached account changes into the trie.
 	pub fn commit(&mut self) {
 		assert!(self.snapshots.borrow().is_empty());
-		Self::commit_into(&mut self.db, &mut self.root, self.cache.borrow_mut().deref_mut());
+		Self::commit_into(self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut());
 	}

 	#[cfg(test)]

@@ -285,11 +285,11 @@ impl State {
 	fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option<Account> {
 		let have_key = self.cache.borrow().contains_key(a);
 		if !have_key {
-			self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp))
+			self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp))
 		}
 		if require_code {
 			if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() {
-				account.cache_code(&AccountDB::new(&self.db, a));
+				account.cache_code(&AccountDB::new(self.db.as_hashdb(), a));
 			}
 		}
 		unsafe { ::std::mem::transmute(self.cache.borrow().get(a).unwrap()) }

@@ -305,7 +305,7 @@ impl State {
 	fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> &'a mut Account {
 		let have_key = self.cache.borrow().contains_key(a);
 		if !have_key {
-			self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp))
+			self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp))
 		} else {
 			self.note_cache(a);
 		}

@@ -318,7 +318,7 @@ impl State {
 		unsafe { ::std::mem::transmute(self.cache.borrow_mut().get_mut(a).unwrap().as_mut().map(|account| {
 			if require_code {
-				account.cache_code(&AccountDB::new(&self.db, a));
+				account.cache_code(&AccountDB::new(self.db.as_hashdb(), a));
 			}
 			account
 		}).unwrap()) }
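The move from a concrete `JournalDB` value to `Box<JournalDB>` is what lets the pruning algorithm be chosen at runtime: `State` only ever sees the trait. A sketch of the resulting flow, using the `journaldb::new` constructor that appears later in this diff; note that `State::new` is the `#[cfg(test)]` constructor shown above, so the sketch is test-only:

use util::journaldb;

#[cfg(test)]
fn fresh_state(path: &str, account_start_nonce: U256) -> State {
	// Any Algorithm variant yields a Box<JournalDB>; State never needs
	// to know which pruning strategy backs it.
	let db: Box<JournalDB> = journaldb::new(path, journaldb::Algorithm::EarlyMerge);
	State::new(db, account_start_nonce)
}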

@@ -250,9 +250,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult<BlockChain> {
 	}
 }

-pub fn get_temp_journal_db() -> GuardedTempResult<JournalDB> {
+pub fn get_temp_journal_db() -> GuardedTempResult<Box<JournalDB>> {
 	let temp = RandomTempPath::new();
-	let journal_db = JournalDB::new(temp.as_str());
+	let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge);
 	GuardedTempResult {
 		_temp: temp,
 		result: Some(journal_db)

@@ -268,8 +268,8 @@ pub fn get_temp_state() -> GuardedTempResult<State> {
 	}
 }

-pub fn get_temp_journal_db_in(path: &Path) -> JournalDB {
-	JournalDB::new(path.to_str().unwrap())
+pub fn get_temp_journal_db_in(path: &Path) -> Box<JournalDB> {
+	journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge)
 }

 pub fn get_temp_state_in(path: &Path) -> State {
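Four algorithm variants are referenced across this diff (`Archive`, `EarlyMerge`, `OverlayRecent`, and the not-yet-merged `RefCounted`). The enum itself lives in `util::journaldb` and is not part of this diff; a reconstruction of its apparent shape, for reference only (the derives are assumptions):

// Reconstructed from usages in this diff; not the authoritative definition.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Algorithm {
	/// Keep all state history ("archive", the CLI default).
	Archive,
	/// "pruned" on the CLI.
	EarlyMerge,
	/// "fast" on the CLI.
	OverlayRecent,
	/// "slow" on the CLI; still commented out in parity/main.rs.
	RefCounted,
}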

@@ -80,7 +80,7 @@ impl Transaction {
 	}
 }

 impl FromJson for SignedTransaction {
-	#[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))]
+	#[cfg_attr(feature="dev", allow(single_char_pattern))]
 	fn from_json(json: &Json) -> SignedTransaction {
 		let t = Transaction {
 			nonce: xjson!(&json["nonce"]),

hook.sh

@@ -1,3 +1,12 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push
-chmod +x ./.git/hooks/pre-push
+FILE=./.git/hooks/pre-push
+echo "#!/bin/sh\n" > $FILE
+# Exit on any error
+echo "set -e" >> $FILE
+# Run release build
+echo "cargo build --release --features dev" >> $FILE
+# Build tests
+echo "cargo test --no-run --features dev \\" >> $FILE
+echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" >> $FILE
+echo "" >> $FILE
+chmod +x $FILE

@@ -17,8 +17,8 @@
 //! Ethcore client application.

 #![warn(missing_docs)]
-#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
-#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
+#![cfg_attr(feature="dev", feature(plugin))]
+#![cfg_attr(feature="dev", plugin(clippy))]
 extern crate docopt;
 extern crate rustc_serialize;
 extern crate ethcore_util as util;

@@ -37,7 +37,7 @@ extern crate rpassword;
 #[cfg(feature = "rpc")]
 extern crate ethcore_rpc as rpc;

-use std::net::{SocketAddr};
+use std::net::{SocketAddr, IpAddr};
 use std::env;
 use std::process::exit;
 use std::path::PathBuf;

@@ -53,6 +53,7 @@ use ethsync::{EthSync, SyncConfig, SyncProvider};
 use docopt::Docopt;
 use daemonize::Daemonize;
 use number_prefix::{binary_prefix, Standalone, Prefixed};
+use util::keys::store::*;

 fn die_with_message(msg: &str) -> ! {
 	println!("ERROR: {}", msg);
@@ -70,28 +71,30 @@ Parity. Ethereum Client.
 Copyright 2015, 2016 Ethcore (UK) Limited

 Usage:
-  parity daemon <pid-file> [options] [ --no-bootstrap | <enode>... ]
+  parity daemon <pid-file> [options]
   parity account (new | list)
-  parity [options] [ --no-bootstrap | <enode>... ]
+  parity [options]

 Protocol Options:
   --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file
   or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
   --testnet Equivalent to --chain testnet (geth-compatible).
   --networkid INDEX Override the network identifier from the chain we are on.
-  --pruning Client should prune the state/storage trie.
+  --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive,
+  light (experimental), fast (experimental) [default: archive].
   -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity]
+  --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity]
   --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
   --identity NAME Specify your node's name.

 Networking Options:
-  --no-bootstrap Don't bother trying to connect to any nodes initially.
+  --port PORT Override the port on which the node should listen [default: 30303].
-  --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
-  --public-address URL Specify the IP/port on which peers may connect.
-  --address URL Equivalent to --listen-address URL --public-address URL.
   --peers NUM Try to maintain that many peers [default: 25].
+  --nat METHOD Specify method to use for determining public address. Must be one of: any, none,
+  upnp, extip:(IP) [default: any].
+  --bootnodes NODES Specify additional comma-separated bootnodes.
+  --no-bootstrap Don't bother trying to connect to standard bootnodes.
   --no-discovery Disable new peer discovery.
-  --no-upnp Disable trying to figure out the correct public adderss over UPnP.
   --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation.

 API and Console Options:

@@ -100,7 +103,8 @@ API and Console Options:
   --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545].
   --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
   --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
-  list of API name. Possible name are web3, eth and net. [default: web3,eth,net].
+  list of API name. Possible name are web3, eth and net. [default: web3,eth,net,personal].
   --rpc Equivalent to --jsonrpc (geth-compatible).
   --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible).
   --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible).

@@ -110,7 +114,7 @@ API and Console Options:
 Sealing/Mining Options:
   --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
   from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
-  --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
+  --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters.

 Memory Footprint Options:
   --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384].

@@ -119,6 +123,21 @@ Memory Footprint Options:
   --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with
   other cache options (geth-compatible).

+Geth-Compatibility Options
+  --datadir PATH Equivalent to --db-path PATH.
+  --testnet Equivalent to --chain testnet.
+  --networkid INDEX Override the network identifier from the chain we are on.
+  --rpc Equivalent to --jsonrpc.
+  --rpcaddr HOST Equivalent to --jsonrpc-addr HOST.
+  --rpcport PORT Equivalent to --jsonrpc-port PORT.
+  --rpcapi APIS Equivalent to --jsonrpc-apis APIS.
+  --rpccorsdomain URL Equivalent to --jsonrpc-cors URL.
+  --maxpeers COUNT Equivalent to --peers COUNT.
+  --nodekey KEY Equivalent to --node-key KEY.
+  --nodiscover Equivalent to --no-discovery.
+  --etherbase ADDRESS Equivalent to --author ADDRESS.
+  --extradata STRING Equivalent to --extra-data STRING.
+
 Miscellaneous Options:
   -l --logging LOGGING Specify the logging level.
   -v --version Show information about version.
@@ -132,22 +151,18 @@ struct Args {
 	cmd_new: bool,
 	cmd_list: bool,
 	arg_pid_file: String,
-	arg_enode: Vec<String>,
 	flag_chain: String,
-	flag_testnet: bool,
-	flag_datadir: String,
-	flag_networkid: Option<String>,
+	flag_db_path: String,
 	flag_identity: String,
 	flag_cache: Option<usize>,
 	flag_keys_path: String,
-	flag_pruning: bool,
+	flag_bootnodes: Option<String>,
+	flag_pruning: String,
 	flag_no_bootstrap: bool,
-	flag_listen_address: String,
-	flag_public_address: Option<String>,
-	flag_address: Option<String>,
+	flag_port: u16,
 	flag_peers: usize,
 	flag_no_discovery: bool,
-	flag_no_upnp: bool,
+	flag_nat: String,
 	flag_node_key: Option<String>,
 	flag_cache_pref_size: usize,
 	flag_cache_max_size: usize,

@@ -157,15 +172,24 @@ struct Args {
 	flag_jsonrpc_port: u16,
 	flag_jsonrpc_cors: String,
 	flag_jsonrpc_apis: String,
+	flag_logging: Option<String>,
+	flag_version: bool,
+	// geth-compatibility...
+	flag_nodekey: Option<String>,
+	flag_nodiscover: bool,
+	flag_maxpeers: Option<usize>,
+	flag_author: String,
+	flag_extra_data: Option<String>,
+	flag_datadir: Option<String>,
+	flag_extradata: Option<String>,
+	flag_etherbase: Option<String>,
 	flag_rpc: bool,
 	flag_rpcaddr: Option<String>,
 	flag_rpcport: Option<u16>,
 	flag_rpccorsdomain: Option<String>,
 	flag_rpcapi: Option<String>,
-	flag_logging: Option<String>,
-	flag_version: bool,
-	flag_author: String,
-	flag_extra_data: Option<String>,
+	flag_testnet: bool,
+	flag_networkid: Option<String>,
 }
 fn setup_log(init: &Option<String>) {

@@ -195,7 +219,7 @@ fn setup_log(init: &Option<String>) {
 }

 #[cfg(feature = "rpc")]
-fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
+fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, secret_store: Arc<AccountService>, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option<Arc<PanicHandler>> {
 	use rpc::v1::*;

 	let server = rpc::RpcServer::new();

@@ -204,9 +228,10 @@ fn setup_rpc_server(client: Arc<Client>, sync: Arc<EthSync>, url: &str, cors_dom
 			"web3" => server.add_delegate(Web3Client::new().to_delegate()),
 			"net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
 			"eth" => {
-				server.add_delegate(EthClient::new(&client, &sync).to_delegate());
+				server.add_delegate(EthClient::new(&client, &sync, &secret_store).to_delegate());
 				server.add_delegate(EthFilterClient::new(&client).to_delegate());
 			}
+			"personal" => server.add_delegate(PersonalClient::new(&secret_store).to_delegate()),
 			_ => {
 				die!("{}: Invalid API name to be enabled.", api);
 			}

@@ -245,15 +270,17 @@ impl Configuration {
 	}

 	fn path(&self) -> String {
-		self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
+		let d = self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path);
+		d.replace("$HOME", env::home_dir().unwrap().to_str().unwrap())
 	}

 	fn author(&self) -> Address {
-		Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author))
+		let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author);
+		Address::from_str(d).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author))
 	}

 	fn extra_data(&self) -> Bytes {
-		match self.args.flag_extra_data {
+		match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) {
 			Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(),
 			None => version_data(),
 			Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); }
@@ -285,45 +312,36 @@ impl Configuration {
 	}

 	fn init_nodes(&self, spec: &Spec) -> Vec<String> {
-		if self.args.flag_no_bootstrap { Vec::new() } else {
-			match self.args.arg_enode.len() {
-				0 => spec.nodes().clone(),
-				_ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s))).collect(),
-			}
+		let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() };
+		if let Some(ref x) = self.args.flag_bootnodes {
+			r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s))));
 		}
+		r
 	}

-	#[cfg_attr(all(nightly, feature="dev"), allow(useless_format))]
+	#[cfg_attr(feature="dev", allow(useless_format))]
 	fn net_addresses(&self) -> (Option<SocketAddr>, Option<SocketAddr>) {
-		let mut listen_address = None;
-		let mut public_address = None;
-
-		if let Some(ref a) = self.args.flag_address {
-			public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --address", a)));
-			listen_address = public_address;
-		}
-		if listen_address.is_none() {
-			listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address)));
-		}
-		if let Some(ref a) = self.args.flag_public_address {
-			if public_address.is_some() {
-				die!("Conflicting flags provided: --address and --public-address");
-			}
-			public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --public-address", a)));
-		}
+		let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port));
+		let public_address = if self.args.flag_nat.starts_with("extip:") {
+			let host = &self.args.flag_nat[6..];
+			let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host));
+			Some(SocketAddr::new(host, self.args.flag_port))
+		} else {
+			listen_address.clone()
+		};
 		(listen_address, public_address)
 	}

 	fn net_settings(&self, spec: &Spec) -> NetworkConfiguration {
 		let mut ret = NetworkConfiguration::new();
-		ret.nat_enabled = !self.args.flag_no_upnp;
+		ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp";
 		ret.boot_nodes = self.init_nodes(spec);
 		let (listen, public) = self.net_addresses();
 		ret.listen_address = listen;
 		ret.public_address = public;
 		ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3()));
-		ret.discovery_enabled = !self.args.flag_no_discovery;
-		ret.ideal_peers = self.args.flag_peers as u32;
+		ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover;
+		ret.ideal_peers = self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32;
 		let mut net_path = PathBuf::from(&self.path());
 		net_path.push("network");
 		ret.config_path = Some(net_path.to_str().unwrap().to_owned());
@ -402,7 +420,14 @@ impl Configuration {
client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; client_config.blockchain.max_cache_size = self.args.flag_cache_max_size;
} }
} }
client_config.prefer_journal = self.args.flag_pruning; client_config.pruning = match self.args.flag_pruning.as_str() {
"" => journaldb::Algorithm::Archive,
"archive" => journaldb::Algorithm::Archive,
"pruned" => journaldb::Algorithm::EarlyMerge,
"fast" => journaldb::Algorithm::OverlayRecent,
// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged.
_ => { die!("Invalid pruning method given."); }
};
client_config.name = self.args.flag_identity.clone(); client_config.name = self.args.flag_identity.clone();
client_config.queue.max_mem_use = self.args.flag_queue_max_size; client_config.queue.max_mem_use = self.args.flag_queue_max_size;
let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap();
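Note on the --pruning match above: the empty string and "archive" both select the archive journal, "pruned" maps to EarlyMerge, "fast" to OverlayRecent, and "slow" (RefCounted) stays rejected until that algorithm is merged. A minimal sketch of the same dispatch with a local enum mirroring journaldb::Algorithm:

#[derive(Debug, Clone, Copy, PartialEq)]
enum Algorithm { Archive, EarlyMerge, OverlayRecent }

fn parse_pruning(flag: &str) -> Algorithm {
    match flag {
        "" | "archive" => Algorithm::Archive,
        "pruned" => Algorithm::EarlyMerge,
        "fast" => Algorithm::OverlayRecent,
        other => panic!("{}: Invalid pruning method given.", other), // die! in parity
    }
}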
@ -414,17 +439,20 @@ impl Configuration {
// Sync // Sync
let sync = EthSync::register(service.network(), sync_config, client); let sync = EthSync::register(service.network(), sync_config, client);
// Secret Store
let account_service = Arc::new(AccountService::new());
// Setup rpc // Setup rpc
if self.args.flag_jsonrpc || self.args.flag_rpc { if self.args.flag_jsonrpc || self.args.flag_rpc {
let url = format!("{}:{}", let url = format!("{}:{}",
self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr),
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port)
); );
SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url));
let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors);
// TODO: use this as the API list. // TODO: use this as the API list.
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis);
let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect());
if let Some(handler) = server_handler { if let Some(handler) = server_handler {
panic_handler.forward_from(handler.deref()); panic_handler.forward_from(handler.deref());
} }
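Note: the RPC setup keeps both flag generations alive, with --rpcaddr/--rpcport taking precedence over --jsonrpc-addr/--jsonrpc-port, and validates the joined host:port before the server starts. A hedged sketch of just that resolution step:

use std::net::SocketAddr;
use std::str::FromStr;

fn rpc_url(rpcaddr: Option<&String>, jsonrpc_addr: &String,
           rpcport: Option<u16>, jsonrpc_port: u16) -> SocketAddr {
    let url = format!("{}:{}",
        rpcaddr.unwrap_or(jsonrpc_addr),
        rpcport.unwrap_or(jsonrpc_port));
    // panic! stands in for die!.
    SocketAddr::from_str(&url)
        .unwrap_or_else(|_| panic!("{}: Invalid JSONRPC listen host/port given.", url))
}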
@ -26,9 +26,8 @@ serde_macros = { version = "0.7.0", optional = true }
[build-dependencies] [build-dependencies]
serde_codegen = { version = "0.7.0", optional = true } serde_codegen = { version = "0.7.0", optional = true }
syntex = "0.29.0" syntex = "0.29.0"
rustc_version = "0.1"
[features] [features]
default = ["serde_codegen"] default = ["serde_codegen"]
nightly = ["serde_macros"] nightly = ["serde_macros"]
dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
@ -14,10 +14,6 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate rustc_version;
use rustc_version::{version_meta, Channel};
#[cfg(not(feature = "serde_macros"))] #[cfg(not(feature = "serde_macros"))]
mod inner { mod inner {
extern crate syntex; extern crate syntex;
@ -46,7 +42,4 @@ mod inner {
fn main() { fn main() {
inner::main(); inner::main();
if let Channel::Nightly = version_meta().channel {
println!("cargo:rustc-cfg=nightly");
}
} }
@ -27,23 +27,27 @@ use ethcore::block::{IsBlock};
use ethcore::views::*; use ethcore::views::*;
use ethcore::ethereum::Ethash; use ethcore::ethereum::Ethash;
use ethcore::ethereum::denominations::shannon; use ethcore::ethereum::denominations::shannon;
use ethcore::transaction::Transaction as EthTransaction;
use v1::traits::{Eth, EthFilter}; use v1::traits::{Eth, EthFilter};
use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log};
use v1::helpers::{PollFilter, PollManager}; use v1::helpers::{PollFilter, PollManager};
use util::keys::store::AccountProvider;
/// Eth rpc implementation. /// Eth rpc implementation.
pub struct EthClient<C, S> where C: BlockChainClient, S: SyncProvider { pub struct EthClient<C, S, A> where C: BlockChainClient, S: SyncProvider, A: AccountProvider {
client: Weak<C>, client: Weak<C>,
sync: Weak<S>, sync: Weak<S>,
accounts: Weak<A>,
hashrates: RwLock<HashMap<H256, u64>>, hashrates: RwLock<HashMap<H256, u64>>,
} }
impl<C, S> EthClient<C, S> where C: BlockChainClient, S: SyncProvider { impl<C, S, A> EthClient<C, S, A> where C: BlockChainClient, S: SyncProvider, A: AccountProvider {
/// Creates new EthClient. /// Creates new EthClient.
pub fn new(client: &Arc<C>, sync: &Arc<S>) -> Self { pub fn new(client: &Arc<C>, sync: &Arc<S>, accounts: &Arc<A>) -> Self {
EthClient { EthClient {
client: Arc::downgrade(client), client: Arc::downgrade(client),
sync: Arc::downgrade(sync), sync: Arc::downgrade(sync),
accounts: Arc::downgrade(accounts),
hashrates: RwLock::new(HashMap::new()), hashrates: RwLock::new(HashMap::new()),
} }
} }
@ -94,7 +98,7 @@ impl<C, S> EthClient<C, S> where C: BlockChainClient, S: SyncProvider {
} }
} }
impl<C, S> Eth for EthClient<C, S> where C: BlockChainClient + 'static, S: SyncProvider + 'static { impl<C, S, A> Eth for EthClient<C, S, A> where C: BlockChainClient + 'static, S: SyncProvider + 'static, A: AccountProvider + 'static {
fn protocol_version(&self, params: Params) -> Result<Value, Error> { fn protocol_version(&self, params: Params) -> Result<Value, Error> {
match params { match params {
Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)),
@ -158,7 +162,7 @@ impl<C, S> Eth for EthClient<C, S> where C: BlockChainClient + 'static, S: SyncP
} }
} }
fn block_transaction_count(&self, params: Params) -> Result<Value, Error> { fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> {
from_params::<(H256,)>(params) from_params::<(H256,)>(params)
.and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) {
Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()),
@ -166,6 +170,17 @@ impl<C, S> Eth for EthClient<C, S> where C: BlockChainClient + 'static, S: SyncP
}) })
} }
fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> {
from_params::<(BlockNumber,)>(params)
.and_then(|(block_number,)| match block_number {
BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending),
_ => match take_weak!(self.client).block(block_number.into()) {
Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()),
None => Ok(Value::Null)
}
})
}
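Note: the by-number variant additionally understands the `pending` tag, answering from the sync layer's transaction queue instead of the chain. No test for it appears in this diff; in the style of the v1 tests, a request would look like:

let request = r#"{"jsonrpc": "2.0", "method": "eth_getBlockTransactionCountByNumber", "params": ["pending"], "id": 1}"#;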
fn block_uncles_count(&self, params: Params) -> Result<Value, Error> { fn block_uncles_count(&self, params: Params) -> Result<Value, Error> {
from_params::<(H256,)>(params) from_params::<(H256,)>(params)
.and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) {
@ -252,6 +267,24 @@ impl<C, S> Eth for EthClient<C, S> where C: BlockChainClient + 'static, S: SyncP
to_value(&true) to_value(&true)
}) })
} }
fn send_transaction(&self, params: Params) -> Result<Value, Error> {
from_params::<(TransactionRequest, )>(params)
.and_then(|(transaction_request, )| {
let accounts = take_weak!(self.accounts);
match accounts.account_secret(&transaction_request.from) {
Ok(secret) => {
let sync = take_weak!(self.sync);
let transaction: EthTransaction = transaction_request.into();
let signed_transaction = transaction.sign(&secret);
let hash = signed_transaction.hash();
sync.insert_transaction(signed_transaction);
to_value(&hash)
},
Err(_) => { to_value(&U256::zero()) }
}
})
}
} }
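Note: eth_sendTransaction resolves the sender's secret through the account service, signs the converted request, queues the signed transaction via the sync layer, and returns its hash (or zero when the account is locked or unknown). In the style of the v1 tests, a request exercising it could look like this (addresses hypothetical; no such test exists in this diff):

let request = r#"{"jsonrpc": "2.0", "method": "eth_sendTransaction", "params": [{"from": "0x0000000000000000000000000000000000000001", "to": "0x0000000000000000000000000000000000000002", "value": "0x3"}], "id": 1}"#;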
/// Eth filter rpc implementation. /// Eth filter rpc implementation.
@ -36,10 +36,15 @@ impl<S> NetClient<S> where S: SyncProvider {
impl<S> Net for NetClient<S> where S: SyncProvider + 'static { impl<S> Net for NetClient<S> where S: SyncProvider + 'static {
fn version(&self, _: Params) -> Result<Value, Error> { fn version(&self, _: Params) -> Result<Value, Error> {
Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned()))
} }
fn peer_count(&self, _params: Params) -> Result<Value, Error> { fn peer_count(&self, _params: Params) -> Result<Value, Error> {
Ok(Value::U64(take_weak!(self.sync).status().num_peers as u64)) Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned()))
}
fn is_listening(&self, _: Params) -> Result<Value, Error> {
// right now (11 March 2016), we are always listening for incoming connections
Ok(Value::Bool(true))
} }
} }
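Note: both answers switch from JSON numbers to JSON strings, following the Ethereum JSON-RPC convention of a decimal string for net_version and a 0x-prefixed hex quantity for net_peerCount. The formatting reduces to:

let protocol_version: u64 = 65;
let num_peers: u64 = 120;
assert_eq!(format!("{}", protocol_version), "65");
assert_eq!(format!("0x{:x}", num_peers), "0x78"); // 120 decimal == 0x78 hex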
@ -20,30 +20,28 @@ use jsonrpc_core::*;
use v1::traits::Personal; use v1::traits::Personal;
use util::keys::store::*; use util::keys::store::*;
use util::Address; use util::Address;
use std::sync::RwLock;
/// Account management (personal) rpc implementation. /// Account management (personal) rpc implementation.
pub struct PersonalClient { pub struct PersonalClient<A> where A: AccountProvider {
secret_store: Weak<RwLock<SecretStore>>, accounts: Weak<A>,
} }
impl PersonalClient { impl<A> PersonalClient<A> where A: AccountProvider {
/// Creates new PersonalClient /// Creates new PersonalClient
pub fn new(store: &Arc<RwLock<SecretStore>>) -> Self { pub fn new(store: &Arc<A>) -> Self {
PersonalClient { PersonalClient {
secret_store: Arc::downgrade(store), accounts: Arc::downgrade(store),
} }
} }
} }
impl Personal for PersonalClient { impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static {
fn accounts(&self, _: Params) -> Result<Value, Error> { fn accounts(&self, _: Params) -> Result<Value, Error> {
let store_wk = take_weak!(self.secret_store); let store = take_weak!(self.accounts);
let store = store_wk.read().unwrap();
match store.accounts() { match store.accounts() {
Ok(account_list) => { Ok(account_list) => {
Ok(Value::Array(account_list.iter() Ok(Value::Array(account_list.iter()
.map(|&(account, _)| Value::String(format!("{:?}", account))) .map(|&account| Value::String(format!("{:?}", account)))
.collect::<Vec<Value>>()) .collect::<Vec<Value>>())
) )
} }
@ -54,8 +52,7 @@ impl Personal for PersonalClient {
fn new_account(&self, params: Params) -> Result<Value, Error> { fn new_account(&self, params: Params) -> Result<Value, Error> {
from_params::<(String, )>(params).and_then( from_params::<(String, )>(params).and_then(
|(pass, )| { |(pass, )| {
let store_wk = take_weak!(self.secret_store); let store = take_weak!(self.accounts);
let mut store = store_wk.write().unwrap();
match store.new_account(&pass) { match store.new_account(&pass) {
Ok(address) => Ok(Value::String(format!("{:?}", address))), Ok(address) => Ok(Value::String(format!("{:?}", address))),
Err(_) => Err(Error::internal_error()) Err(_) => Err(Error::internal_error())
@ -67,8 +64,7 @@ impl Personal for PersonalClient {
fn unlock_account(&self, params: Params) -> Result<Value, Error> { fn unlock_account(&self, params: Params) -> Result<Value, Error> {
from_params::<(Address, String, u64)>(params).and_then( from_params::<(Address, String, u64)>(params).and_then(
|(account, account_pass, _)|{ |(account, account_pass, _)|{
let store_wk = take_weak!(self.secret_store); let store = take_weak!(self.accounts);
let store = store_wk.read().unwrap();
match store.unlock_account(&account, &account_pass) { match store.unlock_account(&account, &account_pass) {
Ok(_) => Ok(Value::Bool(true)), Ok(_) => Ok(Value::Bool(true)),
Err(_) => Ok(Value::Bool(false)), Err(_) => Ok(Value::Bool(false)),
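Note: PersonalClient trades the Weak<RwLock<SecretStore>> for a generic AccountProvider, so locking moves inside the store and tests can inject a mock. The trait shape below is an assumption reconstructed from the calls made in this diff (the real definition lives in util::keys::store and may differ in detail; eth.rs additionally calls account_secret(), omitted here to keep the sketch type-light):

type Address = [u8; 20]; // stand-in for util::hash::Address

trait AccountProvider: Send + Sync {
    fn accounts(&self) -> Result<Vec<Address>, ()>;
    fn new_account(&self, pass: &str) -> Result<Address, ()>;
    fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), ()>;
}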
@ -21,9 +21,10 @@
pub mod traits; pub mod traits;
mod impls; mod impls;
mod types; mod types;
mod helpers;
#[cfg(test)] #[cfg(test)]
mod tests; mod tests;
mod helpers;
pub use self::traits::{Web3, Eth, EthFilter, Personal, Net}; pub use self::traits::{Web3, Eth, EthFilter, Personal, Net};
pub use self::impls::*; pub use self::impls::*;
@ -14,12 +14,6 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate rustc_version; mod sync_provider;
use rustc_version::{version_meta, Channel}; pub use self::sync_provider::{Config, TestSyncProvider};
fn main() {
if let Channel::Nightly = version_meta().channel {
println!("cargo:rustc-cfg=nightly");
}
}
@ -0,0 +1,58 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethcore::transaction::SignedTransaction;
use ethsync::{SyncProvider, SyncStatus, SyncState};
pub struct Config {
pub protocol_version: u8,
pub num_peers: usize,
}
pub struct TestSyncProvider {
status: SyncStatus,
}
impl TestSyncProvider {
pub fn new(config: Config) -> Self {
TestSyncProvider {
status: SyncStatus {
state: SyncState::NotSynced,
protocol_version: config.protocol_version,
start_block_number: 0,
last_imported_block_number: None,
highest_block_number: None,
blocks_total: 0,
blocks_received: 0,
num_peers: config.num_peers,
num_active_peers: 0,
mem_used: 0,
transaction_queue_pending: 0,
},
}
}
}
impl SyncProvider for TestSyncProvider {
fn status(&self) -> SyncStatus {
self.status.clone()
}
fn insert_transaction(&self, _transaction: SignedTransaction) {
unimplemented!()
}
}
@ -1 +1,21 @@
//TODO: load custom blockchain state and test // Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//!TODO: load custom blockchain state and test
mod net;
mod web3;
mod helpers;
rpc/src/v1/tests/net.rs (new file)
@ -0,0 +1,66 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use jsonrpc_core::IoHandler;
use v1::{Net, NetClient};
use v1::tests::helpers::{Config, TestSyncProvider};
fn sync_provider() -> Arc<TestSyncProvider> {
Arc::new(TestSyncProvider::new(Config {
protocol_version: 65,
num_peers: 120,
}))
}
#[test]
fn rpc_net_version() {
let sync = sync_provider();
let net = NetClient::new(&sync).to_delegate();
let io = IoHandler::new();
io.add_delegate(net);
let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_net_peer_count() {
let sync = sync_provider();
let net = NetClient::new(&sync).to_delegate();
let io = IoHandler::new();
io.add_delegate(net);
let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
#[test]
fn rpc_net_listening() {
let sync = sync_provider();
let net = NetClient::new(&sync).to_delegate();
let io = IoHandler::new();
io.add_delegate(net);
let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
rpc/src/v1/tests/web3.rs (new file)
@ -0,0 +1,33 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use jsonrpc_core::IoHandler;
use util::version;
use v1::{Web3, Web3Client};
#[test]
fn rpc_web3_version() {
let web3 = Web3Client::new().to_delegate();
let io = IoHandler::new();
io.add_delegate(web3);
let v = version().to_owned().replace("Parity/", "Parity//");
let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref());
assert_eq!(io.handle_request(request), Some(response));
}
@ -59,8 +59,11 @@ pub trait Eth: Sized + Send + Sync + 'static {
/// Returns the number of transactions sent from given address at given time (block number). /// Returns the number of transactions sent from given address at given time (block number).
fn transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } fn transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
/// Returns the number of transactions in a block. /// Returns the number of transactions in a block with the given hash.
fn block_transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } fn block_transaction_count_by_hash(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
/// Returns the number of transactions in a block with the given number.
fn block_transaction_count_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
/// Returns the number of uncles in a given block. /// Returns the number of uncles in a given block.
fn block_uncles_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } fn block_uncles_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
@ -130,8 +133,8 @@ pub trait Eth: Sized + Send + Sync + 'static {
delegate.add_method("eth_balance", Eth::balance); delegate.add_method("eth_balance", Eth::balance);
delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getStorageAt", Eth::storage_at);
delegate.add_method("eth_getTransactionCount", Eth::transaction_count); delegate.add_method("eth_getTransactionCount", Eth::transaction_count);
delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count); delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash);
delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number);
delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count);
delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count);
delegate.add_method("eth_code", Eth::code_at); delegate.add_method("eth_code", Eth::code_at);
@ -15,10 +15,12 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use rustc_serialize::hex::ToHex; use rustc_serialize::hex::ToHex;
use serde::{Serialize, Serializer}; use serde::{Serialize, Serializer, Deserialize, Deserializer, Error};
use serde::de::Visitor;
use util::common::FromHex;
/// Wrapper structure around vector of bytes. /// Wrapper structure around vector of bytes.
#[derive(Debug)] #[derive(Debug, PartialEq)]
pub struct Bytes(Vec<u8>); pub struct Bytes(Vec<u8>);
impl Bytes { impl Bytes {
@ -26,6 +28,7 @@ impl Bytes {
pub fn new(bytes: Vec<u8>) -> Bytes { pub fn new(bytes: Vec<u8>) -> Bytes {
Bytes(bytes) Bytes(bytes)
} }
pub fn to_vec(self) -> Vec<u8> { let Bytes(x) = self; x }
} }
impl Default for Bytes { impl Default for Bytes {
@ -44,6 +47,32 @@ impl Serialize for Bytes {
} }
} }
impl Deserialize for Bytes {
fn deserialize<D>(deserializer: &mut D) -> Result<Bytes, D::Error>
where D: Deserializer {
deserializer.deserialize(BytesVisitor)
}
}
struct BytesVisitor;
impl Visitor for BytesVisitor {
type Value = Bytes;
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
if value.len() >= 2 && &value[0..2] == "0x" {
Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![])))
} else {
Err(Error::custom("invalid hex"))
}
}
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
self.visit_str(value.as_ref())
}
}
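Note: BytesVisitor demands the 0x prefix but maps hex that fails to decode to an empty vector rather than an error (the unwrap_or_else above). The same behaviour in a self-contained sketch:

fn parse_bytes(value: &str) -> Result<Vec<u8>, &'static str> {
    fn from_hex(s: &str) -> Option<Vec<u8>> {
        if !s.is_ascii() || s.len() % 2 != 0 { return None; }
        (0..s.len()).step_by(2)
            .map(|i| u8::from_str_radix(&s[i..i + 2], 16).ok())
            .collect()
    }
    if value.len() >= 2 && &value[0..2] == "0x" {
        Ok(from_hex(&value[2..]).unwrap_or_else(Vec::new)) // bad hex -> empty vec
    } else {
        Err("invalid hex")
    }
}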
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -23,6 +23,7 @@ mod log;
mod optionals; mod optionals;
mod sync; mod sync;
mod transaction; mod transaction;
mod transaction_request;
pub use self::block::{Block, BlockTransactions}; pub use self::block::{Block, BlockTransactions};
pub use self::block_number::BlockNumber; pub use self::block_number::BlockNumber;
@ -33,3 +34,5 @@ pub use self::log::Log;
pub use self::optionals::OptionalValue; pub use self::optionals::OptionalValue;
pub use self::sync::{SyncStatus, SyncInfo}; pub use self::sync::{SyncStatus, SyncInfo};
pub use self::transaction::Transaction; pub use self::transaction::Transaction;
pub use self::transaction_request::TransactionRequest;
@ -17,6 +17,7 @@
use util::numbers::*; use util::numbers::*;
use ethcore::transaction::{LocalizedTransaction, Action}; use ethcore::transaction::{LocalizedTransaction, Action};
use v1::types::{Bytes, OptionalValue}; use v1::types::{Bytes, OptionalValue};
use serde::Error;
#[derive(Debug, Default, Serialize)] #[derive(Debug, Default, Serialize)]
pub struct Transaction { pub struct Transaction {
@ -0,0 +1,139 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::hash::Address;
use util::numbers::{Uint, U256};
use ethcore::transaction::{Action, Transaction};
use v1::types::Bytes;
#[derive(Debug, Default, PartialEq, Deserialize)]
pub struct TransactionRequest {
pub from: Address,
pub to: Option<Address>,
#[serde(rename="gasPrice")]
pub gas_price: Option<U256>,
pub gas: Option<U256>,
pub value: Option<U256>,
pub data: Option<Bytes>,
pub nonce: Option<U256>,
}
impl Into<Transaction> for TransactionRequest {
fn into(self) -> Transaction {
Transaction {
nonce: self.nonce.unwrap_or_else(U256::zero),
action: self.to.map_or(Action::Create, Action::Call),
gas: self.gas.unwrap_or_else(U256::zero),
gas_price: self.gas_price.unwrap_or_else(U256::zero),
value: self.value.unwrap_or_else(U256::zero),
data: self.data.map_or_else(Vec::new, |d| d.to_vec()),
}
}
}
#[cfg(test)]
mod tests {
use serde_json;
use util::numbers::{Uint, U256};
use util::hash::Address;
use ethcore::transaction::{Transaction, Action};
use v1::types::Bytes;
use super::*;
#[test]
fn transaction_request_into_transaction() {
let tr = TransactionRequest {
from: Address::default(),
to: Some(Address::from(10)),
gas_price: Some(U256::from(20)),
gas: Some(U256::from(10_000)),
value: Some(U256::from(1)),
data: Some(Bytes::new(vec![10, 20])),
nonce: Some(U256::from(12)),
};
assert_eq!(Transaction {
nonce: U256::from(12),
action: Action::Call(Address::from(10)),
gas: U256::from(10_000),
gas_price: U256::from(20),
value: U256::from(1),
data: vec![10, 20],
}, tr.into());
}
#[test]
fn empty_transaction_request_into_transaction() {
let tr = TransactionRequest {
from: Address::default(),
to: None,
gas_price: None,
gas: None,
value: None,
data: None,
nonce: None,
};
assert_eq!(Transaction {
nonce: U256::zero(),
action: Action::Create,
gas: U256::zero(),
gas_price: U256::zero(),
value: U256::zero(),
data: vec![],
}, tr.into());
}
#[test]
fn transaction_request_deserialize() {
let s = r#"{
"from":"0x0000000000000000000000000000000000000001",
"to":"0x0000000000000000000000000000000000000002",
"gasPrice":"0x1",
"gas":"0x2",
"value":"0x3",
"data":"0x123456",
"nonce":"0x4"
}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: Address::from(1),
to: Some(Address::from(2)),
gas_price: Some(U256::from(1)),
gas: Some(U256::from(2)),
value: Some(U256::from(3)),
data: Some(Bytes::new(vec![0x12, 0x34, 0x56])),
nonce: Some(U256::from(4)),
});
}
#[test]
fn transaction_request_deserialize_empty() {
let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#;
let deserialized: TransactionRequest = serde_json::from_str(s).unwrap();
assert_eq!(deserialized, TransactionRequest {
from: Address::from(1),
to: None,
gas_price: None,
gas: None,
value: None,
data: None,
nonce: None,
});
}
}
@ -4,13 +4,9 @@ name = "ethsync"
version = "0.9.99" version = "0.9.99"
license = "GPL-3.0" license = "GPL-3.0"
authors = ["Ethcore <admin@ethcore.io>"] authors = ["Ethcore <admin@ethcore.io>"]
build = "build.rs"
[lib] [lib]
[build-dependencies]
rustc_version = "0.1"
[dependencies] [dependencies]
ethcore-util = { path = "../util" } ethcore-util = { path = "../util" }
ethcore = { path = "../ethcore" } ethcore = { path = "../ethcore" }
@ -25,4 +21,4 @@ rayon = "0.3.1"
[features] [features]
default = [] default = []
dev = ["ethcore/dev", "ethcore-util/dev"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev"]
@ -120,6 +120,7 @@ pub enum SyncState {
} }
/// Syncing status and statistics /// Syncing status and statistics
#[derive(Clone)]
pub struct SyncStatus { pub struct SyncStatus {
/// State /// State
pub state: SyncState, pub state: SyncState,
@ -141,6 +142,8 @@ pub struct SyncStatus {
pub num_active_peers: usize, pub num_active_peers: usize,
/// Heap memory used in bytes /// Heap memory used in bytes
pub mem_used: usize, pub mem_used: usize,
/// Number of pending transactions in queue
pub transaction_queue_pending: usize,
} }
#[derive(PartialEq, Eq, Debug, Clone)] #[derive(PartialEq, Eq, Debug, Clone)]
@ -256,6 +259,7 @@ impl ChainSync {
blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 },
num_peers: self.peers.len(), num_peers: self.peers.len(),
num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(),
transaction_queue_pending: self.transaction_queue.lock().unwrap().status().pending,
mem_used: mem_used:
// TODO: https://github.com/servo/heapsize/pull/50 // TODO: https://github.com/servo/heapsize/pull/50
// self.downloading_hashes.heap_size_of_children() // self.downloading_hashes.heap_size_of_children()
@ -275,7 +279,7 @@ impl ChainSync {
} }
#[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()`
/// Reset sync. Clear all downloaded data but keep the queue /// Reset sync. Clear all downloaded data but keep the queue
fn reset(&mut self) { fn reset(&mut self) {
self.downloading_headers.clear(); self.downloading_headers.clear();
@ -343,7 +347,7 @@ impl ChainSync {
Ok(()) Ok(())
} }
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
/// Called by peer once it has new block headers during sync /// Called by peer once it has new block headers during sync
fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders);
@ -470,7 +474,7 @@ impl ChainSync {
} }
/// Called by peer once it has new block bodies /// Called by peer once it has new block bodies
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
let block_rlp = try!(r.at(0)); let block_rlp = try!(r.at(0));
let header_rlp = try!(block_rlp.at(0)); let header_rlp = try!(block_rlp.at(0));
@ -1303,11 +1307,11 @@ impl ChainSync {
} }
/// Add transaction to the transaction queue /// Add transaction to the transaction queue
pub fn insert_transaction<T>(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) pub fn insert_transaction<T>(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) -> Result<(), Error>
where T: Fn(&Address) -> U256 where T: Fn(&Address) -> U256
{ {
let mut queue = self.transaction_queue.lock().unwrap(); let mut queue = self.transaction_queue.lock().unwrap();
queue.add(transaction, fetch_nonce); queue.add(transaction, fetch_nonce)
} }
} }
@ -15,11 +15,11 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)] #![warn(missing_docs)]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] #![cfg_attr(feature="dev", plugin(clippy))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] #![cfg_attr(feature="dev", allow(clone_on_copy))]
//! Blockchain sync module //! Blockchain sync module
//! Implements ethereum protocol version 63 as specified here: //! Implements ethereum protocol version 63 as specified here:
@ -146,7 +146,8 @@ impl SyncProvider for EthSync {
let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one();
let sync = self.sync.write().unwrap(); let sync = self.sync.write().unwrap();
sync.insert_transaction(transaction, &nonce_fn); sync.insert_transaction(transaction, &nonce_fn).unwrap_or_else(
|e| warn!(target: "sync", "Error inserting transaction to queue: {:?}", e));
} }
} }
@ -231,7 +231,7 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq +
} }
#[test] #[test]
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn test_range() { fn test_range() {
use std::cmp::{Ordering}; use std::cmp::{Ordering};
@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*; use util::*;
use ethcore::client::{BlockChainClient, BlockId}; use ethcore::client::{BlockChainClient, BlockId, EachBlockWith};
use io::SyncIo; use io::SyncIo;
use chain::{SyncState}; use chain::{SyncState};
use super::helpers::*; use super::helpers::*;
@ -15,309 +15,10 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*; use util::*;
use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo}; use ethcore::client::{TestBlockChainClient, BlockChainClient};
use ethcore::header::{Header as BlockHeader, BlockNumber};
use ethcore::error::*;
use io::SyncIo; use io::SyncIo;
use chain::ChainSync; use chain::ChainSync;
use ::SyncConfig; use ::SyncConfig;
use ethcore::receipt::Receipt;
use ethcore::transaction::{LocalizedTransaction, Transaction, Action};
use ethcore::filter::Filter;
use ethcore::log_entry::LocalizedLogEntry;
use ethcore::block::ClosedBlock;
pub struct TestBlockChainClient {
pub blocks: RwLock<HashMap<H256, Bytes>>,
pub numbers: RwLock<HashMap<usize, H256>>,
pub genesis_hash: H256,
pub last_hash: RwLock<H256>,
pub difficulty: RwLock<U256>,
}
#[derive(Clone)]
pub enum EachBlockWith {
Nothing,
Uncle,
Transaction,
UncleAndTransaction
}
impl TestBlockChainClient {
pub fn new() -> TestBlockChainClient {
let mut client = TestBlockChainClient {
blocks: RwLock::new(HashMap::new()),
numbers: RwLock::new(HashMap::new()),
genesis_hash: H256::new(),
last_hash: RwLock::new(H256::new()),
difficulty: RwLock::new(From::from(0)),
};
client.add_blocks(1, EachBlockWith::Nothing); // add genesis block
client.genesis_hash = client.last_hash.read().unwrap().clone();
client
}
pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) {
let len = self.numbers.read().unwrap().len();
for n in len..(len + count) {
let mut header = BlockHeader::new();
header.difficulty = From::from(n);
header.parent_hash = self.last_hash.read().unwrap().clone();
header.number = n as BlockNumber;
let uncles = match with {
EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
let mut uncles = RlpStream::new_list(1);
let mut uncle_header = BlockHeader::new();
uncle_header.difficulty = From::from(n);
uncle_header.parent_hash = self.last_hash.read().unwrap().clone();
uncle_header.number = n as BlockNumber;
uncles.append(&uncle_header);
header.uncles_hash = uncles.as_raw().sha3();
uncles
},
_ => RlpStream::new_list(0)
};
let txs = match with {
EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => {
let mut txs = RlpStream::new_list(1);
let keypair = KeyPair::create().unwrap();
let tx = Transaction {
action: Action::Create,
value: U256::from(100),
data: "3331600055".from_hex().unwrap(),
gas: U256::from(100_000),
gas_price: U256::one(),
nonce: U256::zero()
};
let signed_tx = tx.sign(&keypair.secret());
txs.append(&signed_tx);
txs.out()
},
_ => rlp::NULL_RLP.to_vec()
};
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&txs, 1);
rlp.append_raw(uncles.as_raw(), 1);
self.import_block(rlp.as_raw().to_vec()).unwrap();
}
}
pub fn corrupt_block(&mut self, n: BlockNumber) {
let hash = self.block_hash(BlockId::Number(n)).unwrap();
let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap());
header.parent_hash = H256::new();
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&rlp::NULL_RLP, 1);
rlp.append_raw(&rlp::NULL_RLP, 1);
self.blocks.write().unwrap().insert(hash, rlp.out());
}
pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 {
let blocks_read = self.numbers.read().unwrap();
let index = blocks_read.len() - delta;
blocks_read[&index].clone()
}
fn block_hash(&self, id: BlockId) -> Option<H256> {
match id {
BlockId::Hash(hash) => Some(hash),
BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(),
BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(),
BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned()
}
}
}
impl BlockChainClient for TestBlockChainClient {
fn block_total_difficulty(&self, _id: BlockId) -> Option<U256> {
Some(U256::zero())
}
fn block_hash(&self, _id: BlockId) -> Option<H256> {
unimplemented!();
}
fn nonce(&self, _address: &Address) -> U256 {
U256::zero()
}
fn code(&self, _address: &Address) -> Option<Bytes> {
unimplemented!();
}
fn transaction(&self, _id: TransactionId) -> Option<LocalizedTransaction> {
unimplemented!();
}
fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option<Vec<BlockNumber>> {
unimplemented!();
}
fn logs(&self, _filter: Filter) -> Vec<LocalizedLogEntry> {
unimplemented!();
}
fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>> {
unimplemented!();
}
fn submit_seal(&self, _pow_hash: H256, _seal: Vec<Bytes>) -> Result<(), Error> {
unimplemented!();
}
fn block_header(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec()))
}
fn block_body(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| {
let mut stream = RlpStream::new_list(2);
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
stream.out()
}))
}
fn block(&self, id: BlockId) -> Option<Bytes> {
self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned())
}
fn block_status(&self, id: BlockId) -> BlockStatus {
match id {
BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain,
BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain,
_ => BlockStatus::Unknown
}
}
// works only if blocks are one after another 1 -> 2 -> 3
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
Some(TreeRoute {
ancestor: H256::new(),
index: 0,
blocks: {
let numbers_read = self.numbers.read().unwrap();
let mut adding = false;
let mut blocks = Vec::new();
for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) {
if hash == to {
if adding {
blocks.push(hash.clone());
}
adding = false;
break;
}
if hash == from {
adding = true;
}
if adding {
blocks.push(hash.clone());
}
}
if adding { Vec::new() } else { blocks }
}
})
}
// TODO: returns just hashes instead of node state rlp(?)
fn state_data(&self, hash: &H256) -> Option<Bytes> {
// starts with 'f' ?
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
let mut rlp = RlpStream::new();
rlp.append(&hash.clone());
return Some(rlp.out());
}
None
}
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
// starts with 'f' ?
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
let receipt = Receipt::new(
H256::zero(),
U256::zero(),
vec![]);
let mut rlp = RlpStream::new();
rlp.append(&receipt);
return Some(rlp.out());
}
None
}
fn import_block(&self, b: Bytes) -> ImportResult {
let header = Rlp::new(&b).val_at::<BlockHeader>(0);
let h = header.hash();
let number: usize = header.number as usize;
if number > self.blocks.read().unwrap().len() {
panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
}
if number > 0 {
match self.blocks.read().unwrap().get(&header.parent_hash) {
Some(parent) => {
let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
if parent.number != (header.number - 1) {
panic!("Unexpected block parent");
}
},
None => {
panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
}
}
}
let len = self.numbers.read().unwrap().len();
if number == len {
{
let mut difficulty = self.difficulty.write().unwrap();
*difficulty.deref_mut() = *difficulty.deref() + header.difficulty;
}
mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone());
self.blocks.write().unwrap().insert(h.clone(), b);
self.numbers.write().unwrap().insert(number, h.clone());
let mut parent_hash = header.parent_hash;
if number > 0 {
let mut n = number - 1;
while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
*self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
n -= 1;
parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
}
}
}
else {
self.blocks.write().unwrap().insert(h.clone(), b.to_vec());
}
Ok(h)
}
fn queue_info(&self) -> BlockQueueInfo {
BlockQueueInfo {
verified_queue_size: 0,
unverified_queue_size: 0,
verifying_queue_size: 0,
max_queue_size: 0,
max_mem_use: 0,
mem_used: 0,
}
}
fn clear_queue(&self) {
}
fn chain_info(&self) -> BlockChainInfo {
BlockChainInfo {
total_difficulty: *self.difficulty.read().unwrap(),
pending_total_difficulty: *self.difficulty.read().unwrap(),
genesis_hash: self.genesis_hash.clone(),
best_block_hash: self.last_hash.read().unwrap().clone(),
best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
}
}
}
pub struct TestIo<'p> { pub struct TestIo<'p> {
pub chain: &'p mut TestBlockChainClient, pub chain: &'p mut TestBlockChainClient,
@ -684,8 +684,8 @@ mod test {
let mut txq = TransactionQueue::new(); let mut txq = TransactionQueue::new();
let (tx, tx2) = new_txs(U256::from(1)); let (tx, tx2) = new_txs(U256::from(1));
txq.add(tx.clone(), &prev_nonce); txq.add(tx.clone(), &prev_nonce).unwrap();
txq.add(tx2.clone(), &prev_nonce); txq.add(tx2.clone(), &prev_nonce).unwrap();
assert_eq!(txq.status().future, 2); assert_eq!(txq.status().future, 2);
// when // when
@ -40,8 +40,7 @@ chrono = "0.2"
[features] [features]
default = [] default = []
dev = [] dev = ["clippy"]
[build-dependencies] [build-dependencies]
vergen = "*" vergen = "*"
rustc_version = "0.1"
@ -36,10 +36,12 @@
//! The functions here are designed to be fast. //! The functions here are designed to be fast.
//! //!
#[cfg(all(asm_available, target_arch="x86_64"))]
use std::mem;
use std::fmt; use std::fmt;
use std::cmp; use std::cmp;
use std::mem;
use std::str::{FromStr}; use std::str::{FromStr};
use std::convert::From; use std::convert::From;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
@ -788,14 +790,11 @@ macro_rules! construct_uint {
fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: serde::Error { fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: serde::Error {
// 0x + len // 0x + len
if value.len() != 2 + $n_words / 8 { if value.len() > 2 + $n_words * 16 {
return Err(serde::Error::custom("Invalid length.")); return Err(serde::Error::custom("Invalid length."));
} }
match $name::from_str(&value[2..]) { $name::from_str(&value[2..]).map_err(|_| serde::Error::custom("Invalid hex value."))
Ok(val) => Ok(val),
Err(_) => { return Err(serde::Error::custom("Invalid length.")); }
}
} }
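Note: the old check compared against a single exact (and, with $n_words / 8, miscomputed) length; the new one only rejects over-long strings and lets from_str validate the digits, so non-zero-padded values are now accepted. Each 64-bit word holds at most 16 hex characters, hence for U256 (instantiated with $n_words = 4):

let n_words = 4;
assert_eq!(2 + n_words * 16, 66); // "0x" + at most 64 hex digits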
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: serde::Error { fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: serde::Error {
@ -1103,7 +1102,7 @@ macro_rules! construct_uint {
} }
} }
#[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok.
impl Hash for $name { impl Hash for $name {
fn hash<H>(&self, state: &mut H) where H: Hasher { fn hash<H>(&self, state: &mut H) where H: Hasher {
unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); }
@ -1485,7 +1484,7 @@ mod tests {
} }
#[test] #[test]
#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] #[cfg_attr(feature="dev", allow(eq_op))]
pub fn uint256_comp_test() { pub fn uint256_comp_test() {
let small = U256([10u64, 0, 0, 0]); let small = U256([10u64, 0, 0, 0]);
let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]);
@ -2032,7 +2031,7 @@ mod tests {
#[test] #[test]
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn u256_multi_full_mul() { fn u256_multi_full_mul() {
let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0]));
assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result);
@ -14,15 +14,10 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate rustc_version;
extern crate vergen; extern crate vergen;
use vergen::*; use vergen::*;
use rustc_version::{version_meta, Channel};
fn main() { fn main() {
vergen(OutputFns::all()).unwrap(); vergen(OutputFns::all()).unwrap();
if let Channel::Nightly = version_meta().channel {
println!("cargo:rustc-cfg=nightly");
}
} }
@ -257,7 +257,7 @@ macro_rules! impl_hash {
return Err(serde::Error::custom("Invalid length.")); return Err(serde::Error::custom("Invalid length."));
} }
value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid valid hex.")) value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid hex value."))
} }
fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: serde::Error { fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: serde::Error {
@ -305,7 +305,7 @@ macro_rules! impl_hash {
} }
impl Copy for $from {} impl Copy for $from {}
#[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))]
impl Clone for $from { impl Clone for $from {
fn clone(&self) -> $from { fn clone(&self) -> $from {
unsafe { unsafe {
@ -637,7 +637,7 @@ mod tests {
use std::str::FromStr; use std::str::FromStr;
#[test] #[test]
#[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] #[cfg_attr(feature="dev", allow(eq_op))]
fn hash() { fn hash() {
let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]);
assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h);
@ -20,7 +20,7 @@ use bytes::*;
use std::collections::HashMap; use std::collections::HashMap;
/// Trait modelling datastore keyed by a 32-byte Keccak hash. /// Trait modelling datastore keyed by a 32-byte Keccak hash.
pub trait HashDB { pub trait HashDB : AsHashDB {
/// Get the keys in the database together with number of underlying references. /// Get the keys in the database together with number of underlying references.
fn keys(&self) -> HashMap<H256, i32>; fn keys(&self) -> HashMap<H256, i32>;
@ -111,3 +111,16 @@ pub trait HashDB {
/// ``` /// ```
fn remove(&mut self, key: &H256) { self.kill(key) } fn remove(&mut self, key: &H256) { self.kill(key) }
} }
/// Upcast trait.
pub trait AsHashDB {
/// Perform upcast to HashDB for anything that derives from HashDB.
fn as_hashdb(&self) -> &HashDB;
/// Perform mutable upcast to HashDB for anything that derives from HashDB.
fn as_hashdb_mut(&mut self) -> &mut HashDB;
}
impl<T: HashDB> AsHashDB for T {
fn as_hashdb(&self) -> &HashDB { self }
fn as_hashdb_mut(&mut self) -> &mut HashDB { self }
}
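Note: AsHashDB is the usual workaround for Rust's lack of implicit trait-object upcasting: a &JournalDB cannot be passed where a &HashDB is expected, even though JournalDB extends HashDB, so the blanket impl hands every concrete implementor the conversion for free. A minimal sketch of the pattern with stand-in traits:

trait Base { fn get(&self) -> u32; }

trait AsBase {
    fn as_base(&self) -> &Base;
}

// Blanket impl: any concrete Base implementor can be viewed as &Base.
impl<T: Base> AsBase for T {
    fn as_base(&self) -> &Base { self }
}

trait Extended: Base + AsBase {}

fn use_base(b: &Base) -> u32 { b.get() }

fn via_extended(e: &Extended) -> u32 {
    use_base(e.as_base()) // &Extended -> &Base through the helper
}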
@ -0,0 +1,388 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation.
use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. In this archive variant they are, in fact, never applied to the
/// backing database at all: `commit()` below only counts removals, so old values
/// are retained indefinitely.
pub struct ArchiveDB {
overlay: MemoryDB,
backing: Arc<Database>,
}
// all keys must be at least 12 bytes
const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
const DB_VERSION : u32 = 259;
impl ArchiveDB {
/// Create a new instance from file
pub fn new(path: &str) -> ArchiveDB {
let opts = DatabaseConfig {
prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
};
let backing = Database::open(&opts, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
ArchiveDB {
overlay: MemoryDB::new(),
backing: Arc::new(backing),
}
}
/// Create a new instance with an anonymous temporary database.
#[cfg(test)]
fn new_temp() -> ArchiveDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap())
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
}
impl HashDB for ArchiveDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
let h = H256::from_slice(key.deref());
ret.insert(h, 1);
}
for (key, refs) in self.overlay.keys().into_iter() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}
ret
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
let k = self.overlay.raw(key);
match k {
Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => {
if let Some(x) = self.payload(key) {
Some(&self.overlay.denote(key, x).0)
}
else {
None
}
}
}
}
fn exists(&self, key: &H256) -> bool {
self.lookup(key).is_some()
}
fn insert(&mut self, value: &[u8]) -> H256 {
self.overlay.insert(value)
}
fn emplace(&mut self, key: H256, value: Bytes) {
self.overlay.emplace(key, value);
}
fn kill(&mut self, key: &H256) {
self.overlay.kill(key);
}
}
impl JournalDB for ArchiveDB {
fn spawn(&self) -> Box<JournalDB> {
Box::new(ArchiveDB {
overlay: MemoryDB::new(),
backing: self.backing.clone(),
})
}
fn mem_used(&self) -> usize {
self.overlay.mem_used()
}
fn is_empty(&self) -> bool {
self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
}
fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result<u32, UtilError> {
let batch = DBTransaction::new();
let mut inserts = 0usize;
let mut deletes = 0usize;
for i in self.overlay.drain().into_iter() {
let (key, (value, rc)) = i;
if rc > 0 {
assert!(rc == 1);
batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?");
inserts += 1;
}
if rc < 0 {
assert!(rc == -1);
deletes += 1;
}
}
try!(self.backing.write(batch));
Ok((inserts + deletes) as u32)
}
}
#[cfg(test)]
mod tests {
use common::*;
use super::*;
use hashdb::*;
use journaldb::traits::JournalDB;
#[test]
fn insert_same_in_fork() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.remove(&x);
jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
assert!(jdb.exists(&x));
}
#[test]
fn long_history() {
// history is 3
let mut jdb = ArchiveDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
}
#[test]
fn complex() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
}
#[test]
fn fork() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
#[test]
fn overwrite() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
assert!(jdb.exists(&foo));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
#[test]
fn fork_same_key() {
// history is 1
let mut jdb = ArchiveDB::new_temp();
jdb.commit(0, &b"0".sha3(), None).unwrap();
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
#[test]
fn reopen() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let bar = H256::random();
let foo = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec());
jdb.commit(0, &b"0".sha3(), None).unwrap();
foo
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
}
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
}
}
#[test]
fn reopen_remove() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let foo = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
// foo is ancient history.
jdb.insert(b"foo");
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
foo
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
}
}
#[test]
fn reopen_fork() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let (foo, _, _) = {
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
(foo, bar, baz)
};
{
let mut jdb = ArchiveDB::new(dir.to_str().unwrap());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
}
}

80
util/src/journaldb/mod.rs Normal file
View File

@ -0,0 +1,80 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! JournalDB interface and implementation.
use common::*;
/// Export the `traits` submodule.
pub mod traits;
mod archivedb;
mod optiononedb;
mod overlay;
/// Export the JournalDB trait.
pub use self::traits::JournalDB;
/// A journal database algorithm.
#[derive(Debug)]
pub enum Algorithm {
/// Keep all keys forever.
Archive,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into the backing database; the journal retains knowledge of whether each
/// backing-DB key is ancient or recent. Non-canonical inserts get explicitly reverted and
/// removed from the backing DB.
EarlyMerge,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// Inserts go into a memory overlay, which is consulted first on key fetches. The memory
/// overlay is flushed to the backing database only at the end of recent history.
OverlayRecent,
/// Ancient and recent history maintained separately; recent history lasts for a particular
/// number of blocks.
///
/// References are counted in disk-backed DB.
RefCounted,
}
impl Default for Algorithm {
fn default() -> Algorithm { Algorithm::Archive }
}
impl fmt::Display for Algorithm {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", match self {
&Algorithm::Archive => "archive",
&Algorithm::EarlyMerge => "earlymerge",
&Algorithm::OverlayRecent => "overlayrecent",
&Algorithm::RefCounted => "refcounted",
})
}
}
/// Create a new JournalDB trait object.
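///
/// # Example
///
/// A minimal sketch (the database path and crate path are illustrative):
///
/// ```ignore
/// use util::journaldb::{self, Algorithm};
///
/// let db = journaldb::new("/tmp/jdb-demo", Algorithm::Archive);
/// assert!(db.is_empty());
/// ```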
pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
match algorithm {
Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)),
Algorithm::OverlayRecent => Box::new(overlay::JournalOverlayDB::new(path)),
_ => unimplemented!(),
}
}

View File

@ -0,0 +1,618 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation.
use common::*;
use rlp::*;
use hashdb::*;
use memorydb::*;
use super::traits::JournalDB;
use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)]
use std::env;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay
/// and latent-removal semantics.
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather, some age (based on a linear but arbitrary metric) must pass before
/// the removals actually take effect.
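///
/// # Example
///
/// A minimal sketch of the latent-removal behaviour:
///
/// ```ignore
/// let mut jdb = OptionOneDB::new_temp();
/// let key = jdb.insert(b"dog");
/// jdb.commit(0, &b"0".sha3(), None).unwrap();
/// jdb.remove(&key);
/// // The removal is only journalled; the key survives until the removing era
/// // leaves recent history.
/// jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
/// assert!(jdb.exists(&key));
/// ```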
pub struct OptionOneDB {
overlay: MemoryDB,
backing: Arc<Database>,
counters: Option<Arc<RwLock<HashMap<H256, i32>>>>,
}
// all keys must be at least 12 bytes
const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
const DB_VERSION : u32 = 3;
const PADDING : [u8; 10] = [ 0u8; 10 ];
impl OptionOneDB {
/// Create a new instance from file
pub fn new(path: &str) -> OptionOneDB {
let opts = DatabaseConfig {
prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
};
let backing = Database::open(&opts, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e);
});
if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => {},
v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
}
} else {
backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
}
let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing))));
OptionOneDB {
overlay: MemoryDB::new(),
backing: Arc::new(backing),
counters: counters,
}
}
/// Create a new instance with an anonymous temporary database.
#[cfg(test)]
fn new_temp() -> OptionOneDB {
let mut dir = env::temp_dir();
dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap())
}
fn morph_key(key: &H256, index: u8) -> Bytes {
let mut ret = key.bytes().to_owned();
ret.push(index);
ret
}
// The next three functions are valid only as long as there is an insert operation for `key` in the journal.
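// The marker lives at morph_key(key, 0), i.e. the 32-byte key with a zero byte
// appended, so it can never collide with a plain 32-byte node key.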
fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); }
fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); }
fn is_already_in(backing: &Database, key: &H256) -> bool {
backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some()
}
fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
for &(ref h, ref d) in inserts {
if let Some(c) = counters.get_mut(h) {
// already counting. increment.
*c += 1;
continue;
}
// this is the first entry for this node in the journal.
if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() {
// already in the backing DB. start counting, and remember it was already in.
Self::set_already_in(batch, &h);
counters.insert(h.clone(), 1);
continue;
}
// Gets removed when a key leaves the journal, so should never be set when we're placing a new key.
//Self::reset_already_in(&h);
assert!(!Self::is_already_in(backing, &h));
batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?");
}
}
fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap<H256, i32>) {
trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters);
for h in inserts {
if let Some(c) = counters.get_mut(h) {
// already counting. increment.
*c += 1;
continue;
}
// this is the first entry for this node in the journal.
// it is initialised to 1 if it was already in.
if Self::is_already_in(backing, h) {
trace!("replace_keys: Key {} was already in!", h);
counters.insert(h.clone(), 1);
}
}
trace!("replay_keys: (end) counters={:?}", counters);
}
fn kill_keys(deletes: Vec<H256>, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) {
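// Decrement the journal counter for each deleted key. Only when the last
// journalled reference goes does the key either lose its already-in marker
// (it predated the journal and stays on disk) or get deleted from the
// backing database outright.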
for h in deletes.into_iter() {
let mut n: Option<i32> = None;
if let Some(c) = counters.get_mut(&h) {
if *c > 1 {
*c -= 1;
continue;
} else {
n = Some(*c);
}
}
match n {
Some(i) if i == 1 => {
counters.remove(&h);
Self::reset_already_in(batch, &h);
}
None => {
// Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs.
//assert!(!Self::is_already_in(db, &h));
batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?");
}
_ => panic!("Invalid value in counters: {:?}", n),
}
}
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
fn read_counters(db: &Database) -> HashMap<H256, i32> {
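// Rebuild the in-memory counters on startup by replaying every journal entry,
// walking eras backwards from the latest until an era with no entries (or era
// 0) is reached.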
let mut counters = HashMap::new();
if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
let mut era = decode::<u64>(&val);
loop {
let mut index = 0usize;
while let Some(rlp_data) = db.get({
let mut r = RlpStream::new_list(3);
r.append(&era);
r.append(&index);
r.append(&&PADDING[..]);
&r.drain()
}).expect("Low-level database error.") {
trace!("read_counters: era={}, index={}", era, index);
let rlp = Rlp::new(&rlp_data);
let inserts: Vec<H256> = rlp.val_at(1);
Self::replay_keys(&inserts, db, &mut counters);
index += 1;
};
if index == 0 || era == 0 {
break;
}
era -= 1;
}
}
trace!("Recovered {} counters", counters.len());
counters
}
}
impl HashDB for OptionOneDB {
fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() {
let h = H256::from_slice(key.deref());
ret.insert(h, 1);
}
for (key, refs) in self.overlay.keys().into_iter() {
let refs = *ret.get(&key).unwrap_or(&0) + refs;
ret.insert(key, refs);
}
ret
}
fn lookup(&self, key: &H256) -> Option<&[u8]> {
let k = self.overlay.raw(key);
match k {
Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => {
if let Some(x) = self.payload(key) {
Some(&self.overlay.denote(key, x).0)
}
else {
None
}
}
}
}
fn exists(&self, key: &H256) -> bool {
self.lookup(key).is_some()
}
fn insert(&mut self, value: &[u8]) -> H256 {
self.overlay.insert(value)
}
fn emplace(&mut self, key: H256, value: Bytes) {
self.overlay.emplace(key, value);
}
fn kill(&mut self, key: &H256) {
self.overlay.kill(key);
}
}
impl JournalDB for OptionOneDB {
fn spawn(&self) -> Box<JournalDB> {
Box::new(OptionOneDB {
overlay: MemoryDB::new(),
backing: self.backing.clone(),
counters: self.counters.clone(),
})
}
fn mem_used(&self) -> usize {
self.overlay.mem_used() + match self.counters {
Some(ref c) => c.read().unwrap().heap_size_of_children(),
None => 0
}
}
fn is_empty(&self) -> bool {
self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
}
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
// journal format:
// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ]
// [era, n] => [ ... ]
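// Concretely, each journal key is rlp([era, index, PADDING]) and each value is
// rlp([id, inserted_keys, removed_keys]); see the RlpStream construction below.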
// TODO: store reclaim_period.
// When we make a new commit, we make a journal of all blocks in the recent history and record
// all keys that were inserted and deleted. The journal is ordered by era; multiple commits can
// share the same era. This forms a data structure similar to a queue but whose items are tuples.
// By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent
// history into ancient history), only one commit from the tuple is considered canonical. This commit
// is kept in the main backing database, whereas any others from the same era are reverted.
//
// It is possible for a key that is properly available in the backing database to be deleted and
// re-inserted in the recent-history queue, with both operations in commits that are eventually
// non-canonical. To prevent the original, still-required key from being deleted, we maintain a
// reference count which includes an original key, if any.
//
// The semantics of the `counter` are:
// insert key k:
// counter already contains k: count += 1
// counter doesn't contain k:
// backing db contains k: count = 1
// backing db doesn't contain k: insert into backing db, count = 0
// delete key k:
// counter contains k (count is asserted to be non-zero):
// count > 1: counter -= 1
// count == 1: remove counter
// count == 0: remove key from backing db
// counter doesn't contain k: remove key from backing db
//
// Practically, this means that for each commit block turning from recent to ancient we do the
// following:
// is_canonical:
// inserts: Ignored (left alone in the backing database).
// deletes: Enacted; however, the recent-history queue is checked for ongoing references, and
// any such reference is decremented in preference to deleting from the backing database.
// !is_canonical:
// inserts: Reverted; however, the recent-history queue is checked for ongoing references, and
// any such reference is decremented in preference to deleting from the backing database.
// deletes: Ignored (they were never inserted).
//
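// Worked example (illustrative): key k already sits in the backing DB and a
// block that turns out to be non-canonical re-inserts it. insert_keys finds k
// on disk, so it just sets the already-in marker and starts the counter at 1.
// When that block ages out, its insert is reverted via kill_keys: count == 1,
// so the counter and marker are dropped and k itself stays on disk.
//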
// record new commit's details.
trace!("commit: #{} ({}), end era: {:?}", now, id, end);
let mut counters = self.counters.as_ref().unwrap().write().unwrap();
let batch = DBTransaction::new();
{
let mut index = 0usize;
let mut last;
while try!(self.backing.get({
let mut r = RlpStream::new_list(3);
r.append(&now);
r.append(&index);
r.append(&&PADDING[..]);
last = r.drain();
&last
})).is_some() {
index += 1;
}
let drained = self.overlay.drain();
let removes: Vec<H256> = drained
.iter()
.filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None})
.collect();
let inserts: Vec<(H256, Bytes)> = drained
.into_iter()
.filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None })
.collect();
let mut r = RlpStream::new_list(3);
r.append(id);
// Process the new inserts.
// We use the inserts for three things. For each insert:
// - we place it into the backing DB, or increment its counter if it is already journalled;
// - if it was already in the backing DB, we note that fact there;
// - we write the key into our journal for this block;
r.begin_list(inserts.len());
inserts.iter().foreach(|&(k, _)| {r.append(&k);});
r.append(&removes);
Self::insert_keys(&inserts, &self.backing, &mut counters, &batch);
try!(batch.put(&last, r.as_raw()));
try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
}
// apply old commits' details
if let Some((end_era, canon_id)) = end {
let mut index = 0usize;
let mut last;
while let Some(rlp_data) = try!(self.backing.get({
let mut r = RlpStream::new_list(3);
r.append(&end_era);
r.append(&index);
r.append(&&PADDING[..]);
last = r.drain();
&last
})) {
let rlp = Rlp::new(&rlp_data);
let inserts: Vec<H256> = rlp.val_at(1);
let deletes: Vec<H256> = rlp.val_at(2);
// Collect keys to be removed: the deleted keys for the canonical block, the inserted keys for non-canonical blocks.
Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch);
try!(batch.delete(&last));
index += 1;
}
trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id);
}
try!(self.backing.write(batch));
// trace!("OptionOneDB::commit() deleted {} nodes", deletes);
Ok(0)
}
}
#[cfg(test)]
mod tests {
use common::*;
use super::*;
use hashdb::*;
use journaldb::traits::JournalDB;
#[test]
fn insert_same_in_fork() {
// history is 1
let mut jdb = OptionOneDB::new_temp();
let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap();
jdb.commit(2, &b"2".sha3(), None).unwrap();
jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap();
jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.remove(&x);
jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap();
let x = jdb.insert(b"X");
jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap();
jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap();
jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap();
assert!(jdb.exists(&x));
}
#[test]
fn long_history() {
// history is 3
let mut jdb = OptionOneDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(2, &b"2".sha3(), None).unwrap();
assert!(jdb.exists(&h));
jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&h));
jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(!jdb.exists(&h));
}
#[test]
fn complex() {
// history is 1
let mut jdb = OptionOneDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
jdb.remove(&bar);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
let foo = jdb.insert(b"foo");
jdb.remove(&baz);
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(jdb.exists(&baz));
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(!jdb.exists(&baz));
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(!jdb.exists(&foo));
assert!(!jdb.exists(&bar));
assert!(!jdb.exists(&baz));
}
#[test]
fn fork() {
// history is 1
let mut jdb = OptionOneDB::new_temp();
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
assert!(jdb.exists(&baz));
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&baz));
assert!(!jdb.exists(&bar));
}
#[test]
fn overwrite() {
// history is 1
let mut jdb = OptionOneDB::new_temp();
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.exists(&foo));
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
assert!(jdb.exists(&foo));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
#[test]
fn fork_same_key() {
// history is 1
let mut jdb = OptionOneDB::new_temp();
jdb.commit(0, &b"0".sha3(), None).unwrap();
let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.insert(b"foo");
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap();
assert!(jdb.exists(&foo));
}
#[test]
fn reopen() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let bar = H256::random();
let foo = {
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec());
jdb.commit(0, &b"0".sha3(), None).unwrap();
foo
};
{
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
}
{
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(!jdb.exists(&foo));
}
}
#[test]
fn reopen_remove() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let foo = {
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
// foo is ancient history.
jdb.insert(b"foo");
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
foo
};
{
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
jdb.remove(&foo);
jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
assert!(jdb.exists(&foo));
jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap();
jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(!jdb.exists(&foo));
}
}
#[test]
fn reopen_fork() {
let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex());
let (foo, bar, baz) = {
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
// history is 1
let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.remove(&foo);
let baz = jdb.insert(b"baz");
jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap();
jdb.remove(&bar);
jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap();
(foo, bar, baz)
};
{
let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.exists(&foo));
assert!(!jdb.exists(&baz));
assert!(!jdb.exists(&bar));
}
}
}

View File

@ -14,7 +14,7 @@
// You should have received a copy of the GNU General Public License // You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation. //! JournalDB over in-memory overlay
use common::*; use common::*;
use rlp::*; use rlp::*;
@ -23,13 +23,11 @@ use memorydb::*;
use kvdb::{Database, DBTransaction, DatabaseConfig}; use kvdb::{Database, DBTransaction, DatabaseConfig};
#[cfg(test)] #[cfg(test)]
use std::env; use std::env;
use super::JournalDB;
/// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// Implementation of the JournalDB trait for a disk-backed database with a memory overlay
/// and, possibly, latent-removal semantics. /// and, possibly, latent-removal semantics.
/// ///
/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves
/// differently:
///
/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
@ -58,10 +56,10 @@ use std::env;
/// the removed key is not present in the history overlay. /// the removed key is not present in the history overlay.
/// 7. Delete ancient record from memory and disk. /// 7. Delete ancient record from memory and disk.
/// ///
pub struct JournalDB { pub struct JournalOverlayDB {
transaction_overlay: MemoryDB, transaction_overlay: MemoryDB,
backing: Arc<Database>, backing: Arc<Database>,
journal_overlay: Option<Arc<RwLock<JournalOverlay>>>, journal_overlay: Arc<RwLock<JournalOverlay>>,
} }
#[derive(PartialEq)] #[derive(PartialEq)]
@ -84,9 +82,9 @@ impl HeapSizeOf for JournalEntry {
} }
} }
impl Clone for JournalDB { impl Clone for JournalOverlayDB {
fn clone(&self) -> JournalDB { fn clone(&self) -> JournalOverlayDB {
JournalDB { JournalOverlayDB {
transaction_overlay: MemoryDB::new(), transaction_overlay: MemoryDB::new(),
backing: self.backing.clone(), backing: self.backing.clone(),
journal_overlay: self.journal_overlay.clone(), journal_overlay: self.journal_overlay.clone(),
@ -97,45 +95,34 @@ impl Clone for JournalDB {
// all keys must be at least 12 bytes // all keys must be at least 12 bytes
const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ];
const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ];
const DB_VERSION : u32 = 0x200 + 3;
const DB_VERSION : u32 = 3;
const DB_VERSION_NO_JOURNAL : u32 = 3 + 256;
const PADDING : [u8; 10] = [ 0u8; 10 ]; const PADDING : [u8; 10] = [ 0u8; 10 ];
impl JournalDB { impl JournalOverlayDB {
/// Create a new instance from file /// Create a new instance from file
pub fn new(path: &str) -> JournalDB { pub fn new(path: &str) -> JournalOverlayDB {
Self::from_prefs(path, true) Self::from_prefs(path)
} }
/// Create a new instance from file /// Create a new instance from file
pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { pub fn from_prefs(path: &str) -> JournalOverlayDB {
let opts = DatabaseConfig { let opts = DatabaseConfig {
prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix
}; };
let backing = Database::open(&opts, path).unwrap_or_else(|e| { let backing = Database::open(&opts, path).unwrap_or_else(|e| {
panic!("Error opening state db: {}", e); panic!("Error opening state db: {}", e);
}); });
let with_journal;
if !backing.is_empty() { if !backing.is_empty() {
match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) { match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::<u32>(&v))) {
Ok(Some(DB_VERSION)) => { with_journal = true; }, Ok(Some(DB_VERSION)) => {}
Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; },
v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v)
} }
} else { } else {
backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database");
with_journal = prefer_journal;
} }
let journal_overlay = Arc::new(RwLock::new(JournalOverlayDB::read_overlay(&backing)));
let journal_overlay = if with_journal { JournalOverlayDB {
Some(Arc::new(RwLock::new(JournalDB::read_overlay(&backing))))
} else {
None
};
JournalDB {
transaction_overlay: MemoryDB::new(), transaction_overlay: MemoryDB::new(),
backing: Arc::new(backing), backing: Arc::new(backing),
journal_overlay: journal_overlay, journal_overlay: journal_overlay,
@ -144,61 +131,92 @@ impl JournalDB {
/// Create a new instance with an anonymous temporary database. /// Create a new instance with an anonymous temporary database.
#[cfg(test)] #[cfg(test)]
pub fn new_temp() -> JournalDB { pub fn new_temp() -> JournalOverlayDB {
let mut dir = env::temp_dir(); let mut dir = env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
Self::new(dir.to_str().unwrap()) Self::new(dir.to_str().unwrap())
} }
/// Check if this database has any commits #[cfg(test)]
pub fn is_empty(&self) -> bool { fn can_reconstruct_refs(&self) -> bool {
let reconstructed = Self::read_overlay(&self.backing);
let journal_overlay = self.journal_overlay.read().unwrap();
*journal_overlay == reconstructed
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
fn read_overlay(db: &Database) -> JournalOverlay {
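// Rebuild the journal and the backing overlay on startup by walking eras
// backwards from the latest era recorded on disk.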
let mut journal = HashMap::new();
let mut overlay = MemoryDB::new();
let mut count = 0;
let mut latest_era = 0;
if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
latest_era = decode::<u64>(&val);
let mut era = latest_era;
loop {
let mut index = 0usize;
while let Some(rlp_data) = db.get({
let mut r = RlpStream::new_list(3);
r.append(&era);
r.append(&index);
r.append(&&PADDING[..]);
&r.drain()
}).expect("Low-level database error.") {
trace!("read_overlay: era={}, index={}", era, index);
let rlp = Rlp::new(&rlp_data);
let id: H256 = rlp.val_at(0);
let insertions = rlp.at(1);
let deletions: Vec<H256> = rlp.val_at(2);
let mut inserted_keys = Vec::new();
for r in insertions.iter() {
let k: H256 = r.val_at(0);
let v: Bytes = r.val_at(1);
overlay.emplace(k.clone(), v);
inserted_keys.push(k);
count += 1;
}
journal.entry(era).or_insert_with(Vec::new).push(JournalEntry {
id: id,
insertions: inserted_keys,
deletions: deletions,
});
index += 1;
};
if index == 0 || era == 0 {
break;
}
era -= 1;
}
}
trace!("Recovered {} overlay entries, {} journal entries", count, journal.len());
JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era }
}
}
impl JournalDB for JournalOverlayDB {
fn spawn(&self) -> Box<JournalDB> {
Box::new(self.clone())
}
fn mem_used(&self) -> usize {
let mut mem = self.transaction_overlay.mem_used();
let overlay = self.journal_overlay.read().unwrap();
mem += overlay.backing_overlay.mem_used();
mem += overlay.journal.heap_size_of_children();
mem
}
fn is_empty(&self) -> bool {
self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none()
} }
/// Commit all recent insert operations. fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> {
pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> {
let have_journal_overlay = self.journal_overlay.is_some();
if have_journal_overlay {
self.commit_with_overlay(now, id, end)
} else {
self.commit_without_overlay()
}
}
/// Drain the overlay and place it into a batch for the DB.
fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize {
let mut insertions = 0usize;
let mut deletions = 0usize;
for i in overlay.drain().into_iter() {
let (key, (value, rc)) = i;
if rc > 0 {
assert!(rc == 1);
batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?");
insertions += 1;
}
if rc < 0 {
assert!(rc == -1);
deletions += 1;
}
}
trace!("commit: Inserted {}, Deleted {} nodes", insertions, deletions);
insertions + deletions
}
/// Just commit the transaction overlay into the backing DB.
fn commit_without_overlay(&mut self) -> Result<(), UtilError> {
let batch = DBTransaction::new();
Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch);
try!(self.backing.write(batch));
Ok(())
}
/// Commit all recent insert operations and historical removals from the old era
/// to the backing database.
fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> {
// record new commit's details. // record new commit's details.
trace!("commit: #{} ({}), end era: {:?}", now, id, end); trace!("commit: #{} ({}), end era: {:?}", now, id, end);
let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); let mut journal_overlay = self.journal_overlay.write().unwrap();
let batch = DBTransaction::new(); let batch = DBTransaction::new();
{ {
let mut r = RlpStream::new_list(3); let mut r = RlpStream::new_list(3);
@ -280,80 +298,12 @@ impl JournalDB {
journal_overlay.journal.remove(&end_era); journal_overlay.journal.remove(&end_era);
} }
try!(self.backing.write(batch)); try!(self.backing.write(batch));
Ok(()) Ok(0)
} }
#[cfg(test)]
fn can_reconstruct_refs(&self) -> bool {
let reconstructed = Self::read_overlay(&self.backing);
let journal_overlay = self.journal_overlay.as_ref().unwrap().read().unwrap();
*journal_overlay == reconstructed
}
fn payload(&self, key: &H256) -> Option<Bytes> {
self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec())
}
fn read_overlay(db: &Database) -> JournalOverlay {
let mut journal = HashMap::new();
let mut overlay = MemoryDB::new();
let mut count = 0;
let mut latest_era = 0;
if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") {
latest_era = decode::<u64>(&val);
let mut era = latest_era;
loop {
let mut index = 0usize;
while let Some(rlp_data) = db.get({
let mut r = RlpStream::new_list(3);
r.append(&era);
r.append(&index);
r.append(&&PADDING[..]);
&r.drain()
}).expect("Low-level database error.") {
trace!("read_overlay: era={}, index={}", era, index);
let rlp = Rlp::new(&rlp_data);
let id: H256 = rlp.val_at(0);
let insertions = rlp.at(1);
let deletions: Vec<H256> = rlp.val_at(2);
let mut inserted_keys = Vec::new();
for r in insertions.iter() {
let k: H256 = r.val_at(0);
let v: Bytes = r.val_at(1);
overlay.emplace(k.clone(), v);
inserted_keys.push(k);
count += 1;
}
journal.entry(era).or_insert_with(Vec::new).push(JournalEntry {
id: id,
insertions: inserted_keys,
deletions: deletions,
});
index += 1;
};
if index == 0 || era == 0 {
break;
}
era -= 1;
}
}
trace!("Recovered {} overlay entries, {} journal entries", count, journal.len());
JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era }
}
/// Returns heap memory size used
pub fn mem_used(&self) -> usize {
let mut mem = self.transaction_overlay.mem_used();
if let Some(ref overlay) = self.journal_overlay.as_ref() {
let overlay = overlay.read().unwrap();
mem += overlay.backing_overlay.mem_used();
mem += overlay.journal.heap_size_of_children();
}
mem
}
} }
impl HashDB for JournalDB { impl HashDB for JournalOverlayDB {
fn keys(&self) -> HashMap<H256, i32> { fn keys(&self) -> HashMap<H256, i32> {
let mut ret: HashMap<H256, i32> = HashMap::new(); let mut ret: HashMap<H256, i32> = HashMap::new();
for (key, _) in self.backing.iter() { for (key, _) in self.backing.iter() {
@ -373,7 +323,7 @@ impl HashDB for JournalDB {
match k { match k {
Some(&(ref d, rc)) if rc > 0 => Some(d), Some(&(ref d, rc)) if rc > 0 => Some(d),
_ => { _ => {
let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec())); let v = self.journal_overlay.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec());
match v { match v {
Some(x) => { Some(x) => {
Some(&self.transaction_overlay.denote(key, x).0) Some(&self.transaction_overlay.denote(key, x).0)
@ -412,11 +362,12 @@ mod tests {
use super::*; use super::*;
use hashdb::*; use hashdb::*;
use log::init_log; use log::init_log;
use journaldb::JournalDB;
#[test] #[test]
fn insert_same_in_fork() { fn insert_same_in_fork() {
// history is 1 // history is 1
let mut jdb = JournalDB::new_temp(); let mut jdb = JournalOverlayDB::new_temp();
let x = jdb.insert(b"X"); let x = jdb.insert(b"X");
jdb.commit(1, &b"1".sha3(), None).unwrap(); jdb.commit(1, &b"1".sha3(), None).unwrap();
@ -446,7 +397,7 @@ mod tests {
#[test] #[test]
fn long_history() { fn long_history() {
// history is 3 // history is 3
let mut jdb = JournalDB::new_temp(); let mut jdb = JournalOverlayDB::new_temp();
let h = jdb.insert(b"foo"); let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
@ -469,7 +420,7 @@ mod tests {
#[test] #[test]
fn complex() { fn complex() {
// history is 1 // history is 1
let mut jdb = JournalDB::new_temp(); let mut jdb = JournalOverlayDB::new_temp();
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar"); let bar = jdb.insert(b"bar");
@ -512,7 +463,7 @@ mod tests {
#[test] #[test]
fn fork() { fn fork() {
// history is 1 // history is 1
let mut jdb = JournalDB::new_temp(); let mut jdb = JournalOverlayDB::new_temp();
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar"); let bar = jdb.insert(b"bar");
@ -544,7 +495,7 @@ mod tests {
#[test] #[test]
fn overwrite() { fn overwrite() {
// history is 1 // history is 1
let mut jdb = JournalDB::new_temp(); let mut jdb = JournalOverlayDB::new_temp();
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
@ -569,7 +520,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
@ -597,7 +548,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
@ -625,7 +576,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
@ -663,7 +614,7 @@ mod tests {
let bar = H256::random(); let bar = H256::random();
let foo = { let foo = {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 1 // history is 1
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.emplace(bar.clone(), b"bar".to_vec());
@ -673,14 +624,14 @@ mod tests {
}; };
{ {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.remove(&foo); jdb.remove(&foo);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
} }
{ {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
assert!(jdb.exists(&foo)); assert!(jdb.exists(&foo));
assert!(jdb.exists(&bar)); assert!(jdb.exists(&bar));
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
@ -695,7 +646,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 4 // history is 4
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
@ -724,7 +675,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 4 // history is 4
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
@ -773,7 +724,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 1 // history is 1
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
@ -804,7 +755,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 4 // history is 4
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
@ -844,7 +795,7 @@ mod tests {
let foo = b"foo".sha3(); let foo = b"foo".sha3();
{ {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 1 // history is 1
jdb.insert(b"foo"); jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap(); jdb.commit(0, &b"0".sha3(), None).unwrap();
@ -865,7 +816,7 @@ mod tests {
assert!(jdb.exists(&foo)); assert!(jdb.exists(&foo));
// incantation to reopen the db // incantation to reopen the db
}; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.remove(&foo); jdb.remove(&foo);
jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap();
@ -873,14 +824,14 @@ mod tests {
assert!(jdb.exists(&foo)); assert!(jdb.exists(&foo));
// incantation to reopen the db // incantation to reopen the db
}; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
assert!(jdb.exists(&foo)); assert!(jdb.exists(&foo));
// incantation to reopen the db // incantation to reopen the db
}; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
@ -893,7 +844,7 @@ mod tests {
let mut dir = ::std::env::temp_dir(); let mut dir = ::std::env::temp_dir();
dir.push(H32::random().hex()); dir.push(H32::random().hex());
let (foo, bar, baz) = { let (foo, bar, baz) = {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
// history is 1 // history is 1
let foo = jdb.insert(b"foo"); let foo = jdb.insert(b"foo");
let bar = jdb.insert(b"bar"); let bar = jdb.insert(b"bar");
@ -911,7 +862,7 @@ mod tests {
}; };
{ {
let mut jdb = JournalDB::new(dir.to_str().unwrap()); let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap());
jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
assert!(jdb.can_reconstruct_refs()); assert!(jdb.can_reconstruct_refs());
assert!(jdb.exists(&foo)); assert!(jdb.exists(&foo));

View File

@ -0,0 +1,37 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Disk-backed HashDB implementation.
use common::*;
use hashdb::*;
/// A HashDB which can manage a short-term journal potentially containing many forks of mutually
/// exclusive actions.
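///
/// # Example
///
/// A sketch of driving any implementation through the trait (block numbers and
/// ids are illustrative):
///
/// ```ignore
/// fn seal_block(db: &mut JournalDB, num: u64, id: &H256, ancient: Option<(u64, H256)>) {
///     // Journal this block's inserts/removes and, if given, enact the era
///     // that is leaving recent history.
///     db.commit(num, id, ancient).expect("low-level database error");
/// }
/// ```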
pub trait JournalDB : HashDB + Send + Sync {
/// Return a copy of ourself, in a box.
fn spawn(&self) -> Box<JournalDB>;
/// Returns heap memory size used
fn mem_used(&self) -> usize;
/// Check if this database has any commits
fn is_empty(&self) -> bool;
/// Commit all recent insert operations and canonical historical commits' removals from the
/// old era to the backing database, reverting any non-canonical historical commit's inserts.
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError>;
}

View File

@ -78,6 +78,59 @@ struct AccountUnlock {
expires: DateTime<UTC>, expires: DateTime<UTC>,
} }
/// Basic account management trait
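///
/// # Example
///
/// A sketch of creating, unlocking and signing with an account (the password
/// and message are illustrative):
///
/// ```ignore
/// let service = AccountService::new();
/// let addr = service.new_account("hunter2").unwrap();
/// service.unlock_account(&addr, "hunter2").unwrap();
/// let signature = service.sign(&addr, &H256::random()).unwrap();
/// ```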
pub trait AccountProvider : Send + Sync {
/// Lists all accounts
fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error>;
/// Unlocks account with the password provided
fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>;
/// Creates account
fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error>;
/// Returns secret for unlocked account
fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError>;
/// Signs the message with the secret of an unlocked account
fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError>;
}
/// Thread-safe accounts management
pub struct AccountService {
secret_store: RwLock<SecretStore>,
}
impl AccountProvider for AccountService {
/// Lists all accounts
fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error> {
Ok(try!(self.secret_store.read().unwrap().accounts()).iter().map(|&(addr, _)| addr).collect::<Vec<Address>>())
}
/// Unlocks account with the password provided
fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> {
self.secret_store.read().unwrap().unlock_account(account, pass)
}
/// Creates account
fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error> {
self.secret_store.write().unwrap().new_account(pass)
}
/// Returns secret for unlocked account
fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError> {
self.secret_store.read().unwrap().account_secret(account)
}
/// Signs the message with the secret of an unlocked account
fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError> {
self.secret_store.read().unwrap().sign(account, message)
}
}
impl AccountService {
/// New account service with the default location
pub fn new() -> AccountService {
let secret_store = RwLock::new(SecretStore::new());
secret_store.write().unwrap().try_import_existing();
AccountService {
secret_store: secret_store
}
}
}
impl SecretStore { impl SecretStore {
/// new instance of Secret Store in default home directory /// new instance of Secret Store in default home directory
pub fn new() -> SecretStore { pub fn new() -> SecretStore {

View File

@ -15,18 +15,18 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![warn(missing_docs)] #![warn(missing_docs)]
#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(feature="dev", feature(plugin))]
#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] #![cfg_attr(feature="dev", plugin(clippy))]
// Clippy settings // Clippy settings
// TODO [todr] not really sure // TODO [todr] not really sure
#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] #![cfg_attr(feature="dev", allow(needless_range_loop))]
// Shorter than if-else // Shorter than if-else
#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] #![cfg_attr(feature="dev", allow(match_bool))]
// We use that to be more explicit about handled cases // We use that to be more explicit about handled cases
#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] #![cfg_attr(feature="dev", allow(match_same_arms))]
// Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] #![cfg_attr(feature="dev", allow(clone_on_copy))]
//! Ethcore-util library //! Ethcore-util library
//! //!
@ -154,7 +154,7 @@ pub use rlp::*;
pub use hashdb::*; pub use hashdb::*;
pub use memorydb::*; pub use memorydb::*;
pub use overlaydb::*; pub use overlaydb::*;
pub use journaldb::*; pub use journaldb::JournalDB;
pub use math::*; pub use math::*;
pub use crypto::*; pub use crypto::*;
pub use triehash::*; pub use triehash::*;

View File

@ -243,7 +243,7 @@ impl Discovery {
self.send_to(packet, address.clone()); self.send_to(packet, address.clone());
} }
#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))] #[cfg_attr(feature="dev", allow(map_clone))]
fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> { fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new(); let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
let mut count = 0; let mut count = 0;

View File

@ -507,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count()); debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
} }
#[cfg_attr(all(nightly, feature="dev"), allow(single_match))] #[cfg_attr(feature="dev", allow(single_match))]
fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) { fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
if self.have_session(id) if self.have_session(id)
{ {
@ -542,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
self.create_connection(socket, Some(id), io); self.create_connection(socket, Some(id), io);
} }
#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))] #[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) { fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
let nonce = self.info.write().unwrap().next_nonce(); let nonce = self.info.write().unwrap().next_nonce();
let mut handshakes = self.handshakes.write().unwrap(); let mut handshakes = self.handshakes.write().unwrap();

View File

@ -36,6 +36,7 @@ use kvdb::{Database};
/// ///
/// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()`
/// queries have an immediate effect in terms of these functions. /// queries have an immediate effect in terms of these functions.
//#[derive(Clone)]
pub struct OverlayDB { pub struct OverlayDB {
overlay: MemoryDB, overlay: MemoryDB,
backing: Arc<Database>, backing: Arc<Database>,

View File

@ -71,7 +71,7 @@ impl PanicHandler {
/// Invoke closure and catch any possible panics. /// Invoke closure and catch any possible panics.
/// In case of panic notifies all listeners about it. /// In case of panic notifies all listeners about it.
#[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] #[cfg_attr(feature="dev", allow(deprecated))]
pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static { pub fn catch_panic<G, R>(&self, g: G) -> thread::Result<R> where G: FnOnce() -> R + Send + 'static {
let _guard = PanicGuard { handler: self }; let _guard = PanicGuard { handler: self };
let result = g(); let result = g();

View File

@ -54,7 +54,7 @@ pub struct TrieDB<'db> {
pub hash_count: usize, pub hash_count: usize,
} }
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] #[cfg_attr(feature="dev", allow(wrong_self_convention))]
impl<'db> TrieDB<'db> { impl<'db> TrieDB<'db> {
/// Create a new trie with the backing database `db` and `root` /// Create a new trie with the backing database `db` and `root`
/// Panics, if `root` does not exist /// Panics, if `root` does not exist

View File

@ -66,7 +66,7 @@ enum MaybeChanged<'a> {
Changed(Bytes), Changed(Bytes),
} }
#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] #[cfg_attr(feature="dev", allow(wrong_self_convention))]
impl<'db> TrieDBMut<'db> { impl<'db> TrieDBMut<'db> {
/// Create a new trie with the backing database `db` and empty `root` /// Create a new trie with the backing database `db` and empty `root`
/// Initialise to the state entailed by the genesis block. /// Initialise to the state entailed by the genesis block.
@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> {
} }
} }
#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] #[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
/// Determine the RLP of the node, assuming we're inserting `partial` into the /// Determine the RLP of the node, assuming we're inserting `partial` into the
/// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// node currently of data `old`. This will *not* delete any hash of `old` from the database;
/// it will just return the new RLP that includes the new node. /// it will just return the new RLP that includes the new node.