diff --git a/Cargo.lock b/Cargo.lock
index f583a8747..627fbfa69 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -219,7 +219,6 @@ dependencies = [
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -243,7 +242,6 @@ dependencies = [
  "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -299,7 +297,6 @@ dependencies = [
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
diff --git a/Cargo.toml b/Cargo.toml
index 22d0f9288..196807a04 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,20 +27,17 @@ ethcore-devtools = { path = "devtools" }
 ethcore-rpc = { path = "rpc", optional = true }
 rpassword = "0.1"
 
-[dev-dependencies]
-ethcore = { path = "ethcore", features = ["dev"] }
-ethcore-util = { path = "util", features = ["dev"] }
-ethsync = { path = "sync", features = ["dev"] }
-ethcore-rpc = { path = "rpc", features = ["dev"] }
-
 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
-dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
-dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"]
+dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"]
 travis-beta = ["ethcore/json-tests"]
-travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"]
+travis-nightly = ["ethcore/json-tests", "dev"]
 
 [[bin]]
 path = "parity/main.rs"
 name = "parity"
+
+[profile.release]
+debug = false
+lto = false
diff --git a/README.md b/README.md
index 4fd2a53cc..47a27e30e 100644
--- a/README.md
+++ b/README.md
@@ -34,9 +34,6 @@ Then, download and build Parity:
 git clone https://github.com/ethcore/parity
 cd parity
 
-# parity should be built with rust beta
-multirust override beta
-
 # build in release mode
 cargo build --release
 ```
diff --git a/cargo.sh b/cargo.sh
deleted file mode 100755
index 6870ab385..000000000
--- a/cargo.sh
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/bin/sh
-cargo "$@" --features dev-clippy
diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml
index fbfe175d7..c3a3d32dc 100644
--- a/ethcore/Cargo.toml
+++ b/ethcore/Cargo.toml
@@ -5,10 +5,6 @@ license = "GPL-3.0"
 name = "ethcore"
 version = "0.9.99"
 authors = ["Ethcore <admin@ethcore.io>"]
-build = "build.rs"
-
-[build-dependencies]
-rustc_version = "0.1"
 
 [dependencies]
 log = "0.3"
@@ -31,5 +27,5 @@ jit = ["evmjit"]
 evm-debug = []
 json-tests = []
 test-heavy = []
-dev = []
+dev = ["clippy"]
 default = []
diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs
index 9cba8b3a0..5f6515c0d 100644
--- a/ethcore/src/basic_types.rs
+++ b/ethcore/src/basic_types.rs
@@ -24,7 +24,7 @@ pub type LogBloom = H2048;
 /// Constant 2048-bit datum for 0. Often used as a default.
 pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]);
 
-#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))]
+#[cfg_attr(feature="dev", allow(enum_variant_names))]
 /// Semantic boolean for when a seal/signature is included.
 pub enum Seal {
 	/// The seal/signature is included.
diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index b3894db94..7eb34670f 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -16,7 +16,7 @@
 
 //! Blockchain block.
 
-#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
+#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_>
 
 use common::*;
 use engine::*;
@@ -171,7 +171,7 @@ pub struct SealedBlock {
 
 impl<'x> OpenBlock<'x> {
 	/// Create a new OpenBlock ready for transaction pushing.
-	pub fn new(engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
+	pub fn new(engine: &'x Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
 		let mut r = OpenBlock {
 			block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())),
 			engine: engine,
@@ -317,7 +317,7 @@ impl ClosedBlock {
 	}
 
 	/// Drop this object and return the underlieing database.
-	pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
+	pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 
 impl SealedBlock {
@@ -331,7 +331,7 @@ impl SealedBlock {
 	}
 
 	/// Drop this object and return the underlieing database.
-	pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
+	pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }
 
 impl IsBlock for SealedBlock {
@@ -339,10 +339,10 @@ impl IsBlock for SealedBlock {
 }
 
 /// Enact the block given by block header, transactions and uncles
-pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
 	{
 		if ::log::max_log_level() >= ::log::LogLevel::Trace {
-			let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce());
+			let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce());
 			trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author()));
 		}
 	}
@@ -357,20 +357,20 @@
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
-pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
 	let block = BlockView::new(block_bytes);
 	let header = block.header();
 	enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
-pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
+pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<ClosedBlock, Error> {
 	let view = BlockView::new(&block.bytes);
 	enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
-pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
+pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes) -> Result<SealedBlock, Error> {
 	let header = BlockView::new(block_bytes).header_view();
 	Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal())))
 }
@@ -389,7 +389,7 @@ mod tests {
 		let genesis_header = engine.spec().genesis_header();
 		let mut db_result = get_temp_journal_db();
 		let mut db = db_result.take();
-		engine.spec().ensure_db_good(&mut db);
+		engine.spec().ensure_db_good(db.as_hashdb_mut());
 		let last_hashes = vec![genesis_header.hash()];
 		let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]);
 		let b = b.close();
@@ -404,14 +404,14 @@ mod tests {
 
 		let mut db_result = get_temp_journal_db();
 		let mut db = db_result.take();
-		engine.spec().ensure_db_good(&mut db);
+		engine.spec().ensure_db_good(db.as_hashdb_mut());
 		let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap();
 		let orig_bytes = b.rlp_bytes();
 		let orig_db = b.drain();
 
 		let mut db_result = get_temp_journal_db();
 		let mut db = db_result.take();
-		engine.spec().ensure_db_good(&mut db);
+		engine.spec().ensure_db_good(db.as_hashdb_mut());
 		let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, vec![genesis_header.hash()]).unwrap();
 
 		assert_eq!(e.rlp_bytes(), orig_bytes);
diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs
index 3dfb98e8a..50db23dfe 100644
--- a/ethcore/src/block_queue.rs
+++ b/ethcore/src/block_queue.rs
@@ -95,7 +95,7 @@ pub struct BlockQueue {
 	panic_handler: Arc<PanicHandler>,
 	engine: Arc<Box<Engine>>,
 	more_to_verify: Arc<Condvar>,
-	verification: Arc<Mutex<Verification>>,
+	verification: Arc<Verification>,
 	verifiers: Vec<JoinHandle<()>>,
 	deleting: Arc<AtomicBool>,
 	ready_signal: Arc<QueueSignal>,
@@ -121,7 +121,7 @@ struct QueueSignal {
 }
 
 impl QueueSignal {
-	#[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))]
+	#[cfg_attr(feature="dev", allow(bool_comparison))]
 	fn set(&self) {
 		if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false {
 			self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message");
@@ -132,18 +132,23 @@ impl QueueSignal {
 	}
 }
 
-#[derive(Default)]
 struct Verification {
-	unverified: VecDeque<UnverifiedBlock>,
-	verified: VecDeque<PreverifiedBlock>,
-	verifying: VecDeque<VerifyingBlock>,
-	bad: HashSet<H256>,
+	// All locks must be captured in the order declared here.
+	unverified: Mutex<VecDeque<UnverifiedBlock>>,
+	verified: Mutex<VecDeque<PreverifiedBlock>>,
+	verifying: Mutex<VecDeque<VerifyingBlock>>,
+	bad: Mutex<HashSet<H256>>,
 }
 
 impl BlockQueue {
 	/// Creates a new queue instance.
 	pub fn new(config: BlockQueueConfig, engine: Arc<Box<Engine>>, message_channel: IoChannel<NetSyncMessage>) -> BlockQueue {
-		let verification = Arc::new(Mutex::new(Verification::default()));
+		let verification = Arc::new(Verification {
+			unverified: Mutex::new(VecDeque::new()),
+			verified: Mutex::new(VecDeque::new()),
+			verifying: Mutex::new(VecDeque::new()),
+			bad: Mutex::new(HashSet::new()),
+		});
 		let more_to_verify = Arc::new(Condvar::new());
 		let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel });
 		let deleting = Arc::new(AtomicBool::new(false));
@@ -186,17 +191,17 @@
 		}
 	}
 
-	fn verify(verification: Arc<Mutex<Verification>>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
+	fn verify(verification: Arc<Verification>, engine: Arc<Box<Engine>>, wait: Arc<Condvar>, ready: Arc<QueueSignal>, deleting: Arc<AtomicBool>, empty: Arc<Condvar>) {
 		while !deleting.load(AtomicOrdering::Acquire) {
 			{
-				let mut lock = verification.lock().unwrap();
+				let mut unverified = verification.unverified.lock().unwrap();
 
-				if lock.unverified.is_empty() && lock.verifying.is_empty() {
+				if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() {
 					empty.notify_all();
 				}
 
-				while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
-					lock = wait.wait(lock).unwrap();
+				while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) {
+					unverified = wait.wait(unverified).unwrap();
 				}
 
 				if deleting.load(AtomicOrdering::Acquire) {
@@ -205,39 +210,42 @@
 			}
 
 			let block = {
-				let mut v = verification.lock().unwrap();
-				if v.unverified.is_empty() {
+				let mut unverified = verification.unverified.lock().unwrap();
+				if unverified.is_empty() {
 					continue;
 				}
-				let block = v.unverified.pop_front().unwrap();
-				v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
+				let mut verifying = verification.verifying.lock().unwrap();
+				let block = unverified.pop_front().unwrap();
+				verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None });
 				block
 			};
 
 			let block_hash = block.header.hash();
 			match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) {
 				Ok(verified) => {
-					let mut v = verification.lock().unwrap();
-					for e in &mut v.verifying {
+					let mut verifying = verification.verifying.lock().unwrap();
+					for e in verifying.iter_mut() {
 						if e.hash == block_hash {
 							e.block = Some(verified);
 							break;
 						}
 					}
-					if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash {
+					if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash {
 						// we're next!
-						let mut vref = v.deref_mut();
-						BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+						let mut verified = verification.verified.lock().unwrap();
+						let mut bad = verification.bad.lock().unwrap();
+						BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
 						ready.set();
 					}
 				},
 				Err(err) => {
-					let mut v = verification.lock().unwrap();
+					let mut verifying = verification.verifying.lock().unwrap();
+					let mut verified = verification.verified.lock().unwrap();
+					let mut bad = verification.bad.lock().unwrap();
 					warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err);
-					v.bad.insert(block_hash.clone());
-					v.verifying.retain(|e| e.hash != block_hash);
-					let mut vref = v.deref_mut();
-					BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad);
+					bad.insert(block_hash.clone());
+					verifying.retain(|e| e.hash != block_hash);
+					BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad);
 					ready.set();
 				}
 			}
@@ -257,19 +265,21 @@
 	}
 
 	/// Clear the queue and stop verification activity.
-	pub fn clear(&mut self) {
-		let mut verification = self.verification.lock().unwrap();
-		verification.unverified.clear();
-		verification.verifying.clear();
-		verification.verified.clear();
+	pub fn clear(&self) {
+		let mut unverified = self.verification.unverified.lock().unwrap();
+		let mut verifying = self.verification.verifying.lock().unwrap();
+		let mut verified = self.verification.verified.lock().unwrap();
+		unverified.clear();
+		verifying.clear();
+		verified.clear();
 		self.processing.write().unwrap().clear();
 	}
 
-	/// Wait for queue to be empty
-	pub fn flush(&mut self) {
-		let mut verification = self.verification.lock().unwrap();
-		while !verification.unverified.is_empty() || !verification.verifying.is_empty() {
-			verification = self.empty.wait(verification).unwrap();
+	/// Wait for unverified queue to be empty
+	pub fn flush(&self) {
+		let mut unverified = self.verification.unverified.lock().unwrap();
+		while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() {
+			unverified = self.empty.wait(unverified).unwrap();
 		}
 	}
 
@@ -278,27 +288,28 @@
 		if self.processing.read().unwrap().contains(&hash) {
 			return BlockStatus::Queued;
 		}
-		if self.verification.lock().unwrap().bad.contains(&hash) {
+		if self.verification.bad.lock().unwrap().contains(&hash) {
 			return BlockStatus::Bad;
 		}
 		BlockStatus::Unknown
 	}
 
 	/// Add a block to the queue.
-	pub fn import_block(&mut self, bytes: Bytes) -> ImportResult {
+	pub fn import_block(&self, bytes: Bytes) -> ImportResult {
 		let header = BlockView::new(&bytes).header();
 		let h = header.hash();
-		if self.processing.read().unwrap().contains(&h) {
-			return Err(x!(ImportError::AlreadyQueued));
-		}
 		{
-			let mut verification = self.verification.lock().unwrap();
-			if verification.bad.contains(&h) {
+			if self.processing.read().unwrap().contains(&h) {
+				return Err(x!(ImportError::AlreadyQueued));
+			}
+
+			let mut bad = self.verification.bad.lock().unwrap();
+			if bad.contains(&h) {
 				return Err(x!(ImportError::KnownBad));
 			}
-			if verification.bad.contains(&header.parent_hash) {
-				verification.bad.insert(h.clone());
+			if bad.contains(&header.parent_hash) {
+				bad.insert(h.clone());
 				return Err(x!(ImportError::KnownBad));
 			}
 		}
@@ -306,48 +317,47 @@
 		match verify_block_basic(&header, &bytes, self.engine.deref().deref()) {
 			Ok(()) => {
 				self.processing.write().unwrap().insert(h.clone());
-				self.verification.lock().unwrap().unverified.push_back(UnverifiedBlock { header: header, bytes: bytes });
+				self.verification.unverified.lock().unwrap().push_back(UnverifiedBlock { header: header, bytes: bytes });
 				self.more_to_verify.notify_all();
 				Ok(h)
 			},
 			Err(err) => {
 				warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err);
-				self.verification.lock().unwrap().bad.insert(h.clone());
+				self.verification.bad.lock().unwrap().insert(h.clone());
 				Err(err)
 			}
 		}
 	}
 
 	/// Mark given block and all its children as bad. Stops verification.
-	pub fn mark_as_bad(&mut self, block_hashes: &[H256]) {
+	pub fn mark_as_bad(&self, block_hashes: &[H256]) {
 		if block_hashes.is_empty() {
 			return;
 		}
-		let mut verification_lock = self.verification.lock().unwrap();
+		let mut verified_lock = self.verification.verified.lock().unwrap();
+		let mut verified = verified_lock.deref_mut();
+		let mut bad = self.verification.bad.lock().unwrap();
 		let mut processing = self.processing.write().unwrap();
-
-		let mut verification = verification_lock.deref_mut();
-
-		verification.bad.reserve(block_hashes.len());
+		bad.reserve(block_hashes.len());
 		for hash in block_hashes {
-			verification.bad.insert(hash.clone());
+			bad.insert(hash.clone());
 			processing.remove(&hash);
 		}
 
 		let mut new_verified = VecDeque::new();
-		for block in verification.verified.drain(..) {
-			if verification.bad.contains(&block.header.parent_hash) {
-				verification.bad.insert(block.header.hash());
+		for block in verified.drain(..) {
+			if bad.contains(&block.header.parent_hash) {
+				bad.insert(block.header.hash());
 				processing.remove(&block.header.hash());
 			} else {
 				new_verified.push_back(block);
 			}
 		}
-		verification.verified = new_verified;
+		*verified = new_verified;
 	}
 
 	/// Mark given block as processed
-	pub fn mark_as_good(&mut self, block_hashes: &[H256]) {
+	pub fn mark_as_good(&self, block_hashes: &[H256]) {
 		if block_hashes.is_empty() {
 			return;
 		}
@@ -358,16 +368,16 @@ impl BlockQueue {
 	}
 
 	/// Removes up to `max` verified blocks from the queue
-	pub fn drain(&mut self, max: usize) -> Vec<PreverifiedBlock> {
-		let mut verification = self.verification.lock().unwrap();
-		let count = min(max, verification.verified.len());
+	pub fn drain(&self, max: usize) -> Vec<PreverifiedBlock> {
+		let mut verified = self.verification.verified.lock().unwrap();
+		let count = min(max, verified.len());
 		let mut result = Vec::with_capacity(count);
 		for _ in 0..count {
-			let block = verification.verified.pop_front().unwrap();
+			let block = verified.pop_front().unwrap();
 			result.push(block);
 		}
 		self.ready_signal.reset();
-		if !verification.verified.is_empty() {
+		if !verified.is_empty() {
 			self.ready_signal.set();
 		}
 		result
@@ -375,28 +385,39 @@
 
 	/// Get queue status.
 	pub fn queue_info(&self) -> BlockQueueInfo {
-		let verification = self.verification.lock().unwrap();
+		let (unverified_len, unverified_bytes) = {
+			let v = self.verification.unverified.lock().unwrap();
+			(v.len(), v.heap_size_of_children())
+		};
+		let (verifying_len, verifying_bytes) = {
+			let v = self.verification.verifying.lock().unwrap();
+			(v.len(), v.heap_size_of_children())
+		};
+		let (verified_len, verified_bytes) = {
+			let v = self.verification.verified.lock().unwrap();
+			(v.len(), v.heap_size_of_children())
+		};
 		BlockQueueInfo {
-			verified_queue_size: verification.verified.len(),
-			unverified_queue_size: verification.unverified.len(),
-			verifying_queue_size: verification.verifying.len(),
+			unverified_queue_size: unverified_len,
+			verifying_queue_size: verifying_len,
+			verified_queue_size: verified_len,
 			max_queue_size: self.max_queue_size,
 			max_mem_use: self.max_mem_use,
 			mem_used:
-				verification.unverified.heap_size_of_children()
-				+ verification.verifying.heap_size_of_children()
-				+ verification.verified.heap_size_of_children(),
+				unverified_bytes
+				+ verifying_bytes
+				+ verified_bytes
 				// TODO: https://github.com/servo/heapsize/pull/50
 				//+ self.processing.read().unwrap().heap_size_of_children(),
 		}
 	}
 
+	/// Optimise memory footprint of the heap fields.
 	pub fn collect_garbage(&self) {
 		{
-			let mut verification = self.verification.lock().unwrap();
-			verification.unverified.shrink_to_fit();
-			verification.verifying.shrink_to_fit();
-			verification.verified.shrink_to_fit();
+			self.verification.unverified.lock().unwrap().shrink_to_fit();
+			self.verification.verifying.lock().unwrap().shrink_to_fit();
+			self.verification.verified.lock().unwrap().shrink_to_fit();
 		}
 		self.processing.write().unwrap().shrink_to_fit();
 	}
@@ -444,7 +465,7 @@ mod tests {
 
 	#[test]
 	fn can_import_blocks() {
-		let mut queue = get_test_queue();
+		let queue = get_test_queue();
 		if let Err(e) = queue.import_block(get_good_dummy_block()) {
 			panic!("error importing block that is valid by definition({:?})", e);
 		}
@@ -452,7 +473,7 @@ mod tests {
 
 	#[test]
 	fn returns_error_for_duplicates() {
-		let mut queue = get_test_queue();
+		let queue = get_test_queue();
 		if let Err(e) = queue.import_block(get_good_dummy_block()) {
 			panic!("error importing block that is valid by definition({:?})", e);
 		}
@@ -471,7 +492,7 @@ mod tests {
 
 	#[test]
 	fn returns_ok_for_drained_duplicates() {
-		let mut queue = get_test_queue();
+		let queue = get_test_queue();
 		let block = get_good_dummy_block();
 		let hash = BlockView::new(&block).header().hash().clone();
 		if let Err(e) = queue.import_block(block) {
@@ -488,7 +509,7 @@ mod tests {
 
 	#[test]
 	fn returns_empty_once_finished() {
-		let mut queue = get_test_queue();
+		let queue = get_test_queue();
 		queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition");
 		queue.flush();
 		queue.drain(1);
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index 8c21532c8..36db9dded 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -16,6 +16,7 @@
 
 //! Blockchain database.
 
+use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};
 use util::*;
 use header::*;
 use extras::*;
@@ -134,8 +135,9 @@ struct CacheManager {
 
 ///
 /// **Does not do input data verification.**
 pub struct BlockChain {
-	pref_cache_size: usize,
-	max_cache_size: usize,
+	// All locks must be captured in the order declared here.
+	pref_cache_size: AtomicUsize,
+	max_cache_size: AtomicUsize,
 
 	best_block: RwLock<BestBlock>,
@@ -157,6 +159,8 @@ pub struct BlockChain {
 
 	// blooms indexing
 	bloom_indexer: BloomIndexer,
+
+	insert_lock: Mutex<()>
 }
 
 impl FilterDataSource for BlockChain {
@@ -262,8 +266,8 @@ impl BlockChain {
 		(0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new()));
 
 		let bc = BlockChain {
-			pref_cache_size: config.pref_cache_size,
-			max_cache_size: config.max_cache_size,
+			pref_cache_size: AtomicUsize::new(config.pref_cache_size),
+			max_cache_size: AtomicUsize::new(config.max_cache_size),
 			best_block: RwLock::new(BestBlock::default()),
 			blocks: RwLock::new(HashMap::new()),
 			block_details: RwLock::new(HashMap::new()),
@@ -275,7 +279,8 @@ impl BlockChain {
 			extras_db: extras_db,
 			blocks_db: blocks_db,
 			cache_man: RwLock::new(cache_man),
-			bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS)
+			bloom_indexer: BloomIndexer::new(BLOOM_INDEX_SIZE, BLOOM_LEVELS),
+			insert_lock: Mutex::new(()),
 		};
 
 		// load best block
@@ -318,9 +323,9 @@ impl BlockChain {
 	}
 
 	/// Set the cache configuration.
-	pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) {
-		self.pref_cache_size = pref_cache_size;
-		self.max_cache_size = max_cache_size;
+	pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
+		self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed);
+		self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed);
 	}
 
 	/// Returns a tree route between `from` and `to`, which is a tuple of:
@@ -424,6 +429,7 @@ impl BlockChain {
 			return ImportRoute::none();
 		}
 
+		let _lock = self.insert_lock.lock();
 		// store block in db
 		self.blocks_db.put(&hash, &bytes).unwrap();
 
@@ -446,48 +452,58 @@
 		let batch = DBTransaction::new();
 		batch.put(b"best", &update.info.hash).unwrap();
 
-		// update best block
-		let mut best_block = self.best_block.write().unwrap();
-		match update.info.location {
-			BlockLocation::Branch => (),
-			_ => {
-				*best_block = BestBlock {
-					hash: update.info.hash,
-					number: update.info.number,
-					total_difficulty: update.info.total_difficulty
-				};
+		{
+			let mut write_details = self.block_details.write().unwrap();
+			for (hash, details) in update.block_details.into_iter() {
+				batch.put_extras(&hash, &details);
+				self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone()));
+				write_details.insert(hash, details);
 			}
 		}
 
-		let mut write_hashes = self.block_hashes.write().unwrap();
-		for (number, hash) in &update.block_hashes {
-			batch.put_extras(number, hash);
-			write_hashes.remove(number);
+		{
+			let mut write_receipts = self.block_receipts.write().unwrap();
+			for (hash, receipt) in &update.block_receipts {
+				batch.put_extras(hash, receipt);
+				write_receipts.remove(hash);
+			}
 		}
 
-		let mut write_details = self.block_details.write().unwrap();
-		for (hash, details) in update.block_details.into_iter() {
-			batch.put_extras(&hash, &details);
-			write_details.insert(hash.clone(), details);
-			self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash));
+		{
+			let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
+			for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
+				batch.put_extras(bloom_hash, blocks_bloom);
+				write_blocks_blooms.remove(bloom_hash);
+			}
 		}
 
-		let mut write_receipts = self.block_receipts.write().unwrap();
-		for (hash, receipt) in &update.block_receipts {
-			batch.put_extras(hash, receipt);
-			write_receipts.remove(hash);
-		}
+		// These cached values must be updated last and together
+		{
+			let mut best_block = self.best_block.write().unwrap();
+			let mut write_hashes = self.block_hashes.write().unwrap();
+			let mut write_txs = self.transaction_addresses.write().unwrap();
 
-		let mut write_txs = self.transaction_addresses.write().unwrap();
-		for (hash, tx_address) in &update.transactions_addresses {
-			batch.put_extras(hash, tx_address);
-			write_txs.remove(hash);
-		}
+			// update best block
+			match update.info.location {
+				BlockLocation::Branch => (),
+				_ => {
+					*best_block = BestBlock {
+						hash: update.info.hash,
+						number: update.info.number,
+						total_difficulty: update.info.total_difficulty
+					};
+				}
+			}
 
-		let mut write_blocks_blooms = self.blocks_blooms.write().unwrap();
-		for (bloom_hash, blocks_bloom) in &update.blocks_blooms {
-			batch.put_extras(bloom_hash, blocks_bloom);
-			write_blocks_blooms.remove(bloom_hash);
+			for (number, hash) in &update.block_hashes {
+				batch.put_extras(number, hash);
+				write_hashes.remove(number);
+			}
+
+			for (hash, tx_address) in &update.transactions_addresses {
+				batch.put_extras(hash, tx_address);
+				write_txs.remove(hash);
+			}
 		}
 
 		// update extras database
@@ -781,11 +797,10 @@ impl BlockChain {
 
 	/// Ticks our cache system and throws out any old data.
 	pub fn collect_garbage(&self) {
-		if self.cache_size().total() < self.pref_cache_size { return; }
+		if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; }
 
 		for _ in 0..COLLECTION_QUEUE_SIZE {
 			{
-				let mut cache_man = self.cache_man.write().unwrap();
 				let mut blocks = self.blocks.write().unwrap();
 				let mut block_details = self.block_details.write().unwrap();
 				let mut block_hashes = self.block_hashes.write().unwrap();
@@ -793,6 +808,7 @@ impl BlockChain {
 				let mut block_logs = self.block_logs.write().unwrap();
 				let mut blocks_blooms = self.blocks_blooms.write().unwrap();
 				let mut block_receipts = self.block_receipts.write().unwrap();
+				let mut cache_man = self.cache_man.write().unwrap();
 
 				for id in cache_man.cache_usage.pop_back().unwrap().into_iter() {
 					cache_man.in_use.remove(&id);
@@ -819,7 +835,7 @@ impl BlockChain {
 				blocks_blooms.shrink_to_fit();
 				block_receipts.shrink_to_fit();
 			}
-			if self.cache_size().total() < self.max_cache_size { break; }
+			if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; }
 		}
 
 		// TODO: m_lastCollection = chrono::system_clock::now();
@@ -891,7 +907,7 @@ mod tests {
 	}
 
 	#[test]
-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	fn test_find_uncles() {
 		let mut canon_chain = ChainGenerator::default();
 		let mut finalizer = BlockFinalizer::default();
@@ -929,7 +945,7 @@ mod tests {
 	}
 
 	#[test]
-	#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))]
+	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
 	fn test_small_fork() {
 		let mut canon_chain = ChainGenerator::default();
 		let mut finalizer = BlockFinalizer::default();
diff --git a/ethcore/src/blockchain/mod.rs b/ethcore/src/blockchain/mod.rs
index 6559d8364..29a4ee684 100644
--- a/ethcore/src/blockchain/mod.rs
+++ b/ethcore/src/blockchain/mod.rs
@@ -23,9 +23,9 @@ mod bloom_indexer;
 mod cache;
 mod tree_route;
 mod update;
+mod import_route;
 #[cfg(test)]
 mod generator;
-mod import_route;
 
 pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig};
 pub use self::cache::CacheSize;
diff --git a/ethcore/src/client.rs b/ethcore/src/client/client.rs
similarity index 63%
rename from ethcore/src/client.rs
rename to ethcore/src/client/client.rs
index 12227ca9f..4e8c34b33 100644
--- a/ethcore/src/client.rs
+++ b/ethcore/src/client/client.rs
@@ -20,7 +20,6 @@ use std::marker::PhantomData;
 use std::sync::atomic::AtomicBool;
 use util::*;
 use util::panics::*;
-use blockchain::{BlockChain, BlockProvider};
 use views::BlockView;
 use error::*;
 use header::{BlockNumber};
@@ -28,7 +27,6 @@ use state::State;
 use spec::Spec;
 use engine::Engine;
 use views::HeaderView;
-use block_queue::BlockQueue;
 use service::{NetSyncMessage, SyncMessage};
 use env_info::LastHashes;
 use verification::*;
@@ -37,33 +35,10 @@ use transaction::LocalizedTransaction;
 use extras::TransactionAddress;
 use filter::Filter;
 use log_entry::LocalizedLogEntry;
-use util::keys::store::SecretStore;
-pub use block_queue::{BlockQueueConfig, BlockQueueInfo};
-pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize};
-
-/// Uniquely identifies block.
-#[derive(Debug, PartialEq, Clone)]
-pub enum BlockId {
-	/// Block's sha3.
-	/// Querying by hash is always faster.
-	Hash(H256),
-	/// Block number within canon blockchain.
-	Number(BlockNumber),
-	/// Earliest block (genesis).
-	Earliest,
-	/// Latest mined block.
-	Latest
-}
-
-/// Uniquely identifies transaction.
-#[derive(Debug, PartialEq, Clone)]
-pub enum TransactionId {
-	/// Transaction's sha3.
-	Hash(H256),
-	/// Block id and transaction index within this block.
-	/// Querying by block position is always faster.
-	Location(BlockId, usize)
-}
+use block_queue::{BlockQueue, BlockQueueInfo};
+use blockchain::{BlockChain, BlockProvider, TreeRoute};
+use client::{BlockId, TransactionId, ClientConfig, BlockChainClient};
+pub use blockchain::CacheSize as BlockChainCacheSize;
 
 /// General block status
 #[derive(Debug, Eq, PartialEq)]
@@ -78,30 +53,6 @@ pub enum BlockStatus {
 	Unknown,
 }
 
-/// Client configuration. Includes configs for all sub-systems.
-#[derive(Debug)]
-pub struct ClientConfig {
-	/// Block queue configuration.
-	pub queue: BlockQueueConfig,
-	/// Blockchain configuration.
-	pub blockchain: BlockChainConfig,
-	/// Prefer journal rather than archive.
-	pub prefer_journal: bool,
-	/// The name of the client instance.
-	pub name: String,
-}
-
-impl Default for ClientConfig {
-	fn default() -> ClientConfig {
-		ClientConfig {
-			queue: Default::default(),
-			blockchain: Default::default(),
-			prefer_journal: false,
-			name: Default::default(),
-		}
-	}
-}
-
 /// Information about the blockchain gathered together.
 #[derive(Debug)]
 pub struct BlockChainInfo {
@@ -123,79 +74,8 @@ impl fmt::Display for BlockChainInfo {
 	}
 }
 
-/// Blockchain database client. Owns and manages a blockchain and a block queue.
-pub trait BlockChainClient : Sync + Send {
-	/// Get raw block header data by block id.
-	fn block_header(&self, id: BlockId) -> Option<Bytes>;
-
-	/// Get raw block body data by block id.
-	/// Block body is an RLP list of two items: uncles and transactions.
-	fn block_body(&self, id: BlockId) -> Option<Bytes>;
-
-	/// Get raw block data by block header hash.
-	fn block(&self, id: BlockId) -> Option<Bytes>;
-
-	/// Get block status by block header hash.
-	fn block_status(&self, id: BlockId) -> BlockStatus;
-
-	/// Get block total difficulty.
-	fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
-
-	/// Get address nonce.
-	fn nonce(&self, address: &Address) -> U256;
-
-	/// Get block hash.
-	fn block_hash(&self, id: BlockId) -> Option<H256>;
-
-	/// Get address code.
-	fn code(&self, address: &Address) -> Option<Bytes>;
-
-	/// Get transaction with given hash.
-	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
-
-	/// Get a tree route between `from` and `to`.
-	/// See `BlockChain::tree_route`.
-	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
-
-	/// Get latest state node
-	fn state_data(&self, hash: &H256) -> Option<Bytes>;
-
-	/// Get raw block receipts data by block header hash.
-	fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
-
-	/// Import a block into the blockchain.
-	fn import_block(&self, bytes: Bytes) -> ImportResult;
-
-	/// Get block queue information.
-	fn queue_info(&self) -> BlockQueueInfo;
-
-	/// Clear block queue and abort all import activity.
-	fn clear_queue(&self);
-
-	/// Get blockchain information.
-	fn chain_info(&self) -> BlockChainInfo;
-
-	/// Get the best block header.
-	fn best_block_header(&self) -> Bytes {
-		self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
-	}
-
-	/// Returns numbers of blocks containing given bloom.
-	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
-
-	/// Returns logs matching given filter.
-	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
-
-	/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
-	fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
-
-	/// Submit `seal` as a valid solution for the header of `pow_hash`.
-	/// Will check the seal, but not actually insert the block into the chain.
-	fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
-}
-
-#[derive(Default, Clone, Debug, Eq, PartialEq)]
 /// Report on the status of a client.
+#[derive(Default, Clone, Debug, Eq, PartialEq)]
 pub struct ClientReport {
 	/// How many blocks have been imported so far.
 	pub blocks_imported: usize,
@@ -219,10 +99,10 @@ impl ClientReport {
 /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
 /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
 pub struct Client<V = CanonVerifier> where V: Verifier {
-	chain: Arc<RwLock<BlockChain>>,
+	chain: Arc<BlockChain>,
 	engine: Arc<Box<Engine>>,
-	state_db: Mutex<JournalDB>,
-	block_queue: RwLock<BlockQueue>,
+	state_db: Mutex<Box<JournalDB>>,
+	block_queue: BlockQueue,
 	report: RwLock<ClientReport>,
 	import_lock: Mutex<()>,
 	panic_handler: Arc<PanicHandler>,
@@ -233,7 +113,6 @@ pub struct Client<V = CanonVerifier> where V: Verifier {
 	author: RwLock<Address>,
 	extra_data: RwLock<Bytes>,
 	verifier: PhantomData<V>,
-	secret_store: Arc<RwLock<SecretStore>>,
 }
 
 const HISTORY: u64 = 1000;
@@ -252,16 +131,19 @@ impl<V> Client<V> where V: Verifier {
 		let mut dir = path.to_path_buf();
 		dir.push(H64::from(spec.genesis_header().hash()).hex());
 		//TODO: sec/fat: pruned/full versioning
-		dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" }));
+		// version here is a bit useless now, since it's controlled only by the pruning algo.
+		dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning));
 		let path = dir.as_path();
 		let gb = spec.genesis_block();
-		let chain = Arc::new(RwLock::new(BlockChain::new(config.blockchain, &gb, path)));
+		let chain = Arc::new(BlockChain::new(config.blockchain, &gb, path));
 		let mut state_path = path.to_path_buf();
 		state_path.push("state");
 
 		let engine = Arc::new(try!(spec.to_engine()));
-		let mut state_db = JournalDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal);
-		if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) {
+		let state_path_str = state_path.to_str().unwrap();
+		let mut state_db = journaldb::new(state_path_str, config.pruning);
+
+		if state_db.is_empty() && engine.spec().ensure_db_good(state_db.as_hashdb_mut()) {
 			state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
 		}
 
@@ -269,14 +151,11 @@ impl<V> Client<V> where V: Verifier {
 		let panic_handler = PanicHandler::new_in_arc();
 		panic_handler.forward_from(&block_queue);
 
-		let secret_store = Arc::new(RwLock::new(SecretStore::new()));
-		secret_store.write().unwrap().try_import_existing();
-
 		Ok(Arc::new(Client {
 			chain: chain,
 			engine: engine,
 			state_db: Mutex::new(state_db),
-			block_queue: RwLock::new(block_queue),
+			block_queue: block_queue,
 			report: RwLock::new(Default::default()),
 			import_lock: Mutex::new(()),
 			panic_handler: panic_handler,
@@ -285,22 +164,20 @@ impl<V> Client<V> where V: Verifier {
 			author: RwLock::new(Address::new()),
 			extra_data: RwLock::new(Vec::new()),
 			verifier: PhantomData,
-			secret_store: secret_store,
 		}))
 	}
 
 	/// Flush the block import queue.
 	pub fn flush_queue(&self) {
-		self.block_queue.write().unwrap().flush();
+		self.block_queue.flush();
 	}
 
 	fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
 		let mut last_hashes = LastHashes::new();
 		last_hashes.resize(256, H256::new());
 		last_hashes[0] = parent_hash;
-		let chain = self.chain.read().unwrap();
 		for i in 0..255 {
-			match chain.block_details(&last_hashes[i]) {
+			match self.chain.block_details(&last_hashes[i]) {
 				Some(details) => {
 					last_hashes[i + 1] = details.parent.clone();
 				},
@@ -310,31 +187,26 @@ impl<V> Client<V> where V: Verifier {
 		last_hashes
 	}
 
-	/// Secret store (key manager)
-	pub fn secret_store(&self) -> &Arc<RwLock<SecretStore>> {
-		&self.secret_store
-	}
-
 	fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<ClosedBlock, ()> {
 		let engine = self.engine.deref().deref();
 		let header = &block.header;
 
 		// Check the block isn't so old we won't be able to enact it.
-		let best_block_number = self.chain.read().unwrap().best_block_number();
+		let best_block_number = self.chain.best_block_number();
 		if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY {
 			warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
 			return Err(());
 		}
 
 		// Verify Block Family
-		let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.read().unwrap().deref());
+		let verify_family_result = V::verify_block_family(&header, &block.bytes, engine, self.chain.deref());
 		if let Err(e) = verify_family_result {
 			warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
 			return Err(());
 		};
 
 		// Check if Parent is in chain
-		let chain_has_parent = self.chain.read().unwrap().block_header(&header.parent_hash);
+		let chain_has_parent = self.chain.block_header(&header.parent_hash);
 		if let None = chain_has_parent {
 			warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
 			return Err(());
@@ -343,7 +215,7 @@ impl<V> Client<V> where V: Verifier {
 		// Enact Verified Block
 		let parent = chain_has_parent.unwrap();
 		let last_hashes = self.build_last_hashes(header.parent_hash.clone());
-		let db = self.state_db.lock().unwrap().clone();
+		let db = self.state_db.lock().unwrap().spawn();
 
 		let enact_result = enact_verified(&block, engine, db, &parent, last_hashes);
 		if let Err(e) = enact_result {
@@ -369,7 +241,7 @@ impl<V> Client<V> where V: Verifier {
 		let mut bad_blocks = HashSet::new();
 
 		let _import_lock = self.import_lock.lock();
-		let blocks = self.block_queue.write().unwrap().drain(max_blocks_to_import);
+		let blocks = self.block_queue.drain(max_blocks_to_import);
 
 		let original_best = self.chain_info().best_block_hash;
 
@@ -390,8 +262,7 @@ impl<V> Client<V> where V: Verifier {
 			// Are we committing an era?
 			let ancient = if header.number() >= HISTORY {
 				let n = header.number() - HISTORY;
-				let chain = self.chain.read().unwrap();
-				Some((n, chain.block_hash(n).unwrap()))
+				Some((n, self.chain.block_hash(n).unwrap()))
 			} else {
 				None
 			};
@@ -405,8 +276,7 @@ impl<V> Client<V> where V: Verifier {
 
 			// And update the chain after commit to prevent race conditions
 			// (when something is in chain but you are not able to fetch details)
-			self.chain.write().unwrap()
-				.insert_block(&block.bytes, receipts);
+			self.chain.insert_block(&block.bytes, receipts);
 
 			self.report.write().unwrap().accrue_block(&block);
 			trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
@@ -416,18 +286,16 @@ impl<V> Client<V> where V: Verifier {
 		let bad_blocks = bad_blocks.into_iter().collect::<Vec<H256>>();
 
 		{
-			let mut block_queue = self.block_queue.write().unwrap();
 			if !bad_blocks.is_empty() {
-				block_queue.mark_as_bad(&bad_blocks);
+				self.block_queue.mark_as_bad(&bad_blocks);
 			}
 			if !good_blocks.is_empty() {
-				block_queue.mark_as_good(&good_blocks);
+				self.block_queue.mark_as_good(&good_blocks);
 			}
 		}
 
 		{
-			let block_queue = self.block_queue.read().unwrap();
-			if !good_blocks.is_empty() && block_queue.queue_info().is_empty() {
+			if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
 				io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
 					good: good_blocks,
 					bad: bad_blocks,
@@ -446,12 +314,12 @@ impl<V> Client<V> where V: Verifier {
 
 	/// Get a copy of the best block's state.
 	pub fn state(&self) -> State {
-		State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
+		State::from_existing(self.state_db.lock().unwrap().spawn(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce())
 	}
 
 	/// Get info on the cache.
 	pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
-		self.chain.read().unwrap().cache_size()
+		self.chain.cache_size()
 	}
 
 	/// Get the report.
@@ -463,13 +331,13 @@ impl<V> Client<V> where V: Verifier {
 
 	/// Tick the client.
 	pub fn tick(&self) {
-		self.chain.read().unwrap().collect_garbage();
-		self.block_queue.read().unwrap().collect_garbage();
+		self.chain.collect_garbage();
+		self.block_queue.collect_garbage();
 	}
 
 	/// Set up the cache behaviour.
 	pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
-		self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size);
+		self.chain.configure_cache(pref_cache_size, max_cache_size);
 	}
 
 	fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
@@ -484,9 +352,9 @@ impl<V> Client<V> where V: Verifier {
 	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
 		match id {
 			BlockId::Number(number) => Some(number),
-			BlockId::Hash(ref hash) => self.chain.read().unwrap().block_number(hash),
+			BlockId::Hash(ref hash) => self.chain.block_number(hash),
 			BlockId::Earliest => Some(0),
-			BlockId::Latest => Some(self.chain.read().unwrap().best_block_number())
+			BlockId::Latest => Some(self.chain.best_block_number())
 		}
 	}
 
@@ -512,17 +380,17 @@ impl<V> Client<V> where V: Verifier {
 
 	/// New chain head event. Restart mining operation.
 	pub fn prepare_sealing(&self) {
-		let h = self.chain.read().unwrap().best_block_hash();
+		let h = self.chain.best_block_hash();
 		let mut b = OpenBlock::new(
 			self.engine.deref().deref(),
-			self.state_db.lock().unwrap().clone(),
-			match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} },
+			self.state_db.lock().unwrap().spawn(),
+			match self.chain.block_header(&h) { Some(ref x) => x, None => {return;} },
 			self.build_last_hashes(h.clone()),
 			self.author(),
 			self.extra_data()
 		);
 
-		self.chain.read().unwrap().find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); });
+		self.chain.find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); });
 
 		// TODO: push transactions.
@@ -536,14 +404,12 @@
 
 impl<V> BlockChainClient for Client<V> where V: Verifier {
 	fn block_header(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
+		Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
 	}
 
 	fn block_body(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| {
-			chain.block(&hash).map(|bytes| {
+		Self::block_hash(&self.chain, id).and_then(|hash| {
+			self.chain.block(&hash).map(|bytes| {
 				let rlp = Rlp::new(&bytes);
 				let mut body = RlpStream::new_list(2);
 				body.append_raw(rlp.at(1).as_raw(), 1);
@@ -554,24 +420,21 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}
 
 	fn block(&self, id: BlockId) -> Option<Bytes> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| {
-			chain.block(&hash)
+		Self::block_hash(&self.chain, id).and_then(|hash| {
+			self.chain.block(&hash)
 		})
 	}
 
 	fn block_status(&self, id: BlockId) -> BlockStatus {
-		let chain = self.chain.read().unwrap();
-		match Self::block_hash(&chain, id) {
-			Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
-			Some(hash) => self.block_queue.read().unwrap().block_status(&hash),
+		match Self::block_hash(&self.chain, id) {
+			Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain,
+			Some(hash) => self.block_queue.block_status(&hash),
 			None => BlockStatus::Unknown
 		}
 	}
 
 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
+		Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}
 
 	fn nonce(&self, address: &Address) -> U256 {
@@ -579,8 +442,7 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}
 
 	fn block_hash(&self, id: BlockId) -> Option<H256> {
-		let chain = self.chain.read().unwrap();
-		Self::block_hash(&chain, id)
+		Self::block_hash(&self.chain, id)
 	}
 
 	fn code(&self, address: &Address) -> Option<Bytes> {
@@ -588,20 +450,18 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	}
 
 	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction> {
-		let chain = self.chain.read().unwrap();
 		match id {
-			TransactionId::Hash(ref hash) => chain.transaction_address(hash),
-			TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress {
+			TransactionId::Hash(ref hash) => self.chain.transaction_address(hash),
+			TransactionId::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress {
 				block_hash: hash,
 				index: index
 			})
-		}.and_then(|address| chain.transaction(&address))
+		}.and_then(|address| self.chain.transaction(&address))
 	}
 
 	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
-		let chain = self.chain.read().unwrap();
-		match chain.is_known(from) && chain.is_known(to) {
-			true => Some(chain.tree_route(from.clone(), to.clone())),
+		match self.chain.is_known(from) && self.chain.is_known(to) {
+			true => Some(self.chain.tree_route(from.clone(), to.clone())),
 			false => None
 		}
 	}
@@ -617,43 +477,44 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 	fn import_block(&self, bytes: Bytes) -> ImportResult {
 		{
 			let header = BlockView::new(&bytes).header_view();
-			if self.chain.read().unwrap().is_known(&header.sha3()) {
+			if self.chain.is_known(&header.sha3()) {
 				return Err(x!(ImportError::AlreadyInChain));
 			}
 			if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
 				return Err(x!(BlockError::UnknownParent(header.parent_hash())));
 			}
 		}
-		self.block_queue.write().unwrap().import_block(bytes)
+		self.block_queue.import_block(bytes)
 	}
 
 	fn queue_info(&self) -> BlockQueueInfo {
-		self.block_queue.read().unwrap().queue_info()
+		self.block_queue.queue_info()
 	}
 
 	fn clear_queue(&self) {
-		self.block_queue.write().unwrap().clear();
+		self.block_queue.clear();
 	}
 
 	fn chain_info(&self) -> BlockChainInfo {
-		let chain = self.chain.read().unwrap();
 		BlockChainInfo {
-			total_difficulty: chain.best_block_total_difficulty(),
-			pending_total_difficulty: chain.best_block_total_difficulty(),
-			genesis_hash: chain.genesis_hash(),
-			best_block_hash: chain.best_block_hash(),
-			best_block_number: From::from(chain.best_block_number())
+			total_difficulty: self.chain.best_block_total_difficulty(),
+			pending_total_difficulty: self.chain.best_block_total_difficulty(),
+			genesis_hash: self.chain.genesis_hash(),
+			best_block_hash: self.chain.best_block_hash(),
+			best_block_number: From::from(self.chain.best_block_number())
 		}
 	}
 
 	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
 		match (self.block_number(from_block), self.block_number(to_block)) {
-			(Some(from), Some(to)) => Some(self.chain.read().unwrap().blocks_with_bloom(bloom, from, to)),
+			(Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)),
 			_ => None
 		}
 	}
 
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
+		// TODO: lock blockchain only once
+
 		let mut blocks = filter.bloom_possibilities().iter()
 			.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
 			.flat_map(|m| m)
@@ -665,9 +526,9 @@ impl<V> BlockChainClient for Client<V> where V: Verifier {
 		blocks.sort();
 
 		blocks.into_iter()
-			.filter_map(|number| self.chain.read().unwrap().block_hash(number).map(|hash| (number, hash)))
-			.filter_map(|(number, hash)| self.chain.read().unwrap().block_receipts(&hash).map(|r| (number, hash, r.receipts)))
-			.filter_map(|(number, hash, receipts)| self.chain.read().unwrap().block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
+			.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
+			.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
+			.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
 			.flat_map(|(number, hash, receipts, hashes)| {
 				let mut log_index = 0;
 				receipts.into_iter()
diff --git a/sync/build.rs b/ethcore/src/client/config.rs
similarity index 59%
rename from sync/build.rs
rename to ethcore/src/client/config.rs
index 41b9a1b3e..89e95ea06 100644
--- a/sync/build.rs
+++ b/ethcore/src/client/config.rs
@@ -14,12 +14,19 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-extern crate rustc_version;
+pub use block_queue::BlockQueueConfig;
+pub use blockchain::BlockChainConfig;
+use util::journaldb;
 
-use rustc_version::{version_meta, Channel};
-
-fn main() {
-	if let Channel::Nightly = version_meta().channel {
-		println!("cargo:rustc-cfg=nightly");
-	}
+/// Client configuration. Includes configs for all sub-systems.
+#[derive(Debug, Default)]
+pub struct ClientConfig {
+	/// Block queue configuration.
+	pub queue: BlockQueueConfig,
+	/// Blockchain configuration.
+	pub blockchain: BlockChainConfig,
+	/// The JournalDB ("pruning") algorithm to use.
+	pub pruning: journaldb::Algorithm,
+	/// The name of the client instance.
+	pub name: String,
 }
diff --git a/ethcore/src/client/ids.rs b/ethcore/src/client/ids.rs
new file mode 100644
index 000000000..303657a76
--- /dev/null
+++ b/ethcore/src/client/ids.rs
@@ -0,0 +1,44 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Unique identifiers.
+
+use util::hash::H256;
+use header::BlockNumber;
+
+/// Uniquely identifies block.
+#[derive(Debug, PartialEq, Clone)]
+pub enum BlockId {
+	/// Block's sha3.
+	/// Querying by hash is always faster.
+	Hash(H256),
+	/// Block number within canon blockchain.
+	Number(BlockNumber),
+	/// Earliest block (genesis).
+	Earliest,
+	/// Latest mined block.
+	Latest
+}
+
+/// Uniquely identifies transaction.
+#[derive(Debug, PartialEq, Clone)]
+pub enum TransactionId {
+	/// Transaction's sha3.
+	Hash(H256),
+	/// Block id and transaction index within this block.
+	/// Querying by block position is always faster.
+	Location(BlockId, usize)
+}
diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs
new file mode 100644
index 000000000..afdfb200a
--- /dev/null
+++ b/ethcore/src/client/mod.rs
@@ -0,0 +1,113 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Blockchain database client.
+
+mod client;
+mod config;
+mod ids;
+mod test_client;
+
+pub use self::client::*;
+pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig};
+pub use self::ids::{BlockId, TransactionId};
+pub use self::test_client::{TestBlockChainClient, EachBlockWith};
+
+use std::sync::Mutex;
+use util::bytes::Bytes;
+use util::hash::{Address, H256, H2048};
+use util::numbers::U256;
+use blockchain::TreeRoute;
+use block_queue::BlockQueueInfo;
+use block::ClosedBlock;
+use header::BlockNumber;
+use transaction::LocalizedTransaction;
+use log_entry::LocalizedLogEntry;
+use filter::Filter;
+use error::{ImportResult, Error};
+
+/// Blockchain database client. Owns and manages a blockchain and a block queue.
+pub trait BlockChainClient : Sync + Send {
+	/// Get raw block header data by block id.
+	fn block_header(&self, id: BlockId) -> Option<Bytes>;
+
+	/// Get raw block body data by block id.
+	/// Block body is an RLP list of two items: uncles and transactions.
+	fn block_body(&self, id: BlockId) -> Option<Bytes>;
+
+	/// Get raw block data by block header hash.
+	fn block(&self, id: BlockId) -> Option<Bytes>;
+
+	/// Get block status by block header hash.
+	fn block_status(&self, id: BlockId) -> BlockStatus;
+
+	/// Get block total difficulty.
+	fn block_total_difficulty(&self, id: BlockId) -> Option<U256>;
+
+	/// Get address nonce.
+	fn nonce(&self, address: &Address) -> U256;
+
+	/// Get block hash.
+	fn block_hash(&self, id: BlockId) -> Option<H256>;
+
+	/// Get address code.
+	fn code(&self, address: &Address) -> Option<Bytes>;
+
+	/// Get transaction with given hash.
+	fn transaction(&self, id: TransactionId) -> Option<LocalizedTransaction>;
+
+	/// Get a tree route between `from` and `to`.
+	/// See `BlockChain::tree_route`.
+	fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute>;
+
+	/// Get latest state node
+	fn state_data(&self, hash: &H256) -> Option<Bytes>;
+
+	/// Get raw block receipts data by block header hash.
+	fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
+
+	/// Import a block into the blockchain.
+	fn import_block(&self, bytes: Bytes) -> ImportResult;
+
+	/// Get block queue information.
+	fn queue_info(&self) -> BlockQueueInfo;
+
+	/// Clear block queue and abort all import activity.
+	fn clear_queue(&self);
+
+	/// Get blockchain information.
+	fn chain_info(&self) -> BlockChainInfo;
+
+	/// Get the best block header.
+	fn best_block_header(&self) -> Bytes {
+		// TODO: lock blockchain only once
+		self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap()
+	}
+
+	/// Returns numbers of blocks containing given bloom.
+	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
+
+	/// Returns logs matching given filter.
+	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
+
+	/// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock.
+	fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>;
+
+	/// Submit `seal` as a valid solution for the header of `pow_hash`.
+	/// Will check the seal, but not actually insert the block into the chain.
+	fn submit_seal(&self, pow_hash: H256, seal: Vec<Bytes>) -> Result<(), Error>;
+}
+
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
new file mode 100644
index 000000000..207f1090f
--- /dev/null
+++ b/ethcore/src/client/test_client.rs
@@ -0,0 +1,336 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Test client.
+ +use util::*; +use transaction::{Transaction, LocalizedTransaction, Action}; +use blockchain::TreeRoute; +use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId}; +use header::{Header as BlockHeader, BlockNumber}; +use filter::Filter; +use log_entry::LocalizedLogEntry; +use receipt::Receipt; +use error::{ImportResult, Error}; +use block_queue::BlockQueueInfo; +use block::ClosedBlock; + +/// Test client. +pub struct TestBlockChainClient { + /// Blocks. + pub blocks: RwLock>, + /// Mapping of numbers to hashes. + pub numbers: RwLock>, + /// Genesis block hash. + pub genesis_hash: H256, + /// Last block hash. + pub last_hash: RwLock, + /// Difficulty. + pub difficulty: RwLock, +} + +#[derive(Clone)] +/// Used for generating test client blocks. +pub enum EachBlockWith { + /// Plain block. + Nothing, + /// Block with an uncle. + Uncle, + /// Block with a transaction. + Transaction, + /// Block with an uncle and transaction. + UncleAndTransaction +} + +impl TestBlockChainClient { + /// Creates new test client. + pub fn new() -> TestBlockChainClient { + + let mut client = TestBlockChainClient { + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), + genesis_hash: H256::new(), + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(From::from(0)), + }; + client.add_blocks(1, EachBlockWith::Nothing); // add genesis block + client.genesis_hash = client.last_hash.read().unwrap().clone(); + client + } + + /// Add blocks to test client. + pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { + let len = self.numbers.read().unwrap().len(); + for n in len..(len + count) { + let mut header = BlockHeader::new(); + header.difficulty = From::from(n); + header.parent_hash = self.last_hash.read().unwrap().clone(); + header.number = n as BlockNumber; + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + uncles + }, + _ => RlpStream::new_list(0) + }; + let txs = match with { + EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { + let mut txs = RlpStream::new_list(1); + let keypair = KeyPair::create().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero() + }; + let signed_tx = tx.sign(&keypair.secret()); + txs.append(&signed_tx); + txs.out() + }, + _ => rlp::NULL_RLP.to_vec() + }; + + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&txs, 1); + rlp.append_raw(uncles.as_raw(), 1); + self.import_block(rlp.as_raw().to_vec()).unwrap(); + } + } + + /// TODO: + pub fn corrupt_block(&mut self, n: BlockNumber) { + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); + header.parent_hash = H256::new(); + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&rlp::NULL_RLP, 1); + rlp.append_raw(&rlp::NULL_RLP, 1); + self.blocks.write().unwrap().insert(hash, rlp.out()); + } + + /// TODO: + pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { + let blocks_read = 
self.numbers.read().unwrap(); + let index = blocks_read.len() - delta; + blocks_read[&index].clone() + } + + fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(hash) => Some(hash), + BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), + BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), + BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() + } + } +} + +impl BlockChainClient for TestBlockChainClient { + fn block_total_difficulty(&self, _id: BlockId) -> Option { + Some(U256::zero()) + } + + fn block_hash(&self, _id: BlockId) -> Option { + unimplemented!(); + } + + fn nonce(&self, _address: &Address) -> U256 { + U256::zero() + } + + fn code(&self, _address: &Address) -> Option { + unimplemented!(); + } + + fn transaction(&self, _id: TransactionId) -> Option { + unimplemented!(); + } + + fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { + unimplemented!(); + } + + fn logs(&self, _filter: Filter) -> Vec { + unimplemented!(); + } + + fn sealing_block(&self) -> &Mutex> { + unimplemented!(); + } + + fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { + unimplemented!(); + } + + fn block_header(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) + } + + fn block_body(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { + let mut stream = RlpStream::new_list(2); + stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); + stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); + stream.out() + })) + } + + fn block(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) + } + + fn block_status(&self, id: BlockId) -> BlockStatus { + match id { + BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, + BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, + _ => BlockStatus::Unknown + } + } + + // works only if blocks are one after another 1 -> 2 -> 3 + fn tree_route(&self, from: &H256, to: &H256) -> Option { + Some(TreeRoute { + ancestor: H256::new(), + index: 0, + blocks: { + let numbers_read = self.numbers.read().unwrap(); + let mut adding = false; + + let mut blocks = Vec::new(); + for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { + if hash == to { + if adding { + blocks.push(hash.clone()); + } + adding = false; + break; + } + if hash == from { + adding = true; + } + if adding { + blocks.push(hash.clone()); + } + } + if adding { Vec::new() } else { blocks } + } + }) + } + + // TODO: returns just hashes instead of node state rlp(?) + fn state_data(&self, hash: &H256) -> Option { + // starts with 'f' ? + if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let mut rlp = RlpStream::new(); + rlp.append(&hash.clone()); + return Some(rlp.out()); + } + None + } + + fn block_receipts(&self, hash: &H256) -> Option { + // starts with 'f' ? 
+ if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let receipt = Receipt::new( + H256::zero(), + U256::zero(), + vec![]); + let mut rlp = RlpStream::new(); + rlp.append(&receipt); + return Some(rlp.out()); + } + None + } + + fn import_block(&self, b: Bytes) -> ImportResult { + let header = Rlp::new(&b).val_at::(0); + let h = header.hash(); + let number: usize = header.number as usize; + if number > self.blocks.read().unwrap().len() { + panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); + } + if number > 0 { + match self.blocks.read().unwrap().get(&header.parent_hash) { + Some(parent) => { + let parent = Rlp::new(parent).val_at::(0); + if parent.number != (header.number - 1) { + panic!("Unexpected block parent"); + } + }, + None => { + panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); + } + } + } + let len = self.numbers.read().unwrap().len(); + if number == len { + { + let mut difficulty = self.difficulty.write().unwrap(); + *difficulty.deref_mut() = *difficulty.deref() + header.difficulty; + } + mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); + self.blocks.write().unwrap().insert(h.clone(), b); + self.numbers.write().unwrap().insert(number, h.clone()); + let mut parent_hash = header.parent_hash; + if number > 0 { + let mut n = number - 1; + while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { + *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); + n -= 1; + parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; + } + } + } + else { + self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); + } + Ok(h) + } + + fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { + verified_queue_size: 0, + unverified_queue_size: 0, + verifying_queue_size: 0, + max_queue_size: 0, + max_mem_use: 0, + mem_used: 0, + } + } + + fn clear_queue(&self) { + } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainInfo { + total_difficulty: *self.difficulty.read().unwrap(), + pending_total_difficulty: *self.difficulty.read().unwrap(), + genesis_hash: self.genesis_hash.clone(), + best_block_hash: self.last_hash.read().unwrap().clone(), + best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, + } + } +} diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index b0c0e4a9f..a882f66ae 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -202,7 +202,7 @@ impl Engine for Ethash { } } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self +#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; @@ -298,7 +298,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let b = b.close(); @@ -311,7 +311,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + 
engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let mut uncle = Header::new(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 0d1dcd8d5..8c2ae6b37 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -61,7 +61,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64)); diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index fb8d19357..7491321cb 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -243,7 +243,7 @@ struct CodeReader<'a> { code: &'a Bytes } -#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))] +#[cfg_attr(feature="dev", allow(len_without_is_empty))] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> { } } -#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] +#[cfg_attr(feature="dev", allow(enum_variant_names))] enum InstructionCost { Gas(U256), GasMem(U256, U256), @@ -347,7 +347,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index a1f5763ea..598921580 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or_else(|| vec![]) } - #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] + #[cfg_attr(feature="dev", allow(match_ref_pats))] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..458a94476 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,16 +15,16 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] // Clippy config // TODO [todr] not really sure -#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] +#![cfg_attr(feature="dev", allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] +#![cfg_attr(feature="dev", allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +#![cfg_attr(feature="dev", allow(clone_on_copy))] //! Ethcore library //! 
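For reference, the shape of the lint gating after this change, as a generic sketch rather than the exact ethcore attribute list: clippy is keyed off the `dev` cargo feature alone, so `cargo build --features dev` on nightly enables the plugin, while a plain build compiles the same code with the attributes switched off.

```rust
// Crate root of a hypothetical library using the same gating as the patch.
#![cfg_attr(feature = "dev", feature(plugin))]
#![cfg_attr(feature = "dev", plugin(clippy))]

// Item-level allowances follow the same single condition:
#[cfg_attr(feature = "dev", allow(single_match))]
fn handle(msg: Option<u32>) {
    match msg {
        Some(m) => println!("got {}", m),
        None => {}
    }
}

fn main() {
    handle(Some(1));
}
```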
@@ -86,6 +86,7 @@ extern crate crossbeam; #[cfg(feature = "jit" )] extern crate evmjit; pub mod block; +pub mod block_queue; pub mod client; pub mod error; pub mod ethereum; @@ -119,7 +120,6 @@ mod substate; mod executive; mod externalities; mod verification; -mod block_queue; mod blockchain; #[cfg(test)] diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index bd15ee501..6daf0d7b6 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -117,7 +117,7 @@ impl IoHandler for ClientIoHandler { } } - #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] + #[cfg_attr(feature="dev", allow(single_match))] fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { if let UserMessage(ref message) = *net_message { match *message { diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 774024351..2208350cc 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -99,7 +99,7 @@ pub struct Spec { genesis_state: PodState, } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) +#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 7c1064abf..cb54654e6 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -31,7 +31,7 @@ pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. pub struct State { - db: JournalDB, + db: Box, root: H256, cache: RefCell>>, snapshots: RefCell>>>>, @@ -41,11 +41,11 @@ pub struct State { impl State { /// Creates new state with empty state root #[cfg(test)] - pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State { + pub fn new(mut db: Box, account_start_nonce: U256) -> State { let mut root = H256::new(); { // init trie and reset root too null - let _ = SecTrieDBMut::new(&mut db, &mut root); + let _ = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root); } State { @@ -58,10 +58,10 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State { + pub fn from_existing(db: Box, root: H256, account_start_nonce: U256) -> State { { // trie should panic! if root does not exist - let _ = SecTrieDB::new(&db, &root); + let _ = SecTrieDB::new(db.as_hashdb(), &root); } State { @@ -126,7 +126,7 @@ impl State { } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, JournalDB) { + pub fn drop(self) -> (H256, Box) { (self.root, self.db) } @@ -148,7 +148,7 @@ impl State { /// Determine whether an account exists. pub fn exists(&self, a: &Address) -> bool { - self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(&self.db, &self.root).contains(&a) + self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(self.db.as_hashdb(), &self.root).contains(&a) } /// Get the balance of account `a`. @@ -163,7 +163,7 @@ impl State { /// Mutate storage of account `address` so that it is `value` for `key`. 
pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { - self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(&self.db, address), key)) + self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key)) } /// Mutate storage of account `a` so that it is `value` for `key`. @@ -224,7 +224,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. - #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] + #[cfg_attr(feature="dev", allow(match_ref_pats))] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? @@ -253,7 +253,7 @@ impl State { /// Commits our cached account changes into the trie. pub fn commit(&mut self) { assert!(self.snapshots.borrow().is_empty()); - Self::commit_into(&mut self.db, &mut self.root, self.cache.borrow_mut().deref_mut()); + Self::commit_into(self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut()); } #[cfg(test)] @@ -285,11 +285,11 @@ impl State { fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option { let have_key = self.cache.borrow().contains_key(a); if !have_key { - self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp)) } if require_code { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { - account.cache_code(&AccountDB::new(&self.db, a)); + account.cache_code(&AccountDB::new(self.db.as_hashdb(), a)); } } unsafe { ::std::mem::transmute(self.cache.borrow().get(a).unwrap()) } @@ -305,7 +305,7 @@ impl State { fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> &'a mut Account { let have_key = self.cache.borrow().contains_key(a); if !have_key { - self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp)) } else { self.note_cache(a); } @@ -318,7 +318,7 @@ impl State { unsafe { ::std::mem::transmute(self.cache.borrow_mut().get_mut(a).unwrap().as_mut().map(|account| { if require_code { - account.cache_code(&AccountDB::new(&self.db, a)); + account.cache_code(&AccountDB::new(self.db.as_hashdb(), a)); } account }).unwrap()) } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index bb9a44614..dc3068560 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -250,9 +250,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { } } -pub fn get_temp_journal_db() -> GuardedTempResult { +pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = JournalDB::new(temp.as_str()); + let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge); GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -268,8 +268,8 @@ pub fn get_temp_state() -> GuardedTempResult { } } -pub fn get_temp_journal_db_in(path: &Path) -> JournalDB { - JournalDB::new(path.to_str().unwrap()) +pub fn get_temp_journal_db_in(path: &Path) -> Box { + 
journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge) } pub fn get_temp_state_in(path: &Path) -> State { diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index 733e5ac6b..a51824494 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -80,7 +80,7 @@ impl Transaction { } impl FromJson for SignedTransaction { - #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))] + #[cfg_attr(feature="dev", allow(single_char_pattern))] fn from_json(json: &Json) -> SignedTransaction { let t = Transaction { nonce: xjson!(&json["nonce"]), diff --git a/hook.sh b/hook.sh index 354fddd5d..9780541fe 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,12 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push -chmod +x ./.git/hooks/pre-push +FILE=./.git/hooks/pre-push +echo "#!/bin/sh\n" > $FILE +# Exit on any error +echo "set -e" >> $FILE +# Run release build +echo "cargo build --release --features dev" >> $FILE +# Build tests +echo "cargo test --no-run --features dev \\" >> $FILE +echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" >> $FILE +echo "" >> $FILE +chmod +x $FILE diff --git a/parity/main.rs b/parity/main.rs index efff52e4e..bbcb17bae 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -17,8 +17,8 @@ //! Ethcore client application. #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -37,7 +37,7 @@ extern crate rpassword; #[cfg(feature = "rpc")] extern crate ethcore_rpc as rpc; -use std::net::{SocketAddr}; +use std::net::{SocketAddr, IpAddr}; use std::env; use std::process::exit; use std::path::PathBuf; @@ -53,6 +53,7 @@ use ethsync::{EthSync, SyncConfig, SyncProvider}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; +use util::keys::store::*; fn die_with_message(msg: &str) -> ! { println!("ERROR: {}", msg); @@ -70,28 +71,30 @@ Parity. Ethereum Client. Copyright 2015, 2016 Ethcore (UK) Limited Usage: - parity daemon [options] [ --no-bootstrap | ... ] + parity daemon [options] parity account (new | list) - parity [options] [ --no-bootstrap | ... ] + parity [options] Protocol Options: --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. - --pruning Client should prune the state/storage trie. + --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, + light (experimental), fast (experimental) [default: archive]. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. Networking Options: - --no-bootstrap Don't bother trying to connect to any nodes initially. 
- --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304]. - --public-address URL Specify the IP/port on which peers may connect. - --address URL Equivalent to --listen-address URL --public-address URL. + --port PORT Override the port on which the node should listen [default: 30303]. --peers NUM Try to maintain that many peers [default: 25]. + --nat METHOD Specify method to use for determining public address. Must be one of: any, none, + upnp, extip:(IP) [default: any]. + --bootnodes NODES Specify additional comma-separated bootnodes. + --no-bootstrap Don't bother trying to connect to standard bootnodes. --no-discovery Disable new peer discovery. - --no-upnp Disable trying to figure out the correct public adderss over UPnP. --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. API and Console Options: @@ -100,7 +103,8 @@ API and Console Options: --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible name are web3, eth and net. [default: web3,eth,net]. + list of API name. Possible name are web3, eth and net. [default: web3,eth,net,personal]. + --rpc Equivalent to --jsonrpc (geth-compatible). --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). @@ -110,7 +114,7 @@ API and Console Options: Sealing/Mining Options: --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. + --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. Memory Footprint Options: --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. @@ -119,6 +123,21 @@ Memory Footprint Options: --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with other cache options (geth-compatible). +Geth-Compatibility Options + --datadir PATH Equivalent to --db-path PATH. + --testnet Equivalent to --chain testnet. + --networkid INDEX Override the network identifier from the chain we are on. + --rpc Equivalent to --jsonrpc. + --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. + --rpcport PORT Equivalent to --jsonrpc-port PORT. + --rpcapi APIS Equivalent to --jsonrpc-apis APIS. + --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. + --maxpeers COUNT Equivalent to --peers COUNT. + --nodekey KEY Equivalent to --node-key KEY. + --nodiscover Equivalent to --no-discovery. + --etherbase ADDRESS Equivalent to --author ADDRESS. + --extradata STRING Equivalent to --extra-data STRING. + Miscellaneous Options: -l --logging LOGGING Specify the logging level. -v --version Show information about version. 
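The `--nat extip:` handling later in this patch (see `net_addresses`) reduces to prefix parsing plus `IpAddr::from_str`. A standalone sketch of that logic using only std types; returning `Result` instead of calling the crate's `die!` macro is a substitution for the example's sake.

```rust
use std::net::{IpAddr, SocketAddr};
use std::str::FromStr;

/// Resolve the advertised address from a `--nat` value, assuming the same
/// accepted forms as the patch: any | none | upnp | extip:IP.
fn public_address(nat: &str, port: u16) -> Result<Option<SocketAddr>, String> {
    if let Some(host) = nat.strip_prefix("extip:") {
        let ip = IpAddr::from_str(host)
            .map_err(|_| format!("Invalid host given with `--nat extip:{}`", host))?;
        Ok(Some(SocketAddr::new(ip, port)))
    } else {
        // any/none/upnp: the caller falls back to the listen address.
        Ok(None)
    }
}

fn main() {
    assert_eq!(
        public_address("extip:1.2.3.4", 30303).unwrap(),
        Some("1.2.3.4:30303".parse().unwrap())
    );
    assert!(public_address("extip:nope", 30303).is_err());
}
```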
@@ -132,22 +151,18 @@ struct Args { cmd_new: bool, cmd_list: bool, arg_pid_file: String, - arg_enode: Vec, flag_chain: String, - flag_testnet: bool, - flag_datadir: String, - flag_networkid: Option, + flag_db_path: String, flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_pruning: bool, + flag_bootnodes: Option, + flag_pruning: String, flag_no_bootstrap: bool, - flag_listen_address: String, - flag_public_address: Option, - flag_address: Option, + flag_port: u16, flag_peers: usize, flag_no_discovery: bool, - flag_no_upnp: bool, + flag_nat: String, flag_node_key: Option, flag_cache_pref_size: usize, flag_cache_max_size: usize, @@ -157,15 +172,24 @@ struct Args { flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, flag_jsonrpc_apis: String, + flag_logging: Option, + flag_version: bool, + // geth-compatibility... + flag_nodekey: Option, + flag_nodiscover: bool, + flag_maxpeers: Option, + flag_author: String, + flag_extra_data: Option, + flag_datadir: Option, + flag_extradata: Option, + flag_etherbase: Option, flag_rpc: bool, flag_rpcaddr: Option, flag_rpcport: Option, flag_rpccorsdomain: Option, flag_rpcapi: Option, - flag_logging: Option, - flag_version: bool, - flag_author: String, - flag_extra_data: Option, + flag_testnet: bool, + flag_networkid: Option, } fn setup_log(init: &Option) { @@ -195,7 +219,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { +fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { use rpc::v1::*; let server = rpc::RpcServer::new(); @@ -204,9 +228,10 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom "web3" => server.add_delegate(Web3Client::new().to_delegate()), "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), "eth" => { - server.add_delegate(EthClient::new(&client, &sync).to_delegate()); + server.add_delegate(EthClient::new(&client, &sync, &secret_store).to_delegate()); server.add_delegate(EthFilterClient::new(&client).to_delegate()); } + "personal" => server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), _ => { die!("{}: Invalid API name to be enabled.", api); } @@ -245,15 +270,17 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + let d = self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path); + d.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { - Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) + let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author); + Address::from_str(d).unwrap_or_else(|_| die!("{}: Invalid address for --author. 
Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) } fn extra_data(&self) -> Bytes { - match self.args.flag_extra_data { + match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), None => version_data(), Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); } @@ -285,45 +312,36 @@ impl Configuration { } fn init_nodes(&self, spec: &Spec) -> Vec { - if self.args.flag_no_bootstrap { Vec::new() } else { - match self.args.arg_enode.len() { - 0 => spec.nodes().clone(), - _ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s))).collect(), - } + let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; + if let Some(ref x) = self.args.flag_bootnodes { + r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s)))); } + r } - #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] + #[cfg_attr(feature="dev", allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { - let mut listen_address = None; - let mut public_address = None; - - if let Some(ref a) = self.args.flag_address { - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --address", a))); - listen_address = public_address; - } - if listen_address.is_none() { - listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address))); - } - if let Some(ref a) = self.args.flag_public_address { - if public_address.is_some() { - die!("Conflicting flags provided: --address and --public-address"); - } - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --public-address", a))); - } + let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port)); + let public_address = if self.args.flag_nat.starts_with("extip:") { + let host = &self.args.flag_nat[6..]; + let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); + Some(SocketAddr::new(host, self.args.flag_port)) + } else { + listen_address.clone() + }; (listen_address, public_address) } fn net_settings(&self, spec: &Spec) -> NetworkConfiguration { let mut ret = NetworkConfiguration::new(); - ret.nat_enabled = !self.args.flag_no_upnp; + ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; ret.boot_nodes = self.init_nodes(spec); let (listen, public) = self.net_addresses(); ret.listen_address = listen; ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); - ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_peers as u32; + ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; + ret.ideal_peers = self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); @@ -402,7 +420,14 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; 
} } - client_config.prefer_journal = self.args.flag_pruning; + client_config.pruning = match self.args.flag_pruning.as_str() { + "" => journaldb::Algorithm::Archive, + "archive" => journaldb::Algorithm::Archive, + "pruned" => journaldb::Algorithm::EarlyMerge, + "fast" => journaldb::Algorithm::OverlayRecent, +// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged. + _ => { die!("Invalid pruning method given."); } + }; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); @@ -414,17 +439,20 @@ impl Configuration { // Sync let sync = EthSync::register(service.network(), sync_config, client); + // Secret Store + let account_service = Arc::new(AccountService::new()); + // Setup rpc if self.args.flag_jsonrpc || self.args.flag_rpc { let url = format!("{}:{}", self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) ); - SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); + SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); + let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect()); if let Some(handler) = server_handler { panic_handler.forward_from(handler.deref()); } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2ce430e51..f324aba10 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -26,9 +26,8 @@ serde_macros = { version = "0.7.0", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } syntex = "0.29.0" -rustc_version = "0.1" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/build.rs b/rpc/build.rs index 3806f6fe5..659bc35eb 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -14,10 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
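The `--pruning` mapping just above is a plain string-to-enum match with a hard error on unknown values. A self-contained sketch with a local stand-in for `journaldb::Algorithm` (variant names follow the patch); the error `Result` again stands in for `die!`.

```rust
// Local stand-in for journaldb::Algorithm.
#[derive(Debug, PartialEq)]
enum Algorithm {
    Archive,
    EarlyMerge,
    OverlayRecent,
}

/// Map a `--pruning` value the way the patch does: the empty string and
/// "archive" both mean archive, and unknown methods are fatal.
fn pruning(flag: &str) -> Result<Algorithm, String> {
    match flag {
        "" | "archive" => Ok(Algorithm::Archive),
        "pruned" => Ok(Algorithm::EarlyMerge),
        "fast" => Ok(Algorithm::OverlayRecent),
        other => Err(format!("Invalid pruning method given: {}", other)),
    }
}

fn main() {
    assert_eq!(pruning("").unwrap(), Algorithm::Archive);
    assert_eq!(pruning("fast").unwrap(), Algorithm::OverlayRecent);
    assert!(pruning("slow").is_err()); // ref-counted algorithm is still a TODO in the patch
}
```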
-extern crate rustc_version; - -use rustc_version::{version_meta, Channel}; - #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; @@ -46,7 +42,4 @@ mod inner { fn main() { inner::main(); - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index a067b48fb..38e363624 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -27,23 +27,27 @@ use ethcore::block::{IsBlock}; use ethcore::views::*; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; +use ethcore::transaction::Transaction as EthTransaction; use v1::traits::{Eth, EthFilter}; -use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log}; +use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; use v1::helpers::{PollFilter, PollManager}; +use util::keys::store::AccountProvider; /// Eth rpc implementation. -pub struct EthClient where C: BlockChainClient, S: SyncProvider { +pub struct EthClient where C: BlockChainClient, S: SyncProvider, A: AccountProvider { client: Weak, sync: Weak, + accounts: Weak, hashrates: RwLock>, } -impl EthClient where C: BlockChainClient, S: SyncProvider { +impl EthClient where C: BlockChainClient, S: SyncProvider, A: AccountProvider { /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, accounts: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), + accounts: Arc::downgrade(accounts), hashrates: RwLock::new(HashMap::new()), } } @@ -94,7 +98,7 @@ impl EthClient where C: BlockChainClient, S: SyncProvider { } } -impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncProvider + 'static { +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncProvider + 'static, A: AccountProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), @@ -158,7 +162,7 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncP } } - fn block_transaction_count(&self, params: Params) -> Result { + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), @@ -166,6 +170,17 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncP }) } + fn block_transaction_count_by_number(&self, params: Params) -> Result { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending), + _ => match take_weak!(self.client).block(block_number.into()) { + Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), + None => Ok(Value::Null) + } + }) + } + fn block_uncles_count(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { @@ -252,6 +267,24 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncP to_value(&true) }) } + + fn send_transaction(&self, params: Params) -> Result { + from_params::<(TransactionRequest, )>(params) + 
.and_then(|(transaction_request, )| { + let accounts = take_weak!(self.accounts); + match accounts.account_secret(&transaction_request.from) { + Ok(secret) => { + let sync = take_weak!(self.sync); + let transaction: EthTransaction = transaction_request.into(); + let signed_transaction = transaction.sign(&secret); + let hash = signed_transaction.hash(); + sync.insert_transaction(signed_transaction); + to_value(&hash) + }, + Err(_) => { to_value(&U256::zero()) } + } + }) + } } /// Eth filter rpc implementation. diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 5e67bf252..e52fc0bd4 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -36,10 +36,15 @@ impl NetClient where S: SyncProvider { impl Net for NetClient where S: SyncProvider + 'static { fn version(&self, _: Params) -> Result { - Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) + Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())) } fn peer_count(&self, _params: Params) -> Result { - Ok(Value::U64(take_weak!(self.sync).status().num_peers as u64)) + Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) + } + + fn is_listening(&self, _: Params) -> Result { + // right now (11 march 2016), we are always listening for incoming connections + Ok(Value::Bool(true)) } } diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 48e1b1c6a..ce200244c 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -20,30 +20,28 @@ use jsonrpc_core::*; use v1::traits::Personal; use util::keys::store::*; use util::Address; -use std::sync::RwLock; /// Account management (personal) rpc implementation. -pub struct PersonalClient { - secret_store: Weak>, +pub struct PersonalClient where A: AccountProvider { + accounts: Weak, } -impl PersonalClient { +impl PersonalClient where A: AccountProvider { /// Creates new PersonalClient - pub fn new(store: &Arc>) -> Self { + pub fn new(store: &Arc) -> Self { PersonalClient { - secret_store: Arc::downgrade(store), + accounts: Arc::downgrade(store), } } } -impl Personal for PersonalClient { +impl Personal for PersonalClient where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result { - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.accounts() { Ok(account_list) => { Ok(Value::Array(account_list.iter() - .map(|&(account, _)| Value::String(format!("{:?}", account))) + .map(|&account| Value::String(format!("{:?}", account))) .collect::>()) ) } @@ -54,8 +52,7 @@ impl Personal for PersonalClient { fn new_account(&self, params: Params) -> Result { from_params::<(String, )>(params).and_then( |(pass, )| { - let store_wk = take_weak!(self.secret_store); - let mut store = store_wk.write().unwrap(); + let store = take_weak!(self.accounts); match store.new_account(&pass) { Ok(address) => Ok(Value::String(format!("{:?}", address))), Err(_) => Err(Error::internal_error()) @@ -67,8 +64,7 @@ impl Personal for PersonalClient { fn unlock_account(&self, params: Params) -> Result { from_params::<(Address, String, u64)>(params).and_then( |(account, account_pass, _)|{ - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.unlock_account(&account, &account_pass) { Ok(_) => Ok(Value::Bool(true)), Err(_) => Ok(Value::Bool(false)), diff --git a/rpc/src/v1/mod.rs 
b/rpc/src/v1/mod.rs index 104a8b3f0..b82a20e89 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -21,9 +21,10 @@ pub mod traits; mod impls; mod types; +mod helpers; + #[cfg(test)] mod tests; -mod helpers; pub use self::traits::{Web3, Eth, EthFilter, Personal, Net}; pub use self::impls::*; diff --git a/ethcore/build.rs b/rpc/src/v1/tests/helpers/mod.rs similarity index 79% rename from ethcore/build.rs rename to rpc/src/v1/tests/helpers/mod.rs index 41b9a1b3e..501bfb2d3 100644 --- a/ethcore/build.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -14,12 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate rustc_version; +mod sync_provider; -use rustc_version::{version_meta, Channel}; - -fn main() { - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } -} +pub use self::sync_provider::{Config, TestSyncProvider}; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs new file mode 100644 index 000000000..a3711d949 --- /dev/null +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -0,0 +1,58 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethcore::transaction::SignedTransaction; +use ethsync::{SyncProvider, SyncStatus, SyncState}; + +pub struct Config { + pub protocol_version: u8, + pub num_peers: usize, +} + +pub struct TestSyncProvider { + status: SyncStatus, +} + +impl TestSyncProvider { + pub fn new(config: Config) -> Self { + TestSyncProvider { + status: SyncStatus { + state: SyncState::NotSynced, + protocol_version: config.protocol_version, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: config.num_peers, + num_active_peers: 0, + mem_used: 0, + transaction_queue_pending: 0, + }, + } + } +} + +impl SyncProvider for TestSyncProvider { + fn status(&self) -> SyncStatus { + self.status.clone() + } + + fn insert_transaction(&self, _transaction: SignedTransaction) { + unimplemented!() + } +} + diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index bdf4567b6..3a38ced15 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -1 +1,21 @@ -//TODO: load custom blockchain state and test +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//!TODO: load custom blockchain state and test + +mod net; +mod web3; +mod helpers; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs new file mode 100644 index 000000000..e24045ca6 --- /dev/null +++ b/rpc/src/v1/tests/net.rs @@ -0,0 +1,66 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::{Net, NetClient}; +use v1::tests::helpers::{Config, TestSyncProvider}; + +fn sync_provider() -> Arc { + Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })) +} + +#[test] +fn rpc_net_version() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_net_peer_count() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_net_listening() { + let sync = sync_provider(); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/web3.rs b/rpc/src/v1/tests/web3.rs new file mode 100644 index 000000000..c717d361a --- /dev/null +++ b/rpc/src/v1/tests/web3.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
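The net tests above all follow one pattern: build a `TestSyncProvider`, register the delegate on an `IoHandler`, and assert on the raw JSON string. A hypothetical extra case in the same style, assuming only the jsonrpc_core calls the patch itself uses (`IoHandler::new`, `add_delegate`, `handle_request`); with zero peers the patched `peer_count` formats `0x0`.

```rust
use std::sync::Arc;
use jsonrpc_core::IoHandler;
use v1::{Net, NetClient};
use v1::tests::helpers::{Config, TestSyncProvider};

#[test]
fn rpc_net_peer_count_zero() {
    // Hypothetical variant of the peer-count test with no peers connected.
    let sync = Arc::new(TestSyncProvider::new(Config {
        protocol_version: 65,
        num_peers: 0,
    }));
    let io = IoHandler::new();
    io.add_delegate(NetClient::new(&sync).to_delegate());

    let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#;
    let response = r#"{"jsonrpc":"2.0","result":"0x0","id":1}"#;
    assert_eq!(io.handle_request(request), Some(response.to_owned()));
}
```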
+ +use jsonrpc_core::IoHandler; +use util::version; +use v1::{Web3, Web3Client}; + +#[test] +fn rpc_web3_version() { + let web3 = Web3Client::new().to_delegate(); + let io = IoHandler::new(); + io.add_delegate(web3); + + let v = version().to_owned().replace("Parity/", "Parity//"); + + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + + assert_eq!(io.handle_request(request), Some(response)); +} diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index d2aeb0f9e..8c24dd38c 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -55,12 +55,15 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns block with given number. fn block_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } - + /// Returns the number of transactions sent from given address at given time (block number). fn transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } - /// Returns the number of transactions in a block. - fn block_transaction_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + /// Returns the number of transactions in a block given block hash. + fn block_transaction_count_by_hash(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } + + /// Returns the number of transactions in a block given block number. + fn block_transaction_count_by_number(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } /// Returns the number of uncles in a given block. fn block_uncles_count(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() } @@ -130,8 +133,8 @@ delegate.add_method("eth_balance", Eth::balance); delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getTransactionCount", Eth::transaction_count); - delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); delegate.add_method("eth_code", Eth::code_at); diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index f09f24e4d..0b14c30e8 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -15,10 +15,12 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. use rustc_serialize::hex::ToHex; -use serde::{Serialize, Serializer}; +use serde::{Serialize, Serializer, Deserialize, Deserializer, Error}; +use serde::de::Visitor; +use util::common::FromHex; /// Wrapper structure around vector of bytes.
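The 0x-prefixed hex convention this `Bytes` wrapper (de)serializes is simple enough to sketch on its own. This is not the serde visitor from the patch, just the underlying encoding rule in plain std code; note the visitor below additionally maps bad hex digits to an empty vec rather than an error.

```rust
/// Encode bytes in the JSON-RPC 0x-hex string form.
fn to_hex_0x(bytes: &[u8]) -> String {
    let mut s = String::from("0x");
    for b in bytes {
        s.push_str(&format!("{:02x}", b));
    }
    s
}

/// Decode the same form; strings without the 0x prefix are rejected,
/// matching the visitor's "invalid hex" error path.
fn from_hex_0x(s: &str) -> Result<Vec<u8>, String> {
    let hex = s.strip_prefix("0x").ok_or("invalid hex")?;
    if hex.len() % 2 != 0 {
        return Err("odd-length hex".into());
    }
    (0..hex.len())
        .step_by(2)
        .map(|i| u8::from_str_radix(&hex[i..i + 2], 16).map_err(|e| e.to_string()))
        .collect()
}

fn main() {
    assert_eq!(to_hex_0x(&[0x12, 0x34, 0x56]), "0x123456");
    assert_eq!(from_hex_0x("0x123456").unwrap(), vec![0x12, 0x34, 0x56]);
    assert!(from_hex_0x("123456").is_err());
}
```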
-#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct Bytes(Vec); impl Bytes { @@ -26,6 +28,7 @@ impl Bytes { pub fn new(bytes: Vec) -> Bytes { Bytes(bytes) } + pub fn to_vec(self) -> Vec { let Bytes(x) = self; x } } impl Default for Bytes { @@ -36,7 +39,7 @@ impl Default for Bytes { } impl Serialize for Bytes { - fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { let mut serialized = "0x".to_owned(); serialized.push_str(self.0.to_hex().as_ref()); @@ -44,6 +47,32 @@ impl Serialize for Bytes { } } +impl Deserialize for Bytes { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer { + deserializer.deserialize(BytesVisitor) + } +} + +struct BytesVisitor; + +impl Visitor for BytesVisitor { + type Value = Bytes; + + fn visit_str(&mut self, value: &str) -> Result where E: Error { + if value.len() >= 2 && &value[0..2] == "0x" { + Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![]))) + } else { + Err(Error::custom("invalid hex")) + } + } + + fn visit_string(&mut self, value: String) -> Result where E: Error { + self.visit_str(value.as_ref()) + } +} + + #[cfg(test)] mod tests { use super::*; diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 34c1f1cff..ebc3bc0ff 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -23,6 +23,7 @@ mod log; mod optionals; mod sync; mod transaction; +mod transaction_request; pub use self::block::{Block, BlockTransactions}; pub use self::block_number::BlockNumber; @@ -33,3 +34,5 @@ pub use self::log::Log; pub use self::optionals::OptionalValue; pub use self::sync::{SyncStatus, SyncInfo}; pub use self::transaction::Transaction; +pub use self::transaction_request::TransactionRequest; + diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 232cf0bf3..0518a58ea 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -17,6 +17,7 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; +use serde::Error; #[derive(Debug, Default, Serialize)] pub struct Transaction { diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs new file mode 100644 index 000000000..d40402ab5 --- /dev/null +++ b/rpc/src/v1/types/transaction_request.rs @@ -0,0 +1,139 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::hash::Address; +use util::numbers::{Uint, U256}; +use ethcore::transaction::{Action, Transaction}; +use v1::types::Bytes; + +#[derive(Debug, Default, PartialEq, Deserialize)] +pub struct TransactionRequest { + pub from: Address, + pub to: Option
, + #[serde(rename="gasPrice")] + pub gas_price: Option, + pub gas: Option, + pub value: Option, + pub data: Option, + pub nonce: Option, +} + +impl Into for TransactionRequest { + fn into(self) -> Transaction { + Transaction { + nonce: self.nonce.unwrap_or_else(U256::zero), + action: self.to.map_or(Action::Create, Action::Call), + gas: self.gas.unwrap_or_else(U256::zero), + gas_price: self.gas_price.unwrap_or_else(U256::zero), + value: self.value.unwrap_or_else(U256::zero), + data: self.data.map_or_else(Vec::new, |d| d.to_vec()), + } + } +} + +#[cfg(test)] +mod tests { + use serde_json; + use util::numbers::{Uint, U256}; + use util::hash::Address; + use ethcore::transaction::{Transaction, Action}; + use v1::types::Bytes; + use super::*; + + #[test] + fn transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: Some(Address::from(10)), + gas_price: Some(U256::from(20)), + gas: Some(U256::from(10_000)), + value: Some(U256::from(1)), + data: Some(Bytes::new(vec![10, 20])), + nonce: Some(U256::from(12)), + }; + + assert_eq!(Transaction { + nonce: U256::from(12), + action: Action::Call(Address::from(10)), + gas: U256::from(10_000), + gas_price: U256::from(20), + value: U256::from(1), + data: vec![10, 20], + }, tr.into()); + } + + #[test] + fn empty_transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + }; + + assert_eq!(Transaction { + nonce: U256::zero(), + action: Action::Create, + gas: U256::zero(), + gas_price: U256::zero(), + value: U256::zero(), + data: vec![], + }, tr.into()); + } + + #[test] + fn transaction_request_deserialize() { + let s = r#"{ + "from":"0x0000000000000000000000000000000000000001", + "to":"0x0000000000000000000000000000000000000002", + "gasPrice":"0x1", + "gas":"0x2", + "value":"0x3", + "data":"0x123456", + "nonce":"0x4" + }"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: Some(Address::from(2)), + gas_price: Some(U256::from(1)), + gas: Some(U256::from(2)), + value: Some(U256::from(3)), + data: Some(Bytes::new(vec![0x12, 0x34, 0x56])), + nonce: Some(U256::from(4)), + }); + } + + #[test] + fn transaction_request_deserialize_empty() { + let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + }); + } +} diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 46baa8a83..0097cd47e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,13 +4,9 @@ name = "ethsync" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore self.starting_block => x - self.starting_block, _ => 0 }, num_peers: self.peers.len(), num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), + transaction_queue_pending: self.transaction_queue.lock().unwrap().status().pending, mem_used: // TODO: https://github.com/servo/heapsize/pull/50 // self.downloading_hashes.heap_size_of_children() @@ -275,7 +279,7 @@ impl ChainSync { } - #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` 
- #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Reset sync. Clear all downloaded data but keep the queue fn reset(&mut self) { self.downloading_headers.clear(); @@ -343,7 +347,7 @@ impl ChainSync { Ok(()) } - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -470,7 +474,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); @@ -1303,11 +1307,11 @@ impl ChainSync { } /// Add transaction to the transaction queue - pub fn insert_transaction<T>(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) + pub fn insert_transaction<T>(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce); + queue.add(transaction, fetch_nonce) } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..0eaf5d40e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>. #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +#![cfg_attr(feature="dev", allow(clone_on_copy))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: @@ -146,7 +146,8 @@ impl SyncProvider for EthSync { let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); let sync = self.sync.write().unwrap(); - sync.insert_transaction(transaction, &nonce_fn); + sync.insert_transaction(transaction, &nonce_fn).unwrap_or_else( + |e| warn!(target: "sync", "Error inserting transaction to queue: {:?}", e)); } } diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index 826a67121..664d7c7a3 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -42,7 +42,7 @@ pub trait RangeCollection<K, V> { fn remove_head(&mut self, start: &K); /// Remove all elements >= `start` in the range that contains `start` fn remove_tail(&mut self, start: &K); - /// Remove all elements >= `start` + /// Remove all elements >= `start` fn remove_from(&mut self, start: &K); /// Remove all elements >= `tail` fn insert_item(&mut self, key: K, value: V); @@ -231,7 +231,7 @@ impl<K, V> RangeCollection<K, V> for Vec<(K, Vec<V>)> where K: Ord + PartialEq + } #[test] -#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] +#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn test_range() { use std::cmp::{Ordering}; diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index 855aa79a6..eebbdb164 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see <http://www.gnu.org/licenses/>.
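`insert_transaction` now surfaces the queue's `Result` instead of swallowing it, and `EthSync` logs failures as shown above. Any other caller would follow the same shape; a hedged sketch where `chain`, `sync` and `signed_tx` are stand-ins for values the caller already holds:

// Nonce source plus explicit error handling, mirroring EthSync::insert_transaction.
let nonce_fn = |a: &Address| chain.state().nonce(a) + U256::one();
if let Err(e) = sync.insert_transaction(signed_tx, &nonce_fn) {
    warn!(target: "sync", "Error inserting transaction to queue: {:?}", e);
}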
use util::*; -use ethcore::client::{BlockChainClient, BlockId}; +use ethcore::client::{BlockChainClient, BlockId, EachBlockWith}; use io::SyncIo; use chain::{SyncState}; use super::helpers::*; diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 253587f11..ca4ae5158 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -15,309 +15,10 @@ // along with Parity. If not, see . use util::*; -use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo}; -use ethcore::header::{Header as BlockHeader, BlockNumber}; -use ethcore::error::*; +use ethcore::client::{TestBlockChainClient, BlockChainClient}; use io::SyncIo; use chain::ChainSync; use ::SyncConfig; -use ethcore::receipt::Receipt; -use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; -use ethcore::filter::Filter; -use ethcore::log_entry::LocalizedLogEntry; -use ethcore::block::ClosedBlock; - -pub struct TestBlockChainClient { - pub blocks: RwLock>, - pub numbers: RwLock>, - pub genesis_hash: H256, - pub last_hash: RwLock, - pub difficulty: RwLock, -} - -#[derive(Clone)] -pub enum EachBlockWith { - Nothing, - Uncle, - Transaction, - UncleAndTransaction -} - -impl TestBlockChainClient { - pub fn new() -> TestBlockChainClient { - - let mut client = TestBlockChainClient { - blocks: RwLock::new(HashMap::new()), - numbers: RwLock::new(HashMap::new()), - genesis_hash: H256::new(), - last_hash: RwLock::new(H256::new()), - difficulty: RwLock::new(From::from(0)), - }; - client.add_blocks(1, EachBlockWith::Nothing); // add genesis block - client.genesis_hash = client.last_hash.read().unwrap().clone(); - client - } - - pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { - let len = self.numbers.read().unwrap().len(); - for n in len..(len + count) { - let mut header = BlockHeader::new(); - header.difficulty = From::from(n); - header.parent_hash = self.last_hash.read().unwrap().clone(); - header.number = n as BlockNumber; - let uncles = match with { - EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { - let mut uncles = RlpStream::new_list(1); - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - uncles - }, - _ => RlpStream::new_list(0) - }; - let txs = match with { - EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { - let mut txs = RlpStream::new_list(1); - let keypair = KeyPair::create().unwrap(); - let tx = Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::one(), - nonce: U256::zero() - }; - let signed_tx = tx.sign(&keypair.secret()); - txs.append(&signed_tx); - txs.out() - }, - _ => rlp::NULL_RLP.to_vec() - }; - - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&txs, 1); - rlp.append_raw(uncles.as_raw(), 1); - self.import_block(rlp.as_raw().to_vec()).unwrap(); - } - } - - pub fn corrupt_block(&mut self, n: BlockNumber) { - let hash = self.block_hash(BlockId::Number(n)).unwrap(); - let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); - header.parent_hash = H256::new(); - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); - rlp.append_raw(&rlp::NULL_RLP, 1); - 
self.blocks.write().unwrap().insert(hash, rlp.out()); - } - - pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { - let blocks_read = self.numbers.read().unwrap(); - let index = blocks_read.len() - delta; - blocks_read[&index].clone() - } - - fn block_hash(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(hash) => Some(hash), - BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), - BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), - BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() - } - } -} - -impl BlockChainClient for TestBlockChainClient { - fn block_total_difficulty(&self, _id: BlockId) -> Option { - Some(U256::zero()) - } - - fn block_hash(&self, _id: BlockId) -> Option { - unimplemented!(); - } - - fn nonce(&self, _address: &Address) -> U256 { - U256::zero() - } - - fn code(&self, _address: &Address) -> Option { - unimplemented!(); - } - - fn transaction(&self, _id: TransactionId) -> Option { - unimplemented!(); - } - - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { - unimplemented!(); - } - - fn logs(&self, _filter: Filter) -> Vec { - unimplemented!(); - } - - fn sealing_block(&self) -> &Mutex> { - unimplemented!(); - } - - fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { - unimplemented!(); - } - - fn block_header(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) - } - - fn block_body(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { - let mut stream = RlpStream::new_list(2); - stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); - stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); - stream.out() - })) - } - - fn block(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) - } - - fn block_status(&self, id: BlockId) -> BlockStatus { - match id { - BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, - BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, - _ => BlockStatus::Unknown - } - } - - // works only if blocks are one after another 1 -> 2 -> 3 - fn tree_route(&self, from: &H256, to: &H256) -> Option { - Some(TreeRoute { - ancestor: H256::new(), - index: 0, - blocks: { - let numbers_read = self.numbers.read().unwrap(); - let mut adding = false; - - let mut blocks = Vec::new(); - for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { - if hash == to { - if adding { - blocks.push(hash.clone()); - } - adding = false; - break; - } - if hash == from { - adding = true; - } - if adding { - blocks.push(hash.clone()); - } - } - if adding { Vec::new() } else { blocks } - } - }) - } - - // TODO: returns just hashes instead of node state rlp(?) - fn state_data(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let mut rlp = RlpStream::new(); - rlp.append(&hash.clone()); - return Some(rlp.out()); - } - None - } - - fn block_receipts(&self, hash: &H256) -> Option { - // starts with 'f' ? 
- if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let receipt = Receipt::new( - H256::zero(), - U256::zero(), - vec![]); - let mut rlp = RlpStream::new(); - rlp.append(&receipt); - return Some(rlp.out()); - } - None - } - - fn import_block(&self, b: Bytes) -> ImportResult { - let header = Rlp::new(&b).val_at::(0); - let h = header.hash(); - let number: usize = header.number as usize; - if number > self.blocks.read().unwrap().len() { - panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); - } - if number > 0 { - match self.blocks.read().unwrap().get(&header.parent_hash) { - Some(parent) => { - let parent = Rlp::new(parent).val_at::(0); - if parent.number != (header.number - 1) { - panic!("Unexpected block parent"); - } - }, - None => { - panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); - } - } - } - let len = self.numbers.read().unwrap().len(); - if number == len { - { - let mut difficulty = self.difficulty.write().unwrap(); - *difficulty.deref_mut() = *difficulty.deref() + header.difficulty; - } - mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); - self.blocks.write().unwrap().insert(h.clone(), b); - self.numbers.write().unwrap().insert(number, h.clone()); - let mut parent_hash = header.parent_hash; - if number > 0 { - let mut n = number - 1; - while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { - *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); - n -= 1; - parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; - } - } - } - else { - self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); - } - Ok(h) - } - - fn queue_info(&self) -> BlockQueueInfo { - BlockQueueInfo { - verified_queue_size: 0, - unverified_queue_size: 0, - verifying_queue_size: 0, - max_queue_size: 0, - max_mem_use: 0, - mem_used: 0, - } - } - - fn clear_queue(&self) { - } - - fn chain_info(&self) -> BlockChainInfo { - BlockChainInfo { - total_difficulty: *self.difficulty.read().unwrap(), - pending_total_difficulty: *self.difficulty.read().unwrap(), - genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.read().unwrap().clone(), - best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, - } - } -} pub struct TestIo<'p> { pub chain: &'p mut TestBlockChainClient, diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 243939a4c..618eb6a0b 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -684,8 +684,8 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); - txq.add(tx.clone(), &prev_nonce); - txq.add(tx2.clone(), &prev_nonce); + txq.add(tx.clone(), &prev_nonce).unwrap(); + txq.add(tx2.clone(), &prev_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when diff --git a/util/Cargo.toml b/util/Cargo.toml index 0ce27ec2b..74e4d7226 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -40,8 +40,7 @@ chrono = "0.2" [features] default = [] -dev = [] +dev = ["clippy"] [build-dependencies] vergen = "*" -rustc_version = "0.1" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 959df0944..c18ed839c 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -36,10 +36,12 @@ //! The functions here are designed to be fast. //! 
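With the test client now promoted into `ethcore::client` (note the new `TestBlockChainClient` and `EachBlockWith` imports above), sync tests construct it through the public API rather than the local copy deleted here. A minimal sketch of a test body under that assumption:

use ethcore::client::{BlockChainClient, TestBlockChainClient, EachBlockWith};

// A fresh client contains only the genesis block.
let mut client = TestBlockChainClient::new();
// Add ten blocks, each carrying an uncle, as the removed helper used to allow.
client.add_blocks(10, EachBlockWith::Uncle);
assert_eq!(client.chain_info().best_block_number, 10);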
+ +#[cfg(all(asm_available, target_arch="x86_64"))] +use std::mem; use std::fmt; use std::cmp; -use std::mem; use std::str::{FromStr}; use std::convert::From; use std::hash::{Hash, Hasher}; @@ -788,14 +790,11 @@ macro_rules! construct_uint { fn visit_str(&mut self, value: &str) -> Result where E: serde::Error { // 0x + len - if value.len() != 2 + $n_words / 8 { + if value.len() > 2 + $n_words * 16 { return Err(serde::Error::custom("Invalid length.")); } - match $name::from_str(&value[2..]) { - Ok(val) => Ok(val), - Err(_) => { return Err(serde::Error::custom("Invalid length.")); } - } + $name::from_str(&value[2..]).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { @@ -1103,7 +1102,7 @@ macro_rules! construct_uint { } } - #[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. + #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok. impl Hash for $name { fn hash(&self, state: &mut H) where H: Hasher { unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } @@ -1485,7 +1484,7 @@ mod tests { } #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] + #[cfg_attr(feature="dev", allow(eq_op))] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -2032,7 +2031,7 @@ mod tests { #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index 0b9b233e0..b0b64a380 100644 --- a/util/build.rs +++ b/util/build.rs @@ -14,15 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate rustc_version; extern crate vergen; use vergen::*; -use rustc_version::{version_meta, Channel}; fn main() { vergen(OutputFns::all()).unwrap(); - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } } diff --git a/util/src/hash.rs b/util/src/hash.rs index 4eb96b53e..fce0720d1 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -257,7 +257,7 @@ macro_rules! impl_hash { return Err(serde::Error::custom("Invalid length.")); } - value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid valid hex.")) + value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { @@ -305,7 +305,7 @@ macro_rules! 
impl_hash { } impl Copy for $from {} - #[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] + #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))] impl Clone for $from { fn clone(&self) -> $from { unsafe { @@ -637,7 +637,7 @@ mod tests { use std::str::FromStr; #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] + #[cfg_attr(feature="dev", allow(eq_op))] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 4d8cbaba1..e622c4b99 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -20,7 +20,7 @@ use bytes::*; use std::collections::HashMap; /// Trait modelling datastore keyed by a 32-byte Keccak hash. -pub trait HashDB { +pub trait HashDB : AsHashDB { /// Get the keys in the database together with number of underlying references. fn keys(&self) -> HashMap; @@ -111,3 +111,16 @@ pub trait HashDB { /// ``` fn remove(&mut self, key: &H256) { self.kill(key) } } + +/// Upcast trait. +pub trait AsHashDB { + /// Perform upcast to HashDB for anything that derives from HashDB. + fn as_hashdb(&self) -> &HashDB; + /// Perform mutable upcast to HashDB for anything that derives from HashDB. + fn as_hashdb_mut(&mut self) -> &mut HashDB; +} + +impl AsHashDB for T { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs new file mode 100644 index 000000000..a8b9c1f74 --- /dev/null +++ b/util/src/journaldb/archivedb.rs @@ -0,0 +1,388 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. 
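The blanket `AsHashDB` impl introduced above gives every `HashDB` implementor a free upcast to a `HashDB` trait object, which is what allows boxed journal databases to be passed wherever a plain `HashDB` is expected. A small sketch using `MemoryDB` purely as a convenient implementor:

use hashdb::{AsHashDB, HashDB};
use memorydb::MemoryDB;

// A function that only cares about the HashDB interface.
fn key_count(db: &HashDB) -> usize { db.keys().len() }

let mut mem = MemoryDB::new();
mem.insert(b"some value");
// Upcast via the blanket impl; no per-type boilerplate required.
assert_eq!(key_count(mem.as_hashdb()), 1);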
+pub struct ArchiveDB { + overlay: MemoryDB, + backing: Arc, +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 259; + +impl ArchiveDB { + /// Create a new instance from file + pub fn new(path: &str) -> ArchiveDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + ArchiveDB { + overlay: MemoryDB::new(), + backing: Arc::new(backing), + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> ArchiveDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + } +} + +impl HashDB for ArchiveDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for ArchiveDB { + fn spawn(&self) -> Box { + Box::new(ArchiveDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result { + let batch = DBTransaction::new(); + let mut inserts = 0usize; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc > 0 { + assert!(rc == 1); + batch.put(&key.bytes(), &value).expect("Low-level database error. 
Some issue with your hard disk?"); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + deletes += 1; + } + } + try!(self.backing.write(batch)); + Ok((inserts + deletes) as u32) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + use journaldb::traits::JournalDB; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = ArchiveDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } 
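Note what the tests above never assert: that a removed key disappears. `ArchiveDB::commit` counts removals but issues no deletes against the backing store, so under archive semantics data stays retrievable forever. A condensed sketch in the style of these tests:

let mut jdb = ArchiveDB::new_temp();
let h = jdb.insert(b"foo");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.remove(&h);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
// The removal is counted but never enacted on disk.
assert!(jdb.exists(&h));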
+ + #[test] + fn fork_same_key() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + } + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. + + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, _, _) = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + } +} diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs new file mode 100644 index 000000000..cf8e7d392 --- /dev/null +++ b/util/src/journaldb/mod.rs @@ -0,0 +1,80 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! 
JournalDB interface and implementation. + +use common::*; + +/// Export the journaldb module. +pub mod traits; +mod archivedb; +mod optiononedb; +mod overlay; + +/// Export the JournalDB trait. +pub use self::traits::JournalDB; + +/// A journal database algorithm. +#[derive(Debug)] +pub enum Algorithm { + /// Keep all keys forever. + Archive, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into backing database, journal retains knowledge of whether backing DB key is + /// ancient or recent. Non-canon inserts get explicitly reverted and removed from backing DB. + EarlyMerge, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into memory overlay, which is tried for key fetches. Memory overlay gets + /// flushed in backing only at end of recent history. + OverlayRecent, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// References are counted in disk-backed DB. + RefCounted, +} + +impl Default for Algorithm { + fn default() -> Algorithm { Algorithm::Archive } +} + +impl fmt::Display for Algorithm { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", match self { + &Algorithm::Archive => "archive", + &Algorithm::EarlyMerge => "earlymerge", + &Algorithm::OverlayRecent => "overlayrecent", + &Algorithm::RefCounted => "refcounted", + }) + } +} + +/// Create a new JournalDB trait object. +pub fn new(path: &str, algorithm: Algorithm) -> Box { + match algorithm { + Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), + Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)), + Algorithm::OverlayRecent => Box::new(overlay::JournalOverlayDB::new(path)), + _ => unimplemented!(), + } +} diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs new file mode 100644 index 000000000..dfa7c8ec1 --- /dev/null +++ b/util/src/journaldb/optiononedb.rs @@ -0,0 +1,618 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. 
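Callers pick a pruning strategy through the `journaldb::new` factory defined in the module above rather than naming a concrete type; of the four algorithms, `RefCounted` still falls through to `unimplemented!()`. A sketch under the assumption that the module is used via the `util` crate (the filesystem path is illustrative):

use util::journaldb::{self, Algorithm, JournalDB};

let state_db: Box<JournalDB> = journaldb::new("/tmp/parity-state", Algorithm::OverlayRecent);
// `spawn` yields an independent overlay over the same backing database,
// e.g. for use from another thread.
let _copy: Box<JournalDB> = state_db.spawn();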
+pub struct OptionOneDB { + overlay: MemoryDB, + backing: Arc, + counters: Option>>>, +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 3; +const PADDING : [u8; 10] = [ 0u8; 10 ]; + +impl OptionOneDB { + /// Create a new instance from file + pub fn new(path: &str) -> OptionOneDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); + OptionOneDB { + overlay: MemoryDB::new(), + backing: Arc::new(backing), + counters: counters, + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> OptionOneDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + fn morph_key(key: &H256, index: u8) -> Bytes { + let mut ret = key.bytes().to_owned(); + ret.push(index); + ret + } + + // The next three are valid only as long as there is an insert operation of `key` in the journal. + fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } + fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } + fn is_already_in(backing: &Database, key: &H256) -> bool { + backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() + } + + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + for &(ref h, ref d) in inserts { + if let Some(c) = counters.get_mut(h) { + // already counting. increment. + *c += 1; + continue; + } + + // this is the first entry for this node in the journal. + if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { + // already in the backing DB. start counting, and remember it was already in. + Self::set_already_in(batch, &h); + counters.insert(h.clone(), 1); + continue; + } + + // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. + //Self::reset_already_in(&h); + assert!(!Self::is_already_in(backing, &h)); + batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); + } + } + + fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { + trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); + for h in inserts { + if let Some(c) = counters.get_mut(h) { + // already counting. increment. + *c += 1; + continue; + } + + // this is the first entry for this node in the journal. + // it is initialised to 1 if it was already in. 
+ if Self::is_already_in(backing, h) { + trace!("replay_keys: Key {} was already in!", h); + counters.insert(h.clone(), 1); + } + } + trace!("replay_keys: (end) counters={:?}", counters); + } + + fn kill_keys(deletes: Vec<H256>, counters: &mut HashMap<H256, i32>, batch: &DBTransaction) { + for h in deletes.into_iter() { + let mut n: Option<i32> = None; + if let Some(c) = counters.get_mut(&h) { + if *c > 1 { + *c -= 1; + continue; + } else { + n = Some(*c); + } + } + match n { + Some(i) if i == 1 => { + counters.remove(&h); + Self::reset_already_in(batch, &h); + } + None => { + // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. + //assert!(!Self::is_already_in(db, &h)); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + } + _ => panic!("Invalid value in counters: {:?}", n), + } + } + } + + fn payload(&self, key: &H256) -> Option<Bytes> { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_counters(db: &Database) -> HashMap<H256, i32> { + let mut counters = HashMap::new(); + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::<u64>(&val); + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + trace!("read_counters: era={}, index={}", era, index); + let rlp = Rlp::new(&rlp_data); + let inserts: Vec<H256> = rlp.val_at(1); + Self::replay_keys(&inserts, db, &mut counters); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!("Recovered {} counters", counters.len()); + counters + } +} + +impl HashDB for OptionOneDB { + fn keys(&self) -> HashMap<H256, i32> { + let mut ret: HashMap<H256, i32> = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for OptionOneDB { + fn spawn(&self) -> Box<JournalDB> { + Box::new(OptionOneDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + counters: self.counters.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<u32, UtilError> { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store reclaim_period.
+ + // When we make a new commit, we make a journal of all blocks in the recent history and record + // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can + // share the same era. This forms a data structure similar to a queue but whose items are tuples. + // By the time it comes to remove a tuple from the queue (i.e. when the era passes from recent history + // into ancient history), only one commit from the tuple is considered canonical. This commit + // is kept in the main backing database, whereas any others from the same era are reverted. + // + // It is possible for a key that is properly available in the backing database to be deleted and + // re-inserted in the recent history queue, yet have both operations occur in commits that are + // eventually non-canonical. To prevent the original, and still required, key from being deleted, we + // maintain a reference count which includes an original key, if any. + // + // The semantics of the `counter` are: + // insert key k: + // counter already contains k: count += 1 + // counter doesn't contain k: + // backing db contains k: count = 1 + // backing db doesn't contain k: insert into backing db, count = 0 + // delete key k: + // counter contains k (count is asserted to be non-zero): + // count > 1: counter -= 1 + // count == 1: remove counter + // count == 0: remove key from backing db + // counter doesn't contain k: remove key from backing db + // + // Practically, this means that for each commit block turning from recent to ancient we do the + // following: + // is_canonical: + // inserts: Ignored (left alone in the backing database). + // deletes: Enacted; however, the recent history queue is checked for ongoing references, and the + // reference count is decremented in preference to deletion from the backing database. + // !is_canonical: + // inserts: Reverted; however, the recent history queue is checked for ongoing references, and the + // reference count is decremented in preference to deletion from the backing database. + // deletes: Ignored (they were never inserted). + // + + // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); + let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let batch = DBTransaction::new(); + { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&now); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })).is_some() { + index += 1; + } + + let drained = self.overlay.drain(); + let removes: Vec<H256> = drained + .iter() + .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) + .collect(); + let inserts: Vec<(H256, Bytes)> = drained + .into_iter() + .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) + .collect(); + + let mut r = RlpStream::new_list(3); + r.append(id); + + // Process the new inserts. + // We use the inserts for three things.
For each: + // - we place into the backing DB or increment the counter if already in; + // - we note in the backing db that it was already in; + // - we write the key into our journal for this block; + + r.begin_list(inserts.len()); + inserts.iter().foreach(|&(k, _)| {r.append(&k);}); + r.append(&removes); + Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); + try!(batch.put(&last, r.as_raw())); + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + } + + // apply old commits' details + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })) { + let rlp = Rlp::new(&rlp_data); + let inserts: Vec = rlp.val_at(1); + let deletes: Vec = rlp.val_at(2); + // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical + Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); + try!(batch.delete(&last)); + index += 1; + } + trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + } + + try!(self.backing.write(batch)); +// trace!("OptionOneDB::commit() deleted {} nodes", deletes); + Ok(0) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + use journaldb::traits::JournalDB; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = OptionOneDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = OptionOneDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = OptionOneDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + 
assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = OptionOneDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = OptionOneDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key() { + // history is 1 + let mut jdb = OptionOneDB::new_temp(); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + } + + { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. 
+ + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + foo + }; + + { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } +} diff --git a/util/src/journaldb.rs b/util/src/journaldb/overlay.rs similarity index 84% rename from util/src/journaldb.rs rename to util/src/journaldb/overlay.rs index 8bff08a77..e91709041 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb/overlay.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Disk-backed HashDB implementation. +//! JournalDB over in-memory overlay use common::*; use rlp::*; @@ -23,13 +23,11 @@ use memorydb::*; use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; +use super::JournalDB; -/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// Implementation of the JournalDB trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. /// -/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves -/// differently: -/// /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before @@ -58,10 +56,10 @@ use std::env; /// the removed key is not present in the history overlay. /// 7. Delete ancient record from memory and disk. 
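In the renamed overlay implementation, recent history lives in the in-memory journal, so a deletion only reaches the backing database once its era leaves recent history on the canonical branch. A sketch in the style of the module's tests (history of 1; the exact commit at which the delete is enacted is inferred from the sibling implementations above):

let mut jdb = JournalOverlayDB::new_temp();
let key = jdb.insert(b"dog");
jdb.commit(0, &b"0".sha3(), None).unwrap();
jdb.remove(&key);
jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
assert!(jdb.exists(&key));  // the delete is journalled, not yet enacted
jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap();
assert!(!jdb.exists(&key)); // era 1 left recent history; canonical delete applied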
/// -pub struct JournalDB { +pub struct JournalOverlayDB { transaction_overlay: MemoryDB, backing: Arc, - journal_overlay: Option>>, + journal_overlay: Arc>, } #[derive(PartialEq)] @@ -84,9 +82,9 @@ impl HeapSizeOf for JournalEntry { } } -impl Clone for JournalDB { - fn clone(&self) -> JournalDB { - JournalDB { +impl Clone for JournalOverlayDB { + fn clone(&self) -> JournalOverlayDB { + JournalOverlayDB { transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), journal_overlay: self.journal_overlay.clone(), @@ -97,45 +95,34 @@ impl Clone for JournalDB { // all keys must be at least 12 bytes const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; - -const DB_VERSION : u32 = 3; -const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; - +const DB_VERSION : u32 = 0x200 + 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl JournalDB { +impl JournalOverlayDB { /// Create a new instance from file - pub fn new(path: &str) -> JournalDB { - Self::from_prefs(path, true) + pub fn new(path: &str) -> JournalOverlayDB { + Self::from_prefs(path) } /// Create a new instance from file - pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { + pub fn from_prefs(path: &str) -> JournalOverlayDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); - let with_journal; if !backing.is_empty() { match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => { with_journal = true; }, - Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; }, + Ok(Some(DB_VERSION)) => {} v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) } } else { - backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); - with_journal = prefer_journal; + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - - let journal_overlay = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_overlay(&backing)))) - } else { - None - }; - JournalDB { + let journal_overlay = Arc::new(RwLock::new(JournalOverlayDB::read_overlay(&backing))); + JournalOverlayDB { transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), journal_overlay: journal_overlay, @@ -144,61 +131,92 @@ impl JournalDB { /// Create a new instance with an anonymous temporary database. #[cfg(test)] - pub fn new_temp() -> JournalDB { + pub fn new_temp() -> JournalOverlayDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) } - /// Check if this database has any commits - pub fn is_empty(&self) -> bool { + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.read().unwrap(); + *journal_overlay == reconstructed + } + + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = HashMap::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; + let mut latest_era = 0; + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + latest_era = decode::(&val); + let mut era = latest_era; + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + trace!("read_overlay: era={}, index={}", era, index); + let rlp = Rlp::new(&rlp_data); + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; + } + journal.entry(era).or_insert_with(Vec::new).push(JournalEntry { + id: id, + insertions: inserted_keys, + deletions: deletions, + }); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } + } +} + +impl JournalDB for JournalOverlayDB { + fn spawn(&self) -> Box { + Box::new(self.clone()) + } + + fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + let overlay = self.journal_overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + mem + } + + fn is_empty(&self) -> bool { self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() } - /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { - let have_journal_overlay = self.journal_overlay.is_some(); - if have_journal_overlay { - self.commit_with_overlay(now, id, end) - } else { - self.commit_without_overlay() - } - } - - /// Drain the overlay and place it into a batch for the DB. - fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut insertions = 0usize; - let mut deletions = 0usize; - for i in overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc > 0 { - assert!(rc == 1); - batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - insertions += 1; - } - if rc < 0 { - assert!(rc == -1); - deletions += 1; - } - } - trace!("commit: Inserted {}, Deleted {} nodes", insertions, deletions); - insertions + deletions - } - - /// Just commit the transaction overlay into the backing DB. - fn commit_without_overlay(&mut self) -> Result<(), UtilError> { - let batch = DBTransaction::new(); - Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); - try!(self.backing.write(batch)); - Ok(()) - } - - /// Commit all recent insert operations and historical removals from the old era - /// to the backing database. - fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // record new commit's details. 
trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); + let mut journal_overlay = self.journal_overlay.write().unwrap(); let batch = DBTransaction::new(); { let mut r = RlpStream::new_list(3); @@ -280,80 +298,12 @@ impl JournalDB { journal_overlay.journal.remove(&end_era); } try!(self.backing.write(batch)); - Ok(()) + Ok(0) } - #[cfg(test)] - fn can_reconstruct_refs(&self) -> bool { - let reconstructed = Self::read_overlay(&self.backing); - let journal_overlay = self.journal_overlay.as_ref().unwrap().read().unwrap(); - *journal_overlay == reconstructed - } - - fn payload(&self, key: &H256) -> Option { - self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) - } - - fn read_overlay(db: &Database) -> JournalOverlay { - let mut journal = HashMap::new(); - let mut overlay = MemoryDB::new(); - let mut count = 0; - let mut latest_era = 0; - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - latest_era = decode::(&val); - let mut era = latest_era; - loop { - let mut index = 0usize; - while let Some(rlp_data) = db.get({ - let mut r = RlpStream::new_list(3); - r.append(&era); - r.append(&index); - r.append(&&PADDING[..]); - &r.drain() - }).expect("Low-level database error.") { - trace!("read_overlay: era={}, index={}", era, index); - let rlp = Rlp::new(&rlp_data); - let id: H256 = rlp.val_at(0); - let insertions = rlp.at(1); - let deletions: Vec = rlp.val_at(2); - let mut inserted_keys = Vec::new(); - for r in insertions.iter() { - let k: H256 = r.val_at(0); - let v: Bytes = r.val_at(1); - overlay.emplace(k.clone(), v); - inserted_keys.push(k); - count += 1; - } - journal.entry(era).or_insert_with(Vec::new).push(JournalEntry { - id: id, - insertions: inserted_keys, - deletions: deletions, - }); - index += 1; - }; - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); - JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } - } - - /// Returns heap memory size used - pub fn mem_used(&self) -> usize { - let mut mem = self.transaction_overlay.mem_used(); - if let Some(ref overlay) = self.journal_overlay.as_ref() { - let overlay = overlay.read().unwrap(); - mem += overlay.backing_overlay.mem_used(); - mem += overlay.journal.heap_size_of_children(); - } - mem - } } -impl HashDB for JournalDB { +impl HashDB for JournalOverlayDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -373,7 +323,7 @@ impl HashDB for JournalDB { match k { Some(&(ref d, rc)) if rc > 0 => Some(d), _ => { - let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec())); + let v = self.journal_overlay.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec()); match v { Some(x) => { Some(&self.transaction_overlay.denote(key, x).0) @@ -412,11 +362,12 @@ mod tests { use super::*; use hashdb::*; use log::init_log; + use journaldb::JournalDB; #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = JournalOverlayDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -446,7 +397,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalDB::new_temp(); + let mut jdb = JournalOverlayDB::new_temp(); let h = 
jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -469,7 +420,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = JournalOverlayDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -512,7 +463,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = JournalOverlayDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -544,7 +495,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = JournalOverlayDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -569,7 +520,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -597,7 +548,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -625,7 +576,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -663,7 +614,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -673,14 +624,14 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -695,7 +646,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -724,7 +675,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -773,7 +724,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -804,7 +755,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 4 let foo = 
jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -844,7 +795,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -865,7 +816,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -873,14 +824,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -893,7 +844,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -911,7 +862,7 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs new file mode 100644 index 000000000..25e132339 --- /dev/null +++ b/util/src/journaldb/traits.rs @@ -0,0 +1,37 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use hashdb::*; + +/// A HashDB which can manage a short-term journal potentially containing many forks of mutually +/// exclusive actions. +pub trait JournalDB : HashDB + Send + Sync { + /// Return a copy of ourself, in a box. + fn spawn(&self) -> Box; + + /// Returns heap memory size used + fn mem_used(&self) -> usize; + + /// Check if this database has any commits + fn is_empty(&self) -> bool; + + /// Commit all recent insert operations and canonical historical commits' removals from the + /// old era to the backing database, reverting any non-canonical historical commit's inserts. 
diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs
index dcc165259..ea97cc80e 100644
--- a/util/src/keys/store.rs
+++ b/util/src/keys/store.rs
@@ -78,6 +78,59 @@ struct AccountUnlock {
 	expires: DateTime<UTC>,
 }
 
+/// Basic account management trait
+pub trait AccountProvider : Send + Sync {
+	/// Lists all accounts
+	fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error>;
+	/// Unlocks account with the password provided
+	fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>;
+	/// Creates account
+	fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error>;
+	/// Returns secret for unlocked account
+	fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError>;
+	/// Returns secret for unlocked account
+	fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError>;
+}
+
+/// Thread-safe accounts management
+pub struct AccountService {
+	secret_store: RwLock<SecretStore>,
+}
+
+impl AccountProvider for AccountService {
+	/// Lists all accounts
+	fn accounts(&self) -> Result<Vec<Address>, ::std::io::Error> {
+		Ok(try!(self.secret_store.read().unwrap().accounts()).iter().map(|&(addr, _)| addr).collect::<Vec<Address>>())
+	}
+	/// Unlocks account with the password provided
+	fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> {
+		self.secret_store.read().unwrap().unlock_account(account, pass)
+	}
+	/// Creates account
+	fn new_account(&self, pass: &str) -> Result<Address, ::std::io::Error> {
+		self.secret_store.write().unwrap().new_account(pass)
+	}
+	/// Returns secret for unlocked account
+	fn account_secret(&self, account: &Address) -> Result<crypto::Secret, SigningError> {
+		self.secret_store.read().unwrap().account_secret(account)
+	}
+	/// Returns secret for unlocked account
+	fn sign(&self, account: &Address, message: &H256) -> Result<crypto::Signature, SigningError> {
+		self.secret_store.read().unwrap().sign(account, message)
+	}
+}
+
+impl AccountService {
+	/// New account service with the default location
+	pub fn new() -> AccountService {
+		let secret_store = RwLock::new(SecretStore::new());
+		secret_store.write().unwrap().try_import_existing();
+		AccountService {
+			secret_store: secret_store
+		}
+	}
+}
+
 impl SecretStore {
 	/// new instance of Secret Store in default home directory
 	pub fn new() -> SecretStore {
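
AccountService is the standard recipe for making a single-threaded store usable from many threads: own it behind an RwLock and expose only a Send + Sync trait. A compact sketch of the pattern with stand-in types; TinyStore, TinyProvider and TinyService are made up for illustration and do not mirror SecretStore's real API.

use std::sync::RwLock;

// Hypothetical inner store; not thread-safe on its own, like SecretStore.
struct TinyStore {
    accounts: Vec<String>,
}

// The public, thread-safe face, like AccountProvider above.
trait TinyProvider: Send + Sync {
    fn accounts(&self) -> Vec<String>;
    fn new_account(&self, name: &str) -> usize;
}

struct TinyService {
    store: RwLock<TinyStore>,
}

impl TinyProvider for TinyService {
    // Reads take the shared lock, so concurrent readers do not serialize.
    fn accounts(&self) -> Vec<String> {
        self.store.read().unwrap().accounts.clone()
    }
    // Only genuinely mutating calls take the exclusive write lock.
    fn new_account(&self, name: &str) -> usize {
        let mut store = self.store.write().unwrap();
        store.accounts.push(name.to_string());
        store.accounts.len()
    }
}

fn main() {
    let service = TinyService { store: RwLock::new(TinyStore { accounts: vec![] }) };
    service.new_account("alice");
    assert_eq!(service.accounts(), vec!["alice".to_string()]);
    println!("{} account(s)", service.accounts().len());
}

Note the same split in the patch: new_account goes through write(), while the read-mostly calls stay on read().
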
diff --git a/util/src/lib.rs b/util/src/lib.rs
index 59d66a325..b22cba55d 100644
--- a/util/src/lib.rs
+++ b/util/src/lib.rs
@@ -15,18 +15,18 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 #![warn(missing_docs)]
-#![cfg_attr(all(nightly, feature="dev"), feature(plugin))]
-#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))]
+#![cfg_attr(feature="dev", feature(plugin))]
+#![cfg_attr(feature="dev", plugin(clippy))]
 
 // Clippy settings
 // TODO [todr] not really sure
-#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))]
+#![cfg_attr(feature="dev", allow(needless_range_loop))]
 // Shorter than if-else
-#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))]
+#![cfg_attr(feature="dev", allow(match_bool))]
 // We use that to be more explicit about handled cases
-#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))]
+#![cfg_attr(feature="dev", allow(match_same_arms))]
 // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref.
-#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))]
+#![cfg_attr(feature="dev", allow(clone_on_copy))]
 
 //! Ethcore-util library
 //!
@@ -154,7 +154,7 @@ pub use rlp::*;
 pub use hashdb::*;
 pub use memorydb::*;
 pub use overlaydb::*;
-pub use journaldb::*;
+pub use journaldb::JournalDB;
 pub use math::*;
 pub use crypto::*;
 pub use triehash::*;
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs
index 644af22af..4f3384894 100644
--- a/util/src/network/discovery.rs
+++ b/util/src/network/discovery.rs
@@ -243,7 +243,7 @@ impl Discovery {
 		self.send_to(packet, address.clone());
 	}
 
-	#[cfg_attr(all(nightly, feature="dev"), allow(map_clone))]
+	#[cfg_attr(feature="dev", allow(map_clone))]
 	fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec<NodeEntry> {
 		let mut found: BTreeMap<u32, Vec<&NodeEntry>> = BTreeMap::new();
 		let mut count = 0;
diff --git a/util/src/network/host.rs b/util/src/network/host.rs
index 2d1af55ba..ece24a1d1 100644
--- a/util/src/network/host.rs
+++ b/util/src/network/host.rs
@@ -507,7 +507,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count());
 	}
 
-	#[cfg_attr(all(nightly, feature="dev"), allow(single_match))]
+	#[cfg_attr(feature="dev", allow(single_match))]
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
 		if self.have_session(id) {
@@ -542,7 +542,7 @@ impl<Message> Host<Message> where Message: Send + Sync + Clone {
 		self.create_connection(socket, Some(id), io);
 	}
 
-	#[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))]
+	#[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))]
 	fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext<NetworkIoMessage<Message>>) {
 		let nonce = self.info.write().unwrap().next_nonce();
 		let mut handshakes = self.handshakes.write().unwrap();
diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs
index 3c80f4148..7c9b6b04b 100644
--- a/util/src/overlaydb.rs
+++ b/util/src/overlaydb.rs
@@ -36,6 +36,7 @@ use kvdb::{Database};
 ///
 /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()`
 /// queries have an immediate effect in terms of these functions.
+//#[derive(Clone)]
 pub struct OverlayDB {
 	overlay: MemoryDB,
 	backing: Arc<Database>,
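
Note the lib.rs change above: the glob re-export pub use journaldb::* becomes pub use journaldb::JournalDB, so the crate's public surface names the trait while concrete backends stay inside the module. A small sketch of that arrangement; the module layout and the new_overlay constructor are illustrative, not the crate's actual tree.

// Module and item names below are made up to illustrate the layout.
mod journaldb {
    /// The public face: a trait, as in this patch.
    pub trait JournalDB {
        fn mem_used(&self) -> usize;
    }

    /// A concrete backend. Code that needs to construct one reaches into
    /// the module explicitly instead of receiving it via a glob re-export.
    pub struct JournalOverlayDB {
        bytes: usize,
    }

    impl JournalDB for JournalOverlayDB {
        fn mem_used(&self) -> usize {
            self.bytes
        }
    }

    pub fn new_overlay() -> JournalOverlayDB {
        JournalOverlayDB { bytes: 0 }
    }
}

// Like the lib.rs change: re-export only the trait, not the whole module.
pub use journaldb::JournalDB;

fn main() {
    let db = journaldb::new_overlay();
    // Downstream code talks to the trait, not the concrete type.
    println!("mem used: {}", db.mem_used());
}
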
- #[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] + #[cfg_attr(feature="dev", allow(deprecated))] pub fn catch_panic(&self, g: G) -> thread::Result where G: FnOnce() -> R + Send + 'static { let _guard = PanicGuard { handler: self }; let result = g(); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 182b87063..06076d273 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -54,7 +54,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] +#[cfg_attr(feature="dev", allow(wrong_self_convention))] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 3d5c366e5..3d75fa3e1 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -66,7 +66,7 @@ enum MaybeChanged<'a> { Changed(Bytes), } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] +#[cfg_attr(feature="dev", allow(wrong_self_convention))] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. @@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> { } } - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node.
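
Every cfg_attr edit in this diff is the same mechanical rewrite: lint attributes previously gated on all(nightly, feature="dev") are now gated on feature="dev" alone, so the dev feature becomes the single switch for the clippy lints. The pattern in isolation, with a made-up function; modern clippy namespaces the lint as clippy::needless_range_loop, where the 2016 plugin used the bare name.

// With `--features dev` the attribute below expands to
// #[allow(clippy::needless_range_loop)] and the lint is silenced; without
// the feature, cfg_attr erases the attribute entirely, so such builds need
// neither the clippy plugin nor a nightly compiler.
#[cfg_attr(feature = "dev", allow(clippy::needless_range_loop))]
fn sum_indexed(xs: &[u32]) -> u32 {
    let mut sum = 0;
    // An index-based loop that clippy's needless_range_loop would flag.
    for i in 0..xs.len() {
        sum += xs[i];
    }
    sum
}

fn main() {
    println!("{}", sum_indexed(&[1, 2, 3]));
}

The same erasure argument applies at crate level: the #![cfg_attr(feature="dev", plugin(clippy))] lines in lib.rs only pull in the plugin when the feature is on, which is why the nightly check could be dropped from every site in this diff.
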