From c8076b2f9d9ac45e1a431366eaa5710cedfdcccc Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 21 Feb 2016 19:46:29 +0100 Subject: [PATCH 001/222] Threading performance optimizations --- Cargo.lock | 3 +- Cargo.toml | 3 + ethcore/src/block_queue.rs | 146 +++++++++++++++++++----------------- ethcore/src/client.rs | 30 ++++---- ethcore/src/verification.rs | 14 +--- util/sha3/build.rs | 2 +- util/src/lib.rs | 2 + util/src/thread.rs | 43 +++++++++++ 8 files changed, 148 insertions(+), 95 deletions(-) create mode 100644 util/src/thread.rs diff --git a/Cargo.lock b/Cargo.lock index cf747f3cc..50274857f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,7 +151,6 @@ dependencies = [ [[package]] name = "eth-secp256k1" version = "0.5.4" -source = "git+https://github.com/arkpar/rust-secp256k1.git#45503e1de68d909b1862e3f2bdb9e1cdfdff3f1e" dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -223,7 +222,7 @@ dependencies = [ "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "eth-secp256k1 0.5.4 (git+https://github.com/arkpar/rust-secp256k1.git)", + "eth-secp256k1 0.5.4", "ethcore-devtools 0.9.99", "heapsize 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index 7fdfc2bee..f28829180 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,3 +30,6 @@ travis-nightly = ["ethcore/json-tests", "dev"] [[bin]] path = "parity/main.rs" name = "parity" + +[profile.release] +debug = true diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index c39f158f0..a51a1e900 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -63,7 +63,7 @@ pub struct BlockQueue { panic_handler: Arc, engine: Arc>, more_to_verify: Arc, - verification: Arc>, + verification: Arc, verifiers: Vec>, deleting: Arc, ready_signal: Arc, @@ -98,12 +98,11 @@ impl QueueSignal { } } -#[derive(Default)] struct Verification { - unverified: VecDeque, - verified: VecDeque, - verifying: VecDeque, - bad: HashSet, + unverified: Mutex>, + verified: Mutex>, + verifying: Mutex>, + bad: Mutex>, } const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50000; @@ -111,7 +110,12 @@ const MAX_UNVERIFIED_QUEUE_SIZE: usize = 50000; impl BlockQueue { /// Creates a new queue instance. 
pub fn new(engine: Arc>, message_channel: IoChannel) -> BlockQueue { - let verification = Arc::new(Mutex::new(Verification::default())); + let verification = Arc::new(Verification { + unverified: Mutex::new(VecDeque::new()), + verified: Mutex::new(VecDeque::new()), + verifying: Mutex::new(VecDeque::new()), + bad: Mutex::new(HashSet::new()), + }); let more_to_verify = Arc::new(Condvar::new()); let ready_signal = Arc::new(QueueSignal { signalled: AtomicBool::new(false), message_channel: message_channel }); let deleting = Arc::new(AtomicBool::new(false)); @@ -119,7 +123,7 @@ impl BlockQueue { let panic_handler = PanicHandler::new_in_arc(); let mut verifiers: Vec> = Vec::new(); - let thread_count = max(::num_cpus::get(), 3) - 2; + let thread_count = max(::num_cpus::get(), 5) - 0; for i in 0..thread_count { let verification = verification.clone(); let engine = engine.clone(); @@ -133,7 +137,8 @@ impl BlockQueue { .name(format!("Verifier #{}", i)) .spawn(move || { panic_handler.catch_panic(move || { - BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) + lower_thread_priority(); + BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) }).unwrap() }) .expect("Error starting block verification thread") @@ -152,17 +157,17 @@ impl BlockQueue { } } - fn verify(verification: Arc>, engine: Arc>, wait: Arc, ready: Arc, deleting: Arc, empty: Arc) { + fn verify(verification: Arc, engine: Arc>, wait: Arc, ready: Arc, deleting: Arc, empty: Arc) { while !deleting.load(AtomicOrdering::Acquire) { { - let mut lock = verification.lock().unwrap(); + let mut unverified = verification.unverified.lock().unwrap(); - if lock.unverified.is_empty() && lock.verifying.is_empty() { + if unverified.is_empty() && verification.verifying.lock().unwrap().is_empty() { empty.notify_all(); } - while lock.unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) { - lock = wait.wait(lock).unwrap(); + while unverified.is_empty() && !deleting.load(AtomicOrdering::Acquire) { + unverified = wait.wait(unverified).unwrap(); } if deleting.load(AtomicOrdering::Acquire) { @@ -171,39 +176,42 @@ impl BlockQueue { } let block = { - let mut v = verification.lock().unwrap(); - if v.unverified.is_empty() { + let mut unverified = verification.unverified.lock().unwrap(); + if unverified.is_empty() { continue; } - let block = v.unverified.pop_front().unwrap(); - v.verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None }); + let mut verifying = verification.verifying.lock().unwrap(); + let block = unverified.pop_front().unwrap(); + verifying.push_back(VerifyingBlock{ hash: block.header.hash(), block: None }); block }; let block_hash = block.header.hash(); match verify_block_unordered(block.header, block.bytes, engine.deref().deref()) { Ok(verified) => { - let mut v = verification.lock().unwrap(); - for e in &mut v.verifying { + let mut verifying = verification.verifying.lock().unwrap(); + for e in verifying.iter_mut() { if e.hash == block_hash { e.block = Some(verified); break; } } - if !v.verifying.is_empty() && v.verifying.front().unwrap().hash == block_hash { + if !verifying.is_empty() && verifying.front().unwrap().hash == block_hash { // we're next! 
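// The block at the front of `verifying` has just completed stage 2 verification, so every contiguous completed block at the head of the deque can now be moved into `verified` in arrival order: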
- let mut vref = v.deref_mut(); - BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad); + let mut verified = verification.verified.lock().unwrap(); + let mut bad = verification.bad.lock().unwrap(); + BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); ready.set(); } }, Err(err) => { - let mut v = verification.lock().unwrap(); + let mut verifying = verification.verifying.lock().unwrap(); + let mut verified = verification.verified.lock().unwrap(); + let mut bad = verification.bad.lock().unwrap(); warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", block_hash, err); - v.bad.insert(block_hash.clone()); - v.verifying.retain(|e| e.hash != block_hash); - let mut vref = v.deref_mut(); - BlockQueue::drain_verifying(&mut vref.verifying, &mut vref.verified, &mut vref.bad); + bad.insert(block_hash.clone()); + verifying.retain(|e| e.hash != block_hash); + BlockQueue::drain_verifying(&mut verifying, &mut verified, &mut bad); ready.set(); } } @@ -223,19 +231,21 @@ impl BlockQueue { } /// Clear the queue and stop verification activity. - pub fn clear(&mut self) { - let mut verification = self.verification.lock().unwrap(); - verification.unverified.clear(); - verification.verifying.clear(); - verification.verified.clear(); + pub fn clear(&self) { + let mut unverified = self.verification.unverified.lock().unwrap(); + let mut verifying = self.verification.verifying.lock().unwrap(); + let mut verified = self.verification.verified.lock().unwrap(); + unverified.clear(); + verifying.clear(); + verified.clear(); self.processing.write().unwrap().clear(); } - /// Wait for queue to be empty - pub fn flush(&mut self) { - let mut verification = self.verification.lock().unwrap(); - while !verification.unverified.is_empty() || !verification.verifying.is_empty() { - verification = self.empty.wait(verification).unwrap(); + /// Wait for unverified queue to be empty + pub fn flush(&self) { + let mut unverified = self.verification.unverified.lock().unwrap(); + while !unverified.is_empty() || !self.verification.verifying.lock().unwrap().is_empty() { + unverified = self.empty.wait(unverified).unwrap(); } } @@ -244,27 +254,29 @@ impl BlockQueue { if self.processing.read().unwrap().contains(&hash) { return BlockStatus::Queued; } - if self.verification.lock().unwrap().bad.contains(&hash) { + if self.verification.bad.lock().unwrap().contains(&hash) { return BlockStatus::Bad; } BlockStatus::Unknown } /// Add a block to the queue. 
- pub fn import_block(&mut self, bytes: Bytes) -> ImportResult { + pub fn import_block(&self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); let h = header.hash(); - if self.processing.read().unwrap().contains(&h) { - return Err(ImportError::AlreadyQueued); - } { - let mut verification = self.verification.lock().unwrap(); - if verification.bad.contains(&h) { + if self.processing.read().unwrap().contains(&h) { + return Err(ImportError::AlreadyQueued); + } + } + { + let mut bad = self.verification.bad.lock().unwrap(); + if bad.contains(&h) { return Err(ImportError::Bad(None)); } - if verification.bad.contains(&header.parent_hash) { - verification.bad.insert(h.clone()); + if bad.contains(&header.parent_hash) { + bad.insert(h.clone()); return Err(ImportError::Bad(None)); } } @@ -272,39 +284,40 @@ impl BlockQueue { match verify_block_basic(&header, &bytes, self.engine.deref().deref()) { Ok(()) => { self.processing.write().unwrap().insert(h.clone()); - self.verification.lock().unwrap().unverified.push_back(UnVerifiedBlock { header: header, bytes: bytes }); + self.verification.unverified.lock().unwrap().push_back(UnVerifiedBlock { header: header, bytes: bytes }); self.more_to_verify.notify_all(); Ok(h) }, Err(err) => { warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), err); - self.verification.lock().unwrap().bad.insert(h.clone()); + self.verification.bad.lock().unwrap().insert(h.clone()); Err(From::from(err)) } } } /// Mark given block and all its children as bad. Stops verification. - pub fn mark_as_bad(&mut self, hash: &H256) { - let mut verification_lock = self.verification.lock().unwrap(); - let mut verification = verification_lock.deref_mut(); - verification.bad.insert(hash.clone()); + pub fn mark_as_bad(&self, hash: &H256) { + let mut verified_lock = self.verification.verified.lock().unwrap(); + let mut verified = verified_lock.deref_mut(); + let mut bad = self.verification.bad.lock().unwrap(); + bad.insert(hash.clone()); self.processing.write().unwrap().remove(&hash); let mut new_verified = VecDeque::new(); - for block in verification.verified.drain(..) { - if verification.bad.contains(&block.header.parent_hash) { - verification.bad.insert(block.header.hash()); + for block in verified.drain(..) 
{ + if bad.contains(&block.header.parent_hash) { + bad.insert(block.header.hash()); self.processing.write().unwrap().remove(&block.header.hash()); } else { new_verified.push_back(block); } } - verification.verified = new_verified; + *verified = new_verified; } /// Mark given block as processed - pub fn mark_as_good(&mut self, hashes: &[H256]) { + pub fn mark_as_good(&self, hashes: &[H256]) { let mut processing = self.processing.write().unwrap(); for h in hashes { processing.remove(&h); @@ -312,16 +325,16 @@ impl BlockQueue { } /// Removes up to `max` verified blocks from the queue - pub fn drain(&mut self, max: usize) -> Vec { - let mut verification = self.verification.lock().unwrap(); - let count = min(max, verification.verified.len()); + pub fn drain(&self, max: usize) -> Vec { + let mut verified = self.verification.verified.lock().unwrap(); + let count = min(max, verified.len()); let mut result = Vec::with_capacity(count); for _ in 0..count { - let block = verification.verified.pop_front().unwrap(); + let block = verified.pop_front().unwrap(); result.push(block); } self.ready_signal.reset(); - if !verification.verified.is_empty() { + if !verified.is_empty() { self.ready_signal.set(); } result @@ -329,11 +342,10 @@ impl BlockQueue { /// Get queue status. pub fn queue_info(&self) -> BlockQueueInfo { - let verification = self.verification.lock().unwrap(); BlockQueueInfo { - verified_queue_size: verification.verified.len(), - unverified_queue_size: verification.unverified.len(), - verifying_queue_size: verification.verifying.len(), + unverified_queue_size: self.verification.unverified.lock().unwrap().len(), + verifying_queue_size: self.verification.verifying.lock().unwrap().len(), + verified_queue_size: self.verification.verified.lock().unwrap().len(), } } } diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index c3ec4b4d0..0c8580117 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -172,7 +172,7 @@ pub struct Client { chain: Arc>, engine: Arc>, state_db: Mutex, - block_queue: RwLock, + block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, panic_handler: Arc, @@ -231,7 +231,7 @@ impl Client { chain: chain, engine: engine, state_db: Mutex::new(state_db), - block_queue: RwLock::new(block_queue), + block_queue: block_queue, report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler @@ -240,7 +240,7 @@ impl Client { /// Flush the block import queue. 
pub fn flush_queue(&self) { - self.block_queue.write().unwrap().flush(); + self.block_queue.flush(); } /// This is triggered by a message coming from a block queue when the block is ready for insertion @@ -248,11 +248,11 @@ impl Client { let mut ret = 0; let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); - let blocks = self.block_queue.write().unwrap().drain(128); + let blocks = self.block_queue.drain(128); let mut good_blocks = Vec::with_capacity(128); for block in blocks { if bad.contains(&block.header.parent_hash) { - self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); + self.block_queue.mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); continue; } @@ -260,7 +260,7 @@ impl Client { let header = &block.header; if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.block_queue.write().unwrap().mark_as_bad(&header.hash()); + self.block_queue.mark_as_bad(&header.hash()); bad.insert(block.header.hash()); break; }; @@ -268,7 +268,7 @@ impl Client { Some(p) => p, None => { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); - self.block_queue.write().unwrap().mark_as_bad(&header.hash()); + self.block_queue.mark_as_bad(&header.hash()); bad.insert(block.header.hash()); break; }, @@ -292,13 +292,13 @@ impl Client { Err(e) => { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); bad.insert(block.header.hash()); - self.block_queue.write().unwrap().mark_as_bad(&header.hash()); + self.block_queue.mark_as_bad(&header.hash()); break; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.block_queue.write().unwrap().mark_as_bad(&header.hash()); + self.block_queue.mark_as_bad(&header.hash()); break; } @@ -317,8 +317,8 @@ impl Client { trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); ret += 1; } - self.block_queue.write().unwrap().mark_as_good(&good_blocks); - if !good_blocks.is_empty() && self.block_queue.read().unwrap().queue_info().is_empty() { + self.block_queue.mark_as_good(&good_blocks); + if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::BlockVerified)).unwrap(); } ret @@ -389,7 +389,7 @@ impl BlockChainClient for Client { let chain = self.chain.read().unwrap(); match Self::block_hash(&chain, id) { Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, - Some(hash) => self.block_queue.read().unwrap().block_status(&hash), + Some(hash) => self.block_queue.block_status(&hash), None => BlockStatus::Unknown } } @@ -434,15 +434,15 @@ impl BlockChainClient for Client { if self.block_status(BlockId::Hash(header.parent_hash)) == BlockStatus::Unknown { return Err(ImportError::UnknownParent); } - self.block_queue.write().unwrap().import_block(bytes) + self.block_queue.import_block(bytes) } fn queue_info(&self) -> BlockQueueInfo { - self.block_queue.read().unwrap().queue_info() + self.block_queue.queue_info() } fn clear_queue(&self) { - self.block_queue.write().unwrap().clear(); + self.block_queue.clear(); } fn chain_info(&self) -> BlockChainInfo { diff --git 
a/ethcore/src/verification.rs b/ethcore/src/verification.rs index c7d5e265f..fa9467e95 100644 --- a/ethcore/src/verification.rs +++ b/ethcore/src/verification.rs @@ -57,18 +57,12 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res /// Still operates on an individual block /// Returns a PreVerifiedBlock structure populated with transactions pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result { - try!(engine.verify_block_unordered(&header, Some(&bytes))); - for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::
()) { - try!(engine.verify_block_unordered(&u, None)); - } // Verify transactions. let mut transactions = Vec::new(); - { - let v = BlockView::new(&bytes); - for t in v.transactions() { - try!(engine.verify_transaction(&t, &header)); - transactions.push(t); - } + let v = BlockView::new(&bytes); + for t in v.transactions() { + try!(engine.verify_transaction(&t, &header)); + transactions.push(t); } Ok(PreVerifiedBlock { header: header, diff --git a/util/sha3/build.rs b/util/sha3/build.rs index bbe16d720..9eb36fdb9 100644 --- a/util/sha3/build.rs +++ b/util/sha3/build.rs @@ -21,6 +21,6 @@ extern crate gcc; fn main() { - gcc::compile_library("libtinykeccak.a", &["src/tinykeccak.c"]); + gcc::Config::new().file("src/tinykeccak.c").flag("-O3").compile("libtinykeccak.a"); } diff --git a/util/src/lib.rs b/util/src/lib.rs index 2b7438cf3..5c8bd4fb0 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -143,6 +143,7 @@ pub mod network; pub mod log; pub mod panics; pub mod keys; +mod thread; pub use common::*; pub use misc::*; @@ -163,4 +164,5 @@ pub use semantic_version::*; pub use network::*; pub use io::*; pub use log::*; +pub use thread::*; diff --git a/util/src/thread.rs b/util/src/thread.rs new file mode 100644 index 000000000..b86ca3e86 --- /dev/null +++ b/util/src/thread.rs @@ -0,0 +1,43 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! 
Thread management helpers + +use libc::{c_int, pthread_self, pthread_t}; + +#[repr(C)] +struct sched_param { + priority: c_int, + padding: c_int, +} + +extern { + fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int; + fn pthread_setschedparam(thread: pthread_t, policy: c_int, param: *const sched_param) -> c_int; +} +const PRIO_DARWIN_THREAD: c_int = 3; +const PRIO_DARWIN_BG: c_int = 0x1000; +const SCHED_RR: c_int = 2; + +/// Lower thread priority and put it into background mode +#[cfg(target_os="macos")] +pub fn lower_thread_priority() { + let sp = sched_param { priority: 0, padding: 0 }; + if unsafe { pthread_setschedparam(pthread_self(), SCHED_RR, &sp) } == -1 { + trace!("Could not decrease thread priority"); + } + //unsafe { setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG); } +} From 778fa92ebe82bcf7c739b4750a5443f2ed575802 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 22 Feb 2016 00:36:59 +0100 Subject: [PATCH 002/222] Remove locks from the block chain --- Cargo.lock | 3 +- Cargo.toml | 2 +- ethcore/src/block_queue.rs | 12 +++---- ethcore/src/blockchain.rs | 48 ++++++++++++++++------------ ethcore/src/client.rs | 63 +++++++++++++++++-------------------- ethcore/src/verification.rs | 14 ++++++--- util/src/lib.rs | 2 -- util/src/thread.rs | 43 ------------------------- 8 files changed, 75 insertions(+), 112 deletions(-) delete mode 100644 util/src/thread.rs diff --git a/Cargo.lock b/Cargo.lock index 50274857f..cf747f3cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -151,6 +151,7 @@ dependencies = [ [[package]] name = "eth-secp256k1" version = "0.5.4" +source = "git+https://github.com/arkpar/rust-secp256k1.git#45503e1de68d909b1862e3f2bdb9e1cdfdff3f1e" dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", @@ -222,7 +223,7 @@ dependencies = [ "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "eth-secp256k1 0.5.4", + "eth-secp256k1 0.5.4 (git+https://github.com/arkpar/rust-secp256k1.git)", "ethcore-devtools 0.9.99", "heapsize 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/Cargo.toml b/Cargo.toml index f28829180..8bc94a3a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,4 +32,4 @@ path = "parity/main.rs" name = "parity" [profile.release] -debug = true +debug = false diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index a51a1e900..ba9867966 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -99,6 +99,7 @@ impl QueueSignal { } struct Verification { + // All locks must be captured in the order declared here.
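+	// Splitting the former single `Mutex<Verification>` into one lock per collection stays deadlock-free only if every path that holds several of these locks at once acquires them in this same fixed order.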
unverified: Mutex>, verified: Mutex>, verifying: Mutex>, @@ -123,7 +124,7 @@ impl BlockQueue { let panic_handler = PanicHandler::new_in_arc(); let mut verifiers: Vec> = Vec::new(); - let thread_count = max(::num_cpus::get(), 5) - 0; + let thread_count = max(::num_cpus::get(), 3) - 2; for i in 0..thread_count { let verification = verification.clone(); let engine = engine.clone(); @@ -137,7 +138,6 @@ impl BlockQueue { .name(format!("Verifier #{}", i)) .spawn(move || { panic_handler.catch_panic(move || { - lower_thread_priority(); BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting, empty) }).unwrap() }) @@ -392,7 +392,7 @@ mod tests { #[test] fn can_import_blocks() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); if let Err(e) = queue.import_block(get_good_dummy_block()) { panic!("error importing block that is valid by definition({:?})", e); } @@ -400,7 +400,7 @@ mod tests { #[test] fn returns_error_for_duplicates() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); if let Err(e) = queue.import_block(get_good_dummy_block()) { panic!("error importing block that is valid by definition({:?})", e); } @@ -419,7 +419,7 @@ mod tests { #[test] fn returns_ok_for_drained_duplicates() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); let block = get_good_dummy_block(); let hash = BlockView::new(&block).header().hash().clone(); if let Err(e) = queue.import_block(block) { @@ -436,7 +436,7 @@ mod tests { #[test] fn returns_empty_once_finished() { - let mut queue = get_test_queue(); + let queue = get_test_queue(); queue.import_block(get_good_dummy_block()).expect("error importing block that is valid by definition"); queue.flush(); queue.drain(1); diff --git a/ethcore/src/blockchain.rs b/ethcore/src/blockchain.rs index cc9ff56fd..22d409e8e 100644 --- a/ethcore/src/blockchain.rs +++ b/ethcore/src/blockchain.rs @@ -16,6 +16,7 @@ //! Blockchain database. +use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder}; use util::*; use rocksdb::{DB, WriteBatch, Writable}; use header::*; @@ -147,8 +148,9 @@ struct CacheManager { /// /// **Does not do input data verification.** pub struct BlockChain { - pref_cache_size: usize, - max_cache_size: usize, + // All locks must be captured in the order declared here. + pref_cache_size: AtomicUsize, + max_cache_size: AtomicUsize, best_block: RwLock, @@ -166,6 +168,7 @@ pub struct BlockChain { blocks_db: DB, cache_man: RwLock, + insert_lock: Mutex<()> } impl BlockProvider for BlockChain { @@ -261,8 +264,8 @@ impl BlockChain { (0..COLLECTION_QUEUE_SIZE).foreach(|_| cache_man.cache_usage.push_back(HashSet::new())); let bc = BlockChain { - pref_cache_size: 1 << 14, - max_cache_size: 1 << 20, + pref_cache_size: AtomicUsize::new(1 << 14), + max_cache_size: AtomicUsize::new(1 << 20), best_block: RwLock::new(BestBlock::new()), blocks: RwLock::new(HashMap::new()), block_details: RwLock::new(HashMap::new()), @@ -273,6 +276,7 @@ impl BlockChain { extras_db: extras_db, blocks_db: blocks_db, cache_man: RwLock::new(cache_man), + insert_lock: Mutex::new(()), }; // load best block @@ -315,9 +319,9 @@ impl BlockChain { } /// Set the cache configuration. 
- pub fn configure_cache(&mut self, pref_cache_size: usize, max_cache_size: usize) { - self.pref_cache_size = pref_cache_size; - self.max_cache_size = max_cache_size; + pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { + self.pref_cache_size.store(pref_cache_size, AtomicOrder::Relaxed); + self.max_cache_size.store(max_cache_size, AtomicOrder::Relaxed); } /// Returns a tree route between `from` and `to`, which is a tuple of: @@ -435,22 +439,26 @@ impl BlockChain { return; } + let _lock = self.insert_lock.lock(); // store block in db self.blocks_db.put(&hash, &bytes).unwrap(); let (batch, new_best, details) = self.block_to_extras_insert_batch(bytes); - // update best block - let mut best_block = self.best_block.write().unwrap(); - if let Some(b) = new_best { - *best_block = b; + { + // update best block + let mut best_block = self.best_block.write().unwrap(); + if let Some(b) = new_best { + *best_block = b; + } } - // update caches - let mut write = self.block_details.write().unwrap(); - write.remove(&header.parent_hash()); - write.insert(hash.clone(), details); - self.note_used(CacheID::Block(hash)); - + { + // update caches + let mut write = self.block_details.write().unwrap(); + write.remove(&header.parent_hash()); + write.insert(hash.clone(), details); + self.note_used(CacheID::Block(hash)); + } // update extras database self.extras_db.write(batch).unwrap(); } @@ -622,17 +630,17 @@ impl BlockChain { /// Ticks our cache system and throws out any old data. pub fn collect_garbage(&self) { - if self.cache_size().total() < self.pref_cache_size { return; } + if self.cache_size().total() < self.pref_cache_size.load(AtomicOrder::Relaxed) { return; } for _ in 0..COLLECTION_QUEUE_SIZE { { - let mut cache_man = self.cache_man.write().unwrap(); let mut blocks = self.blocks.write().unwrap(); let mut block_details = self.block_details.write().unwrap(); let mut block_hashes = self.block_hashes.write().unwrap(); let mut transaction_addresses = self.transaction_addresses.write().unwrap(); let mut block_logs = self.block_logs.write().unwrap(); let mut blocks_blooms = self.blocks_blooms.write().unwrap(); + let mut cache_man = self.cache_man.write().unwrap(); for id in cache_man.cache_usage.pop_back().unwrap().into_iter() { cache_man.in_use.remove(&id); @@ -650,7 +658,7 @@ impl BlockChain { // TODO: handle block_hashes properly. block_hashes.clear(); } - if self.cache_size().total() < self.max_cache_size { break; } + if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; } } // TODO: m_lastCollection = chrono::system_clock::now(); diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 0c8580117..68801520c 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -169,7 +169,7 @@ impl ClientReport { /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. /// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue. 
pub struct Client { - chain: Arc>, + chain: Arc, engine: Arc>, state_db: Mutex, block_queue: BlockQueue, @@ -190,7 +190,7 @@ impl Client { dir.push(format!("v{}-sec-pruned", CLIENT_DB_VER_STR)); let path = dir.as_path(); let gb = spec.genesis_block(); - let chain = Arc::new(RwLock::new(BlockChain::new(&gb, path))); + let chain = Arc::new(BlockChain::new(&gb, path)); let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); @@ -258,13 +258,13 @@ impl Client { } let header = &block.header; - if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { + if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.deref()) { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); self.block_queue.mark_as_bad(&header.hash()); bad.insert(block.header.hash()); break; }; - let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { + let parent = match self.chain.block_header(&header.parent_hash) { Some(p) => p, None => { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); @@ -278,7 +278,7 @@ impl Client { last_hashes.resize(256, H256::new()); last_hashes[0] = header.parent_hash.clone(); for i in 0..255 { - match self.chain.read().unwrap().block_details(&last_hashes[i]) { + match self.chain.block_details(&last_hashes[i]) { Some(details) => { last_hashes[i + 1] = details.parent.clone(); }, @@ -304,9 +304,9 @@ impl Client { good_blocks.push(header.hash().clone()); - self.chain.write().unwrap().insert_block(&block.bytes); //TODO: err here? + self.chain.insert_block(&block.bytes); //TODO: err here? let ancient = if header.number() >= HISTORY { Some(header.number() - HISTORY) } else { None }; - match result.drain().commit(header.number(), &header.hash(), ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap()))) { + match result.drain().commit(header.number(), &header.hash(), ancient.map(|n|(n, self.chain.block_hash(n).unwrap()))) { Ok(_) => (), Err(e) => { warn!(target: "client", "State DB commit failed: {:?}", e); @@ -331,7 +331,7 @@ impl Client { /// Get info on the cache. pub fn cache_info(&self) -> CacheSize { - self.chain.read().unwrap().cache_size() + self.chain.cache_size() } /// Get the report. @@ -341,12 +341,12 @@ impl Client { /// Tick the client. pub fn tick(&self) { - self.chain.read().unwrap().collect_garbage(); + self.chain.collect_garbage(); } /// Set up the cache behaviour. 
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) { - self.chain.write().unwrap().configure_cache(pref_cache_size, max_cache_size); + self.chain.configure_cache(pref_cache_size, max_cache_size); } fn block_hash(chain: &BlockChain, id: BlockId) -> Option { @@ -361,14 +361,12 @@ impl Client { impl BlockChainClient for Client { fn block_header(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) } fn block_body(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| { - chain.block(&hash).map(|bytes| { + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash).map(|bytes| { let rlp = Rlp::new(&bytes); let mut body = RlpStream::new_list(2); body.append_raw(rlp.at(1).as_raw(), 1); @@ -379,24 +377,21 @@ impl BlockChainClient for Client { } fn block(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| { - chain.block(&hash) + Self::block_hash(&self.chain, id).and_then(|hash| { + self.chain.block(&hash) }) } fn block_status(&self, id: BlockId) -> BlockStatus { - let chain = self.chain.read().unwrap(); - match Self::block_hash(&chain, id) { - Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain, + match Self::block_hash(&self.chain, id) { + Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain, Some(hash) => self.block_queue.block_status(&hash), None => BlockStatus::Unknown } } fn block_total_difficulty(&self, id: BlockId) -> Option { - let chain = self.chain.read().unwrap(); - Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) + Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty) } fn code(&self, address: &Address) -> Option { @@ -404,18 +399,17 @@ impl BlockChainClient for Client { } fn transaction(&self, id: TransactionId) -> Option { - let chain = self.chain.read().unwrap(); match id { - TransactionId::Hash(ref hash) => chain.transaction_address(hash), - TransactionId::Location(id, index) => Self::block_hash(&chain, id).map(|hash| TransactionAddress { + TransactionId::Hash(ref hash) => self.chain.transaction_address(hash), + TransactionId::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress { block_hash: hash, index: index }) - }.and_then(|address| chain.transaction(&address)) + }.and_then(|address| self.chain.transaction(&address)) } fn tree_route(&self, from: &H256, to: &H256) -> Option { - self.chain.read().unwrap().tree_route(from.clone(), to.clone()) + self.chain.tree_route(from.clone(), to.clone()) } fn state_data(&self, _hash: &H256) -> Option { @@ -428,7 +422,7 @@ impl BlockChainClient for Client { fn import_block(&self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); - if self.chain.read().unwrap().is_known(&header.hash()) { + if self.chain.is_known(&header.hash()) { return Err(ImportError::AlreadyInChain); } if self.block_status(BlockId::Hash(header.parent_hash)) == BlockStatus::Unknown { @@ -446,13 +440,12 @@ impl BlockChainClient for Client { } fn chain_info(&self) -> BlockChainInfo { - let chain = 
self.chain.read().unwrap(); BlockChainInfo { - total_difficulty: chain.best_block_total_difficulty(), - pending_total_difficulty: chain.best_block_total_difficulty(), - genesis_hash: chain.genesis_hash(), - best_block_hash: chain.best_block_hash(), - best_block_number: From::from(chain.best_block_number()) + total_difficulty: self.chain.best_block_total_difficulty(), + pending_total_difficulty: self.chain.best_block_total_difficulty(), + genesis_hash: self.chain.genesis_hash(), + best_block_hash: self.chain.best_block_hash(), + best_block_number: From::from(self.chain.best_block_number()) } } } diff --git a/ethcore/src/verification.rs b/ethcore/src/verification.rs index fa9467e95..c7d5e265f 100644 --- a/ethcore/src/verification.rs +++ b/ethcore/src/verification.rs @@ -57,12 +57,18 @@ pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Res /// Still operates on an individual block /// Returns a PreVerifiedBlock structure populated with transactions pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> Result { + try!(engine.verify_block_unordered(&header, Some(&bytes))); + for u in Rlp::new(&bytes).at(2).iter().map(|rlp| rlp.as_val::
()) { + try!(engine.verify_block_unordered(&u, None)); + } // Verify transactions. let mut transactions = Vec::new(); - let v = BlockView::new(&bytes); - for t in v.transactions() { - try!(engine.verify_transaction(&t, &header)); - transactions.push(t); + { + let v = BlockView::new(&bytes); + for t in v.transactions() { + try!(engine.verify_transaction(&t, &header)); + transactions.push(t); + } } Ok(PreVerifiedBlock { header: header, diff --git a/util/src/lib.rs b/util/src/lib.rs index 5c8bd4fb0..2b7438cf3 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -143,7 +143,6 @@ pub mod network; pub mod log; pub mod panics; pub mod keys; -mod thread; pub use common::*; pub use misc::*; @@ -164,5 +163,4 @@ pub use semantic_version::*; pub use network::*; pub use io::*; pub use log::*; -pub use thread::*; diff --git a/util/src/thread.rs b/util/src/thread.rs deleted file mode 100644 index b86ca3e86..000000000 --- a/util/src/thread.rs +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Thread management helpers - -use libc::{c_int, pthread_self, pthread_t}; - -#[repr(C)] -struct sched_param { - priority: c_int, - padding: c_int, -} - -extern { - fn setpriority(which: c_int, who: c_int, prio: c_int) -> c_int; - fn pthread_setschedparam(thread: pthread_t, policy: c_int, param: *const sched_param) -> c_int; -} -const PRIO_DARWIN_THREAD: c_int = 3; -const PRIO_DARWIN_BG: c_int = 0x1000; -const SCHED_RR: c_int = 2; - -/// Lower thread priority and put it into background mode -#[cfg(target_os="macos")] -pub fn lower_thread_priority() { - let sp = sched_param { priority: 0, padding: 0 }; - if unsafe { pthread_setschedparam(pthread_self(), SCHED_RR, &sp) } == -1 { - trace!("Could not decrease thread priority"); - } - //unsafe { setpriority(PRIO_DARWIN_THREAD, 0, PRIO_DARWIN_BG); } -} From cb4d17825bc6353461549a4a9c03424cda525cae Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 29 Feb 2016 19:49:29 +0100 Subject: [PATCH 003/222] Fixed lock order --- Cargo.toml | 1 + ethcore/src/blockchain/blockchain.rs | 78 +++++++++++++++------------- 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 25b7caa85..df3beef49 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,3 +35,4 @@ name = "parity" [profile.release] debug = false +lto = false diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 7339f3a1a..f30a674e6 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -433,48 +433,56 @@ impl BlockChain { batch.put(b"best", &update.info.hash).unwrap(); // These cached values must be updated atomically - let mut best_block = self.best_block.write().unwrap(); - let mut write_hashes = self.block_hashes.write().unwrap(); - let mut write_txs = self.transaction_addresses.write().unwrap(); + { + let mut best_block = self.best_block.write().unwrap(); +
let mut write_hashes = self.block_hashes.write().unwrap(); + let mut write_txs = self.transaction_addresses.write().unwrap(); - // update best block - match update.info.location { - BlockLocation::Branch => (), - _ => { - *best_block = BestBlock { - hash: update.info.hash, - number: update.info.number, - total_difficulty: update.info.total_difficulty - }; + // update best block + match update.info.location { + BlockLocation::Branch => (), + _ => { + *best_block = BestBlock { + hash: update.info.hash, + number: update.info.number, + total_difficulty: update.info.total_difficulty + }; + } + } + + for (number, hash) in &update.block_hashes { + batch.put_extras(number, hash); + write_hashes.remove(number); + } + + for (hash, tx_address) in &update.transactions_addresses { + batch.put_extras(hash, tx_address); + write_txs.remove(hash); } } - for (number, hash) in &update.block_hashes { - batch.put_extras(number, hash); - write_hashes.remove(number); + { + let mut write_details = self.block_details.write().unwrap(); + for (hash, details) in update.block_details.into_iter() { + batch.put_extras(&hash, &details); + write_details.insert(hash, details); + } } - let mut write_details = self.block_details.write().unwrap(); - for (hash, details) in update.block_details.into_iter() { - batch.put_extras(&hash, &details); - write_details.insert(hash, details); + { + let mut write_receipts = self.block_receipts.write().unwrap(); + for (hash, receipt) in &update.block_receipts { + batch.put_extras(hash, receipt); + write_receipts.remove(hash); + } } - let mut write_receipts = self.block_receipts.write().unwrap(); - for (hash, receipt) in &update.block_receipts { - batch.put_extras(hash, receipt); - write_receipts.remove(hash); - } - - for (hash, tx_address) in &update.transactions_addresses { - batch.put_extras(hash, tx_address); - write_txs.remove(hash); - } - - let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); - for (bloom_hash, blocks_bloom) in &update.blocks_blooms { - batch.put_extras(bloom_hash, blocks_bloom); - write_blocks_blooms.remove(bloom_hash); + { + let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); + for (bloom_hash, blocks_bloom) in &update.blocks_blooms { + batch.put_extras(bloom_hash, blocks_bloom); + write_blocks_blooms.remove(bloom_hash); + } } // update extras database @@ -580,7 +588,7 @@ impl BlockChain { /// This function returns modified transaction addresses. 
fn prepare_transaction_addresses_update(&self, block_bytes: &[u8], info: &BlockInfo) -> HashMap { let block = BlockView::new(block_bytes); - let transaction_hashes = block.transaction_hashes(); + let transaction_hashes = block.transaction_hashes(); transaction_hashes.into_iter() .enumerate() From d0129ff67b5e69436df88a6758e81d880e924a23 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 29 Feb 2016 21:15:39 +0100 Subject: [PATCH 004/222] Fixed cache memory leak --- ethcore/src/blockchain/blockchain.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index f30a674e6..23e9aaac9 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -465,6 +465,7 @@ impl BlockChain { let mut write_details = self.block_details.write().unwrap(); for (hash, details) in update.block_details.into_iter() { batch.put_extras(&hash, &details); + self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone())); write_details.insert(hash, details); } } @@ -769,6 +770,14 @@ impl BlockChain { // TODO: handle block_hashes properly. block_hashes.clear(); + + blocks.shrink_to_fit(); + block_details.shrink_to_fit(); + block_hashes.shrink_to_fit(); + transaction_addresses.shrink_to_fit(); + block_logs.shrink_to_fit(); + blocks_blooms.shrink_to_fit(); + block_receipts.shrink_to_fit(); } if self.cache_size().total() < self.max_cache_size.load(AtomicOrder::Relaxed) { break; } } From c889d9b3eb21c1b54989d741c6b34bed4bfc30ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 25 Feb 2016 16:58:18 +0100 Subject: [PATCH 005/222] Exposing transaction queue pending in RPC --- rpc/src/v1/impls/eth.rs | 13 ++++++++++++- rpc/src/v1/traits/eth.rs | 13 ++++++++----- sync/src/chain.rs | 3 +++ 3 files changed, 23 insertions(+), 6 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 00bce5437..6d66a2c6d 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -152,7 +152,7 @@ impl Eth for EthClient { } } - fn block_transaction_count(&self, params: Params) -> Result { + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), @@ -160,6 +160,17 @@ impl Eth for EthClient { }) } + fn block_transaction_count_by_number(&self, params: Params) -> Result { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending), + _ => match take_weak!(self.client).block(block_number.into()) { + Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), + None => Ok(Value::Null) + } + }) + } + fn block_uncles_count(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index d2aeb0f9e..8c24dd38c 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -55,12 +55,15 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns block with given number. fn block_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } - + /// Returns the number of transactions sent from given address at given time (block number). 
fn transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of transactions in a block. - fn block_transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns the number of transactions in a block given block hash. + fn block_transaction_count_by_hash(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of transactions in a block given block number. + fn block_transaction_count_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns the number of uncles in a given block. fn block_uncles_count(&self, _: Params) -> Result { rpc_unimplemented!() } @@ -130,8 +133,8 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_balance", Eth::balance); delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getTransactionCount", Eth::transaction_count); - delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count); - delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count); + delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); + delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); delegate.add_method("eth_code", Eth::code_at); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 9edab791a..90f7b0d2a 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -140,6 +140,8 @@ pub struct SyncStatus { pub num_active_peers: usize, /// Heap memory used in bytes pub mem_used: usize, + /// Number of pending transactions in queue + pub transaction_queue_pending: usize, } #[derive(PartialEq, Eq, Debug, Clone)] @@ -255,6 +257,7 @@ impl ChainSync { blocks_total: match self.highest_block { Some(x) if x > self.starting_block => x - self.starting_block, _ => 0 }, num_peers: self.peers.len(), num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), + transaction_queue_pending: self.transaction_queue.lock().unwrap().status().pending, mem_used: // TODO: https://github.com/servo/heapsize/pull/50 // self.downloading_hashes.heap_size_of_children() From 324e070581305ded5b9c40942f86837756f2116d Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 2 Mar 2016 01:24:06 +0100 Subject: [PATCH 006/222] Reverted some changes --- ethcore/src/block_queue.rs | 3 +-- util/sha3/build.rs | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index f61eb565d..de411c6e2 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -302,8 +302,7 @@ impl BlockQueue { if self.processing.read().unwrap().contains(&h) { return Err(ImportError::AlreadyQueued); } - } - { + let mut bad = self.verification.bad.lock().unwrap(); if bad.contains(&h) { return Err(ImportError::Bad(None)); diff --git a/util/sha3/build.rs b/util/sha3/build.rs index 9eb36fdb9..bbe16d720 100644 --- a/util/sha3/build.rs +++ b/util/sha3/build.rs @@ -21,6 +21,6 @@ extern crate gcc; fn main() { - gcc::Config::new().file("src/tinykeccak.c").flag("-O3").compile("libtinykeccak.a"); + gcc::compile_library("libtinykeccak.a", &["src/tinykeccak.c"]); } From af5ed8b5f7af5b576fed3181dc03c45fdee50f1b Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 4 Mar 2016 20:10:07 +0300 Subject: [PATCH 007/222] 
rpc-signing-extend --- parity/main.rs | 1 + rpc/src/v1/impls/eth.rs | 17 ++++++++++++++++- rpc/src/v1/impls/personal.rs | 21 ++++++++++----------- rpc/src/v1/types/bytes.rs | 32 ++++++++++++++++++++++++++++++-- rpc/src/v1/types/mod.rs.in | 2 ++ rpc/src/v1/types/transaction.rs | 13 +++++++++++++ 6 files changed, 72 insertions(+), 14 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index b991f36cd..91a884beb 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -157,6 +157,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom server.add_delegate(EthClient::new(&client, &sync).to_delegate()); server.add_delegate(EthFilterClient::new(&client).to_delegate()); server.add_delegate(NetClient::new(&sync).to_delegate()); + server.add_delegate(PersonalClient::new(&client).to_delegate()); server.start_async(url, cors_domain); } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 2313d5114..16b68f90f 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -29,7 +29,7 @@ use ethcore::views::*; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; use v1::traits::{Eth, EthFilter}; -use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, OptionalValue, Index, Filter, Log}; +use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; use v1::helpers::{PollFilter, PollManager}; /// Eth rpc implementation. @@ -253,6 +253,21 @@ impl Eth for EthClient { to_value(&true) }) } + + fn send_transaction(&self, params: Params) -> Result { + from_params::<(TransactionRequest, )>(params) + .and_then(|(transaction_request, )| { + let client = take_weak!(self.client); + let store = client.secret_store().read().unwrap(); + match store.account_secret(&transaction_request.from) { + Ok(_) => { + // todo: actually sign and push to queue transaction here + Ok(Value::Bool(true)) + }, + Err(_) => { Ok(Value::Bool(false ))} + } + }) + } } /// Eth filter rpc implementation. diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 48e1b1c6a..a2788b9d9 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -18,28 +18,27 @@ use std::sync::{Arc, Weak}; use jsonrpc_core::*; use v1::traits::Personal; -use util::keys::store::*; use util::Address; -use std::sync::RwLock; +use ethcore::client::Client; /// Account management (personal) rpc implementation. 
pub struct PersonalClient { - secret_store: Weak>, + client: Weak, } impl PersonalClient { /// Creates new PersonalClient - pub fn new(store: &Arc>) -> Self { + pub fn new(client: &Arc) -> Self { PersonalClient { - secret_store: Arc::downgrade(store), + client: Arc::downgrade(client), } } } impl Personal for PersonalClient { fn accounts(&self, _: Params) -> Result { - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let client = take_weak!(self.client); + let store = client.secret_store().read().unwrap(); match store.accounts() { Ok(account_list) => { Ok(Value::Array(account_list.iter() @@ -54,8 +53,8 @@ impl Personal for PersonalClient { fn new_account(&self, params: Params) -> Result { from_params::<(String, )>(params).and_then( |(pass, )| { - let store_wk = take_weak!(self.secret_store); - let mut store = store_wk.write().unwrap(); + let client = take_weak!(self.client); + let mut store = client.secret_store().write().unwrap(); match store.new_account(&pass) { Ok(address) => Ok(Value::String(format!("{:?}", address))), Err(_) => Err(Error::internal_error()) @@ -67,8 +66,8 @@ impl Personal for PersonalClient { fn unlock_account(&self, params: Params) -> Result { from_params::<(Address, String, u64)>(params).and_then( |(account, account_pass, _)|{ - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let client = take_weak!(self.client); + let store = client.secret_store().read().unwrap(); match store.unlock_account(&account, &account_pass) { Ok(_) => Ok(Value::Bool(true)), Err(_) => Ok(Value::Bool(false)), diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index f09f24e4d..44809ac70 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -15,7 +15,9 @@ // along with Parity. If not, see . use rustc_serialize::hex::ToHex; -use serde::{Serialize, Serializer}; +use serde::{Serialize, Serializer, Deserialize, Deserializer, Error}; +use serde::de::Visitor; +use util::common::FromHex; /// Wrapper structure around vector of bytes. 
#[derive(Debug)] @@ -36,7 +38,7 @@ impl Default for Bytes { } impl Serialize for Bytes { - fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> + fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: Serializer { let mut serialized = "0x".to_owned(); serialized.push_str(self.0.to_hex().as_ref()); @@ -44,6 +46,32 @@ impl Serialize for Bytes { } } +impl Deserialize for Bytes { + fn deserialize(deserializer: &mut D) -> Result + where D: Deserializer { + deserializer.deserialize(BytesVisitor) + } +} + +struct BytesVisitor; + +impl Visitor for BytesVisitor { + type Value = Bytes; + + fn visit_str(&mut self, value: &str) -> Result where E: Error { + if value.len() >= 2 && &value[0..2] == "0x" { + Ok(Bytes::new(FromHex::from_hex(&value[2..]).unwrap_or_else(|_| vec![]))) + } else { + Err(Error::custom("invalid hex")) + } + } + + fn visit_string(&mut self, value: String) -> Result where E: Error { + self.visit_str(value.as_ref()) + } +} + + #[cfg(test)] mod tests { use super::*; diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 34c1f1cff..2b2390ecb 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -33,3 +33,5 @@ pub use self::log::Log; pub use self::optionals::OptionalValue; pub use self::sync::{SyncStatus, SyncInfo}; pub use self::transaction::Transaction; +pub use self::transaction::TransactionRequest; + diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 232cf0bf3..7d40d8a49 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -17,6 +17,7 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; +use serde::{Deserializer, Error}; #[derive(Debug, Default, Serialize)] pub struct Transaction { @@ -37,6 +38,18 @@ pub struct Transaction { pub input: Bytes } +#[derive(Debug, Default, Serialize, Deserialize)] +pub struct TransactionRequest { + pub from: Address, + pub to: Option
, + #[serde(rename="gasPrice")] + pub gas_price: Option, + pub gas: Option, + pub value: Option, + pub data: Bytes, + pub nonce: Option, +} + impl From for Transaction { fn from(t: LocalizedTransaction) -> Transaction { Transaction { From 0109e5e9d4fa1110b59d907b32a158cd3b3d5762 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 13:03:34 +0100 Subject: [PATCH 008/222] Removing memory leak when transactions are dropped from set --- sync/src/transaction_queue.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 83665dfda..7f9f21638 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -113,22 +113,24 @@ impl TransactionSet { self.by_address.insert(sender, nonce, order); } - fn enforce_limit(&mut self, by_hash: &HashMap) { + fn enforce_limit(&mut self, by_hash: &mut HashMap) { let len = self.by_priority.len(); if len <= self.limit { return; } - let to_drop : Vec<&VerifiedTransaction> = { + let to_drop : Vec<(Address, U256)> = { self.by_priority .iter() .skip(self.limit) .map(|order| by_hash.get(&order.hash).expect("Inconsistency in queue detected.")) + .map(|tx| (tx.sender(), tx.nonce())) .collect() }; - for tx in to_drop { - self.drop(&tx.sender(), &tx.nonce()); + for (sender, nonce) in to_drop { + let order = self.drop(&sender, &nonce).expect("Droping transaction failed."); + by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -270,7 +272,7 @@ impl TransactionQueue { self.by_hash.remove(&order.hash); } } - self.future.enforce_limit(&self.by_hash); + self.future.enforce_limit(&mut self.by_hash); // And now lets check if there is some chain of transactions in future // that should be placed in current @@ -335,7 +337,7 @@ impl TransactionQueue { self.by_hash.insert(tx.hash(), tx); // We have a gap - put to future self.future.insert(address, nonce, order); - self.future.enforce_limit(&self.by_hash); + self.future.enforce_limit(&mut self.by_hash); return; } else if next_nonce > nonce { // Droping transaction @@ -354,7 +356,7 @@ impl TransactionQueue { let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); // Enforce limit - self.current.enforce_limit(&self.by_hash); + self.current.enforce_limit(&mut self.by_hash); } } @@ -413,7 +415,7 @@ mod test { let (tx1, tx2) = new_txs(U256::from(1)); let tx1 = VerifiedTransaction::new(tx1); let tx2 = VerifiedTransaction::new(tx2); - let by_hash = { + let mut by_hash = { let mut x = HashMap::new(); let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); @@ -430,9 +432,10 @@ mod test { assert_eq!(set.by_address.len(), 2); // when - set.enforce_limit(&by_hash); + set.enforce_limit(&mut by_hash); // then + assert_eq!(by_hash.len(), 1); assert_eq!(set.by_priority.len(), 1); assert_eq!(set.by_address.len(), 1); assert_eq!(set.by_priority.iter().next().unwrap().clone(), order1); From 78a39d3ac9b360d59c7acb430182e3fe35c0e096 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 14:34:15 +0100 Subject: [PATCH 009/222] Avoid importing same transaction twice (especially with different nonce_height) --- sync/src/transaction_queue.rs | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/sync/src/transaction_queue.rs 
b/sync/src/transaction_queue.rs index 7f9f21638..51ff211f6 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -129,7 +129,7 @@ impl TransactionSet { }; for (sender, nonce) in to_drop { - let order = self.drop(&sender, &nonce).expect("Droping transaction failed."); + let order = self.drop(&sender, &nonce).expect("Dropping transaction failed."); by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -322,6 +322,12 @@ impl TransactionQueue { fn import_tx<T>(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) where T: Fn(&Address) -> U256 { + + if self.by_hash.get(&tx.hash()).is_some() { + // Transaction is already imported. + return; + } + let nonce = tx.nonce(); let address = tx.sender(); @@ -355,7 +361,6 @@ impl TransactionQueue { // But maybe there are some more items waiting in future? let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); - // Enforce limit self.current.enforce_limit(&mut self.by_hash); } } @@ -636,7 +641,26 @@ mod test { } #[test] - fn should_accept_same_transaction_twice() { + fn should_not_insert_same_transaction_twice() { + // given + let nonce = |a: &Address| default_nonce(a) + U256::one(); + let mut txq = TransactionQueue::new(); + let (_tx1, tx2) = new_txs(U256::from(1)); + txq.add(tx2.clone(), &default_nonce); + assert_eq!(txq.status().future, 1); + assert_eq!(txq.status().pending, 0); + + // when + txq.add(tx2.clone(), &nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 1); + assert_eq!(stats.pending, 0); + } + + #[test] + fn should_accept_same_transaction_twice_if_removed() { // given let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::from(1)); From 1aaae7b55333625c572fc77cc722ea98c2517825 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 16:42:02 +0300 Subject: [PATCH 010/222] [ci skip] codegen bug --- rpc/src/v1/types/transaction.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 7d40d8a49..c24bcd08f 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -18,6 +18,8 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; use serde::{Deserializer, Error}; +use ethcore; +use util; #[derive(Debug, Default, Serialize)] pub struct Transaction { @@ -50,6 +52,22 @@ pub struct TransactionRequest { pub nonce: Option<U256>, } +impl TransactionRequest { + fn to_eth(self) -> (ethcore::transaction::Transaction, Address) { + (ethcore::transaction::Transaction { + nonce: self.nonce.unwrap_or(U256::zero()), + action: match self.to { + None => ethcore::transaction::Action::Create, + Some(addr) => ethcore::transaction::Action::Call(addr) + }, + gas: self.gas.unwrap_or(U256::zero()), + gas_price: self.gas_price.unwrap_or(U256::zero()), + value: self.value.unwrap_or(U256::zero()), + data: { let (ref x) = self.data; x } + }, self.from) + } +} + impl From<LocalizedTransaction> for Transaction { fn from(t: LocalizedTransaction) -> Transaction { Transaction {
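The to_eth conversion above fills every omitted numeric field with U256::zero() and maps a missing to-address onto contract creation. A condensed sketch of just that defaulting rule, with plain u64 and String standing in for the ethcore types (these stand-ins are illustrative, not the real signatures):

    #[derive(Debug, PartialEq)]
    enum Action { Create, Call(String) }

    // Simplified stand-ins for the RPC request and the ethcore transaction.
    struct Request {
        to: Option<String>,
        gas: Option<u64>,
        gas_price: Option<u64>,
        value: Option<u64>,
        nonce: Option<u64>,
    }

    struct Tx { action: Action, gas: u64, gas_price: u64, value: u64, nonce: u64 }

    fn to_eth(r: Request) -> Tx {
        Tx {
            // None for `to` means contract creation, as in the patch.
            action: match r.to { None => Action::Create, Some(addr) => Action::Call(addr) },
            gas: r.gas.unwrap_or(0),
            gas_price: r.gas_price.unwrap_or(0),
            value: r.value.unwrap_or(0),
            nonce: r.nonce.unwrap_or(0),
        }
    }

    fn main() {
        let tx = to_eth(Request { to: None, gas: None, gas_price: None, value: None, nonce: None });
        assert_eq!(tx.action, Action::Create);
        assert_eq!(tx.gas, 0); // U256::zero() in the real code
    }

With every field omitted, the result is a zero-gas, zero-value Create carrying nonce zero; nothing in this mapping chooses a real nonce for the sender.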
From 765d7179f583245a432ec1f6e7c684836c60edd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 15:43:04 +0100 Subject: [PATCH 011/222] Failing tests for transaction queue --- sync/src/transaction_queue.rs | 77 ++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 51ff211f6..503af7b16 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -129,7 +129,7 @@ impl TransactionSet { }; for (sender, nonce) in to_drop { - let order = self.drop(&sender, &nonce).expect("Dropping transaction failed."); + let order = self.drop(&sender, &nonce).expect("Dropping transaction found in priority queue failed."); by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -325,6 +325,7 @@ impl TransactionQueue { if self.by_hash.get(&tx.hash()).is_some() { // Transaction is already imported. + trace!(target: "sync", "Dropping already imported transaction with hash: {:?}", tx.hash()); return; } @@ -370,6 +371,7 @@ mod test { extern crate rustc_serialize; use self::rustc_serialize::hex::FromHex; + use std::ops::Deref; use std::collections::{HashMap, BTreeSet}; use util::crypto::KeyPair; use util::numbers::{U256, Uint}; @@ -702,4 +704,77 @@ mod test { assert_eq!(stats.pending, 2); } + #[test] + fn should_replace_same_transaction_when_has_higher_fee() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx2 = { + let mut tx2 = tx.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx, &default_nonce); + txq.add(tx2, &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 1); + assert_eq!(stats.future, 0); + assert_eq!(txq.top_transactions(1)[0].gas_price, U256::from(200)); + } + + #[test] + fn should_replace_same_transaction_when_importing_to_futures() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx0 = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx1 = { + let mut tx1 = tx0.deref().clone(); + tx1.nonce = U256::from(124); + tx1.sign(&keypair.secret()) + }; + let tx2 = { + let mut tx2 = tx1.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx1, &default_nonce); + txq.add(tx2, &default_nonce); + assert_eq!(txq.status().future, 1); + txq.add(tx0, &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 2); + assert_eq!(txq.top_transactions(2)[1].gas_price, U256::from(200)); + } + + #[test] + fn should_recalculate_height_when_removing_from_future() { + // given + let previous_nonce = |a: &Address| default_nonce(a) - U256::one(); + let mut txq = TransactionQueue::new(); + let (tx1, tx2) = new_txs(U256::one()); + txq.add(tx1.clone(), &previous_nonce); + txq.add(tx2, &previous_nonce); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx1.hash(), &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 1); + } + + }
From 6afa1c85b7862e504ee254ffb123ef14e1607213 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:20:41 +0100 Subject: [PATCH 012/222] Replacing transactions instead of just inserting --- sync/src/transaction_queue.rs | 42 ++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 503af7b16..e05210af2 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -108,9 +108,9 @@ struct TransactionSet { } impl TransactionSet { - fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) { + fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option<TransactionOrder> { self.by_priority.insert(order.clone()); - self.by_address.insert(sender, nonce, order); + self.by_address.insert(sender, nonce, order) } fn enforce_limit(&mut self, by_hash: &mut HashMap<H256, VerifiedTransaction>) { @@ -332,38 +332,54 @@ impl TransactionQueue { let nonce = tx.nonce(); let address = tx.sender(); + let state_nonce = fetch_nonce(&address); let next_nonce = self.last_nonces .get(&address) .cloned() - .map_or_else(|| fetch_nonce(&address), |n| n + U256::one()); + .map_or(state_nonce, |n| n + U256::one()); // Check height if nonce > next_nonce { - let order = TransactionOrder::for_transaction(&tx, next_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); // We have a gap - put to future - self.future.insert(address, nonce, order); + Self::replace_transaction(tx, next_nonce, &mut self.future, &mut self.by_hash); self.future.enforce_limit(&mut self.by_hash); return; - } else if next_nonce > nonce { + } else if nonce < state_nonce { // Droping transaction trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce); return; } let base_nonce = fetch_nonce(&address); - let order = TransactionOrder::for_transaction(&tx, base_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); - // Insert to current - self.current.insert(address.clone(), nonce, order); + Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); // But maybe there are some more items waiting in future? let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); self.current.enforce_limit(&mut self.by_hash); } + + fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap<H256, VerifiedTransaction>) { + let order = TransactionOrder::for_transaction(&tx, base_nonce); + let hash = tx.hash(); + let address = tx.sender(); + let nonce = tx.nonce(); + + by_hash.insert(hash.clone(), tx); + if let Some(old) = set.insert(address, nonce, order.clone()) { + // There was already transaction in queue. Let's check which one should stay + if old.cmp(&order) == Ordering::Greater { + assert!(old.nonce_height == order.nonce_height, "Both transactions should have the same height."); + // Put back old transaction since it has greater priority (higher gas_price) + set.insert(address, nonce, old); + by_hash.remove(&hash); + } else { + // Make sure we remove old transaction entirely + set.by_priority.remove(&old); + by_hash.remove(&old.hash); + } + } + } }
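replace_transaction above resolves a nonce collision by comparing the old and the new TransactionOrder; the assert documents that two orders at the same (sender, nonce) always share a nonce_height, so the comparison effectively comes down to gas price. A reduced sketch of the keep-or-replace rule over a plain map, with u64 standing in for the address, nonce and U256 types (an illustration, not the queue's real types):

    use std::collections::HashMap;

    // Stand-in for TransactionOrder: ranked here purely by gas price.
    #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct Order { gas_price: u64 }

    // Insert `new` at (sender, nonce); keep whichever order ranks higher,
    // mirroring the "old.cmp(&order) == Ordering::Greater" branch above.
    fn replace(set: &mut HashMap<(u64, u64), Order>, sender: u64, nonce: u64, new: Order) {
        if let Some(old) = set.insert((sender, nonce), new.clone()) {
            if old > new {
                // The incumbent wins; put it back.
                set.insert((sender, nonce), old);
            }
            // Otherwise the new order stays and the old entry is already gone.
        }
    }

    fn main() {
        let mut set = HashMap::new();
        replace(&mut set, 1, 0, Order { gas_price: 100 });
        replace(&mut set, 1, 0, Order { gas_price: 200 });
        assert_eq!(set[&(1, 0)].gas_price, 200); // higher fee replaced the old entry
        replace(&mut set, 1, 0, Order { gas_price: 50 });
        assert_eq!(set[&(1, 0)].gas_price, 200); // lower fee was rejected
    }

Patch 018 below makes this explicit by comparing gas_price directly instead of relying on the full order comparison plus the assertion.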
From bb8a79f18c44575d59ce68d4a9b5c03009679585 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 18:29:01 +0300 Subject: [PATCH 013/222] finalizing --- rpc/src/v1/impls/eth.rs | 12 ++++++++---- rpc/src/v1/types/bytes.rs | 1 + rpc/src/v1/types/transaction.rs | 6 +++--- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 16b68f90f..91ccaa05a 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -260,11 +260,15 @@ impl Eth for EthClient { let client = take_weak!(self.client); let store = client.secret_store().read().unwrap(); match store.account_secret(&transaction_request.from) { - Ok(_) => { - // todo: actually sign and push to queue transaction here - Ok(Value::Bool(true)) + Ok(secret) => { + let sync = take_weak!(self.sync); + let (transaction, _) = transaction_request.to_eth(); + let signed_transaction = transaction.sign(&secret); + let hash = signed_transaction.hash(); + sync.insert_transaction(signed_transaction); + to_value(&hash) }, - Err(_) => { Ok(Value::Bool(false ))} + Err(_) => { to_value(&U256::zero()) } } }) } diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index 44809ac70..466fbebde 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -28,6 +28,7 @@ impl Bytes { pub fn new(bytes: Vec<u8>) -> Bytes { Bytes(bytes) } + pub fn to_vec(self) -> Vec<u8> { let Bytes(x) = self; x } } impl Default for Bytes { diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index c24bcd08f..17b42cfcf 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -19,7 +19,6 @@ use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; use serde::{Deserializer, Error}; use ethcore; -use util; #[derive(Debug, Default, Serialize)] pub struct Transaction { @@ -53,7 +52,8 @@ pub struct TransactionRequest { } impl TransactionRequest { - fn to_eth(self) -> (ethcore::transaction::Transaction, Address) { + /// maps transaction request to the transaction that can be signed and inserted + pub fn to_eth(self) -> (ethcore::transaction::Transaction, Address) { (ethcore::transaction::Transaction { nonce: self.nonce.unwrap_or(U256::zero()), action: match self.to { None => ethcore::transaction::Action::Create, Some(addr) => ethcore::transaction::Action::Call(addr) }, gas: self.gas.unwrap_or(U256::zero()), gas_price: self.gas_price.unwrap_or(U256::zero()), value: self.value.unwrap_or(U256::zero()), - data: { let (ref x) = self.data; x } + data: self.data.to_vec() }, self.from) } } From 0a7fc4af738ed597239036e5c39fca0473c61512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:42:34 +0100 Subject: [PATCH 014/222] Recalculating heights in future when removing transaction --- sync/src/transaction_queue.rs | 68 ++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 24 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index e05210af2..24bb772d7 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@
-238,26 +238,50 @@ impl TransactionQueue { // We don't know this transaction return; } + let transaction = transaction.unwrap(); let sender = transaction.sender(); let nonce = transaction.nonce(); + let current_nonce = fetch_nonce(&sender); // Remove from future - self.future.drop(&sender, &nonce); - - // Remove from current - let order = self.current.drop(&sender, &nonce); - if order.is_none() { + let order = self.future.drop(&sender, &nonce); + if order.is_some() { + self.recalculate_future_for_sender(&sender, current_nonce); + // And now lets check if there is some chain of transactions in future + // that should be placed in current + self.move_future_txs(sender.clone(), current_nonce, current_nonce); return; } - // Let's remove transactions where tx.nonce < current_nonce - // and if there are any future transactions matching current_nonce+1 - move to current - let current_nonce = fetch_nonce(&sender); - // We will either move transaction to future or remove it completely - // so there will be no transactions from this sender in current - self.last_nonces.remove(&sender); + // Remove from current + let order = self.current.drop(&sender, &nonce); + if order.is_some() { + // We will either move transaction to future or remove it completely + // so there will be no transactions from this sender in current + self.last_nonces.remove(&sender); + // This should move all current transactions to future and remove old transactions + self.move_all_to_future(&sender, current_nonce); + // And now lets check if there is some chain of transactions in future + // that should be placed in current. It should also update last_nonces. + self.move_future_txs(sender.clone(), current_nonce, current_nonce); + return; + } + } + fn recalculate_future_for_sender(&mut self, sender: &Address, current_nonce: U256) { + // We need to drain all transactions for current sender from future and reinsert them with updated height + let all_nonces_from_sender = match self.future.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in all_nonces_from_sender { + let order = self.future.drop(&sender, &k).unwrap(); + self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + } + } + + fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) { let all_nonces_from_sender = match self.current.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), None => vec![], @@ -273,14 +297,9 @@ impl TransactionQueue { } } self.future.enforce_limit(&mut self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } } + /// Returns top transactions from the queue pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority @@ -299,11 +318,11 @@ impl TransactionQueue { self.last_nonces.clear(); } - fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) -> Option { + fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); if let None = by_nonce { - return None; + return; } let mut by_nonce = by_nonce.unwrap(); while let Some(order) = by_nonce.remove(¤t_nonce) { @@ -316,8 +335,8 @@ impl TransactionQueue { } } self.future.by_address.clear_if_empty(&address); - 
// Returns last inserted nonce - Some(current_nonce - U256::one()) + // Update last inserted nonce + self.last_nonces.insert(address, current_nonce - U256::one()); } fn import_tx(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) @@ -353,9 +372,9 @@ impl TransactionQueue { let base_nonce = fetch_nonce(&address); Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); + self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? - let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); - self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); + self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } @@ -777,6 +796,7 @@ mod test { fn should_recalculate_height_when_removing_from_future() { // given let previous_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next_nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::one()); txq.add(tx1.clone(), &previous_nonce); @@ -784,7 +804,7 @@ mod test { assert_eq!(txq.status().future, 2); // when - txq.remove(&tx1.hash(), &default_nonce); + txq.remove(&tx1.hash(), &next_nonce); // then let stats = txq.status(); From cc3839ae5744ab887c701c9007eda6162cddff2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:46:04 +0100 Subject: [PATCH 015/222] Revert "Revert "Transaction Queue integration"" This reverts commit d330f0b7b7fa5db1b5891d7c1e4e61136603fed5. Conflicts: sync/src/transaction_queue.rs --- Cargo.lock | 19 ++++++ ethcore/src/client.rs | 21 +++++-- ethcore/src/service.rs | 2 +- sync/Cargo.toml | 1 + sync/src/chain.rs | 107 ++++++++++++++++++++++++++++------ sync/src/lib.rs | 14 +++-- sync/src/tests/chain.rs | 51 ++++++++-------- sync/src/tests/helpers.rs | 61 ++++++++++++++----- sync/src/transaction_queue.rs | 17 +++--- 9 files changed, 217 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..510e69b59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,6 +146,14 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "deque" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "docopt" version = "0.6.78" @@ -285,6 +293,7 @@ dependencies = [ "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -655,6 +664,16 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rayon" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "0.1.54" diff --git 
a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..852ba6a36 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -138,6 +138,9 @@ pub trait BlockChainClient : Sync + Send { /// Get block total difficulty. fn block_total_difficulty(&self, id: BlockId) -> Option; + /// Get address nonce. + fn nonce(&self, address: &Address) -> U256; + /// Get block hash. fn block_hash(&self, id: BlockId) -> Option; @@ -365,18 +368,14 @@ impl Client where V: Verifier { bad_blocks.insert(header.hash()); continue; } - let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { bad_blocks.insert(header.hash()); break; } - - // Insert block - let closed_block = closed_block.unwrap(); - self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone()); good_blocks.push(header.hash()); + // Are we committing an era? let ancient = if header.number() >= HISTORY { let n = header.number() - HISTORY; let chain = self.chain.read().unwrap(); @@ -386,10 +385,16 @@ impl Client where V: Verifier { }; // Commit results + let closed_block = closed_block.unwrap(); + let receipts = closed_block.block().receipts().clone(); closed_block.drain() .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); + // And update the chain + self.chain.write().unwrap() + .insert_block(&block.bytes, receipts); + self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } @@ -408,7 +413,7 @@ impl Client where V: Verifier { if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { good: good_blocks, - bad: bad_blocks, + retracted: bad_blocks, })).unwrap(); } } @@ -581,6 +586,10 @@ impl BlockChainClient for Client where V: Verifier { Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } + fn nonce(&self, address: &Address) -> U256 { + self.state().nonce(address) + } + fn block_hash(&self, id: BlockId) -> Option { let chain = self.chain.read().unwrap(); Self::block_hash(&chain, id) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 756d02407..a80adb0ba 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -30,7 +30,7 @@ pub enum SyncMessage { /// Hashes of blocks imported to blockchain good: Vec, /// Hashes of blocks not imported to blockchain - bad: Vec, + retracted: Vec, }, /// A block is ready BlockVerified, diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..0097cd47e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -17,6 +17,7 @@ time = "0.1.34" rand = "0.3.13" heapsize = "0.3" rustc-serialize = "0.3" +rayon = "0.3.1" [features] default = [] diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 530cfa424..ddf30854a 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -30,14 +30,17 @@ /// use util::*; +use rayon::prelude::*; use std::mem::{replace}; -use ethcore::views::{HeaderView}; +use ethcore::views::{HeaderView, BlockView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; use range_collection::{RangeCollection, ToUsize, FromUsize}; use ethcore::error::*; use ethcore::block::Block; +use ethcore::transaction::SignedTransaction; use io::SyncIo; +use transaction_queue::TransactionQueue; use time; use super::SyncConfig; @@ -209,6 +212,8 @@ pub struct ChainSync { max_download_ahead_blocks: usize, /// 
Network ID network_id: U256, + /// Transactions Queue + transaction_queue: Mutex, } type RlpResponseResult = Result, PacketDecodeError>; @@ -234,6 +239,7 @@ impl ChainSync { last_send_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, + transaction_queue: Mutex::new(TransactionQueue::new()), } } @@ -292,6 +298,7 @@ impl ChainSync { self.starting_block = 0; self.highest_block = None; self.have_common_block = false; + self.transaction_queue.lock().unwrap().clear(); self.starting_block = io.chain().chain_info().best_block_number; self.state = SyncState::NotSynced; } @@ -484,7 +491,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } @@ -921,8 +928,16 @@ impl ChainSync { } } /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - Ok(()) + fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let chain = io.chain(); + let item_count = r.item_count(); + trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); + let fetch_latest_nonce = |a : &Address| chain.nonce(a); + for i in 0..item_count { + let tx: SignedTransaction = try!(r.val_at(i)); + self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce); + } + Ok(()) } /// Send Status message @@ -1248,6 +1263,37 @@ impl ChainSync { } self.last_send_block_number = chain.best_block_number; } + + /// called when block is imported to chain, updates transactions queue + pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) { + fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { + let block = chain + .block(BlockId::Hash(hash.clone())) + // Client should send message after commit to db and inserting to chain. 
+ .expect("Expected in-chain blocks."); + let block = BlockView::new(&block); + block.transactions() + } + + + let chain = io.chain(); + let good = good.par_iter().map(|h| fetch_transactions(chain, h)); + let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h)); + + good.for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); + transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); + }); + retracted.for_each(|txs| { + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(txs, |a| chain.nonce(a)); + }); + } } #[cfg(test)] @@ -1388,7 +1434,7 @@ mod tests { #[test] fn finds_lagging_peers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10)); let chain_info = client.chain_info(); @@ -1402,7 +1448,7 @@ mod tests { #[test] fn calculates_tree_for_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(15, false); + client.add_blocks(15, EachBlockWith::Uncle); let start = client.block_hash_delta_minus(4); let end = client.block_hash_delta_minus(2); @@ -1419,7 +1465,7 @@ mod tests { #[test] fn sends_new_hashes_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1438,7 +1484,7 @@ mod tests { #[test] fn sends_latest_block_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1456,7 +1502,7 @@ mod tests { #[test] fn handles_peer_new_block_mallformed() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_block(11, client.chain_info().best_block_hash); @@ -1474,7 +1520,7 @@ mod tests { #[test] fn handles_peer_new_block() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); @@ -1492,7 +1538,7 @@ mod tests { #[test] fn handles_peer_new_block_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1508,7 +1554,7 @@ mod tests { #[test] fn handles_peer_new_hashes() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1524,7 +1570,7 @@ mod tests { #[test] fn handles_peer_new_hashes_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, 
EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1542,7 +1588,7 @@ mod tests { #[test] fn hashes_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1560,7 +1606,7 @@ mod tests { #[test] fn block_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1573,10 +1619,37 @@ mod tests { assert!(result.is_ok()); } + #[test] + fn should_add_transactions_to_queue() { + // given + let mut client = TestBlockChainClient::new(); + client.add_blocks(98, EachBlockWith::Uncle); + client.add_blocks(1, EachBlockWith::UncleAndTransaction); + client.add_blocks(1, EachBlockWith::Transaction); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); + + let good_blocks = vec![client.block_hash_delta_minus(2)]; + let retracted_blocks = vec![client.block_hash_delta_minus(1)]; + + let mut queue = VecDeque::new(); + let io = TestIo::new(&mut client, &mut queue, None); + + // when + sync.chain_new_blocks(&io, &[], &good_blocks); + assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); + assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); + sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks); + + // then + let status = sync.transaction_queue.lock().unwrap().status(); + assert_eq!(status.pending, 1); + assert_eq!(status.future, 0); + } + #[test] fn returns_requested_block_headers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); @@ -1600,7 +1673,7 @@ mod tests { #[test] fn returns_requested_block_headers_reverse() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..d67a09f3b 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -54,6 +54,7 @@ extern crate ethcore; extern crate env_logger; extern crate time; extern crate rand; +extern crate rayon; #[macro_use] extern crate heapsize; @@ -70,8 +71,7 @@ use io::NetSyncIo; mod chain; mod io; mod range_collection; -// TODO [todr] Made public to suppress dead code warnings -pub mod transaction_queue; +mod transaction_queue; #[cfg(test)] mod tests; @@ -153,8 +153,14 @@ impl NetworkProtocolHandler for EthSync { } fn message(&self, io: &NetworkContext, message: &SyncMessage) { - if let SyncMessage::BlockVerified = *message { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); + match *message { + SyncMessage::BlockVerified => { + self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); + }, + SyncMessage::NewChainBlocks { ref good, ref retracted } => { + let sync_io = NetSyncIo::new(io, self.chain.deref()); + 
self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted); + } } } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index b01c894a0..58f50916e 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -24,8 +24,8 @@ use super::helpers::*; fn two_peers() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); @@ -35,8 +35,8 @@ fn two_peers() { fn status_after_sync() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); let status = net.peer(0).sync.status(); assert_eq!(status.state, SyncState::Idle); @@ -45,8 +45,8 @@ fn status_after_sync() { #[test] fn takes_few_steps() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(100, false); - net.peer_mut(2).chain.add_blocks(100, false); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Uncle); let total_steps = net.sync(); assert!(total_steps < 7); } @@ -56,8 +56,9 @@ fn empty_blocks() { ::env_logger::init().ok(); let mut net = TestNet::new(3); for n in 0..200 { - net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); - net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); + let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; + net.peer_mut(1).chain.add_blocks(5, with.clone()); + net.peer_mut(2).chain.add_blocks(5, with); } net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); @@ -68,14 +69,14 @@ fn empty_blocks() { fn forked() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(0).chain.add_blocks(300, false); - net.peer_mut(1).chain.add_blocks(300, false); - net.peer_mut(2).chain.add_blocks(300, false); - net.peer_mut(0).chain.add_blocks(100, true); //fork - net.peer_mut(1).chain.add_blocks(200, false); - net.peer_mut(2).chain.add_blocks(200, false); - net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 - net.peer_mut(2).chain.add_blocks(10, true); + net.peer_mut(0).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); //fork + net.peer_mut(1).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); //fork between 1 and 2 + net.peer_mut(2).chain.add_blocks(10, EachBlockWith::Nothing); // peer 1 has the best chain of 601 blocks let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); @@ -87,8 +88,8 @@ fn forked() { #[test] fn restart() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, 
EachBlockWith::Uncle); net.sync_steps(8); @@ -109,8 +110,8 @@ fn status_empty() { #[test] fn status_packet() { let mut net = TestNet::new(2); - net.peer_mut(0).chain.add_blocks(100, false); - net.peer_mut(1).chain.add_blocks(1, false); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1, EachBlockWith::Uncle); net.start(); @@ -123,10 +124,10 @@ fn status_packet() { #[test] fn propagate_hashes() { let mut net = TestNet::new(6); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -149,10 +150,10 @@ fn propagate_hashes() { #[test] fn propagate_blocks() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -164,7 +165,7 @@ fn propagate_blocks() { #[test] fn restart_on_malformed_block() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.peer_mut(1).chain.corrupt_block(6); net.sync_steps(10); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index e170a4a85..5b53ad90b 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -22,7 +22,7 @@ use io::SyncIo; use chain::ChainSync; use ::SyncConfig; use ethcore::receipt::Receipt; -use ethcore::transaction::LocalizedTransaction; +use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; @@ -34,6 +34,14 @@ pub struct TestBlockChainClient { pub difficulty: RwLock, } +#[derive(Clone)] +pub enum EachBlockWith { + Nothing, + Uncle, + Transaction, + UncleAndTransaction +} + impl TestBlockChainClient { pub fn new() -> TestBlockChainClient { @@ -44,30 +52,53 @@ impl TestBlockChainClient { last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), }; - client.add_blocks(1, true); // add genesis block + client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } - pub fn add_blocks(&mut self, count: usize, empty: bool) { + pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); for n in len..(len + count) { let mut header = BlockHeader::new(); header.difficulty = From::from(n); header.parent_hash = self.last_hash.read().unwrap().clone(); header.number = n as BlockNumber; - let mut uncles = RlpStream::new_list(if empty {0} else {1}); - if !empty { - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - } + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + 
uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + uncles + }, + _ => RlpStream::new_list(0) + }; + let txs = match with { + EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { + let mut txs = RlpStream::new_list(1); + let keypair = KeyPair::create().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero() + }; + let signed_tx = tx.sign(&keypair.secret()); + txs.append(&signed_tx); + txs.out() + }, + _ => rlp::NULL_RLP.to_vec() + }; + let mut rlp = RlpStream::new_list(3); rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); + rlp.append_raw(&txs, 1); rlp.append_raw(uncles.as_raw(), 1); self.import_block(rlp.as_raw().to_vec()).unwrap(); } @@ -109,6 +140,10 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn nonce(&self, _address: &Address) -> U256 { + U256::zero() + } + fn code(&self, _address: &Address) -> Option { unimplemented!(); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 100435530..8b38c64ad 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -221,19 +221,19 @@ impl TransactionQueue { /// Removes all transactions identified by hashes given in slice /// /// If gap is introduced marks subsequent transactions as future - pub fn remove_all(&mut self, txs: &[H256], fetch_nonce: T) + pub fn remove_all(&mut self, transaction_hashes: &[H256], fetch_nonce: T) where T: Fn(&Address) -> U256 { - for tx in txs { - self.remove(&tx, &fetch_nonce); + for hash in transaction_hashes { + self.remove(&hash, &fetch_nonce); } } /// Removes transaction identified by hashes from queue. 
/// /// If gap is introduced marks subsequent transactions as future - pub fn remove(&mut self, hash: &H256, fetch_nonce: &T) + pub fn remove(&mut self, transaction_hash: &H256, fetch_nonce: &T) where T: Fn(&Address) -> U256 { - let transaction = self.by_hash.remove(hash); + let transaction = self.by_hash.remove(transaction_hash); if transaction.is_none() { // We don't know this transaction return; @@ -244,7 +244,6 @@ impl TransactionQueue { let nonce = transaction.nonce(); let current_nonce = fetch_nonce(&sender); - println!("Removing tx: {:?}", transaction.transaction); // Remove from future let order = self.future.drop(&sender, &nonce); if order.is_some() { @@ -292,7 +291,6 @@ impl TransactionQueue { // Goes to future or is removed let order = self.current.drop(&sender, &k).unwrap(); if k >= current_nonce { - println!("Moving to future: {:?}", order); self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); } else { self.by_hash.remove(&order.hash); @@ -302,7 +300,7 @@ impl TransactionQueue { // And now lets check if there is some chain of transactions in future // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce - U256::one(), current_nonce) { + if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { self.last_nonces.insert(sender, new_current_top); } } @@ -337,7 +335,6 @@ impl TransactionQueue { // remove also from priority and hash self.future.by_priority.remove(&order); // Put to current - println!("Moved: {:?}", order); let order = order.update_height(current_nonce.clone(), first_nonce); self.current.insert(address.clone(), current_nonce, order); current_nonce = current_nonce + U256::one(); @@ -366,7 +363,6 @@ impl TransactionQueue { .cloned() .map_or(state_nonce, |n| n + U256::one()); - println!("Expected next: {:?}, got: {:?}", next_nonce, nonce); // Check height if nonce > next_nonce { // We have a gap - put to future @@ -375,6 +371,7 @@ impl TransactionQueue { return; } else if nonce < state_nonce { // Droping transaction + trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce); return; } From 8915974cf0aba8e26f27294cb3510edd600252fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:48:03 +0100 Subject: [PATCH 016/222] Fixing compilation --- sync/src/transaction_queue.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 8b38c64ad..24bb772d7 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -297,12 +297,6 @@ impl TransactionQueue { } } self.future.enforce_limit(&mut self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } } From ae1c1b918faea16a81d2fe582446a148fdea3c84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:51:01 +0100 Subject: [PATCH 017/222] Fixing compilation --- sync/src/transaction_queue.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 8b38c64ad..24bb772d7 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -297,12 +297,6 @@ impl TransactionQueue { } } 
self.future.enforce_limit(&mut self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } } From c13afcf40485411788b6817cb324a23f9de32d59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:06:04 +0100 Subject: [PATCH 018/222] Removing assertion and just comparing fees --- sync/src/transaction_queue.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 24bb772d7..f14d94c8c 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -387,8 +387,9 @@ impl TransactionQueue { by_hash.insert(hash.clone(), tx); if let Some(old) = set.insert(address, nonce, order.clone()) { // There was already transaction in queue. Let's check which one should stay - if old.cmp(&order) == Ordering::Greater { - assert!(old.nonce_height == order.nonce_height, "Both transactions should have the same height."); + let old_fee = old.gas_price; + let new_fee = order.gas_price; + if old_fee.cmp(&new_fee) == Ordering::Greater { // Put back old transaction since it has greater priority (higher gas_price) set.insert(address, nonce, old); by_hash.remove(&hash); From 18cbea394d53abde1780efbb1cc3ea0c8e678ce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:14:48 +0100 Subject: [PATCH 019/222] Small renaming --- sync/src/transaction_queue.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index f14d94c8c..463607cae 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -247,10 +247,10 @@ impl TransactionQueue { // Remove from future let order = self.future.drop(&sender, &nonce); if order.is_some() { - self.recalculate_future_for_sender(&sender, current_nonce); + self.update_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current - self.move_future_txs(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); return; } @@ -264,12 +264,12 @@ impl TransactionQueue { self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current. It should also update last_nonces. 
- self.move_future_txs(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); return; } } - fn recalculate_future_for_sender(&mut self, sender: &Address, current_nonce: U256) { + fn update_future(&mut self, sender: &Address, current_nonce: U256) { // We need to drain all transactions for current sender from future and reinsert them with updated height let all_nonces_from_sender = match self.future.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), @@ -318,7 +318,7 @@ impl TransactionQueue { self.last_nonces.clear(); } - fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { + fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); if let None = by_nonce { @@ -374,7 +374,7 @@ impl TransactionQueue { Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? - self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); + self.move_matching_future_to_current(address.clone(), nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } From 4a53d62be436513d81209db0d4a88ccc0f3aef06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:41:35 +0100 Subject: [PATCH 020/222] Fixing inconsistency when replacing transactions in queue --- sync/src/transaction_queue.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 463607cae..b98772199 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -348,8 +348,8 @@ impl TransactionQueue { return; } - let nonce = tx.nonce(); let address = tx.sender(); + let nonce = tx.nonce(); let state_nonce = fetch_nonce(&address); let next_nonce = self.last_nonces @@ -370,7 +370,6 @@ impl TransactionQueue { } let base_nonce = fetch_nonce(&address); - Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? 
@@ -391,7 +390,9 @@ impl TransactionQueue { let new_fee = order.gas_price; if old_fee.cmp(&new_fee) == Ordering::Greater { // Put back old transaction since it has greater priority (higher gas_price) - set.insert(address, nonce, old); + set.by_address.insert(address, nonce, old); + // and remove new one + set.by_priority.remove(&order); by_hash.remove(&hash); } else { // Make sure we remove old transaction entirely From 57e6e1e1b59188cdf8d378b81c33842d5c5feaf7 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 20:15:19 +0300 Subject: [PATCH 021/222] [ci ship] redundant lines --- sync/src/transaction_queue.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index b98772199..3e0d931b5 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -813,6 +813,4 @@ mod test { assert_eq!(stats.future, 0); assert_eq!(stats.pending, 1); } - - } From e100ecbeacf6bac923f4b8e416621f98668b4830 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 23:47:28 +0300 Subject: [PATCH 022/222] exposing in lib --- sync/src/chain.rs | 4 ++++ sync/src/lib.rs | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ddf30854a..fd1771045 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1294,6 +1294,10 @@ impl ChainSync { transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } + + pub fn transaction_queue(&self) -> &Mutex { + return &self.transaction_queue; + } } #[cfg(test)] diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67a09f3b..a6480b0ad 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -128,6 +128,16 @@ impl EthSync { pub fn restart(&mut self, io: &mut NetworkContext) { self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref())); } + + /// Insert transaction in transaction queue + pub fn insert_transaction(&self, transaction: SignedTransaction) { + use util::numbers::*; + + let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); + let sync = self.sync.write().unwrap(); + let mut queue = sync.transaction_queue().lock().unwrap(); + queue.add(transaction, &nonce_fn); + } } impl NetworkProtocolHandler for EthSync { From ad8135668392aa733dea014231a3a56469dae5fc Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 6 Mar 2016 00:48:00 +0300 Subject: [PATCH 023/222] fix namespace --- sync/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index a6480b0ad..e352144bd 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -130,7 +130,7 @@ impl EthSync { } /// Insert transaction in transaction queue - pub fn insert_transaction(&self, transaction: SignedTransaction) { + pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) { use util::numbers::*; let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); From aaf2e0c3fbdc0cd3e125988e96987486d73bf395 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 6 Mar 2016 11:04:13 +0100 Subject: [PATCH 024/222] Locking outside of loop --- sync/src/chain.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ddf30854a..a8bcb653f 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -933,9 +933,11 @@ impl ChainSync { let item_count = r.item_count(); trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); let fetch_latest_nonce = |a : &Address| 
chain.nonce(a); + + let mut transaction_queue = self.transaction_queue.lock().unwrap(); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce); + transaction_queue.add(tx, &fetch_latest_nonce); } Ok(()) } From e91de785281d59b591079c16c798d7252991b593 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 6 Mar 2016 11:11:59 +0100 Subject: [PATCH 025/222] Renaming back bad as retracted --- ethcore/src/client.rs | 4 +++- ethcore/src/service.rs | 2 ++ sync/src/chain.rs | 10 +++++----- sync/src/lib.rs | 4 ++-- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 852ba6a36..123847a7f 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -413,7 +413,9 @@ impl Client where V: Verifier { if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { good: good_blocks, - retracted: bad_blocks, + bad: bad_blocks, + // TODO [todr] were to take those from? + retracted: vec![], })).unwrap(); } } diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index a80adb0ba..443d09e3b 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -30,6 +30,8 @@ pub enum SyncMessage { /// Hashes of blocks imported to blockchain good: Vec, /// Hashes of blocks not imported to blockchain + bad: Vec, + /// Hashes of blocks that were removed from canonical chain retracted: Vec, }, /// A block is ready diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a8bcb653f..fcc9f49c8 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1267,7 +1267,7 @@ impl ChainSync { } /// called when block is imported to chain, updates transactions queue - pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) { + pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(hash.clone())) @@ -1280,14 +1280,14 @@ impl ChainSync { let chain = io.chain(); let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h)); + let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); good.for_each(|txs| { let mut transaction_queue = self.transaction_queue.lock().unwrap(); let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); }); - retracted.for_each(|txs| { + bad.for_each(|txs| { // populate sender for tx in &txs { let _sender = tx.sender(); @@ -1637,10 +1637,10 @@ mod tests { let io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&io, &[], &good_blocks); + sync.chain_new_blocks(&io, &[], &good_blocks, &[]); assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks); + sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]); // then let status = sync.transaction_queue.lock().unwrap().status(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67a09f3b..8a30385a2 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -157,9 +157,9 @@ impl NetworkProtocolHandler for EthSync { SyncMessage::BlockVerified => { self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, 
self.chain.deref())); }, - SyncMessage::NewChainBlocks { ref good, ref retracted } => { + SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { let sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted); + self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted); } } } From d77d9ad9d84105f2c0b4cd53b90f835e3087f669 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 6 Mar 2016 17:28:50 +0100 Subject: [PATCH 026/222] JournalDB with history overlay --- ethcore/src/client.rs | 8 +- parity/main.rs | 3 +- util/src/journaldb.rs | 326 ++++++++++++++++++++++++++---------------- util/src/memorydb.rs | 6 + 4 files changed, 216 insertions(+), 127 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..2b5ec5ccb 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -190,6 +190,8 @@ pub struct ClientReport { pub transactions_applied: usize, /// How much gas has been processed so far. pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, } impl ClientReport { @@ -222,7 +224,7 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1000; -const CLIENT_DB_VER_STR: &'static str = "4.0"; +const CLIENT_DB_VER_STR: &'static str = "5.0"; impl Client { /// Create a new client with given spec and DB path. @@ -432,7 +434,9 @@ impl Client where V: Verifier { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.read().unwrap().clone() + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report } /// Tick the client. diff --git a/parity/main.rs b/parity/main.rs index 3f4243a0a..605fb315d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -395,7 +395,7 @@ impl Informant { let sync_info = sync.status(); if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]", + println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, @@ -408,6 +408,7 @@ impl Informant { queue_info.unverified_queue_size, queue_info.verified_queue_size, + Informant::format_bytes(report.state_db_mem), Informant::format_bytes(cache_info.total()), Informant::format_bytes(queue_info.mem_used), Informant::format_bytes(sync_info.mem_used), diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 01e53f819..48bd94d64 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -35,17 +35,36 @@ use std::env; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. 
pub struct JournalDB { - overlay: MemoryDB, + transaction_overlay: MemoryDB, backing: Arc, - counters: Option>>>, + journal_overlay: Option>>, +} + +struct JournalOverlay { + backing_overlay: MemoryDB, + journal: VecDeque +} + +struct JournalEntry { + id: H256, + index: usize, + era: u64, + insertions: Vec, + deletions: Vec, +} + +impl HeapSizeOf for JournalEntry { + fn heap_size_of_children(&self) -> usize { + self.insertions.heap_size_of_children() + self.deletions.heap_size_of_children() + } } impl Clone for JournalDB { fn clone(&self) -> JournalDB { JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), - counters: self.counters.clone(), + journal_overlay: self.journal_overlay.clone(), } } } @@ -60,7 +79,6 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; const PADDING : [u8; 10] = [ 0u8; 10 ]; impl JournalDB { - /// Create a new instance from file pub fn new(path: &str) -> JournalDB { Self::from_prefs(path, true) @@ -86,15 +104,16 @@ impl JournalDB { with_journal = prefer_journal; } - let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) + + let journal_overlay = if with_journal { + Some(Arc::new(RwLock::new(JournalDB::read_overlay(&backing)))) } else { None }; JournalDB { - overlay: MemoryDB::new(), + transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), - counters: counters, + journal_overlay: journal_overlay, } } @@ -113,71 +132,48 @@ impl JournalDB { /// Commit all recent insert operations. pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) + let have_journal_overlay = self.journal_overlay.is_some(); + if have_journal_overlay { + self.commit_with_overlay(now, id, end) } else { - self.commit_without_counters() + self.commit_without_overlay() } } /// Drain the overlay and place it into a batch for the DB. fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; + let mut insertions = 0usize; + let mut deletions = 0usize; for i in overlay.drain().into_iter() { let (key, (value, rc)) = i; if rc > 0 { assert!(rc == 1); batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - inserts += 1; + insertions += 1; } if rc < 0 { assert!(rc == -1); - deletes += 1; + deletions += 1; } } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes + trace!("commit: Inserted {}, Deleted {} nodes", insertions, deletions); + insertions + deletions } - /// Just commit the overlay into the backing DB. - fn commit_without_counters(&mut self) -> Result { + /// Just commit the transaction overlay into the backing DB. + fn commit_without_overlay(&mut self) -> Result { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); + let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); Ok(ret as u32) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: - // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] - // [era, n] => [ ... ] - - // TODO: store reclaim_period. 
- - // when we make a new commit, we journal the inserts and removes. - // for each end_era that we journaled that we are no passing by, - // we remove all of its removes assuming it is canonical and all - // of its inserts otherwise. - // - // We also keep reference counters for each key inserted in the journal to handle - // the following cases where key K must not be deleted from the DB when processing removals : - // Given H is the journal size in eras, 0 <= C <= H. - // Key K is removed in era A(N) and re-inserted in canonical era B(N + C). - // Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C). - // Key K is added in non-canonical era A'(N) canonical B(N + C). - // - // The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions - // is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased - // and the key is safe to delete. - + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); let batch = DBTransaction::new(); { let mut index = 0usize; @@ -204,90 +200,83 @@ impl JournalDB { } let mut r = RlpStream::new_list(3); - let inserts: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect(); + let mut tx = self.transaction_overlay.drain(); + let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); + let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); // Increase counter for each inserted key no matter if the block is canonical or not. 
- for i in &inserts {
- *counters.entry(i.clone()).or_insert(0) += 1;
- }
- let removes: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect();
+ let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None });
 r.append(id);
- r.append(&inserts);
- r.append(&removes);
+ r.begin_list(inserted_keys.len());
+ for (k, v) in insertions {
+ r.begin_list(2);
+ r.append(&k);
+ r.append(&v);
+ journal_overlay.backing_overlay.emplace(k, v);
+ }
+ r.append(&removed_keys);
 try!(batch.put(&last, r.as_raw()));
 try!(batch.put(&LATEST_ERA_KEY, &encode(&now)));
+ journal_overlay.journal.push_back(JournalEntry { id: id.clone(), index: index, era: now, insertions: inserted_keys, deletions: removed_keys });
 }

 // apply old commits' details
+
 if let Some((end_era, canon_id)) = end {
- let mut index = 0usize;
- let mut last;
- let mut to_remove: Vec = Vec::new();
- let mut canon_inserts: Vec = Vec::new();
- while let Some(rlp_data) = try!(self.backing.get({
+ let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new();
+ let mut canon_deletions: Vec = Vec::new();
+ let mut overlay_deletions: Vec = Vec::new();
+ while journal_overlay.journal.front().map_or(false, |e| e.era <= end_era) {
+ let mut journal = journal_overlay.journal.pop_front().unwrap();
+ // delete the record from the db
 let mut r = RlpStream::new_list(3);
- r.append(&end_era);
- r.append(&index);
+ r.append(&journal.era);
+ r.append(&journal.index);
 r.append(&&PADDING[..]);
- last = r.drain();
- &last
- })) {
- let rlp = Rlp::new(&rlp_data);
- let mut inserts: Vec = rlp.val_at(1);
- JournalDB::decrease_counters(&inserts, &mut counters);
- // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical
- if canon_id == rlp.val_at(0) {
- let mut canon_deletes: Vec = rlp.val_at(2);
- trace!("Purging nodes deleted from canon: {:?}", canon_deletes);
- to_remove.append(&mut canon_deletes);
- canon_inserts = inserts;
+ try!(batch.delete(&r.drain()));
+ trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): +{} -{} entries", end_era, journal.index, journal.id, canon_id, journal.insertions.len(), journal.deletions.len());
+ {
+ if canon_id == journal.id {
+ for h in &journal.insertions {
+ match journal_overlay.backing_overlay.raw(&h) {
+ Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), // TODO: optimize this to avoid data copy
+ _ => ()
+ }
+ }
+ canon_deletions = journal.deletions;
+ }
+ overlay_deletions.append(&mut journal.insertions);
 }
- else {
- trace!("Purging nodes inserted in non-canon: {:?}", inserts);
- to_remove.append(&mut inserts);
+ if canon_id == journal.id {
 }
- trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::(0), canon_id, to_remove.len());
- try!(batch.delete(&last));
- index += 1;
 }
-
- let canon_inserts = canon_inserts.drain(..).collect::>();
- // Purge removed keys if they are not referenced and not re-inserted in the canon commit
- let mut deletes = 0;
- trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::>());
- for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) {
- try!(batch.delete(&h));
- deletes += 1;
+ // apply canon inserts first
+ for (k, v) in canon_insertions {
+ try!(batch.put(&k, &v));
 }
- trace!("Total nodes purged: {}", deletes);
+ // clean the overlay
+ for k in overlay_deletions {
journal_overlay.backing_overlay.kill(&k); + } + // apply removes + for k in canon_deletions { + if !journal_overlay.backing_overlay.exists(&k) { + try!(batch.delete(&k)); + } + } + journal_overlay.backing_overlay.purge(); } - - // Commit overlay insertions - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) - } - - - // Decrease counters for given keys. Deletes obsolete counters - fn decrease_counters(keys: &[H256], counters: &mut HashMap) { - for i in keys.iter() { - let delete_counter = { - let cnt = counters.get_mut(i).expect("Missing key counter"); - *cnt -= 1; - *cnt == 0 - }; - if delete_counter { - counters.remove(i); - } - } + Ok(0 as u32) } fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut res = HashMap::new(); + fn read_overlay(db: &Database) -> JournalOverlay { + let mut journal = VecDeque::new(); + let mut overlay = MemoryDB::new(); + let mut count = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); loop { @@ -300,10 +289,24 @@ impl JournalDB { &r.drain() }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); - let to_add: Vec = rlp.val_at(1); - for h in to_add { - *res.entry(h).or_insert(0) += 1; + let id: H256 = rlp.val_at(0); + let insertions = rlp.at(1); + let deletions: Vec = rlp.val_at(2); + let mut inserted_keys = Vec::new(); + for r in insertions.iter() { + let k: H256 = r.val_at(0); + let v: Bytes = r.val_at(1); + overlay.emplace(k.clone(), v); + inserted_keys.push(k); + count += 1; } + journal.push_front(JournalEntry { + id: id, + index: index, + era: era, + insertions: inserted_keys, + deletions: deletions, + }); index += 1; }; if index == 0 || era == 0 { @@ -312,8 +315,19 @@ impl JournalDB { era -= 1; } } - trace!("Recovered {} counters", res.len()); - res + trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); + JournalOverlay { backing_overlay: overlay, journal: journal } + } + + /// Returns heap memory size used + pub fn mem_used(&self) -> usize { + let mut mem = self.transaction_overlay.mem_used(); + if let Some(ref overlay) = self.journal_overlay.as_ref() { + let overlay = overlay.read().unwrap(); + mem += overlay.backing_overlay.mem_used(); + mem += overlay.journal.heap_size_of_children(); + } + mem } } @@ -325,7 +339,7 @@ impl HashDB for JournalDB { ret.insert(h, 1); } - for (key, refs) in self.overlay.keys().into_iter() { + for (key, refs) in self.transaction_overlay.keys().into_iter() { let refs = *ret.get(&key).unwrap_or(&0) + refs; ret.insert(key, refs); } @@ -333,15 +347,23 @@ impl HashDB for JournalDB { } fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); + let k = self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None + let v = self.journal_overlay.as_ref().map_or(None, |ref j| j.read().unwrap().backing_overlay.lookup(key).map(|v| v.to_vec())); + match v { + Some(x) => { + Some(&self.transaction_overlay.denote(key, x).0) + } + _ => { + if let Some(x) = self.payload(key) { + Some(&self.transaction_overlay.denote(key, x).0) + } + else { + None + } + } } } } @@ -352,13 +374,13 @@ impl HashDB for JournalDB { } fn insert(&mut self, value: &[u8]) -> H256 
{ - self.overlay.insert(value) + self.transaction_overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.transaction_overlay.emplace(key, value); } fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + self.transaction_overlay.kill(key); } } @@ -492,11 +514,13 @@ mod tests { fn reopen() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); + let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); foo }; @@ -510,8 +534,62 @@ mod tests { { let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); } } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 680a6e1d0..9cd018935 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -21,6 +21,7 @@ use bytes::*; use rlp::*; use sha3::*; use hashdb::*; +use heapsize::*; use std::mem; use std::collections::HashMap; @@ -143,6 +144,11 @@ impl MemoryDB { } self.raw(key).unwrap() } + + /// Returns the size of allocated heap memory + pub fn mem_used(&self) -> usize { + self.data.heap_size_of_children() + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; From 744c4c7d8bba32c5c5e61eb8c9b32bd52498fd30 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 7 Mar 2016 07:06:55 +0100 Subject: [PATCH 027/222] JournalDB documentation --- util/src/journaldb.rs | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 48bd94d64..1c62a9960 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,13 +27,37 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and, possibly, latent-removal semantics. /// -/// If `counters` is `None`, then it behaves exactly like OverlayDB. 
If not it behaves
+/// If `journal_overlay` is `None`, then it behaves exactly like OverlayDB. If not it behaves
 /// differently:
 ///
 /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to
 /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect
 /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before
 /// the removals actually take effect.
+///
+/// There are two memory overlays:
+/// - The transaction overlay contains current transaction data. It is merged with the history
+/// overlay on each `commit()`.
+/// - The history overlay contains all data inserted during the history period. When a node
+/// in the overlay becomes ancient it is written to disk on `commit()`.
+///
+/// There is also a journal, maintained both in memory and on disk, which lists the insertions
+/// and removals for each commit during the history period. This is used to track
+/// data nodes that go out of history scope and must be written to disk.
+///
+/// Commit workflow:
+/// Create a new journal record from the transaction overlay.
+/// Insert each node from the transaction overlay into the history overlay, increasing the
+/// reference count if it is already there. Note that the reference counting is managed by `MemoryDB`.
+/// Clear the transaction overlay.
+/// For a canonical journal record that becomes ancient, insert its insertions into the disk DB.
+/// For each journal record that goes out of the history scope (becomes ancient), remove its
+/// insertions from the history overlay, decreasing the reference counter and removing the entry
+/// if it reaches zero.
+/// For a canonical journal record that becomes ancient, delete its removals from the disk only if
+/// the removed key is not present in the history overlay.
+/// Delete the ancient record from memory and disk.
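To make the workflow above concrete, here is a minimal sketch of the same bookkeeping with simplified stand-in types (plain integers and HashMaps instead of the actual H256/MemoryDB machinery; the RLP journal encoding and disk format are omitted):

    use std::collections::{HashMap, VecDeque};

    type Key = u64;
    type Value = Vec<u8>;

    #[derive(Default)]
    struct Overlay {
        data: HashMap<Key, (Value, i32)>, // value with reference count
    }

    impl Overlay {
        fn insert(&mut self, k: Key, v: Value) {
            self.data.entry(k).or_insert((v, 0)).1 += 1;
        }
        fn kill(&mut self, k: &Key) {
            let gone = match self.data.get_mut(k) {
                Some(e) => { e.1 -= 1; e.1 <= 0 }
                None => false,
            };
            if gone { self.data.remove(k); }
        }
        fn contains(&self, k: &Key) -> bool { self.data.contains_key(k) }
    }

    struct Record { era: u64, id: u64, insertions: Vec<(Key, Value)>, deletions: Vec<Key> }

    fn commit(history: &mut Overlay, journal: &mut VecDeque<Record>,
              disk: &mut HashMap<Key, Value>, record: Record, end: Option<(u64, u64)>) {
        // Journal the new record and merge its insertions into the history overlay.
        for (k, v) in &record.insertions {
            history.insert(*k, v.clone());
        }
        journal.push_back(record);
        // Retire records that fall out of the history window.
        if let Some((end_era, canon_id)) = end {
            while journal.front().map_or(false, |r| r.era <= end_era) {
                let r = journal.pop_front().unwrap();
                let canonical = r.id == canon_id;
                for (k, v) in r.insertions {
                    // Canonical insertions reach disk; every journalled insertion
                    // releases one reference in the history overlay.
                    if canonical { disk.insert(k, v); }
                    history.kill(&k);
                }
                // Canonical deletions reach disk only once no journalled record
                // still references the key.
                for k in r.deletions {
                    if canonical && !history.contains(&k) { disk.remove(&k); }
                }
            }
        }
    }

The invariant this buys is the one the old per-key counters only approximated: a key stays out of the backing database's delete path for as long as any record inside the history window still references it.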
+/// pub struct JournalDB { transaction_overlay: MemoryDB, backing: Arc, @@ -220,7 +244,6 @@ impl JournalDB { } // apply old commits' details - if let Some((end_era, canon_id)) = end { let mut canon_insertions: Vec<(H256, Bytes)> = Vec::new(); let mut canon_deletions: Vec = Vec::new(); From 3153d12bd9997f223a8a04ec40e8db21fb161663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 11:40:44 +0100 Subject: [PATCH 028/222] feature enabled when compiling without --release --- Cargo.toml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..d1094a110 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,15 +12,22 @@ rustc-serialize = "0.3" docopt = "0.6" time = "0.1" ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } -clippy = { version = "0.0.44", optional = true } -ethcore-util = { path = "util" } -ethcore = { path = "ethcore" } -ethsync = { path = "sync" } -ethcore-rpc = { path = "rpc", optional = true } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" -ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +clippy = { version = "0.0.44", optional = true } + +ethcore = { path = "ethcore" } +ethcore-util = { path = "util" } +ethsync = { path = "sync" } +ethcore-devtools = { path = "devtools" } +ethcore-rpc = { path = "rpc", optional = true } + +[dev-dependencies] +ethcore = { path = "ethcore", features = ["dev"]} +ethcore-util = { path = "util", features = ["dev"] } +ethsync = { path = "sync", features = ["dev"] } +ethcore-rpc = { path = "rpc", features = ["dev"]} [features] default = ["rpc"] From e83f8561041216a56a47cba39abd9ed3a0385961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 12:16:37 +0100 Subject: [PATCH 029/222] Merging chain_blocks_verified to chain_new_blocks --- sync/src/chain.rs | 78 +++++++++++++++++++++------------------ sync/src/lib.rs | 10 ++--- sync/src/tests/chain.rs | 8 ++-- sync/src/tests/helpers.rs | 4 +- 4 files changed, 52 insertions(+), 48 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index fc0a19aba..e598c4572 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -207,7 +207,7 @@ pub struct ChainSync { /// True if common block for our and remote chain has been found have_common_block: bool, /// Last propagated block number - last_send_block_number: BlockNumber, + last_sent_block_number: BlockNumber, /// Max blocks to download ahead max_download_ahead_blocks: usize, /// Network ID @@ -236,7 +236,7 @@ impl ChainSync { last_imported_hash: None, syncing_difficulty: U256::from(0u64), have_common_block: false, - last_send_block_number: 0, + last_sent_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, transaction_queue: Mutex::new(TransactionQueue::new()), @@ -1248,26 +1248,25 @@ impl ChainSync { sent } + fn propagate_latest_blocks(&mut self, io: &mut SyncIo) { + let chain_info = io.chain().chain_info(); + if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { + let blocks = self.propagate_blocks(&chain_info, io); + let hashes = self.propagate_new_hashes(&chain_info, io); + if blocks != 0 || hashes != 0 { + trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); + } + } + self.last_sent_block_number = chain_info.best_block_number; + } + /// Maintain other peers. 
Send out any new blocks and transactions
 pub fn maintain_sync(&mut self, io: &mut SyncIo) {
 self.check_resume(io);
 }

- /// should be called once chain has new block, triggers the latest block propagation
- pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) {
- let chain = io.chain().chain_info();
- if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
- let blocks = self.propagate_blocks(&chain, io);
- let hashes = self.propagate_new_hashes(&chain, io);
- if blocks != 0 || hashes != 0 {
- trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
- }
- }
- self.last_send_block_number = chain.best_block_number;
- }
-
- /// called when block is imported to chain, updates transactions queue
- pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
+ /// Called when a block is imported to the chain; updates the transaction queue and propagates the blocks
+ pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) {
 fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec {
 let block = chain
 .block(BlockId::Hash(hash.clone()))
@@ -1278,24 +1277,31 @@ impl ChainSync {
 }

- let chain = io.chain();
- let good = good.par_iter().map(|h| fetch_transactions(chain, h));
- let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));
+ {
+ let chain = io.chain();
+ let good = good.par_iter().map(|h| fetch_transactions(chain, h));
+ let bad = bad.par_iter().map(|h| fetch_transactions(chain, h));

- good.for_each(|txs| {
- let mut transaction_queue = self.transaction_queue.lock().unwrap();
- let hashes = txs.iter().map(|tx| tx.hash()).collect::>();
- transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
- });
- bad.for_each(|txs| {
- // populate sender
- for tx in &txs {
- let _sender = tx.sender();
- }
- let mut transaction_queue = self.transaction_queue.lock().unwrap();
- transaction_queue.add_all(txs, |a| chain.nonce(a));
- });
+ good.for_each(|txs| {
+ let mut transaction_queue = self.transaction_queue.lock().unwrap();
+ let hashes = txs.iter().map(|tx| tx.hash()).collect::>();
+ transaction_queue.remove_all(&hashes, |a| chain.nonce(a));
+ });
+ bad.for_each(|txs| {
+ // populate sender
+ for tx in &txs {
+ let _sender = tx.sender();
+ }
+ let mut transaction_queue = self.transaction_queue.lock().unwrap();
+ transaction_queue.add_all(txs, |a| chain.nonce(a));
+ });
+ }
+
+ // Propagate latest blocks
+ self.propagate_latest_blocks(io);
+ // TODO [todr] propagate transactions?
} + } #[cfg(test)] @@ -1634,13 +1640,13 @@ mod tests { let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&io, &[], &good_blocks, &[]); + sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]); assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]); + sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]); // then let status = sync.transaction_queue.lock().unwrap().status(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 8a30385a2..b5869642c 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -154,13 +154,11 @@ impl NetworkProtocolHandler for EthSync { fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::BlockVerified => { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); - }, SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { - let sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted); - } + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); + }, + _ => {/* Ignore other messages */}, } } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index 58f50916e..855aa79a6 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -129,8 +129,8 @@ fn propagate_hashes() { net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); // 5 peers to sync assert_eq!(5, net.peer(0).queue.len()); @@ -154,8 +154,8 @@ fn propagate_blocks() { net.sync(); net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); assert!(!net.peer(0).queue.is_empty()); // NEW_BLOCK_PACKET diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 5b53ad90b..d01dba0b2 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -455,8 +455,8 @@ impl TestNet { self.peers.iter().all(|p| p.queue.is_empty()) } - pub fn trigger_block_verified(&mut self, peer_id: usize) { + pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]); } } From ec3698066b8dd3f5a061498a9203644511cf8e21 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 12:21:11 +0100 Subject: [PATCH 030/222] Normal CLI options with geth. Support node identity. Support fine-grained JSONRPC API enabling. 
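The geth compatibility below follows one rule: a geth-style flag, when given, overrides its parity-style counterpart. A minimal stand-alone sketch of that resolution rule (the field names mirror the Args struct in this patch, but everything else is illustrative, not the actual main.rs):

    struct Args {
        flag_jsonrpc_addr: String,
        flag_jsonrpc_port: u16,
        flag_rpcaddr: Option<String>,
        flag_rpcport: Option<u16>,
    }

    // The geth-style --rpcaddr/--rpcport win over --jsonrpc-addr/--jsonrpc-port.
    fn rpc_url(args: &Args) -> String {
        format!("{}:{}",
            args.flag_rpcaddr.as_ref().unwrap_or(&args.flag_jsonrpc_addr),
            args.flag_rpcport.unwrap_or(args.flag_jsonrpc_port))
    }

    fn main() {
        let args = Args {
            flag_jsonrpc_addr: "127.0.0.1".to_owned(),
            flag_jsonrpc_port: 8545,
            flag_rpcaddr: None,
            flag_rpcport: Some(8546), // geth-style override wins
        };
        assert_eq!(rpc_url(&args), "127.0.0.1:8546");
    }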
---
 ethcore/src/client.rs | 3 +
 parity/main.rs | 130 ++++++++++++++++++++++++++++++------------
 2 files changed, 96 insertions(+), 37 deletions(-)

diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs
index 9688cc527..8471666aa 100644
--- a/ethcore/src/client.rs
+++ b/ethcore/src/client.rs
@@ -87,6 +87,8 @@ pub struct ClientConfig {
 pub blockchain: BlockChainConfig,
 /// Prefer journal rather than archive.
 pub prefer_journal: bool,
+ /// The name of the client instance.
+ pub name: String,
 }

 impl Default for ClientConfig {
@@ -95,6 +97,7 @@ impl Default for ClientConfig {
 queue: Default::default(),
 blockchain: Default::default(),
 prefer_journal: false,
+ name: Default::default(),
 }
 }
 }
diff --git a/parity/main.rs b/parity/main.rs
index 605fb315d..43b0504f1 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -53,6 +53,16 @@ use docopt::Docopt;
 use daemonize::Daemonize;
 use number_prefix::{binary_prefix, Standalone, Prefixed};

+fn die_with_message(msg: &str) -> ! {
+ println!("ERROR: {}", msg);
+ exit(1);
+}
+
+#[macro_export]
+macro_rules! die {
+ ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*))));
+}
+
 const USAGE: &'static str = r#"
Parity. Ethereum Client.
 By Wood/Paronyan/Kotewicz/Drwięga/Volf.

Usage:
 parity daemon [options] [ --no-bootstrap | ... ]
 parity [options] [ --no-bootstrap | ... ]

-Options:
+Protocol Options:
 --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file
- or frontier, mainnet, morden, or testnet [default: frontier].
+ or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead].
+ --testnet Equivalent to --chain testnet (geth-compatible).
+ --networkid INDEX Override the network identifier from the chain we are on.
 --archive Client should not prune the state/storage trie.
- -d --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity]
- --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys]
+ -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity]
+ --identity NAME Specify your node's name.

+Networking Options:
 --no-bootstrap Don't bother trying to connect to any nodes initially.
 --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304].
 --public-address URL Specify the IP/port on which peers may connect.
 --address URL Equivalent to --listen-address URL --public-address URL.
 --peers NUM Try to maintain that many peers [default: 25].
 --no-discovery Disable new peer discovery.
 --no-upnp Disable trying to figure out the correct public address over UPnP.
 --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation.

+API and Console Options:
+ -j --jsonrpc Enable the JSON-RPC API server.
+ --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API server [default: 127.0.0.1].
+ --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545].
+ --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
+ --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited
+ list of API names. Possible names are web3, eth and net. [default: web3,eth,net].
+ --rpc Equivalent to --jsonrpc (geth-compatible).
+ --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible).
+ --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible).
+ --rpcapi APIS Equivalent to --jsonrpc-apis APIS (geth-compatible).
+ --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible).
+
+Sealing/Mining Options:
+ --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
+ from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
+ --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
+
+Memory Footprint Options:
 --cache-pref-size BYTES Specify the preferred size of the blockchain cache in bytes [default: 16384].
 --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144].
 --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800].
+ --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with
+ other cache options (geth-compatible).

- -j --jsonrpc Enable the JSON-RPC API sever.
- --jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545].
- --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null].
-
- --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards
- from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63].
- --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters.
-
+Miscellaneous Options:
 -l --logging LOGGING Specify the logging level.
 -v --version Show information about version.
 -h --help Show this screen.

struct Args {
 arg_pid_file: String,
 arg_enode: Vec,
 flag_chain: String,
+ flag_testnet: bool,
 flag_db_path: String,
+ flag_networkid: Option,
+ flag_identity: String,
+ flag_cache: Option,
 flag_keys_path: String,
 flag_archive: bool,
 flag_no_bootstrap: bool,
 flag_listen_address: String,
 flag_public_address: Option,
 flag_address: Option,
- flag_peers: u32,
+ flag_peers: usize,
 flag_no_discovery: bool,
 flag_no_upnp: bool,
 flag_node_key: Option,
 flag_cache_pref_size: usize,
 flag_cache_max_size: usize,
 flag_queue_max_size: usize,
 flag_jsonrpc: bool,
- flag_jsonrpc_url: String,
+ flag_jsonrpc_addr: String,
+ flag_jsonrpc_port: u16,
 flag_jsonrpc_cors: String,
+ flag_jsonrpc_apis: String,
+ flag_rpc: bool,
+ flag_rpcaddr: Option,
+ flag_rpcport: Option,
+ flag_rpccorsdomain: Option,
+ flag_rpcapi: Option,
 flag_logging: Option,
 flag_version: bool,
 flag_author: String,
@@ -151,14 +189,23 @@ fn setup_log(init: &Option) {
 }

 #[cfg(feature = "rpc")]
-fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) {
+fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) {
 use rpc::v1::*;

 let mut server = rpc::HttpServer::new(1);
- server.add_delegate(Web3Client::new().to_delegate());
- server.add_delegate(EthClient::new(&client, &sync).to_delegate());
- server.add_delegate(EthFilterClient::new(&client).to_delegate());
- server.add_delegate(NetClient::new(&sync).to_delegate());
+ for api in apis.into_iter() {
+ match api {
+ "web3" => server.add_delegate(Web3Client::new().to_delegate()),
+ "net" => server.add_delegate(NetClient::new(&sync).to_delegate()),
+ "eth" => {
+ server.add_delegate(EthClient::new(&client, &sync).to_delegate());
+ server.add_delegate(EthFilterClient::new(&client).to_delegate());
+ }
+ _ => {
+ die!("{}: Invalid API name to be enabled.", api);
+ }
+ }
+ }
 server.start_async(url, cors_domain);
 }
@@ -179,16 +226,6 @@
By Wood/Paronyan/Kotewicz/Drwięga/Volf.\
", version());
 }

-fn die_with_message(msg: &str) -> ! {
- println!("ERROR: {}", msg);
- exit(1);
-}
-
-#[macro_export]
-macro_rules!
die { - ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*)))); -} - struct Configuration { args: Args } @@ -221,8 +258,11 @@ impl Configuration { } fn spec(&self) -> Spec { + if self.args.flag_testnet { + return ethereum::new_morden(); + } match self.args.flag_chain.as_ref() { - "frontier" | "mainnet" => ethereum::new_frontier(), + "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), "morden" | "testnet" => ethereum::new_morden(), "olympic" => ethereum::new_olympic(), f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()), @@ -276,7 +316,7 @@ impl Configuration { ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_peers; + ret.ideal_peers = self.args.flag_peers as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); @@ -307,13 +347,22 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); let mut sync_config = SyncConfig::default(); - sync_config.network_id = spec.network_id(); + sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id()); // Build client let mut client_config = ClientConfig::default(); - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + match self.args.flag_cache { + Some(mb) => { + client_config.blockchain.max_cache_size = mb * 1024 * 1024; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; + } + None => { + client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; + client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + } + } client_config.prefer_journal = !self.args.flag_archive; + client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); let client = service.client().clone(); @@ -324,9 +373,16 @@ impl Configuration { let sync = EthSync::register(service.network(), sync_config, client); // Setup rpc - if self.args.flag_jsonrpc { - setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors); - SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); + if self.args.flag_jsonrpc || self.args.flag_rpc { + let url = format!("{}:{}", + self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), + self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) + ); + SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); + let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); + // TODO: use this as the API list. 
+ let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); + setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); } // Register IO handler From cbc2c0cf0c76bf12361641c6825e21e82bdec7ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:33:00 +0100 Subject: [PATCH 031/222] Fixing clippy warnings. When building on nightly it is required to enable clippy --- Cargo.lock | 4 +++ Cargo.toml | 13 ++++++--- build.rs | 25 ++++++++++++++++ cargo.sh | 2 ++ ethcore/Cargo.toml | 6 +++- ethcore/build.rs | 25 ++++++++++++++++ ethcore/src/basic_types.rs | 2 +- ethcore/src/block.rs | 4 +-- ethcore/src/block_queue.rs | 4 +-- ethcore/src/blockchain/blockchain.rs | 4 +-- ethcore/src/ethereum/ethash.rs | 2 +- ethcore/src/evm/interpreter.rs | 6 ++-- ethcore/src/evm/tests.rs | 11 ++++--- ethcore/src/externalities.rs | 6 ++-- ethcore/src/lib.rs | 10 +++---- ethcore/src/service.rs | 9 +++--- ethcore/src/spec.rs | 10 +++---- ethcore/src/state.rs | 2 +- ethcore/src/transaction.rs | 2 +- ethcore/src/verification/mod.rs | 2 ++ hook.sh | 2 +- parity/main.rs | 6 ++-- rpc/Cargo.toml | 3 +- rpc/build.rs | 23 +++++++++++++++ sync/Cargo.toml | 6 +++- sync/build.rs | 25 ++++++++++++++++ sync/src/chain.rs | 7 +++-- sync/src/lib.rs | 6 ++-- sync/src/range_collection.rs | 2 +- util/Cargo.toml | 2 +- util/bigint/src/uint.rs | 6 ++-- util/build.rs | 21 ++++++++++++++ util/src/hash.rs | 4 +-- util/src/journaldb.rs | 43 +++++++++++++++------------- util/src/kvdb.rs | 3 +- util/src/lib.rs | 12 ++++---- util/src/network/discovery.rs | 22 +++++++------- util/src/network/host.rs | 4 +-- util/src/panics.rs | 2 +- util/src/trie/triedb.rs | 18 ++++++------ util/src/trie/triedbmut.rs | 36 +++++++++++------------ 41 files changed, 272 insertions(+), 130 deletions(-) create mode 100644 build.rs create mode 100755 cargo.sh create mode 100644 ethcore/build.rs create mode 100644 sync/build.rs diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..61f152f69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -209,6 +210,7 @@ dependencies = [ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -232,6 +234,7 @@ dependencies = [ "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -286,6 +289,7 @@ dependencies = [ "log 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index d1094a110..d8e05bb20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,10 @@ name = "parity" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -24,17 +28,18 @@ ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } [dev-dependencies] -ethcore = { path = "ethcore", features = ["dev"]} +ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } -ethcore-rpc = { path = "rpc", features = ["dev"]} +ethcore-rpc = { path = "rpc", features = ["dev"] } [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "dev"] +travis-nightly = ["ethcore/json-tests", "clippy", "dev"] [[bin]] path = "parity/main.rs" diff --git a/build.rs b/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/cargo.sh b/cargo.sh new file mode 100755 index 000000000..6870ab385 --- /dev/null +++ b/cargo.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cargo "$@" --features dev-clippy diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index c3a3d32dc..fbfe175d7 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -5,6 +5,10 @@ license = "GPL-3.0" name = "ethcore" version = "0.9.99" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -27,5 +31,5 @@ jit = ["evmjit"] evm-debug = [] json-tests = [] test-heavy = [] -dev = ["clippy"] +dev = [] default = [] diff --git a/ethcore/build.rs b/ethcore/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/ethcore/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs index 5f6515c0d..9cba8b3a0 100644 --- a/ethcore/src/basic_types.rs +++ b/ethcore/src/basic_types.rs @@ -24,7 +24,7 @@ pub type LogBloom = H2048; /// Constant 2048-bit datum for 0. Often used as a default. pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] /// Semantic boolean for when a seal/signature is included. pub enum Seal { /// The seal/signature is included. diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 68f647e37..b3894db94 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,7 +16,7 @@ //! Blockchain block. -#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> +#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> use common::*; use engine::*; @@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> { s.block.base.header.note_dirty(); ClosedBlock { - block: s.block, + block: s.block, uncle_bytes: uncle_bytes, } } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..de6802a4f 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -121,7 +121,7 @@ struct QueueSignal { } impl QueueSignal { - #[cfg_attr(feature="dev", allow(bool_comparison))] + #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))] fn set(&self) { if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message"); @@ -385,7 +385,7 @@ impl BlockQueue { } } - pub fn collect_garbage(&self) { + pub fn collect_garbage(&self) { { let mut verification = self.verification.lock().unwrap(); verification.unverified.shrink_to_fit(); diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..d7c9d7975 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -884,7 +884,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_find_uncles() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); @@ -922,7 +922,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_small_fork() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); diff --git a/ethcore/src/ethereum/ethash.rs 
b/ethcore/src/ethereum/ethash.rs index f9810b964..b0c0e4a9f 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -202,7 +202,7 @@ impl Engine for Ethash { } } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index 7491321cb..fb8d19357 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -243,7 +243,7 @@ struct CodeReader<'a> { code: &'a Bytes } -#[cfg_attr(feature="dev", allow(len_without_is_empty))] +#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> { } } -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] enum InstructionCost { Gas(U256), GasMem(U256, U256), @@ -347,7 +347,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 9d4dd3bc4..dc84a9a05 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -25,9 +25,8 @@ struct FakeLogEntry { } #[derive(PartialEq, Eq, Hash, Debug)] -#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;) enum FakeCallType { - CALL, CREATE + Call, Create } #[derive(PartialEq, Eq, Hash, Debug)] @@ -94,7 +93,7 @@ impl Ext for FakeExt { fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CREATE, + call_type: FakeCallType::Create, gas: *gas, sender_address: None, receive_address: None, @@ -115,7 +114,7 @@ impl Ext for FakeExt { _output: &mut [u8]) -> MessageCallResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: *gas, sender_address: Some(sender_address.clone()), receive_address: Some(receive_address.clone()), @@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) { }; assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(code_address.clone()), @@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) { code_address: Some(code_address.clone()) }); assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(address.clone()), diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index beb8d62a1..a1f5763ea 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or_else(|| vec![]) } - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result 
{ match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { @@ -226,9 +226,9 @@ impl<'a> Ext for Externalities<'a> { fn log(&mut self, topics: Vec, data: &[u8]) { let address = self.origin_info.address.clone(); - self.substate.logs.push(LogEntry { + self.substate.logs.push(LogEntry { address: address, - topics: topics, + topics: topics, data: data.to_vec() }); } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 938da02a0..469364eb3 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,16 +15,16 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy config // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore library //! diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 756d02407..33dca8de7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -115,12 +115,11 @@ impl IoHandler for ClientIoHandler { } } - #[cfg_attr(feature="dev", allow(match_ref_pats))] - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { - if let &UserMessage(ref message) = net_message { - match message { - &SyncMessage::BlockVerified => { + if let UserMessage(ref message) = *net_message { + match *message { + SyncMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }, _ => {}, // ignore other messages diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 38a0dda53..774024351 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -99,7 +99,7 @@ pub struct Spec { genesis_state: PodState, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
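Every one of these attribute changes follows the same gating pattern: the clippy plugin and its allow() escapes exist only when the nightly cfg (emitted by the new build scripts) and the dev feature are both active. A minimal crate-root sketch of how the two gates combine, assuming a build.rs like the ones added in this patch:

    // `nightly` is set by build.rs via `cargo:rustc-cfg=nightly`; `dev` is a
    // Cargo feature. On stable rustc, or without --features dev, all of this
    // compiles to nothing.
    #![cfg_attr(all(nightly, feature = "dev"), feature(plugin))]
    #![cfg_attr(all(nightly, feature = "dev"), plugin(clippy))]

    #[cfg_attr(all(nightly, feature = "dev"), allow(match_bool))]
    pub fn flag_name(flag: bool) -> &'static str {
        // clippy would suggest if/else here; the attribute above silences
        // that lint only on nightly dev builds.
        match flag {
            true => "on",
            false => "off",
        }
    }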
@@ -136,7 +136,7 @@ impl Spec { uncles_hash: RlpStream::new_list(0).out().sha3(), extra_data: self.extra_data.clone(), state_root: self.state_root().clone(), - receipts_root: self.receipts_root.clone(), + receipts_root: self.receipts_root.clone(), log_bloom: H2048::new().clone(), gas_used: self.gas_used.clone(), gas_limit: self.gas_limit.clone(), @@ -182,7 +182,7 @@ impl Spec { ) } }; - + self.parent_hash = H256::from_json(&genesis["parentHash"]); self.transactions_root = genesis.find("transactionsTrie").and_then(|_| Some(H256::from_json(&genesis["transactionsTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); self.receipts_root = genesis.find("receiptTrie").and_then(|_| Some(H256::from_json(&genesis["receiptTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); @@ -249,7 +249,7 @@ impl FromJson for Spec { ) } }; - + Spec { name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(), engine_name: json["engineName"].as_string().unwrap().to_owned(), @@ -278,7 +278,7 @@ impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { if !db.contains(&self.state_root()) { - let mut root = H256::new(); + let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); for (address, account) in self.genesis_state.get().iter() { diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index c13678c38..7c1064abf 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -224,7 +224,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index a51824494..733e5ac6b 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -80,7 +80,7 @@ impl Transaction { } impl FromJson for SignedTransaction { - #[cfg_attr(feature="dev", allow(single_char_pattern))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))] fn from_json(json: &Json) -> SignedTransaction { let t = Transaction { nonce: xjson!(&json["nonce"]), diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 260121989..fe1f406cc 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -17,9 +17,11 @@ pub mod verification; pub mod verifier; mod canon_verifier; +#[cfg(test)] mod noop_verifier; pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; +#[cfg(test)] pub use self::noop_verifier::NoopVerifier; diff --git a/hook.sh b/hook.sh index 106ffe4f0..354fddd5d 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push +echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push chmod +x ./.git/hooks/pre-push diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..4055fcf46 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -17,8 +17,8 @@ //! 
Ethcore client application. #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -246,7 +246,7 @@ impl Configuration { } } - #[cfg_attr(feature="dev", allow(useless_format))] + #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { let mut listen_address = None; let mut public_address = None; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..07c0eb85d 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -26,8 +26,9 @@ serde_macros = { version = "0.7.0", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } syntex = "0.29.0" +rustc_version = "0.1" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/build.rs b/rpc/build.rs index b5adeaba1..3806f6fe5 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -1,3 +1,23 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; @@ -26,4 +46,7 @@ mod inner { fn main() { inner::main(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..fd4b9c46f 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,9 +4,13 @@ name = "ethsync" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 63640f87f..0feae01b0 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -268,7 +268,7 @@ impl ChainSync { } - #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Rest sync. 
Clear all downloaded data but keep the queue fn reset(&mut self) { self.downloading_headers.clear(); @@ -335,7 +335,7 @@ impl ChainSync { Ok(()) } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -462,6 +462,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); @@ -484,7 +485,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..3ce33e31f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..dad732fe8 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -207,7 +207,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } #[test] -#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] +#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_range() { use std::cmp::{Ordering}; diff --git a/util/Cargo.toml b/util/Cargo.toml index 9c5cb3fe3..0ce27ec2b 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -40,7 +40,7 @@ chrono = "0.2" [features] default = [] -dev = ["clippy"] +dev = [] [build-dependencies] vergen = "*" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index bd57e9d6d..959df0944 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -1103,7 +1103,7 @@ macro_rules! construct_uint { } } - #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok. + #[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. 
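The `allow(derive_hash_xor_eq)` here is a considered exception: the lint fires whenever `Hash` is hand-written next to a derived `PartialEq`, because the two must agree (`a == b` has to imply `hash(a) == hash(b)`). A small self-contained illustration of why hashing the raw limbs, as the `U256` impl does, preserves that invariant (toy fixed-width type, not the real uint macro):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(PartialEq, Eq)]
struct Limbs([u64; 4]);

impl Hash for Limbs {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Equal limb arrays feed identical bytes to the hasher,
        // so `a == b` implies `hash(a) == hash(b)`.
        for limb in &self.0 {
            state.write_u64(*limb);
        }
    }
}

fn digest(v: &Limbs) -> u64 {
    let mut h = DefaultHasher::new();
    v.hash(&mut h);
    h.finish()
}

fn main() {
    let (a, b) = (Limbs([1, 2, 3, 4]), Limbs([1, 2, 3, 4]));
    assert!(a == b && digest(&a) == digest(&b));
}
```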
impl Hash for $name { fn hash(&self, state: &mut H) where H: Hasher { unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } @@ -1485,7 +1485,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -2032,7 +2032,7 @@ mod tests { #[test] - #[cfg_attr(feature = "dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index eed080e29..0b9b233e0 100644 --- a/util/build.rs +++ b/util/build.rs @@ -1,7 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; extern crate vergen; use vergen::*; +use rustc_version::{version_meta, Channel}; fn main() { vergen(OutputFns::all()).unwrap(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/util/src/hash.rs b/util/src/hash.rs index 73fa33b47..4eb96b53e 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -305,7 +305,7 @@ macro_rules! impl_hash { } impl Copy for $from {} - #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))] + #[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] impl Clone for $from { fn clone(&self) -> $from { unsafe { @@ -637,7 +637,7 @@ mod tests { use std::str::FromStr; #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 57af857a9..3228f2201 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,7 +27,7 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// -/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. @@ -158,7 +158,7 @@ impl JournalDB { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. 
Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { for &(ref h, ref d) in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -181,7 +181,7 @@ impl JournalDB { } } - fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { + fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); for h in inserts { if let Some(c) = counters.get_mut(h) { @@ -211,12 +211,12 @@ impl JournalDB { n = Some(*c); } } - match &n { - &Some(i) if i == 1 => { + match n { + Some(i) if i == 1 => { counters.remove(&h); Self::reset_already_in(batch, &h); } - &None => { + None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); @@ -229,7 +229,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -242,12 +242,12 @@ impl JournalDB { // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history // into ancient history) then only one commit from the tuple is considered canonical. This commit // is kept in the main backing database, whereas any others from the same era are reverted. - // + // // It is possible that a key, properly available in the backing database be deleted and re-inserted // in the recent history queue, yet have both operations in commits that are eventually non-canonical. // To avoid the original, and still required, key from being deleted, we maintain a reference count // which includes an original key, if any. - // + // // The semantics of the `counter` are: // insert key k: // counter already contains k: count += 1 @@ -255,7 +255,7 @@ impl JournalDB { // backing db contains k: count = 1 // backing db doesn't contain k: insert into backing db, count = 0 // delete key k: - // counter contains k (count is asserted to be non-zero): + // counter contains k (count is asserted to be non-zero): // count > 1: counter -= 1 // count == 1: remove counter // count == 0: remove key from backing db @@ -274,7 +274,7 @@ impl JournalDB { // // record new commit's details. 
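The comment block above fully specifies the counter rules; a toy model of just those rules may make them easier to follow. This is an illustrative sketch only (plain integers instead of `H256`/`Bytes`, no batching or journal eras, not the real `JournalDB`):

```rust
use std::collections::HashMap;

struct Model {
    backing: HashMap<u64, Vec<u8>>, // canonical, on-"disk" state
    counters: HashMap<u64, i32>,    // extra refs from still-revertible commits
}

impl Model {
    fn insert(&mut self, key: u64, value: Vec<u8>) {
        if let Some(c) = self.counters.get_mut(&key) {
            *c += 1; // counter already contains k: count += 1
        } else if self.backing.contains_key(&key) {
            self.counters.insert(key, 1); // backing contains k: count = 1
        } else {
            self.backing.insert(key, value); // fresh: write, count = 0
            self.counters.insert(key, 0);
        }
    }

    fn delete(&mut self, key: u64) {
        let count = self.counters.get(&key).cloned().unwrap_or(0);
        if count > 1 {
            *self.counters.get_mut(&key).unwrap() -= 1;
        } else if count == 1 {
            self.counters.remove(&key);
        } else {
            // count == 0: the key only lives in the backing store now
            self.counters.remove(&key);
            self.backing.remove(&key);
        }
    }
}

fn main() {
    let mut m = Model { backing: HashMap::new(), counters: HashMap::new() };
    m.insert(7, vec![1]); // fresh key: written to backing, count = 0
    m.insert(7, vec![1]); // re-insert: count = 1
    m.delete(7);          // count == 1: counter removed, key kept
    assert!(m.backing.contains_key(&7));
    m.delete(7);          // count == 0: key removed from backing
    assert!(!m.backing.contains_key(&7));
}
```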
- trace!("commit: #{} ({}), end era: {:?}", now, id, end); + trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut counters = self.counters.as_ref().unwrap().write().unwrap(); let batch = DBTransaction::new(); { @@ -295,7 +295,7 @@ impl JournalDB { let drained = self.overlay.drain(); let removes: Vec = drained .iter() - .filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned() + .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) .collect(); let inserts: Vec<(H256, Bytes)> = drained .into_iter() @@ -382,12 +382,15 @@ impl JournalDB { /// Returns heap memory size used pub fn mem_used(&self) -> usize { - self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 } + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } } } impl HashDB for JournalDB { - fn keys(&self) -> HashMap { + fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { let h = H256::from_slice(key.deref()); @@ -401,7 +404,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = self.overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -416,18 +419,18 @@ impl HashDB for JournalDB { } } - fn exists(&self, key: &H256) -> bool { + fn exists(&self, key: &H256) -> bool { self.lookup(key).is_some() } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.overlay.emplace(key, value); } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 43a9fc532..a2fa2215a 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> { impl<'a> Iterator for DatabaseIterator<'a> { type Item = (Box<[u8]>, Box<[u8]>); - #[cfg_attr(feature="dev", allow(type_complexity))] - fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> { + fn next(&mut self) -> Option { self.iter.next() } } diff --git a/util/src/lib.rs b/util/src/lib.rs index a50ba8da4..59d66a325 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy settings // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // We use that to be more explicit about handled cases -#![cfg_attr(feature="dev", allow(match_same_arms))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore-util library //! 
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index e52d5d25f..644af22af 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -113,14 +113,14 @@ impl Discovery { } /// Add a new node to discovery table. Pings the node. - pub fn add_node(&mut self, e: NodeEntry) { + pub fn add_node(&mut self, e: NodeEntry) { let endpoint = e.endpoint.clone(); self.update_node(e); self.ping(&endpoint); } /// Add a list of known nodes to the table. - pub fn init_node_list(&mut self, mut nodes: Vec) { + pub fn init_node_list(&mut self, mut nodes: Vec) { for n in nodes.drain(..) { self.update_node(n); } @@ -243,7 +243,7 @@ impl Discovery { self.send_to(packet, address.clone()); } - #[cfg_attr(feature="dev", allow(map_clone))] + #[cfg_attr(all(nightly, feature="dev"), allow(map_clone))] fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec { let mut found: BTreeMap> = BTreeMap::new(); let mut count = 0; @@ -251,7 +251,7 @@ impl Discovery { // Sort nodes by distance to target for bucket in buckets { for node in &bucket.nodes { - let distance = Discovery::distance(target, &node.address.id); + let distance = Discovery::distance(target, &node.address.id); found.entry(distance).or_insert_with(Vec::new).push(&node.address); if count == BUCKET_SIZE { // delete the most distant element @@ -310,7 +310,7 @@ impl Discovery { None }), Ok(_) => None, - Err(e) => { + Err(e) => { warn!("Error reading UPD socket: {:?}", e); None } @@ -339,7 +339,7 @@ impl Discovery { PACKET_PONG => self.on_pong(&rlp, &node_id, &from), PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), - _ => { + _ => { debug!("Unknown UDP packet: {}", packet_id); Ok(None) } @@ -367,14 +367,14 @@ impl Discovery { } else { self.update_node(entry.clone()); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); } let hash = rlp.as_raw().sha3(); let mut response = RlpStream::new_list(2); dest.to_rlp_list(&mut response); response.append(&hash); self.send_packet(PACKET_PONG, from, &response.drain()); - + Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() })) } @@ -391,7 +391,7 @@ impl Discovery { } self.clear_ping(node); let mut added_map = HashMap::new(); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); Ok(None) } @@ -466,8 +466,8 @@ impl Discovery { pub fn round(&mut self) -> Option { let removed = self.check_expired(false); self.discover(); - if !removed.is_empty() { - Some(TableUpdates { added: HashMap::new(), removed: removed }) + if !removed.is_empty() { + Some(TableUpdates { added: HashMap::new(), removed: removed }) } else { None } } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index ece24a1d1..2d1af55ba 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -507,7 +507,7 @@ impl Host where Message: Send + Sync + Clone { debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count()); } - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn connect_peer(&self, id: &NodeId, io: &IoContext>) { if self.have_session(id) { @@ -542,7 +542,7 @@ impl Host where Message: Send + Sync + Clone { self.create_connection(socket, Some(id), io); } - #[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))] + #[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))] fn 
create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext>) { let nonce = self.info.write().unwrap().next_nonce(); let mut handshakes = self.handshakes.write().unwrap(); diff --git a/util/src/panics.rs b/util/src/panics.rs index 05d266b8b..70ce0bc33 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -71,7 +71,7 @@ impl PanicHandler { /// Invoke closure and catch any possible panics. /// In case of panic notifies all listeners about it. - #[cfg_attr(feature="dev", allow(deprecated))] + #[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] pub fn catch_panic(&self, g: G) -> thread::Result where G: FnOnce() -> R + Send + 'static { let _guard = PanicGuard { handler: self }; let result = g(); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index c4b5e120c..182b87063 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -22,7 +22,7 @@ use super::trietraits::*; use super::node::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -54,7 +54,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist @@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> { flushln!("TrieDB::new({}): Trie root not found!", root); panic!("Trie root not found!"); } - TrieDB { - db: db, + TrieDB { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> { fn iterator() { use memorydb::*; use super::triedbmut::*; - + let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ]; let mut memdb = MemoryDB::new(); diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 829c1e518..3d5c366e5 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -23,7 +23,7 @@ use super::journal::*; use super::trietraits::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -66,21 +66,21 @@ enum MaybeChanged<'a> { Changed(Bytes), } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. 
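The `catch_panic` wrapper touched a few hunks up is essentially a panic-to-`Result` adapter plus listener notification. A rough std-only analogue, assuming nothing beyond `std::panic::catch_unwind` (the real `PanicHandler` also manages a list of registered listeners):

```rust
use std::panic;

// Run a closure, convert an unwinding panic into an `Err`, and give
// observers a chance to react before the error propagates.
fn catch_panic<F, R>(f: F) -> std::thread::Result<R>
where
    F: FnOnce() -> R + panic::UnwindSafe,
{
    let result = panic::catch_unwind(f);
    if result.is_err() {
        // This is where `on_panic` listeners would be notified.
        eprintln!("worker panicked; notifying listeners");
    }
    result
}

fn main() {
    assert!(catch_panic(|| 1 + 1).is_ok());
    assert!(catch_panic(|| panic!("boom")).is_err());
}
```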
- pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { + pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { let mut r = TrieDBMut{ - db: db, + db: db, root: root, - hash_count: 0 - }; + hash_count: 0 + }; // set root rlp - *r.root = SHA3_NULL_RLP.clone(); - r + *r.root = SHA3_NULL_RLP.clone(); + r } /// Create a new trie with the backing database `db` and `root`. @@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> { flushln!("Trie root not found {}", root); panic!("Trie root not found!"); } - TrieDBMut { - db: db, + TrieDBMut { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Get the backing database. - pub fn db_mut(&'db mut self) -> &'db mut HashDB { - self.db + pub fn db_mut(&'db mut self) -> &'db mut HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> { } } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node. @@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> { // original had empty slot - place a leaf there. true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s), // original has something there already; augment. - true => { + true => { let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal); journal.new_node(new, &mut s); } From b61c0397bc476ca2253f43724646fc703eb0fdb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:36:38 +0100 Subject: [PATCH 032/222] removing unused variable --- util/src/journaldb.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 378bc2de5..35ad83fa0 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -617,7 +617,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From ab42ec8c81bc91f82b3b81d404e5da2e81382aff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:40:39 +0100 Subject: [PATCH 033/222] Removing unneeded lifetime --- ethcore/src/blockchain/generator/generator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index 51e6294fc..88c9577e2 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized { /// Blocks generated by fork will have lower difficulty than current chain. fn fork(&self, fork_number: usize) -> Fork where Self: Clone; /// Should be called to make every consecutive block have given bloom. 
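The lifetime removal in the hunk below works because of elision: with `&mut self` as the only input lifetime, the compiler ties the output borrow to it automatically, so the explicit `<'a>` adds nothing. A toy illustration with placeholder types (not the real generator API):

```rust
struct Bloom<'a> {
    iter: &'a mut std::vec::IntoIter<u32>,
}

struct Chain {
    blocks: std::vec::IntoIter<u32>,
}

impl Chain {
    // Spelled out, this is `fn with_bloom<'a>(&'a mut self) -> Bloom<'a>`;
    // elision supplies the lifetime for us.
    fn with_bloom(&mut self) -> Bloom {
        Bloom { iter: &mut self.blocks }
    }
}

fn main() {
    let mut chain = Chain { blocks: vec![1, 2, 3].into_iter() };
    let bloom = chain.with_bloom();
    assert_eq!(bloom.iter.next(), Some(1));
}
```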
- fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>; + fn with_bloom(&mut self, bloom: H2048) -> Bloom; /// Should be called to complete block. Without complete, block may have incorrect hash. fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>; /// Completes and generates block. @@ -44,7 +44,7 @@ impl ChainIterator for I where I: Iterator + Sized { } } - fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> { + fn with_bloom(&mut self, bloom: H2048) -> Bloom { Bloom { iter: self, bloom: bloom From 655bb0ed5db4eb014d959b03b3edc43cfb5ebf4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 12:36:06 +0100 Subject: [PATCH 034/222] Additional documentation for transaction queue --- sync/src/lib.rs | 1 + sync/src/transaction_queue.rs | 103 +++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index b5869642c..39a06af8f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -72,6 +72,7 @@ mod chain; mod io; mod range_collection; mod transaction_queue; +pub use transaction_queue::TransactionQueue; #[cfg(test)] mod tests; diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..8270c6e27 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -17,6 +17,67 @@ // TODO [todr] - own transactions should have higher priority //! Transaction Queue +//! +//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions +//! and orders them by priority. Top priority transactions are those with low nonce height (difference between +//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used +//! for comparison (higher gas price = higher priority). +//! +//! # Usage Example +//! +//! ```rust +//! extern crate ethcore_util as util; +//! extern crate ethcore; +//! extern crate ethsync; +//! extern crate rustc_serialize; +//! +//! use util::crypto::KeyPair; +//! use util::hash::Address; +//! use util::numbers::{Uint, U256}; +//! use ethsync::TransactionQueue; +//! use ethcore::transaction::*; +//! use rustc_serialize::hex::FromHex; +//! +//! fn main() { +//! let key = KeyPair::create().unwrap(); +//! let t1 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), +//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(10) }; +//! let t2 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), +//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(11) }; +//! +//! let st1 = t1.sign(&key.secret()); +//! let st2 = t2.sign(&key.secret()); +//! let default_nonce = |_a: &Address| U256::from(10); +//! +//! let mut txq = TransactionQueue::new(); +//! txq.add(st2.clone(), &default_nonce); +//! txq.add(st1.clone(), &default_nonce); +//! +//! // Check status +//! assert_eq!(txq.status().pending, 2); +//! // Check top transactions +//! let top = txq.top_transactions(3); +//! assert_eq!(top.len(), 2); +//! assert_eq!(top[0], st1); +//! assert_eq!(top[1], st2); +//! +//! // And when transaction is removed (but nonce haven't changed) +//! // it will move invalid transactions to future +//! txq.remove(&st1.hash(), &default_nonce); +//! assert_eq!(txq.status().pending, 0); +//! assert_eq!(txq.status().future, 1); +//! 
assert_eq!(txq.top_transactions(3).len(), 0);
+//! }
+//!
+//!
+//! # Maintaining valid state
+//!
+//! 1. Whenever a transaction is imported to the queue, all other transactions from this sender are revalidated in `current`. This means that they are moved to `future` and back again (height recalculation & gap filling).
+//! 2. Whenever a transaction is removed:
+//!    - When it's removed from `future`, all `future` transaction heights are recalculated and then
+//!    we check if the transactions should go to `current` (comparing state nonce)
+//!    - When it's removed from `current`, all transactions from this sender (`current` & `future`) are recalculated.
+//!

 use std::cmp::{Ordering};
 use std::collections::{HashMap, BTreeSet};
@@ -27,9 +88,16 @@
 use ethcore::transaction::*;

 #[derive(Clone, Debug)]
+/// Light structure used to identify a transaction and its order
 struct TransactionOrder {
+	/// Primary ordering factor. Difference between transaction nonce and expected nonce in state
+	/// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5)
+	/// High nonce_height = Low priority (processed later)
 	nonce_height: U256,
+	/// Gas price of the transaction.
+	/// Low gas price = Low priority (processed later)
 	gas_price: U256,
+	/// Hash to identify associated transaction
 	hash: H256,
 }

@@ -70,7 +138,7 @@ impl Ord for TransactionOrder {
 		let a_gas = self.gas_price;
 		let b_gas = b.gas_price;
 		if a_gas != b_gas {
-			return a_gas.cmp(&b_gas);
+			return b_gas.cmp(&a_gas);
 		}

 		// Compare hashes
@@ -78,6 +146,7 @@
 	}
 }

+/// Verified transaction (with sender)
 struct VerifiedTransaction {
 	transaction: SignedTransaction
 }
@@ -101,6 +170,11 @@ impl VerifiedTransaction {
 	}
 }

+/// Holds transactions accessible by (address, nonce) and by priority
+///
+/// TransactionSet keeps the number of entries below its limit, but this doesn't
+/// happen automatically during `insert/remove` operations.
+/// You have to call `enforce_limit` to remove the lowest-priority transactions from the set.
 struct TransactionSet {
 	by_priority: BTreeSet,
 	by_address: Table,
@@ -108,11 +182,15 @@
 }

 impl TransactionSet {
+	/// Inserts `TransactionOrder` into this set
 	fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option {
 		self.by_priority.insert(order.clone());
 		self.by_address.insert(sender, nonce, order)
 	}

+	/// Remove low-priority transactions if there are more than specified by the given `limit`.
+	///
+	/// It drops transactions from this set and also removes the associated `VerifiedTransaction`.
 	fn enforce_limit(&mut self, by_hash: &mut HashMap) {
 		let len = self.by_priority.len();
 		if len <= self.limit {
@@ -134,6 +212,7 @@
 		}
 	}

+	/// Drop a transaction from this set (remove from `by_priority` and `by_address`)
 	fn drop(&mut self, sender: &Address, nonce: &U256) -> Option {
 		if let Some(tx_order) = self.by_address.remove(sender, nonce) {
 			self.by_priority.remove(&tx_order);
@@ -142,6 +221,7 @@
 		None
 	}

+	/// Drop all transactions.
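The `b_gas.cmp(&a_gas)` fix above reverses the comparison operands so that a higher gas price sorts earlier in the priority set. A cut-down sketch of that ordering, with plain integers in place of `U256` and the hash tiebreak omitted:

```rust
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct Order {
    nonce_height: u64,
    gas_price: u64,
}

impl PartialOrd for Order {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

impl Ord for Order {
    fn cmp(&self, b: &Self) -> Ordering {
        if self.nonce_height != b.nonce_height {
            return self.nonce_height.cmp(&b.nonce_height);
        }
        // Reversed operands: a higher gas price compares as "smaller",
        // i.e. higher priority in a BTreeSet's ascending iteration.
        b.gas_price.cmp(&self.gas_price)
    }
}

fn main() {
    let cheap = Order { nonce_height: 0, gas_price: 1 };
    let dear = Order { nonce_height: 0, gas_price: 10 };
    assert!(dear < cheap); // the better-paying transaction comes first
}
```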
fn clear(&mut self) { self.by_priority.clear(); self.by_address.clear(); @@ -260,6 +340,8 @@ impl TransactionQueue { // We will either move transaction to future or remove it completely // so there will be no transactions from this sender in current self.last_nonces.remove(&sender); + // First update height of transactions in future to avoid collisions + self.update_future(&sender, current_nonce); // This should move all current transactions to future and remove old transactions self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future @@ -269,6 +351,7 @@ impl TransactionQueue { } } + /// Update height of all transactions in future transactions set. fn update_future(&mut self, sender: &Address, current_nonce: U256) { // We need to drain all transactions for current sender from future and reinsert them with updated height let all_nonces_from_sender = match self.future.by_address.row(&sender) { @@ -281,6 +364,8 @@ impl TransactionQueue { } } + /// Drop all transactions from given sender from `current`. + /// Either moves them to `future` or removes them from queue completely. fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) { let all_nonces_from_sender = match self.current.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), @@ -300,7 +385,7 @@ impl TransactionQueue { } - /// Returns top transactions from the queue + /// Returns top transactions from the queue ordered by priority. pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority .iter() @@ -318,6 +403,8 @@ impl TransactionQueue { self.last_nonces.clear(); } + /// Checks if there are any transactions in `future` that should actually be promoted to `current` + /// (because nonce matches). fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); @@ -339,6 +426,14 @@ impl TransactionQueue { self.last_nonces.insert(address, current_nonce - U256::one()); } + /// Adds VerifiedTransaction to this queue. + /// + /// Determines if it should be placed in current or future. When transaction is + /// imported to `current` also checks if there are any `future` transactions that should be promoted because of + /// this. + /// + /// It ignores transactions that has already been imported (same `hash`) and replaces the transaction + /// iff `(address, nonce)` is the same but `gas_price` is higher. fn import_tx(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) where T: Fn(&Address) -> U256 { @@ -377,6 +472,10 @@ impl TransactionQueue { self.current.enforce_limit(&mut self.by_hash); } + /// Replaces transaction in given set (could be `future` or `current`). + /// + /// If there is already transaction with same `(sender, nonce)` it will be replaced iff `gas_price` is higher. + /// One of the transactions is dropped from set and also removed from queue entirely (from `by_hash`). 
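The replacement rule documented above can be sketched independently of the real types: a newcomer for an occupied `(sender, nonce)` slot only wins with a strictly higher gas price. Illustrative integers, not the actual queue structures:

```rust
use std::collections::HashMap;

// Slot map keyed by (sender, nonce); values are (gas_price, tx_hash).
fn replace(
    slot: &mut HashMap<(u64, u64), (u64, u64)>,
    sender: u64,
    nonce: u64,
    gas_price: u64,
    hash: u64,
) {
    match slot.get(&(sender, nonce)) {
        Some(&(old_price, _)) if old_price >= gas_price => {
            // The existing transaction stays; the newcomer is dropped
            // entirely (in the real queue, from `by_hash` as well).
        }
        _ => {
            slot.insert((sender, nonce), (gas_price, hash));
        }
    }
}

fn main() {
    let mut slot = HashMap::new();
    replace(&mut slot, 1, 0, 5, 0xaa);
    replace(&mut slot, 1, 0, 3, 0xbb); // ignored: lower gas price
    replace(&mut slot, 1, 0, 9, 0xcc); // wins: strictly higher gas price
    assert_eq!(slot[&(1, 0)], (9, 0xcc));
}
```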
fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap) {
 		let order = TransactionOrder::for_transaction(&tx, base_nonce);
 		let hash = tx.hash();

From 799d3bd2c8e15e71a027caf7e8c836b578163161 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Tue, 8 Mar 2016 12:42:32 +0100
Subject: [PATCH 035/222] Fixing doc test for queue

---
 sync/src/transaction_queue.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs
index 8270c6e27..4b4a6226b 100644
--- a/sync/src/transaction_queue.rs
+++ b/sync/src/transaction_queue.rs
@@ -68,7 +68,7 @@
 //! assert_eq!(txq.status().future, 1);
 //! assert_eq!(txq.top_transactions(3).len(), 0);
 //! }
-//!
+//! ```
 //!
 //! # Maintaining valid state
 //!

From 99a6802b619cb3c1c821ed9645d509dfc9cbc9b0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Tue, 8 Mar 2016 15:46:44 +0100
Subject: [PATCH 036/222] Moving block sealing and transaction_queue to
 separate crate

---
 Cargo.lock                               |  18 ++-
 Cargo.toml                               |   1 +
 ethcore/src/client.rs                    | 105 ++++------------
 miner/Cargo.toml                         |  20 +++
 miner/src/lib.rs                         |  86 +++++++++++++
 miner/src/miner.rs                       | 149 +++++++++++++++++++++++
 {sync => miner}/src/transaction_queue.rs |   0
 parity/main.rs                           |  73 ++++++-----
 rpc/Cargo.toml                           |   1 +
 rpc/src/lib.rs                           |   1 +
 rpc/src/v1/impls/eth.rs                  |  13 +-
 sync/Cargo.toml                          |   5 +-
 sync/src/chain.rs                        |  56 ++-------
 sync/src/lib.rs                          |  14 +--
 14 files changed, 375 insertions(+), 167 deletions(-)
 create mode 100644 miner/Cargo.toml
 create mode 100644 miner/src/lib.rs
 create mode 100644 miner/src/miner.rs
 rename {sync => miner}/src/transaction_queue.rs (100%)

diff --git a/Cargo.lock b/Cargo.lock
index 510e69b59..505fcac63 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11,6 +11,7 @@ dependencies = [
 "ethcore-devtools 0.9.99",
 "ethcore-rpc 0.9.99",
 "ethcore-util 0.9.99",
+ "ethminer 0.9.99",
 "ethsync 0.9.99",
 "fdlimit 0.1.0",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -235,6 +236,7 @@ dependencies = [
 "ethash 0.9.99",
 "ethcore 0.9.99",
 "ethcore-util 0.9.99",
+ "ethminer 0.9.99",
 "ethsync 0.9.99",
 "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
 "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -282,6 +284,19 @@ dependencies = [
 "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "ethminer"
+version = "0.9.99"
+dependencies = [
+ "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ethcore 0.9.99",
+ "ethcore-util 0.9.99",
+ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "ethsync"
 version = "0.9.99"
@@ -290,11 +305,10 @@ dependencies = [
 "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
 "ethcore 0.9.99",
 "ethcore-util 0.9.99",
+ "ethminer 0.9.99",
 "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
 "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
 "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.18
(registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..7e5bc334b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,6 +16,7 @@ clippy = { version = "0.0.44", optional = true } ethcore-util = { path = "util" } ethcore = { path = "ethcore" } ethsync = { path = "sync" } +ethminer = { path = "miner" } ethcore-rpc = { path = "rpc", optional = true } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 874fc9646..af1745ca8 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -17,7 +17,6 @@ //! Blockchain database client. use std::marker::PhantomData; -use std::sync::atomic::AtomicBool; use util::*; use util::panics::*; use blockchain::{BlockChain, BlockProvider}; @@ -185,6 +184,9 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; + + fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option; + fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result; } #[derive(Default, Clone, Debug, Eq, PartialEq)] @@ -219,12 +221,6 @@ pub struct Client where V: Verifier { report: RwLock, import_lock: Mutex<()>, panic_handler: Arc, - - // for sealing... - sealing_enabled: AtomicBool, - sealing_block: Mutex>, - author: RwLock
, - extra_data: RwLock, verifier: PhantomData, secret_store: Arc>, } @@ -273,10 +269,6 @@ impl Client where V: Verifier { report: RwLock::new(Default::default()), import_lock: Mutex::new(()), panic_handler: panic_handler, - sealing_enabled: AtomicBool::new(false), - sealing_block: Mutex::new(None), - author: RwLock::new(Address::new()), - extra_data: RwLock::new(Vec::new()), verifier: PhantomData, secret_store: secret_store, })) @@ -425,10 +417,6 @@ impl Client where V: Verifier { } } - if self.chain_info().best_block_hash != original_best && self.sealing_enabled.load(atomic::Ordering::Relaxed) { - self.prepare_sealing(); - } - imported } @@ -477,85 +465,46 @@ impl Client where V: Verifier { BlockId::Latest => Some(self.chain.read().unwrap().best_block_number()) } } +} - /// Get the author that we will seal blocks as. - pub fn author(&self) -> Address { - self.author.read().unwrap().clone() + +// TODO: need MinerService MinerIoHandler + +impl BlockChainClient for Client where V: Verifier { + + + fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { + block.try_seal(self.engine.deref().deref(), seal) } - /// Set the author that we will seal blocks as. - pub fn set_author(&self, author: Address) { - *self.author.write().unwrap() = author; - } - - /// Get the extra_data that we will seal blocks wuth. - pub fn extra_data(&self) -> Bytes { - self.extra_data.read().unwrap().clone() - } - - /// Set the extra_data that we will seal blocks with. - pub fn set_extra_data(&self, extra_data: Bytes) { - *self.extra_data.write().unwrap() = extra_data; - } - - /// New chain head event. Restart mining operation. - pub fn prepare_sealing(&self) { + fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option { + let engine = self.engine.deref().deref(); let h = self.chain.read().unwrap().best_block_hash(); + let mut b = OpenBlock::new( - self.engine.deref().deref(), + engine, self.state_db.lock().unwrap().clone(), - match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} }, + match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => { return None; } }, self.build_last_hashes(h.clone()), - self.author(), - self.extra_data() + author, + extra_data, ); - self.chain.read().unwrap().find_uncle_headers(&h, self.engine.deref().deref().maximum_uncle_age()).unwrap().into_iter().take(self.engine.deref().deref().maximum_uncle_count()).foreach(|h| { b.push_uncle(h).unwrap(); }); + self.chain.read().unwrap().find_uncle_headers(&h, engine.maximum_uncle_age()) + .unwrap() + .into_iter() + .take(engine.maximum_uncle_count()) + .foreach(|h| { + b.push_uncle(h).unwrap(); + }); // TODO: push transactions. let b = b.close(); trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); - *self.sealing_block.lock().unwrap() = Some(b); + Some(b) } - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - pub fn sealing_block(&self) -> &Mutex> { - if self.sealing_block.lock().unwrap().is_none() { - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); - // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. - self.prepare_sealing(); - } - &self.sealing_block - } - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. 
- pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let mut maybe_b = self.sealing_block.lock().unwrap(); - match *maybe_b { - Some(ref b) if b.hash() == pow_hash => {} - _ => { return Err(Error::PowHashInvalid); } - } - - let b = maybe_b.take(); - match b.unwrap().try_seal(self.engine.deref().deref(), seal) { - Err(old) => { - *maybe_b = Some(old); - Err(Error::PowInvalid) - } - Ok(sealed) => { - // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. - try!(self.import_block(sealed.rlp_bytes())); - Ok(()) - } - } - } -} - -// TODO: need MinerService MinerIoHandler - -impl BlockChainClient for Client where V: Verifier { fn block_header(&self, id: BlockId) -> Option { let chain = self.chain.read().unwrap(); Self::block_hash(&chain, id).and_then(|hash| chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec())) diff --git a/miner/Cargo.toml b/miner/Cargo.toml new file mode 100644 index 000000000..0972aa122 --- /dev/null +++ b/miner/Cargo.toml @@ -0,0 +1,20 @@ +[package] +description = "Ethminer library" +homepage = "http://ethcore.io" +license = "GPL-3.0" +name = "ethminer" +version = "0.9.99" +authors = ["Ethcore "] + +[dependencies] +ethcore-util = { path = "../util" } +ethcore = { path = "../ethcore" } +log = "0.3" +env_logger = "0.3" +rustc-serialize = "0.3" +rayon = "0.3.1" +clippy = { version = "0.0.44", optional = true } + +[features] +dev = ["clippy"] +default = [] diff --git a/miner/src/lib.rs b/miner/src/lib.rs new file mode 100644 index 000000000..e8a50e9b5 --- /dev/null +++ b/miner/src/lib.rs @@ -0,0 +1,86 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +#![warn(missing_docs)] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] + +#[macro_use] +extern crate log; +#[macro_use] +extern crate ethcore_util as util; +extern crate ethcore; +extern crate env_logger; +extern crate rayon; + +mod miner; +mod transaction_queue; + +use util::{Bytes, H256, Address}; +use std::ops::*; +use std::sync::*; +use util::TimerToken; +use ethcore::block::*; +use ethcore::error::*; +use ethcore::client::{Client, BlockChainClient}; +use ethcore::transaction::*; +use miner::Miner; + +pub struct EthMiner { + miner: Miner, + /// Shared blockchain client. TODO: this should evetually become an IPC endpoint + chain: Arc, +} + +impl EthMiner { + /// Creates and register protocol with the network service + pub fn new(chain: Arc) -> Arc { + Arc::new(EthMiner { + miner: Miner::new(), + chain: chain, + }) + } + + pub fn sealing_block(&self) -> &Mutex> { + self.miner.sealing_block(self.chain.deref()) + } + + pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { + self.miner.submit_seal(self.chain.deref(), pow_hash, seal) + } + + /// Set the author that we will seal blocks as. 
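The `EthMiner` facade above inverts the old dependency: the miner holds a shared handle to the chain and asks it for what it needs, instead of the client owning sealing state itself. A stripped-down sketch of that shape, with a placeholder trait and types rather than the real interfaces:

```rust
use std::sync::{Arc, Mutex};

// Placeholder for the slice of client functionality the miner needs.
trait ChainAccess: Send + Sync {
    fn best_hash(&self) -> u64;
}

struct Miner {
    sealing_block: Mutex<Option<u64>>,
}

struct EthMiner<C: ChainAccess> {
    miner: Miner,
    chain: Arc<C>, // shared handle, mirroring `Arc<Client>` above
}

impl<C: ChainAccess> EthMiner<C> {
    fn prepare_sealing(&self) {
        // The miner queries the chain through its handle instead of
        // reaching into client internals.
        *self.miner.sealing_block.lock().unwrap() = Some(self.chain.best_hash());
    }
}

struct DummyChain;
impl ChainAccess for DummyChain {
    fn best_hash(&self) -> u64 { 42 }
}

fn main() {
    let m = EthMiner {
        miner: Miner { sealing_block: Mutex::new(None) },
        chain: Arc::new(DummyChain),
    };
    m.prepare_sealing();
    assert_eq!(*m.miner.sealing_block.lock().unwrap(), Some(42));
}
```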
+ pub fn set_author(&self, author: Address) { + self.miner.set_author(author); + } + + /// Set the extra_data that we will seal blocks with. + pub fn set_extra_data(&self, extra_data: Bytes) { + self.miner.set_extra_data(extra_data); + } + + pub fn import_transactions(&self, transactions: Vec) { + let chain = self.chain.deref(); + let fetch_latest_nonce = |a : &Address| chain.nonce(a); + + self.miner.import_transactions(transactions, fetch_latest_nonce); + } + + pub fn chain_new_blocks(&self, good: &[H256], bad: &[H256], retracted: &[H256]) { + let mut chain = self.chain.deref(); + self.miner.chain_new_blocks(chain, good, bad, retracted); + } +} diff --git a/miner/src/miner.rs b/miner/src/miner.rs new file mode 100644 index 000000000..1a48d5288 --- /dev/null +++ b/miner/src/miner.rs @@ -0,0 +1,149 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::*; +use std::sync::atomic::AtomicBool; +use rayon::prelude::*; +use ethcore::views::{HeaderView, BlockView}; +use ethcore::header::{BlockNumber, Header as BlockHeader}; +use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; +use ethcore::block::*; +use ethcore::error::*; +use ethcore::transaction::SignedTransaction; +use transaction_queue::TransactionQueue; + +pub struct Miner { + /// Transactions Queue + transaction_queue: Mutex, + + // for sealing... + sealing_enabled: AtomicBool, + sealing_block: Mutex>, + author: RwLock
, + extra_data: RwLock, +} + +impl Miner { + pub fn new() -> Miner { + Miner { + transaction_queue: Mutex::new(TransactionQueue::new()), + sealing_enabled: AtomicBool::new(false), + sealing_block: Mutex::new(None), + author: RwLock::new(Address::new()), + extra_data: RwLock::new(Vec::new()), + } + } + + pub fn import_transactions(&self, transactions: Vec, fetch_nonce: T) + where T: Fn(&Address) -> U256 { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(transactions, fetch_nonce); + } + + /// Get the author that we will seal blocks as. + pub fn author(&self) -> Address { + self.author.read().unwrap().clone() + } + + /// Set the author that we will seal blocks as. + pub fn set_author(&self, author: Address) { + *self.author.write().unwrap() = author; + } + + /// Get the extra_data that we will seal blocks wuth. + pub fn extra_data(&self) -> Bytes { + self.extra_data.read().unwrap().clone() + } + + /// Set the extra_data that we will seal blocks with. + pub fn set_extra_data(&self, extra_data: Bytes) { + *self.extra_data.write().unwrap() = extra_data; + } + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, chain: &BlockChainClient) { + let b = chain.prepare_sealing(self.author.read().unwrap().clone(), self.extra_data.read().unwrap().clone()); + *self.sealing_block.lock().unwrap() = b; + } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + pub fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex> { + if self.sealing_block.lock().unwrap().is_none() { + self.sealing_enabled.store(true, atomic::Ordering::Relaxed); + // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. + self.prepare_sealing(chain); + } + &self.sealing_block + } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + pub fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error> { + let mut maybe_b = self.sealing_block.lock().unwrap(); + match *maybe_b { + Some(ref b) if b.hash() == pow_hash => {} + _ => { return Err(Error::PowHashInvalid); } + } + + let b = maybe_b.take(); + match chain.try_seal(b.unwrap(), seal) { + Err(old) => { + *maybe_b = Some(old); + Err(Error::PowInvalid) + } + Ok(sealed) => { + // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. + try!(chain.import_block(sealed.rlp_bytes())); + Ok(()) + } + } + } + + /// called when block is imported to chain, updates transactions queue and propagates the blocks + pub fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) { + fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { + let block = chain + .block(BlockId::Hash(hash.clone())) + // Client should send message after commit to db and inserting to chain. 
+ .expect("Expected in-chain blocks."); + let block = BlockView::new(&block); + block.transactions() + } + + { + let good = good.par_iter().map(|h| fetch_transactions(chain, h)); + let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); + + good.for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); + transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); + }); + bad.for_each(|txs| { + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(txs, |a| chain.nonce(a)); + }); + } + + if self.sealing_enabled.load(atomic::Ordering::Relaxed) { + self.prepare_sealing(chain); + } + } +} diff --git a/sync/src/transaction_queue.rs b/miner/src/transaction_queue.rs similarity index 100% rename from sync/src/transaction_queue.rs rename to miner/src/transaction_queue.rs diff --git a/parity/main.rs b/parity/main.rs index 43b0504f1..ef088ab5b 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -24,6 +24,7 @@ extern crate rustc_serialize; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; #[macro_use] extern crate log as rlog; extern crate env_logger; @@ -49,6 +50,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethsync::{EthSync, SyncConfig}; +use ethminer::{EthMiner}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; @@ -79,6 +81,7 @@ Protocol Options: --networkid INDEX Override the network identifier from the chain we are on. --archive Client should not prune the state/storage trie. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. Networking Options: @@ -107,13 +110,13 @@ API and Console Options: Sealing/Mining Options: --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. + --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. Memory Footprint Options: --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. - --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with + --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with other cache options (geth-compatible). 
Miscellaneous Options: @@ -129,7 +132,7 @@ struct Args { arg_enode: Vec, flag_chain: String, flag_testnet: bool, - flag_db_path: String, + flag_datadir: String, flag_networkid: Option, flag_identity: String, flag_cache: Option, @@ -189,7 +192,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) { +fn setup_rpc_server(client: Arc, sync: Arc, miner: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); @@ -198,7 +201,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom "web3" => server.add_delegate(Web3Client::new().to_delegate()), "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), "eth" => { - server.add_delegate(EthClient::new(&client, &sync).to_delegate()); + server.add_delegate(EthClient::new(&client, &sync, &miner).to_delegate()); server.add_delegate(EthFilterClient::new(&client).to_delegate()); } _ => { @@ -238,7 +241,7 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { @@ -323,6 +326,32 @@ impl Configuration { ret } + fn client_config(&self) -> ClientConfig { + let mut client_config = ClientConfig::default(); + match self.args.flag_cache { + Some(mb) => { + client_config.blockchain.max_cache_size = mb * 1024 * 1024; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; + } + None => { + client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; + client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + } + } + client_config.prefer_journal = !self.args.flag_archive; + client_config.name = self.args.flag_identity.clone(); + client_config.queue.max_mem_use = self.args.flag_queue_max_size; + client_config + } + + fn sync_config(&self, spec: &Spec) -> SyncConfig { + let mut sync_config = SyncConfig::default(); + sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| { + U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id)) + }).unwrap_or(spec.network_id()); + sync_config + } + fn execute(&self) { if self.args.flag_version { print_version(); @@ -346,31 +375,19 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); - let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id()); + let sync_config = self.sync_config(&spec); // Build client - let mut client_config = ClientConfig::default(); - match self.args.flag_cache { - Some(mb) => { - client_config.blockchain.max_cache_size = mb * 1024 * 1024; - client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; - } - None => { - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; - } - } - client_config.prefer_journal = !self.args.flag_archive; - client_config.name = self.args.flag_identity.clone(); - client_config.queue.max_mem_use = self.args.flag_queue_max_size; - let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); - let client = 
service.client().clone(); - client.set_author(self.author()); - client.set_extra_data(self.extra_data()); + let mut service = ClientService::start(self.client_config(), spec, net_settings, &Path::new(&self.path())).unwrap(); + let client = service.client(); + + // Miner + let miner = EthMiner::new(client.clone()); + miner.set_author(self.author()); + miner.set_extra_data(self.extra_data()); // Sync - let sync = EthSync::register(service.network(), sync_config, client); + let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); // Setup rpc if self.args.flag_jsonrpc || self.args.flag_rpc { @@ -382,7 +399,7 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); + setup_rpc_server(service.client(), sync.clone(), miner.clone(), &url, cors, apis.split(",").collect()); } // Register IO handler diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..f6d468f47 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,6 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } +ethminer = { path = "../miner" } clippy = { version = "0.0.44", optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 0653a0c33..299084a6d 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -27,6 +27,7 @@ extern crate jsonrpc_http_server; extern crate ethcore_util as util; extern crate ethcore; extern crate ethsync; +extern crate ethminer; extern crate transient_hashmap; use self::jsonrpc_core::{IoHandler, IoDelegate}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..11c6fe8d0 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -18,6 +18,7 @@ use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; use ethsync::{EthSync, SyncState}; +use ethminer::{EthMiner}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -36,15 +37,17 @@ use v1::helpers::{PollFilter, PollManager}; pub struct EthClient { client: Weak, sync: Weak, + miner: Weak, hashrates: RwLock>, } impl EthClient { /// Creates new EthClient. 
- pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, miner: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), + miner: Arc::downgrade(miner), hashrates: RwLock::new(HashMap::new()), } } @@ -220,8 +223,8 @@ impl Eth for EthClient { fn work(&self, params: Params) -> Result { match params { Params::None => { - let c = take_weak!(self.client); - let u = c.sealing_block().lock().unwrap(); + let miner = take_weak!(self.miner); + let u = miner.sealing_block().lock().unwrap(); match *u { Some(ref b) => { let pow_hash = b.hash(); @@ -239,9 +242,9 @@ impl Eth for EthClient { fn submit_work(&self, params: Params) -> Result { from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { // trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); - let c = take_weak!(self.client); + let miner = take_weak!(self.miner); let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()]; - let r = c.submit_seal(pow_hash, seal); + let r = miner.submit_seal(pow_hash, seal); to_value(&r.is_ok()) }) } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 0097cd47e..748065deb 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -10,15 +10,14 @@ authors = ["Ethcore , + /// Miner + miner: Arc, } type RlpResponseResult = Result, PacketDecodeError>; impl ChainSync { /// Create a new instance of syncing strategy. - pub fn new(config: SyncConfig) -> ChainSync { + pub fn new(config: SyncConfig, miner: Arc) -> ChainSync { ChainSync { state: SyncState::NotSynced, starting_block: 0, @@ -239,7 +238,7 @@ impl ChainSync { last_sent_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, - transaction_queue: Mutex::new(TransactionQueue::new()), + miner: miner, } } @@ -298,7 +297,6 @@ impl ChainSync { self.starting_block = 0; self.highest_block = None; self.have_common_block = false; - self.transaction_queue.lock().unwrap().clear(); self.starting_block = io.chain().chain_info().best_block_number; self.state = SyncState::NotSynced; } @@ -927,16 +925,15 @@ impl ChainSync { } /// Called when peer sends us new transactions fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - let chain = io.chain(); let item_count = r.item_count(); trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); - let fetch_latest_nonce = |a : &Address| chain.nonce(a); - let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let mut transactions = Vec::with_capacity(item_count); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - transaction_queue.add(tx, &fetch_latest_nonce); + transactions.push(tx); } + self.miner.import_transactions(transactions); Ok(()) } @@ -1263,38 +1260,9 @@ impl ChainSync { self.check_resume(io); } - /// called when block is imported to chain, updates transactions queue and propagates the blocks - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { - fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { - let block = chain - .block(BlockId::Hash(hash.clone())) - // Client should send message after commit to db and inserting to chain. 
- .expect("Expected in-chain blocks."); - let block = BlockView::new(&block); - block.transactions() - } - - - { - let chain = io.chain(); - let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); - - good.for_each(|txs| { - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); - transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); - }); - bad.for_each(|txs| { - // populate sender - for tx in &txs { - let _sender = tx.sender(); - } - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); - }); - } - + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], retracted: &[H256]) { + // notify miner + self.miner.chain_new_blocks(good, bad, retracted); // Propagate latests blocks self.propagate_latest_blocks(io); // TODO [todr] propagate transactions? diff --git a/sync/src/lib.rs b/sync/src/lib.rs index b5869642c..0d6044135 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -51,27 +51,27 @@ extern crate log; #[macro_use] extern crate ethcore_util as util; extern crate ethcore; +extern crate ethminer; extern crate env_logger; extern crate time; extern crate rand; -extern crate rayon; #[macro_use] extern crate heapsize; use std::ops::*; use std::sync::*; -use ethcore::client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use util::TimerToken; use util::{U256, ONE_U256}; -use chain::ChainSync; +use ethcore::client::Client; use ethcore::service::SyncMessage; +use ethminer::EthMiner; use io::NetSyncIo; +use chain::ChainSync; mod chain; mod io; mod range_collection; -mod transaction_queue; #[cfg(test)] mod tests; @@ -105,10 +105,10 @@ pub use self::chain::{SyncStatus, SyncState}; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc) -> Arc { + pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc, miner: Arc) -> Arc { let sync = Arc::new(EthSync { chain: chain, - sync: RwLock::new(ChainSync::new(config)), + sync: RwLock::new(ChainSync::new(config, miner)), }); service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); sync @@ -154,7 +154,7 @@ impl NetworkProtocolHandler for EthSync { fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { + SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted} => { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); }, From 84444c697ce69ac8bc3f1126b6d6987af3a2df6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 15:53:12 +0100 Subject: [PATCH 037/222] Adding ethminer to dev/ci files --- .travis.yml | 7 ++++--- Cargo.toml | 2 +- cov.sh | 23 +++++++++++++++++------ doc.sh | 9 ++++++++- hook.sh | 2 +- rpc/Cargo.toml | 2 +- test.sh | 3 ++- 7 files changed, 34 insertions(+), 14 deletions(-) diff --git a/.travis.yml b/.travis.yml index 7213b8f09..48487e0d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,11 +14,11 @@ matrix: - rust: nightly include: - rust: stable - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p 
ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: beta - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: nightly - env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" cache: apt: true directories: @@ -51,6 +51,7 @@ after_success: | ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethsync-* && ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethcore_rpc-* && + ./kcov-master/tmp/usr/local/bin/kcov --exclude-pattern /usr/,/.cargo,/root/.multirust,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests target/kcov target/debug/deps/ethminer-* && ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /usr/,/.cargo,/root/.multirust target/kcov target/debug/parity-* && [ $TRAVIS_BRANCH = master ] && [ $TRAVIS_PULL_REQUEST = false ] && diff --git a/Cargo.toml b/Cargo.toml index 7e5bc334b..a501baaab 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -26,7 +26,7 @@ number_prefix = "0.2" [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethminer/dev"] travis-beta = ["ethcore/json-tests"] travis-nightly = ["ethcore/json-tests", "dev"] diff --git a/cov.sh b/cov.sh index a1fa29e46..d60ef223d 100755 --- a/cov.sh +++ b/cov.sh @@ -15,12 +15,23 @@ if ! type kcov > /dev/null; then exit 1 fi -cargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --no-run || exit $? +cargo test \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer \ + --no-run || exit $? 
rm -rf target/coverage mkdir -p target/coverage -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethash-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethsync-* -kcov --exclude-pattern ~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* + +EXCLUDE="~/.multirust,rocksdb,secp256k1,src/tests,util/json-tests,util/src/network/tests,sync/src/tests,ethcore/src/tests,ethcore/src/evm/tests" +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethash-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_util-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethsync-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethcore_rpc-* +kcov --exclude-pattern $EXCLUDE --include-pattern src --verify target/coverage target/debug/deps/ethminer-* xdg-open target/coverage/index.html diff --git a/doc.sh b/doc.sh index 2fd5ac20f..a5e5e2e13 100755 --- a/doc.sh +++ b/doc.sh @@ -1,4 +1,11 @@ #!/bin/sh # generate documentation only for parity and ethcore libraries -cargo doc --no-deps --verbose -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity +cargo doc --no-deps --verbose \ + -p ethash \ + -p ethcore-util \ + -p ethcore \ + -p ethsync \ + -p ethcore-rpc \ + -p parity \ + -p ethminer diff --git a/hook.sh b/hook.sh index 106ffe4f0..313639640 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push +echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer --features dev" > ./.git/hooks/pre-push chmod +x ./.git/hooks/pre-push diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index f6d468f47..d0174be59 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -31,4 +31,4 @@ syntex = "0.29.0" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethminer/dev"] diff --git a/test.sh b/test.sh index 0f5edb0d1..e1881a8ad 100755 --- a/test.sh +++ b/test.sh @@ -1,4 +1,5 @@ #!/bin/sh # Running Parity Full Test Suite -cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p 
ethsync -p ethcore-rpc -p parity +cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer From 9acb36af871d8b0c84f3a48a4587e9d64ae68456 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 16:23:32 +0100 Subject: [PATCH 038/222] Fixing tests compilation. Removing ethminer dependency on client --- ethcore/src/log_entry.rs | 2 +- ethcore/src/tests/client.rs | 17 ++++---------- miner/src/lib.rs | 46 ++++++------------------------------- miner/src/miner.rs | 19 ++++++++++++--- parity/main.rs | 2 +- rpc/src/v1/impls/eth.rs | 7 ++++-- sync/src/chain.rs | 21 +++++++++-------- sync/src/tests/helpers.rs | 12 +++++++++- 8 files changed, 58 insertions(+), 68 deletions(-) diff --git a/ethcore/src/log_entry.rs b/ethcore/src/log_entry.rs index a75e6fcc1..63d09b4f0 100644 --- a/ethcore/src/log_entry.rs +++ b/ethcore/src/log_entry.rs @@ -111,7 +111,7 @@ mod tests { let bloom = H2048::from_str("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap(); let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap(); let log = LogEntry { - address: address, + address: address, topics: vec![], data: vec![] }; diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 001d1729b..d31a780e6 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -132,16 +132,9 @@ fn can_mine() { let dummy_blocks = get_good_dummy_block_seq(2); let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); let client = client_result.reference(); - let b = client.sealing_block(); - let pow_hash = { - let u = b.lock().unwrap(); - match *u { - Some(ref b) => { - assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); - b.hash() - } - None => { panic!(); } - } - }; - assert!(client.submit_seal(pow_hash, vec![]).is_ok()); + + let b = client.prepare_sealing(Address::default(), vec![]).unwrap(); + + assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); + assert!(client.try_seal(b, vec![]).is_ok()); } diff --git a/miner/src/lib.rs b/miner/src/lib.rs index e8a50e9b5..ae6235393 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -29,58 +29,26 @@ extern crate rayon; mod miner; mod transaction_queue; -use util::{Bytes, H256, Address}; use std::ops::*; use std::sync::*; -use util::TimerToken; -use ethcore::block::*; -use ethcore::error::*; -use ethcore::client::{Client, BlockChainClient}; -use ethcore::transaction::*; -use miner::Miner; +pub use miner::Miner; pub struct EthMiner { miner: Miner, - /// Shared blockchain client. 
TODO: this should eventually become an IPC endpoint - chain: Arc, } impl EthMiner { /// Creates and register protocol with the network service - pub fn new(chain: Arc) -> Arc { + pub fn new() -> Arc { Arc::new(EthMiner { miner: Miner::new(), - chain: chain, }) } +} +impl Deref for EthMiner { + type Target = Miner; - pub fn sealing_block(&self) -> &Mutex> { - self.miner.sealing_block(self.chain.deref()) - } - - pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - self.miner.submit_seal(self.chain.deref(), pow_hash, seal) - } - - /// Set the author that we will seal blocks as. - pub fn set_author(&self, author: Address) { - self.miner.set_author(author); - } - - /// Set the extra_data that we will seal blocks with. - pub fn set_extra_data(&self, extra_data: Bytes) { - self.miner.set_extra_data(extra_data); - } - - pub fn import_transactions(&self, transactions: Vec) { - let chain = self.chain.deref(); - let fetch_latest_nonce = |a : &Address| chain.nonce(a); - - self.miner.import_transactions(transactions, fetch_latest_nonce); - } - - pub fn chain_new_blocks(&self, good: &[H256], bad: &[H256], retracted: &[H256]) { - let mut chain = self.chain.deref(); - self.miner.chain_new_blocks(chain, good, bad, retracted); + fn deref(&self) -> &Self::Target { + &self.miner } } diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 1a48d5288..76130b261 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -23,7 +23,7 @@ use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; use ethcore::block::*; use ethcore::error::*; use ethcore::transaction::SignedTransaction; -use transaction_queue::TransactionQueue; +use transaction_queue::{TransactionQueue, TransactionQueueStatus}; pub struct Miner { /// Transactions Queue @@ -36,6 +36,11 @@ pub struct Miner { extra_data: RwLock, } +pub struct MinerStatus { + pub transaction_queue_pending: usize, + pub transaction_queue_future: usize, +} + impl Miner { pub fn new() -> Miner { Miner { @@ -47,6 +52,14 @@ impl Miner { } } + pub fn status(&self) -> MinerStatus { + let status = self.transaction_queue.lock().unwrap().status(); + MinerStatus { + transaction_queue_pending: status.pending, + transaction_queue_future: status.future, + } + } + pub fn import_transactions(&self, transactions: Vec, fetch_nonce: T) where T: Fn(&Address) -> U256 { let mut transaction_queue = self.transaction_queue.lock().unwrap(); @@ -55,7 +68,7 @@ impl Miner { /// Get the author that we will seal blocks as. pub fn author(&self) -> Address { - self.author.read().unwrap().clone() + *self.author.read().unwrap() } /// Set the author that we will seal blocks as. @@ -75,7 +88,7 @@ impl Miner { /// New chain head event. Restart mining operation. 
fn prepare_sealing(&self, chain: &BlockChainClient) { - let b = chain.prepare_sealing(self.author.read().unwrap().clone(), self.extra_data.read().unwrap().clone()); + let b = chain.prepare_sealing(*self.author.read().unwrap(), self.extra_data.read().unwrap().clone()); *self.sealing_block.lock().unwrap() = b; } diff --git a/parity/main.rs b/parity/main.rs index ef088ab5b..a0bc87a03 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -382,7 +382,7 @@ impl Configuration { let client = service.client(); // Miner - let miner = EthMiner::new(client.clone()); + let miner = EthMiner::new(); miner.set_author(self.author()); miner.set_extra_data(self.extra_data()); diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 11c6fe8d0..46d875c99 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -17,6 +17,7 @@ //! Eth rpc implementation. use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; +use std::ops::Deref; use ethsync::{EthSync, SyncState}; use ethminer::{EthMiner}; use jsonrpc_core::*; @@ -224,7 +225,8 @@ impl Eth for EthClient { match params { Params::None => { let miner = take_weak!(self.miner); - let u = miner.sealing_block().lock().unwrap(); + let client = take_weak!(self.client); + let u = miner.sealing_block(client.deref()).lock().unwrap(); match *u { Some(ref b) => { let pow_hash = b.hash(); @@ -243,8 +245,9 @@ impl Eth for EthClient { from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| { // trace!("Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash); let miner = take_weak!(self.miner); + let client = take_weak!(self.client); let seal = vec![encode(&mix_hash).to_vec(), encode(&nonce).to_vec()]; - let r = miner.submit_seal(pow_hash, seal); + let r = miner.submit_seal(client.deref(), pow_hash, seal); to_value(&r.is_ok()) }) } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index d3277eccc..c607f53b1 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -31,7 +31,7 @@ use util::*; use std::mem::{replace}; -use ethcore::views::{HeaderView, BlockView}; +use ethcore::views::{HeaderView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; use range_collection::{RangeCollection, ToUsize, FromUsize}; @@ -933,7 +933,9 @@ impl ChainSync { let tx: SignedTransaction = try!(r.val_at(i)); transactions.push(tx); } - self.miner.import_transactions(transactions); + let chain = io.chain(); + let fetch_nonce = |a: &Address| chain.nonce(a); + self.miner.import_transactions(transactions, fetch_nonce); Ok(()) } @@ -1262,7 +1264,7 @@ impl ChainSync { pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], retracted: &[H256]) { // notify miner - self.miner.chain_new_blocks(good, bad, retracted); + self.miner.chain_new_blocks(io.chain(), good, bad, retracted); // Propagate latest blocks self.propagate_latest_blocks(io); // TODO [todr] propagate transactions? 
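The fetch_nonce closure above is the entire contract between sync and the miner: the transaction queue never sees a client type, only a function from a sender address to that sender's current chain nonce, which is what lets this patch drop ethminer's direct dependency on the client. A minimal, self-contained sketch of the same inversion, with a HashMap standing in for BlockChainClient state (all names below are illustrative, not the real crate API):

use std::collections::HashMap;

// Stand-ins for util::hash::Address and util::numbers::U256.
type Address = [u8; 20];
type U256 = u64;

// The queue side: generic over any nonce source, mirroring
// TransactionQueue::add(tx, &fetch_nonce) in the diff above.
fn import_with_nonce_source<T>(senders: &[Address], fetch_nonce: T)
    where T: Fn(&Address) -> U256
{
    for sender in senders {
        // A real queue compares this chain nonce against the
        // transaction nonce to file it as pending or future.
        let nonce = fetch_nonce(sender);
        println!("chain nonce for sender: {}", nonce);
    }
}

fn main() {
    // The caller side: sync adapts its chain handle into a closure,
    // simulated here with a map of account nonces.
    let mut state = HashMap::new();
    state.insert([0u8; 20], 5u64);
    let fetch_nonce = |a: &Address| *state.get(a).unwrap_or(&0);
    import_with_nonce_source(&[[0u8; 20]], fetch_nonce);
}

Passing the lookup in per call, instead of storing a client handle inside the queue, is the design choice that keeps the miner crate compilable and testable without a running client.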
@@ -1279,6 +1281,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethcore::header::*; use ethcore::client::*; + use ethminer::EthMiner; fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); @@ -1388,7 +1391,7 @@ mod tests { } fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default()); + let mut sync = ChainSync::new(SyncConfig::default(), EthMiner::new()); sync.peers.insert(0, PeerInfo { protocol_version: 0, @@ -1610,14 +1613,14 @@ mod tests { // when sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]); - assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); - assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); + assert_eq!(sync.miner.status().transaction_queue_future, 0); + assert_eq!(sync.miner.status().transaction_queue_pending, 1); sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]); // then - let status = sync.transaction_queue.lock().unwrap().status(); - assert_eq!(status.pending, 1); - assert_eq!(status.future, 0); + let status = sync.miner.status(); + assert_eq!(status.transaction_queue_pending, 1); + assert_eq!(status.transaction_queue_future, 0); } #[test] diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index d01dba0b2..37ee862b5 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -17,7 +17,9 @@ use util::*; use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo}; use ethcore::header::{Header as BlockHeader, BlockNumber}; +use ethcore::block::*; use ethcore::error::*; +use ethminer::EthMiner; use io::SyncIo; use chain::ChainSync; use ::SyncConfig; @@ -308,6 +310,14 @@ impl BlockChainClient for TestBlockChainClient { best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, } } + + fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option { + unimplemented!() + } + + fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { + unimplemented!() + } } pub struct TestIo<'p> { @@ -382,7 +392,7 @@ impl TestNet { for _ in 0..n { net.peers.push(TestPeer { chain: TestBlockChainClient::new(), - sync: ChainSync::new(SyncConfig::default()), + sync: ChainSync::new(SyncConfig::default(), EthMiner::new()), queue: VecDeque::new(), }); } From 49f1834ffb97244f0aaf404d86c0bdaaf226c9d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 16:40:35 +0100 Subject: [PATCH 039/222] Fixing travis yml whitespace --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 48487e0d3..0c614ca5d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,11 +14,11 @@ matrix: - rust: nightly include: - rust: stable - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: beta - env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p 
ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: nightly - env: FEATURES="--features travis-nightly" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" cache: apt: true directories: From b2fc077f8c6a8a28ca0f14e975107b7431887f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 16:42:30 +0100 Subject: [PATCH 040/222] Fixing CLI parameters --- parity/main.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 43b0504f1..ceb58e31e 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -79,6 +79,7 @@ Protocol Options: --networkid INDEX Override the network identifier from the chain we are on. --archive Client should not prune the state/storage trie. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. Networking Options: @@ -113,7 +114,7 @@ Memory Footprint Options: --cache-pref-size BYTES Specify the preferred size of the blockchain cache in bytes [default: 16384]. --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. - --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with + --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with other cache options (geth-compatible). Miscellaneous Options: @@ -129,7 +130,7 @@ struct Args { arg_enode: Vec, flag_chain: String, flag_testnet: bool, - flag_db_path: String, + flag_datadir: String, flag_networkid: Option, flag_identity: String, flag_cache: Option, @@ -238,7 +239,7 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { From a069e890ba52cf5242cf4492266b1d638a138d74 Mon Sep 17 00:00:00 2001 From: arkpar Date: Tue, 8 Mar 2016 19:14:43 +0100 Subject: [PATCH 041/222] Replaced --archive option with --pruning --- parity/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index ceb58e31e..2d7bd8bab 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -77,7 +77,7 @@ Protocol Options: or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. - --archive Client should not prune the state/storage trie. + --pruning Enable state/storage trie pruning. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. 
@@ -135,7 +135,7 @@ struct Args { flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_archive: bool, + flag_pruning: bool, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, @@ -362,7 +362,7 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; } } - client_config.prefer_journal = !self.args.flag_archive; + client_config.prefer_journal = self.args.flag_pruning; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); From f84d40734d372aa2338df55dc0ad44c4d97650b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 10:26:51 +0100 Subject: [PATCH 042/222] Validating sender before importing to queue --- sync/src/chain.rs | 4 +- sync/src/transaction_queue.rs | 140 +++++++++++++++++++++------- 2 files changed, 87 insertions(+), 57 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a41b06904..8cf1beea1 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -935,7 +935,7 @@ impl ChainSync { let mut transaction_queue = self.transaction_queue.lock().unwrap(); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - transaction_queue.add(tx, &fetch_latest_nonce); + let _ = transaction_queue.add(tx, &fetch_latest_nonce); } Ok(()) } @@ -1291,7 +1291,7 @@ impl ChainSync { let _sender = tx.sender(); } let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); + let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..39ad29894 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -24,6 +24,7 @@ use util::numbers::{Uint, U256}; use util::hash::{Address, H256}; use util::table::*; use ethcore::transaction::*; +use ethcore::error::Error; #[derive(Clone, Debug)] @@ -82,10 +83,11 @@ struct VerifiedTransaction { transaction: SignedTransaction } impl VerifiedTransaction { - fn new(transaction: SignedTransaction) -> Self { - VerifiedTransaction { + fn new(transaction: SignedTransaction) -> Result { + try!(transaction.sender()); + Ok(VerifiedTransaction { transaction: transaction - } + }) } fn hash(&self) -> H256 { @@ -148,6 +150,8 @@ impl TransactionSet { } } +// Will be used when rpc merged +#[allow(dead_code)] #[derive(Debug)] /// Current status of the queue pub struct TransactionQueueStatus { @@ -196,6 +200,8 @@ impl TransactionQueue { } } + // Will be used when rpc merged + #[allow(dead_code)] /// Returns current status for this queue pub fn status(&self) -> TransactionQueueStatus { TransactionQueueStatus { @@ -205,17 +211,19 @@ impl TransactionQueue { } /// Adds all signed transactions to queue to be verified and imported - pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) + pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { for tx in txs.into_iter() { - self.add(tx, &fetch_nonce); + try!(self.add(tx, &fetch_nonce)); } + Ok(()) } /// Add signed transaction to queue to be verified and imported - pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) + pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { - self.import_tx(VerifiedTransaction::new(tx), 
fetch_nonce); + self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce); + Ok(()) } /// Removes all transactions identified by hashes given in slice @@ -299,7 +307,8 @@ impl TransactionQueue { self.future.enforce_limit(&mut self.by_hash); } - + // Will be used when mining merged + #[allow(dead_code)] /// Returns top transactions from the queue pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority @@ -407,13 +416,8 @@ impl TransactionQueue { #[cfg(test)] mod test { extern crate rustc_serialize; - use self::rustc_serialize::hex::FromHex; - use std::ops::Deref; - use std::collections::{HashMap, BTreeSet}; - use util::crypto::KeyPair; - use util::numbers::{U256, Uint}; - use util::hash::{Address}; use util::table::*; + use util::*; use ethcore::transaction::*; use super::*; use super::{TransactionSet, TransactionOrder, VerifiedTransaction}; @@ -457,12 +461,12 @@ mod test { limit: 1 }; let (tx1, tx2) = new_txs(U256::from(1)); - let tx1 = VerifiedTransaction::new(tx1); - let tx2 = VerifiedTransaction::new(tx2); + let tx1 = VerifiedTransaction::new(tx1).unwrap(); + let tx2 = VerifiedTransaction::new(tx2).unwrap(); let mut by_hash = { let mut x = HashMap::new(); - let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); - let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); + let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap(); + let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap(); x.insert(tx1.hash(), tx1); x.insert(tx2.hash(), tx2); x @@ -496,13 +500,39 @@ mod test { let tx = new_tx(); // when - txq.add(tx, &default_nonce); + let res = txq.add(tx, &default_nonce); // then + assert!(res.is_ok()); let stats = txq.status(); assert_eq!(stats.pending, 1); } + #[test] + fn should_reject_incorectly_signed_transaction() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_unsigned_tx(U256::from(123)); + let stx = { + let mut s = RlpStream::new_list(9); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + s.append_empty_data(); // action=create + s.append(&tx.value); + s.append(&tx.data); + s.append(&0u64); // v + s.append(&U256::zero()); // r + s.append(&U256::zero()); // s + decode(s.as_raw()) + }; + // when + let res = txq.add(stx, &default_nonce); + + // then + assert!(res.is_err()); + } + #[test] fn should_import_txs_from_same_sender() { // given @@ -511,8 +541,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(1)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let top = txq.top_transactions(5); @@ -529,8 +559,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(2)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -551,13 +581,13 @@ mod test { let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret); let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret); - txq.add(tx, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); - txq.add(tx2, &default_nonce); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); // when - txq.add(tx1, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -570,8 +600,8 @@ mod test { // given let mut txq2 = TransactionQueue::new(); let (tx, tx2) = 
new_txs(U256::from(3)); - txq2.add(tx.clone(), &default_nonce); - txq2.add(tx2.clone(), &default_nonce); + txq2.add(tx.clone(), &default_nonce).unwrap(); + txq2.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq2.status().pending, 1); assert_eq!(txq2.status().future, 1); @@ -592,10 +622,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -614,8 +644,8 @@ mod test { let (tx, tx2) = new_txs(U256::one()); // add - txq.add(tx2.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); let stats = txq.status(); assert_eq!(stats.pending, 2); @@ -632,11 +662,11 @@ mod test { // given let mut txq = TransactionQueue::with_limits(1, 1); let (tx, tx2) = new_txs(U256::one()); - txq.add(tx.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let t = txq.top_transactions(2); @@ -650,14 +680,14 @@ mod test { let mut txq = TransactionQueue::with_limits(10, 1); let (tx1, tx2) = new_txs(U256::from(4)); let (tx3, tx4) = new_txs(U256::from(4)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx3.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx3.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx4.clone(), &default_nonce); + txq.add(tx4.clone(), &default_nonce).unwrap(); // then assert_eq!(txq.status().future, 1); @@ -671,7 +701,7 @@ mod test { let fetch_last_nonce = |_a: &Address| last_nonce; // when - txq.add(tx, &fetch_last_nonce); + txq.add(tx, &fetch_last_nonce).unwrap(); // then let stats = txq.status(); @@ -685,12 +715,12 @@ mod test { let nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (_tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); assert_eq!(txq.status().pending, 0); // when - txq.add(tx2.clone(), &nonce); + txq.add(tx2.clone(), &nonce).unwrap(); // then let stats = txq.status(); @@ -703,15 +733,15 @@ mod test { // given let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when txq.remove(&tx1.hash(), &default_nonce); assert_eq!(txq.status().pending, 0); assert_eq!(txq.status().future, 1); - txq.add(tx1.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -726,10 +756,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), 
&default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -754,8 +784,8 @@ mod test { }; // when - txq.add(tx, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -782,10 +812,10 @@ mod test { }; // when - txq.add(tx1, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx0, &default_nonce); + txq.add(tx0, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -801,8 +831,8 @@ mod test { let next_nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::one()); - txq.add(tx1.clone(), &previous_nonce); - txq.add(tx2, &previous_nonce); + txq.add(tx1.clone(), &previous_nonce).unwrap(); + txq.add(tx2, &previous_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when From a1640dcf7205c2d47b017e2398c6fda03da889d6 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 11:38:53 +0100 Subject: [PATCH 043/222] jsonrpc panic handle --- ethcore/src/block_queue.rs | 2 +- parity/main.rs | 34 +++++++++++++++++++++------------- rpc/src/lib.rs | 22 +++++++++++++++++----- 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..8f1105b8b 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -385,7 +385,7 @@ impl BlockQueue { } } - pub fn collect_garbage(&self) { + pub fn collect_garbage(&self) { { let mut verification = self.verification.lock().unwrap(); verification.unverified.shrink_to_fit(); diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..94db8e706 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -43,7 +43,7 @@ use std::path::PathBuf; use env_logger::LogBuilder; use ctrlc::CtrlC; use util::*; -use util::panics::MayPanic; +use util::panics::{MayPanic, PanicHandler}; use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -151,7 +151,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) { +fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) -> Option> { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); @@ -159,11 +159,12 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom server.add_delegate(EthClient::new(&client, &sync).to_delegate()); server.add_delegate(EthFilterClient::new(&client).to_delegate()); server.add_delegate(NetClient::new(&sync).to_delegate()); - server.start_async(url, cors_domain); + Some(server.start_async(url, cors_domain)) } #[cfg(not(feature = "rpc"))] -fn setup_rpc_server(_client: Arc, _sync: Arc, _url: &str) { +fn setup_rpc_server(_client: Arc, _sync: Arc, _url: &str) -> Option> { + None } fn print_version() { @@ -323,26 +324,28 @@ impl Configuration { // Sync let sync = EthSync::register(service.network(), sync_config, client); - // Setup rpc - if self.args.flag_jsonrpc { - setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors); - 
SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); - } - // Register IO handler let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), - sync: sync + sync: sync.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); + // Setup rpc + let server_handler = if self.args.flag_jsonrpc { + SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); + setup_rpc_server(service.client(), sync, &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors) + } else { + None + }; + // Handle exit - wait_for_exit(&service); + wait_for_exit(&service, server_handler); } } -fn wait_for_exit(client_service: &ClientService) { +fn wait_for_exit(client_service: &ClientService, server_handler: Option>) { let exit = Arc::new(Condvar::new()); // Handle possible exits @@ -351,6 +354,11 @@ fn wait_for_exit(client_service: &ClientService) { let e = exit.clone(); client_service.on_panic(move |_reason| { e.notify_all(); }); + if let Some(handler) = server_handler { + let e = exit.clone(); + handler.on_panic(move |_reason| { e.notify_all(); }); + } + // Wait for signal let mutex = Mutex::new(()); let _ = exit.wait(mutex.lock().unwrap()).unwrap(); diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 0653a0c33..97a3a5fe5 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -29,6 +29,9 @@ extern crate ethcore; extern crate ethsync; extern crate transient_hashmap; +use std::sync::Arc; +use std::thread; +use util::panics::PanicHandler; use self::jsonrpc_core::{IoHandler, IoDelegate}; pub mod v1; @@ -36,7 +39,7 @@ pub mod v1; /// Http server. pub struct HttpServer { handler: IoHandler, - threads: usize + threads: usize, } impl HttpServer { @@ -44,7 +47,7 @@ impl HttpServer { pub fn new(threads: usize) -> HttpServer { HttpServer { handler: IoHandler::new(), - threads: threads + threads: threads, } } @@ -53,9 +56,18 @@ impl HttpServer { self.handler.add_delegate(delegate); } - /// Start server asynchronously in new thread - pub fn start_async(self, addr: &str, cors_domain: &str) { + /// Start server asynchronously in new thread and returns panic handler. 
+ pub fn start_async(self, addr: &str, cors_domain: &str) -> Arc { + let addr = addr.to_owned(); + let cors_domain = cors_domain.to_owned(); + let panic_handler = PanicHandler::new_in_arc(); + let ph = panic_handler.clone(); let server = jsonrpc_http_server::Server::new(self.handler, self.threads); - server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned())) + thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || { + ph.catch_panic(move || { + server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain)); + }).unwrap() + }).expect("Error while creating jsonrpc http thread"); + panic_handler } } From 5db84c32338bc6708dce3d299553531f27b68f8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 12:54:07 +0100 Subject: [PATCH 044/222] Adding transactions to block --- ethcore/src/block.rs | 2 +- ethcore/src/client.rs | 35 +++++++++++++++++++++++++++++------ ethcore/src/service.rs | 2 ++ ethcore/src/tests/client.rs | 2 +- miner/src/miner.rs | 11 +++++++++-- sync/src/chain.rs | 4 ++++ sync/src/lib.rs | 4 ++++ sync/src/tests/helpers.rs | 4 ++-- 8 files changed, 52 insertions(+), 12 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 68f647e37..9ecd58e0a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> { s.block.base.header.note_dirty(); ClosedBlock { - block: s.block, + block: s.block, uncle_bytes: uncle_bytes, } } diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index af1745ca8..fb69df757 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -32,7 +32,7 @@ use service::{NetSyncMessage, SyncMessage}; use env_info::LastHashes; use verification::*; use block::*; -use transaction::LocalizedTransaction; +use transaction::{LocalizedTransaction, SignedTransaction}; use extras::TransactionAddress; use filter::Filter; use log_entry::LocalizedLogEntry; @@ -185,7 +185,10 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; - fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option; + /// Returns ClosedBlock prepared for sealing. + fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option; + + /// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error. 
fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result; } @@ -417,6 +420,12 @@ impl Client where V: Verifier { } } + { + if self.chain_info().best_block_hash != original_best { + io.send(NetworkIoMessage::User(SyncMessage::NewChainHead)).unwrap(); + } + } + imported } @@ -477,7 +486,7 @@ impl BlockChainClient for Client where V: Verifier { block.try_seal(self.engine.deref().deref(), seal) } - fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option { + fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { let engine = self.engine.deref().deref(); let h = self.chain.read().unwrap().best_block_hash(); @@ -490,7 +499,9 @@ impl BlockChainClient for Client where V: Verifier { extra_data, ); - self.chain.read().unwrap().find_uncle_headers(&h, engine.maximum_uncle_age()) + // Add uncles + self.chain.read().unwrap() + .find_uncle_headers(&h, engine.maximum_uncle_age()) .unwrap() .into_iter() .take(engine.maximum_uncle_count()) @@ -498,10 +509,22 @@ impl BlockChainClient for Client where V: Verifier { b.push_uncle(h).unwrap(); }); - // TODO: push transactions. + // Add transactions + let block_number = b.block().header().number(); + for tx in transactions { + let import = b.push_transaction(tx, None); + if let Err(e) = import { + trace!("Error adding transaction to block: number={}. Error: {:?}", block_number, e); + } + } + // And close let b = b.close(); - trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); + trace!("Sealing: number={}, hash={}, diff={}", + b.block().header().number(), + b.hash(), + b.block().header().difficulty() + ); Some(b) } diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 443d09e3b..11380d276 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -34,6 +34,8 @@ pub enum SyncMessage { /// Hashes of blocks that were removed from canonical chain retracted: Vec, }, + /// Best Block Hash in chain has been changed + NewChainHead, /// A block is ready BlockVerified, } diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index d31a780e6..ed0b02788 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -133,7 +133,7 @@ fn can_mine() { let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); let client = client_result.reference(); - let b = client.prepare_sealing(Address::default(), vec![]).unwrap(); + let b = client.prepare_sealing(Address::default(), vec![], vec![]).unwrap(); assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); assert!(client.try_seal(b, vec![]).is_ok()); diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 76130b261..501f8c35c 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -87,8 +87,15 @@ impl Miner { } /// New chain head event. Restart mining operation. 
- fn prepare_sealing(&self, chain: &BlockChainClient) { - let b = chain.prepare_sealing(*self.author.read().unwrap(), self.extra_data.read().unwrap().clone()); + pub fn prepare_sealing(&self, chain: &BlockChainClient) { + let no_of_transactions = 128; + let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); + + let b = chain.prepare_sealing( + self.author(), + self.extra_data(), + transactions, + ); *self.sealing_block.lock().unwrap() = b; } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index c607f53b1..2669b71e2 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1270,6 +1270,10 @@ impl ChainSync { // TODO [todr] propagate transactions? } + pub fn chain_new_head(&mut self, io: &mut SyncIo) { + self.miner.prepare_sealing(io.chain()); + } + } #[cfg(test)] diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 0d6044135..be01d2b7b 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -158,6 +158,10 @@ impl NetworkProtocolHandler for EthSync { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); }, + SyncMessage::NewChainHead => { + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_head(&mut sync_io); + } _ => {/* Ignore other messages */}, } } diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 37ee862b5..8c8f669a2 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -24,7 +24,7 @@ use io::SyncIo; use chain::ChainSync; use ::SyncConfig; use ethcore::receipt::Receipt; -use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; +use ethcore::transaction::{LocalizedTransaction, SignedTransaction, Transaction, Action}; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; @@ -311,7 +311,7 @@ impl BlockChainClient for TestBlockChainClient { } } - fn prepare_sealing(&self, author: Address, extra_data: Bytes) -> Option { + fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { unimplemented!() } From 9d664336b5e2bdd99855ca9961cc2da0ab4b9b31 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 13:28:37 +0100 Subject: [PATCH 045/222] Tratifying Miner --- miner/src/lib.rs | 4 +- miner/src/miner.rs | 90 ++++++++++++++++++++++++--------------- parity/main.rs | 2 +- rpc/src/v1/impls/eth.rs | 2 +- sync/src/chain.rs | 4 +- sync/src/tests/helpers.rs | 4 +- 6 files changed, 65 insertions(+), 41 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index ae6235393..36b040b78 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -31,7 +31,8 @@ mod transaction_queue; use std::ops::*; use std::sync::*; -pub use miner::Miner; +pub use miner::{Miner, MinerService}; + pub struct EthMiner { miner: Miner, @@ -45,6 +46,7 @@ impl EthMiner { }) } } + impl Deref for EthMiner { type Target = Miner; diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 501f8c35c..64d3c9083 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -17,16 +17,45 @@ use util::*; use std::sync::atomic::AtomicBool; use rayon::prelude::*; -use ethcore::views::{HeaderView, BlockView}; -use ethcore::header::{BlockNumber, Header as BlockHeader}; -use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; +use ethcore::views::{BlockView}; +use ethcore::client::{BlockChainClient, BlockId}; use ethcore::block::*; use ethcore::error::*; use ethcore::transaction::SignedTransaction; -use 
transaction_queue::{TransactionQueue, TransactionQueueStatus}; +use transaction_queue::{TransactionQueue}; + +pub trait MinerService { + fn status(&self) -> MinerStatus; + + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) + where T: Fn(&Address) -> U256; + + /// called when blocks are imported to chain, updates transactions queue + fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]); + + /// Set the author that we will seal blocks as. + fn set_author(&self, author: Address); + + /// Set the extra_data that we will seal blocks with. + fn set_extra_data(&self, extra_data: Bytes); + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, chain: &BlockChainClient); + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex>; + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error>; +} + +pub struct MinerStatus { + pub transaction_queue_pending: usize, + pub transaction_queue_future: usize, +} pub struct Miner { - /// Transactions Queue transaction_queue: Mutex, // for sealing... @@ -36,12 +65,8 @@ pub struct Miner { extra_data: RwLock, } -pub struct MinerStatus { - pub transaction_queue_pending: usize, - pub transaction_queue_future: usize, -} - impl Miner { + /// Creates new instance of miner pub fn new() -> Miner { Miner { transaction_queue: Mutex::new(TransactionQueue::new()), @@ -52,7 +77,20 @@ impl Miner { } } - pub fn status(&self) -> MinerStatus { + /// Get the author that we will seal blocks as. + fn author(&self) -> Address { + *self.author.read().unwrap() + } + + /// Get the extra_data that we will seal blocks wuth. + fn extra_data(&self) -> Bytes { + self.extra_data.read().unwrap().clone() + } +} + +impl MinerService for Miner { + + fn status(&self) -> MinerStatus { let status = self.transaction_queue.lock().unwrap().status(); MinerStatus { transaction_queue_pending: status.pending, @@ -60,34 +98,22 @@ impl Miner { } } - pub fn import_transactions(&self, transactions: Vec, fetch_nonce: T) + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) where T: Fn(&Address) -> U256 { let mut transaction_queue = self.transaction_queue.lock().unwrap(); transaction_queue.add_all(transactions, fetch_nonce); } - /// Get the author that we will seal blocks as. - pub fn author(&self) -> Address { - *self.author.read().unwrap() - } - - /// Set the author that we will seal blocks as. - pub fn set_author(&self, author: Address) { + fn set_author(&self, author: Address) { *self.author.write().unwrap() = author; } - /// Get the extra_data that we will seal blocks wuth. - pub fn extra_data(&self) -> Bytes { - self.extra_data.read().unwrap().clone() - } - /// Set the extra_data that we will seal blocks with. - pub fn set_extra_data(&self, extra_data: Bytes) { + fn set_extra_data(&self, extra_data: Bytes) { *self.extra_data.write().unwrap() = extra_data; } - /// New chain head event. Restart mining operation. 
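// One payoff of putting the miner behind a trait (the "traitifying" this
// commit performs) is cheap test doubles. A hedged sketch with a trimmed
// version of the trait; `MockMiner` is hypothetical and not part of the
// source, it just records calls instead of mining.
use std::sync::Mutex;

trait MinerService {
    fn prepare_sealing(&self);
    fn status_pending(&self) -> usize;
}

#[derive(Default)]
struct MockMiner {
    prepare_calls: Mutex<usize>,
}

impl MinerService for MockMiner {
    fn prepare_sealing(&self) {
        *self.prepare_calls.lock().unwrap() += 1;
    }
    fn status_pending(&self) -> usize { 0 }
}

// Code that previously needed a concrete Miner can now accept any impl.
fn on_new_chain_head(miner: &dyn MinerService) {
    miner.prepare_sealing();
}

fn main() {
    let miner = MockMiner::default();
    on_new_chain_head(&miner);
    assert_eq!(*miner.prepare_calls.lock().unwrap(), 1);
}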
- pub fn prepare_sealing(&self, chain: &BlockChainClient) { + fn prepare_sealing(&self, chain: &BlockChainClient) { let no_of_transactions = 128; let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); @@ -99,8 +125,7 @@ impl Miner { *self.sealing_block.lock().unwrap() = b; } - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - pub fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex> { + fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex> { if self.sealing_block.lock().unwrap().is_none() { self.sealing_enabled.store(true, atomic::Ordering::Relaxed); // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. @@ -109,9 +134,7 @@ impl Miner { &self.sealing_block } - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - pub fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error> { + fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error> { let mut maybe_b = self.sealing_block.lock().unwrap(); match *maybe_b { Some(ref b) if b.hash() == pow_hash => {} @@ -132,8 +155,7 @@ impl Miner { } } - /// called when block is imported to chain, updates transactions queue and propagates the blocks - pub fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) { + fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(hash.clone())) diff --git a/parity/main.rs b/parity/main.rs index a0bc87a03..89668a456 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -50,7 +50,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethsync::{EthSync, SyncConfig}; -use ethminer::{EthMiner}; +use ethminer::{EthMiner, MinerService}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 46d875c99..d40761b09 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; use std::ops::Deref; use ethsync::{EthSync, SyncState}; -use ethminer::{EthMiner}; +use ethminer::{EthMiner, MinerService}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 2669b71e2..cb584f51d 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -38,7 +38,7 @@ use range_collection::{RangeCollection, ToUsize, FromUsize}; use ethcore::error::*; use ethcore::transaction::SignedTransaction; use ethcore::block::Block; -use ethminer::EthMiner; +use ethminer::{EthMiner, MinerService}; use io::SyncIo; use time; use super::SyncConfig; @@ -1285,7 +1285,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethcore::header::*; use ethcore::client::*; - use ethminer::EthMiner; + use ethminer::{EthMiner, MinerService}; fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 8c8f669a2..9a4dd2814 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -311,11 +311,11 @@ impl BlockChainClient for 
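// The RPC structs above hold Weak handles so the RPC layer cannot keep the
// client alive on its own. A std-only sketch of the upgrade-or-fail pattern
// behind the take_weak! usage; `Client` here is a stand-in type.
use std::sync::{Arc, Weak};

struct Client { best_block: u64 }

struct EthClient { client: Weak<Client> }

impl EthClient {
    fn new(client: &Arc<Client>) -> Self {
        EthClient { client: Arc::downgrade(client) }
    }

    fn block_number(&self) -> Result<u64, &'static str> {
        // Equivalent of take_weak!: fail the RPC call if the client is gone.
        let client = self.client.upgrade().ok_or("client dropped")?;
        Ok(client.best_block)
    }
}

fn main() {
    let client = Arc::new(Client { best_block: 42 });
    let eth = EthClient::new(&client);
    assert_eq!(eth.block_number(), Ok(42));
    drop(client);
    assert_eq!(eth.block_number(), Err("client dropped"));
}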
TestBlockChainClient { } } - fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { + fn prepare_sealing(&self, _author: Address, _extra_data: Bytes, _transactions: Vec) -> Option { unimplemented!() } - fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { + fn try_seal(&self, _block: ClosedBlock, _seal: Vec) -> Result { unimplemented!() } } From 6ad0ba8fe24bf35c9a74b48a25c6e84dc132429f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 17:11:15 +0400 Subject: [PATCH 046/222] basic commands --- Cargo.lock | 20 ++++++++++++++++++++ Cargo.toml | 1 + parity/main.rs | 34 ++++++++++++++++++++++++++++++++++ util/src/keys/store.rs | 1 + 4 files changed, 56 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..65ca8f566 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,6 +15,7 @@ dependencies = [ "fdlimit 0.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rpassword 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -680,6 +681,17 @@ dependencies = [ "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", ] +[[package]] +name = "rpassword" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rust-crypto" version = "0.2.34" @@ -813,6 +825,14 @@ dependencies = [ "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "termios" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "time" version = "0.1.34" diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..0852a16bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ fdlimit = { path = "util/fdlimit" } daemonize = "0.2" ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +rpassword = "0.1" [features] default = ["rpc"] diff --git a/parity/main.rs b/parity/main.rs index 296e1df65..a442f4fdb 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -32,6 +32,7 @@ extern crate fdlimit; extern crate daemonize; extern crate time; extern crate number_prefix; +extern crate rpassword; #[cfg(feature = "rpc")] extern crate ethcore_rpc as rpc; @@ -70,6 +71,7 @@ Parity. Ethereum Client. Usage: parity daemon [options] [ --no-bootstrap | ... ] + parity account parity [options] [ --no-bootstrap | ... 
] Protocol Options: @@ -126,8 +128,10 @@ Miscellaneous Options: #[derive(Debug, RustcDecodable)] struct Args { cmd_daemon: bool, + cmd_account: bool, arg_pid_file: String, arg_enode: Vec, + arg_command: String, flag_chain: String, flag_testnet: bool, flag_datadir: String, @@ -337,9 +341,39 @@ impl Configuration { .start() .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); } + if self.args.cmd_account { + self.execute_account_cli(&self.args.arg_command); + return; + } self.execute_client(); } + fn execute_account_cli(&self, command: &str) { + use util::keys::store::SecretStore; + use rpassword::read_password; + let mut secret_store = SecretStore::new(); + if command == "new" { + println!("Please note that password is NOT RECOVERABLE."); + println!("Type password: "); + let password = read_password().unwrap(); + println!("Repeat password: "); + let password_repeat = read_password().unwrap(); + if password != password_repeat { + println!("Passwords do not match!"); + return; + } + println!("New account address:"); + let new_address = secret_store.new_account(&password).unwrap(); + println!("{:?}", new_address); + } + if command == "list" { + println!("Known addresses:"); + for &(addr, _) in secret_store.accounts().unwrap().iter() { + println!("{:?}", addr); + } + } + } + fn execute_client(&self) { // Setup logging setup_log(&self.args.flag_logging); diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 625d6fd8f..dcc165259 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -84,6 +84,7 @@ impl SecretStore { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); + ::std::fs::create_dir_all(&path).expect("Should panic since it is critical to be able to access home dir"); Self::new_in(&path) } From 363de973c90ed916959e8bf912f8ebde7f3aafa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 14:26:28 +0100 Subject: [PATCH 047/222] Adding documentation and ditching EthMiner --- miner/src/lib.rs | 54 ++++++++++++++++++++++----------------- miner/src/miner.rs | 34 +++++++++++++++++++----- parity/main.rs | 6 ++--- rpc/src/v1/impls/eth.rs | 6 ++--- sync/src/chain.rs | 10 ++++---- sync/src/lib.rs | 4 +-- sync/src/tests/helpers.rs | 4 +-- 7 files changed, 74 insertions(+), 44 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 36b040b78..591b73402 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -18,6 +18,37 @@ #![cfg_attr(feature="dev", feature(plugin))] #![cfg_attr(feature="dev", plugin(clippy))] +//! Miner module +//! Keeps track of transactions and mined block. +//! +//! Usage example: +//! +//! ```rust +//! extern crate ethcore_util as util; +//! extern crate ethcore; +//! extern crate ethminer; +//! use std::env; +//! use std::sync::Arc; +//! use util::network::{NetworkService, NetworkConfiguration}; +//! use ethcore::client::{Client, ClientConfig}; +//! use ethcore::ethereum; +//! use ethminer::{Miner, MinerService}; +//! +//! fn main() { +//! let dir = env::temp_dir(); +//! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); +//! +//! let miner: Miner = Miner::default(); +//! // get status +//! assert_eq!(miner.status().transaction_queue_pending, 0); +//! +//! // Check block for sealing +//! miner.prepare_sealing(&client); +//! assert_eq!(miner.sealing_block(&client).lock().unwrap().is_some()); +//! } +//! 
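// Sketch of the `parity account new` flow above, reduced to the password
// handling so it can run without a terminal. `read_password` corresponds to
// the rpassword call used in the patch; the `create_account` closure is a
// stand-in for SecretStore::new_account.
fn new_account_cli<R, C>(mut read_password: R, create_account: C)
where
    R: FnMut() -> String,
    C: Fn(&str) -> String,
{
    println!("Please note that password is NOT RECOVERABLE.");
    println!("Type password: ");
    let password = read_password();
    println!("Repeat password: ");
    let password_repeat = read_password();
    if password != password_repeat {
        println!("Passwords do not match!");
        return;
    }
    println!("New account address:");
    println!("{}", create_account(&password));
}

fn main() {
    // Scripted input instead of a terminal, so the flow is testable.
    let mut inputs = vec!["hunter2".to_string(), "hunter2".to_string()].into_iter();
    new_account_cli(|| inputs.next().unwrap(), |_pw| String::from("<new address>"));
}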
``` + + #[macro_use] extern crate log; #[macro_use] @@ -29,28 +60,5 @@ extern crate rayon; mod miner; mod transaction_queue; -use std::ops::*; -use std::sync::*; pub use miner::{Miner, MinerService}; - -pub struct EthMiner { - miner: Miner, -} - -impl EthMiner { - /// Creates and register protocol with the network service - pub fn new() -> Arc { - Arc::new(EthMiner { - miner: Miner::new(), - }) - } -} - -impl Deref for EthMiner { - type Target = Miner; - - fn deref(&self) -> &Self::Target { - &self.miner - } -} diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 64d3c9083..f5ad32d2d 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -24,21 +24,28 @@ use ethcore::error::*; use ethcore::transaction::SignedTransaction; use transaction_queue::{TransactionQueue}; +/// Miner external API pub trait MinerService { + + /// Returns miner's status. fn status(&self) -> MinerStatus; + /// Imports transactions to transaction queue. fn import_transactions(&self, transactions: Vec, fetch_nonce: T) where T: Fn(&Address) -> U256; - /// called when blocks are imported to chain, updates transactions queue - fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]); - /// Set the author that we will seal blocks as. fn set_author(&self, author: Address); /// Set the extra_data that we will seal blocks with. fn set_extra_data(&self, extra_data: Bytes); + /// Removes all transactions from the queue and restart mining operation. + fn clear_and_reset(&self, chain: &BlockChainClient); + + /// called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]); + /// New chain head event. Restart mining operation. fn prepare_sealing(&self, chain: &BlockChainClient); @@ -50,11 +57,15 @@ pub trait MinerService { fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error>; } +/// Mining status pub struct MinerStatus { + /// Number of transactions in queue with state `pending` (ready to be included in block) pub transaction_queue_pending: usize, + /// Number of transactions in queue with state `future` (not yet ready to be included in block) pub transaction_queue_future: usize, } +/// Keeps track of transactions using priority queue and holds currently mined block. pub struct Miner { transaction_queue: Mutex, @@ -65,9 +76,8 @@ pub struct Miner { extra_data: RwLock, } -impl Miner { - /// Creates new instance of miner - pub fn new() -> Miner { +impl Default for Miner { + fn default() -> Miner { Miner { transaction_queue: Mutex::new(TransactionQueue::new()), sealing_enabled: AtomicBool::new(false), @@ -76,6 +86,13 @@ impl Miner { extra_data: RwLock::new(Vec::new()), } } +} + +impl Miner { + /// Creates new instance of miner + pub fn new() -> Arc { + Arc::new(Miner::default()) + } /// Get the author that we will seal blocks as. 
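// The EthMiner wrapper was dropped in favour of `impl Default` plus a
// constructor that hands back an Arc directly, since every consumer shares
// the miner. A sketch of that constructor shape with a stand-in struct.
use std::sync::{Arc, Mutex};

#[derive(Default)]
struct Miner {
    queue: Mutex<Vec<u64>>, // stand-in for the transaction queue
}

impl Miner {
    // Consumers always share the miner, so return Arc<Miner> up front
    // instead of making every call site wrap it.
    fn new() -> Arc<Miner> {
        Arc::new(Miner::default())
    }
}

fn main() {
    let miner = Miner::new();
    let for_sync = miner.clone(); // Arc<Miner> handle for the sync layer
    let for_rpc = miner.clone();  // Arc<Miner> handle for the RPC layer
    for_sync.queue.lock().unwrap().push(1);
    assert_eq!(for_rpc.queue.lock().unwrap().len(), 1);
}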
fn author(&self) -> Address { @@ -90,6 +107,11 @@ impl Miner { impl MinerService for Miner { + fn clear_and_reset(&self, chain: &BlockChainClient) { + self.transaction_queue.lock().unwrap().clear(); + self.prepare_sealing(chain); + } + fn status(&self) -> MinerStatus { let status = self.transaction_queue.lock().unwrap().status(); MinerStatus { diff --git a/parity/main.rs b/parity/main.rs index 89668a456..d75bdcb57 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -50,7 +50,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethsync::{EthSync, SyncConfig}; -use ethminer::{EthMiner, MinerService}; +use ethminer::{Miner, MinerService}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; @@ -192,7 +192,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, miner: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) { +fn setup_rpc_server(client: Arc, sync: Arc, miner: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); @@ -382,7 +382,7 @@ impl Configuration { let client = service.client(); // Miner - let miner = EthMiner::new(); + let miner = Miner::new(); miner.set_author(self.author()); miner.set_extra_data(self.extra_data()); diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index d40761b09..a9ee389f8 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -19,7 +19,7 @@ use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; use std::ops::Deref; use ethsync::{EthSync, SyncState}; -use ethminer::{EthMiner, MinerService}; +use ethminer::{Miner, MinerService}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -38,13 +38,13 @@ use v1::helpers::{PollFilter, PollManager}; pub struct EthClient { client: Weak, sync: Weak, - miner: Weak, + miner: Weak, hashrates: RwLock>, } impl EthClient { /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc, miner: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, miner: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), diff --git a/sync/src/chain.rs b/sync/src/chain.rs index cb584f51d..4c7b0893a 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -38,7 +38,7 @@ use range_collection::{RangeCollection, ToUsize, FromUsize}; use ethcore::error::*; use ethcore::transaction::SignedTransaction; use ethcore::block::Block; -use ethminer::{EthMiner, MinerService}; +use ethminer::{Miner, MinerService}; use io::SyncIo; use time; use super::SyncConfig; @@ -212,14 +212,14 @@ pub struct ChainSync { /// Network ID network_id: U256, /// Miner - miner: Arc, + miner: Arc, } type RlpResponseResult = Result, PacketDecodeError>; impl ChainSync { /// Create a new instance of syncing strategy. 
- pub fn new(config: SyncConfig, miner: Arc) -> ChainSync { + pub fn new(config: SyncConfig, miner: Arc) -> ChainSync { ChainSync { state: SyncState::NotSynced, starting_block: 0, @@ -1285,7 +1285,7 @@ mod tests { use super::{PeerInfo, PeerAsking}; use ethcore::header::*; use ethcore::client::*; - use ethminer::{EthMiner, MinerService}; + use ethminer::{Miner, MinerService}; fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); @@ -1395,7 +1395,7 @@ mod tests { } fn dummy_sync_with_peer(peer_latest_hash: H256) -> ChainSync { - let mut sync = ChainSync::new(SyncConfig::default(), EthMiner::new()); + let mut sync = ChainSync::new(SyncConfig::default(), Miner::new()); sync.peers.insert(0, PeerInfo { protocol_version: 0, diff --git a/sync/src/lib.rs b/sync/src/lib.rs index be01d2b7b..dd331b5da 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -65,7 +65,7 @@ use util::TimerToken; use util::{U256, ONE_U256}; use ethcore::client::Client; use ethcore::service::SyncMessage; -use ethminer::EthMiner; +use ethminer::Miner; use io::NetSyncIo; use chain::ChainSync; @@ -105,7 +105,7 @@ pub use self::chain::{SyncStatus, SyncState}; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc, miner: Arc) -> Arc { + pub fn register(service: &mut NetworkService, config: SyncConfig, chain: Arc, miner: Arc) -> Arc { let sync = Arc::new(EthSync { chain: chain, sync: RwLock::new(ChainSync::new(config, miner)), diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 9a4dd2814..52a1feba4 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -19,7 +19,7 @@ use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, use ethcore::header::{Header as BlockHeader, BlockNumber}; use ethcore::block::*; use ethcore::error::*; -use ethminer::EthMiner; +use ethminer::Miner; use io::SyncIo; use chain::ChainSync; use ::SyncConfig; @@ -392,7 +392,7 @@ impl TestNet { for _ in 0..n { net.peers.push(TestPeer { chain: TestBlockChainClient::new(), - sync: ChainSync::new(SyncConfig::default(), EthMiner::new()), + sync: ChainSync::new(SyncConfig::default(), Miner::new()), queue: VecDeque::new(), }); } From 493c61f09d9a2e8b12579c8a27cdc9ef22766651 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 15:22:06 +0100 Subject: [PATCH 048/222] Minimal gas price threshold. Closes: #636 --- miner/src/miner.rs | 32 ++++++++++++++++---------------- miner/src/transaction_queue.rs | 34 ++++++++++++++++++++++++++++++++++ parity/main.rs | 8 ++++++++ 3 files changed, 58 insertions(+), 16 deletions(-) diff --git a/miner/src/miner.rs b/miner/src/miner.rs index f5ad32d2d..2c18f3a79 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -24,7 +24,7 @@ use ethcore::error::*; use ethcore::transaction::SignedTransaction; use transaction_queue::{TransactionQueue}; -/// Miner external API +/// Miner client API pub trait MinerService { /// Returns miner's status. @@ -34,12 +34,6 @@ pub trait MinerService { fn import_transactions(&self, transactions: Vec, fetch_nonce: T) where T: Fn(&Address) -> U256; - /// Set the author that we will seal blocks as. - fn set_author(&self, author: Address); - - /// Set the extra_data that we will seal blocks with. - fn set_extra_data(&self, extra_data: Bytes); - /// Removes all transactions from the queue and restart mining operation. 
fn clear_and_reset(&self, chain: &BlockChainClient); @@ -103,6 +97,21 @@ impl Miner { fn extra_data(&self) -> Bytes { self.extra_data.read().unwrap().clone() } + + /// Set the author that we will seal blocks as. + pub fn set_author(&self, author: Address) { + *self.author.write().unwrap() = author; + } + + /// Set the extra_data that we will seal blocks with. + pub fn set_extra_data(&self, extra_data: Bytes) { + *self.extra_data.write().unwrap() = extra_data; + } + + /// Set minimal gas price of transaction to be accepted for mining. + pub fn set_minimal_gas_price(&self, min_gas_price: U256) { + self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price); + } } impl MinerService for Miner { @@ -126,15 +135,6 @@ impl MinerService for Miner { transaction_queue.add_all(transactions, fetch_nonce); } - fn set_author(&self, author: Address) { - *self.author.write().unwrap() = author; - } - - - fn set_extra_data(&self, extra_data: Bytes) { - *self.extra_data.write().unwrap() = extra_data; - } - fn prepare_sealing(&self, chain: &BlockChainClient) { let no_of_transactions = 128; let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs index 3e0d931b5..ed8cf801e 100644 --- a/miner/src/transaction_queue.rs +++ b/miner/src/transaction_queue.rs @@ -159,6 +159,8 @@ pub struct TransactionQueueStatus { /// TransactionQueue implementation pub struct TransactionQueue { + /// Gas Price threshold for transactions that can be imported to this queue (defaults to 0) + minimal_gas_price: U256, /// Priority queue for transactions that can go to block current: TransactionSet, /// Priority queue for transactions that has been received but are not yet valid to go to block @@ -189,6 +191,7 @@ impl TransactionQueue { }; TransactionQueue { + minimal_gas_price: U256::zero(), current: current, future: future, by_hash: HashMap::new(), @@ -196,6 +199,12 @@ impl TransactionQueue { } } + /// Sets new gas price threshold for incoming transactions. + /// Any transactions already imported to the queue are not affected. 
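// Sketch of the threshold this commit introduces: the queue drops
// transactions whose gas price is below the configured minimum before
// importing them. u64 stands in for U256 and the struct is a trimmed
// TransactionQueue.
struct Tx { gas_price: u64 }

struct TransactionQueue {
    minimal_gas_price: u64,
    current: Vec<Tx>,
}

impl TransactionQueue {
    fn new() -> Self {
        TransactionQueue { minimal_gas_price: 0, current: vec![] }
    }

    // Only future imports are affected, matching the doc comment above.
    fn set_minimal_gas_price(&mut self, min_gas_price: u64) {
        self.minimal_gas_price = min_gas_price;
    }

    fn add(&mut self, tx: Tx) {
        if tx.gas_price < self.minimal_gas_price {
            println!("Dropping transaction below minimal gas price threshold");
            return;
        }
        self.current.push(tx);
    }
}

fn main() {
    let mut txq = TransactionQueue::new();
    txq.set_minimal_gas_price(50_000_000_000);
    txq.add(Tx { gas_price: 1 });              // dropped
    txq.add(Tx { gas_price: 60_000_000_000 }); // kept
    assert_eq!(txq.current.len(), 1);
}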
+ pub fn set_minimal_gas_price(&mut self, min_gas_price: U256) { + self.minimal_gas_price = min_gas_price; + } + /// Returns current status for this queue pub fn status(&self) -> TransactionQueueStatus { TransactionQueueStatus { @@ -215,6 +224,15 @@ impl TransactionQueue { /// Add signed transaction to queue to be verified and imported pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) where T: Fn(&Address) -> U256 { + + if tx.gas_price < self.minimal_gas_price { + trace!(target: "sync", + "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})", + tx.hash(), tx.gas_price, self.minimal_gas_price + ); + return; + } + // Everything ok - import transaction self.import_tx(VerifiedTransaction::new(tx), fetch_nonce); } @@ -503,6 +521,22 @@ mod test { assert_eq!(stats.pending, 1); } + #[test] + fn should_not_import_transaction_below_min_gas_price_threshold() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_tx(); + txq.set_minimal_gas_price(tx.gas_price + U256::one()); + + // when + txq.add(tx, &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 0); + assert_eq!(stats.future, 0); + } + #[test] fn should_import_txs_from_same_sender() { // given diff --git a/parity/main.rs b/parity/main.rs index d75bdcb57..b3a0224d8 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -108,6 +108,7 @@ API and Console Options: --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). Sealing/Mining Options: + --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 50000000000]. --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. @@ -161,6 +162,7 @@ struct Args { flag_rpcapi: Option, flag_logging: Option, flag_version: bool, + flag_gasprice: String, flag_author: String, flag_extra_data: Option, } @@ -248,6 +250,11 @@ impl Configuration { Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) } + fn gasprice(&self) -> U256 { + U256::from_dec_str(self.args.flag_gasprice).unwrap_or_else(|_| die("{}: Invalid gasprice given. Must be a + decimal unsigned 256-bit number.")) + } + fn extra_data(&self) -> Bytes { match self.args.flag_extra_data { Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), @@ -385,6 +392,7 @@ impl Configuration { let miner = Miner::new(); miner.set_author(self.author()); miner.set_extra_data(self.extra_data()); + miner.set_minimal_gas_price(self.gasprice()); // Sync let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); From 3d74e5bd473d466d20b7169ee56f0316ea72d2ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 15:27:07 +0100 Subject: [PATCH 049/222] Fixing doctest --- miner/src/lib.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 591b73402..4fccc6d51 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -27,14 +27,16 @@ //! extern crate ethcore_util as util; //! extern crate ethcore; //! extern crate ethminer; +//! use std::ops::Deref; //! use std::env; //! use std::sync::Arc; //! use util::network::{NetworkService, NetworkConfiguration}; -//! 
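// The --gasprice hunk above parses a decimal string into a U256 and exits
// with a message on failure. Note that, as printed, it calls die(...)
// without the value its "{}" expects, where the neighbouring --author code
// uses the die!(...) macro form with the offending value. A compiling
// sketch of the same parse-or-exit shape, with u128 standing in for U256:
fn gasprice(flag: &str) -> u128 {
    flag.parse::<u128>().unwrap_or_else(|_| {
        eprintln!(
            "{}: Invalid gasprice given. Must be a decimal unsigned 256-bit number.",
            flag
        );
        std::process::exit(1);
    })
}

fn main() {
    assert_eq!(gasprice("50000000000"), 50_000_000_000);
}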
use ethcore::client::{Client, ClientConfig}; +//! use ethcore::client::{Client, ClientConfig, BlockChainClient}; //! use ethcore::ethereum; //! use ethminer::{Miner, MinerService}; //! //! fn main() { +//! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); //! let dir = env::temp_dir(); //! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); //! @@ -43,8 +45,8 @@ //! assert_eq!(miner.status().transaction_queue_pending, 0); //! //! // Check block for sealing -//! miner.prepare_sealing(&client); -//! assert_eq!(miner.sealing_block(&client).lock().unwrap().is_some()); +//! miner.prepare_sealing(client.deref()); +//! assert!(miner.sealing_block(client.deref()).lock().unwrap().is_some()); //! } //! ``` From bcb9b0e45723f6e35035505ad89f8f747909041b Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 15:32:27 +0100 Subject: [PATCH 050/222] wait_for_exit takes only one input param, which is PanicHandler --- parity/main.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 296e1df65..adc3972e4 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -43,7 +43,7 @@ use std::path::PathBuf; use env_logger::LogBuilder; use ctrlc::CtrlC; use util::*; -use util::panics::{MayPanic, PanicHandler}; +use util::panics::{MayPanic, ForwardPanic, PanicHandler}; use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -341,6 +341,9 @@ impl Configuration { } fn execute_client(&self) { + // Setup panic handler + let panic_handler = PanicHandler::new_in_arc(); + // Setup logging setup_log(&self.args.flag_logging); // Raise fdlimit @@ -367,6 +370,7 @@ impl Configuration { client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); + panic_handler.forward_from(&service); let client = service.client().clone(); client.set_author(self.author()); client.set_extra_data(self.extra_data()); @@ -375,7 +379,7 @@ impl Configuration { let sync = EthSync::register(service.network(), sync_config, client); // Setup rpc - let server_handler = if self.args.flag_jsonrpc || self.args.flag_rpc { + if self.args.flag_jsonrpc || self.args.flag_rpc { let url = format!("{}:{}", self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) @@ -384,10 +388,12 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. 
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()) - } else { - None - }; + let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); + if let Some(handler) = server_handler { + panic_handler.forward_from(handler.deref()); + } + + } // Register IO handler let io_handler = Arc::new(ClientIoHandler { @@ -398,23 +404,20 @@ impl Configuration { service.io().register_handler(io_handler).expect("Error registering IO handler"); // Handle exit - wait_for_exit(&service, server_handler); + wait_for_exit(panic_handler); } } -fn wait_for_exit(client_service: &ClientService, server_handler: Option>) { +fn wait_for_exit(panic_handler: Arc) { let exit = Arc::new(Condvar::new()); // Handle possible exits let e = exit.clone(); CtrlC::set_handler(move || { e.notify_all(); }); - let e = exit.clone(); - client_service.on_panic(move |_reason| { e.notify_all(); }); - if let Some(handler) = server_handler { - let e = exit.clone(); - handler.on_panic(move |_reason| { e.notify_all(); }); - } + // Handle panics + let e = exit.clone(); + panic_handler.on_panic(move |_reason| { e.notify_all(); }); // Wait for signal let mutex = Mutex::new(()); From 7ff4d145448487685be000f572f790c3bcef5ae9 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 19:27:44 +0400 Subject: [PATCH 051/222] adding return to if branch --- parity/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/parity/main.rs b/parity/main.rs index 1a2847439..9a45980ef 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -365,6 +365,7 @@ impl Configuration { println!("New account address:"); let new_address = secret_store.new_account(&password).unwrap(); println!("{:?}", new_address); + return; } if command == "list" { println!("Known addresses:"); From 082a4d9078cff6b90f4b01ca3614cc9c3825f265 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 17:31:43 +0100 Subject: [PATCH 052/222] jsonrpc uses client and sync interfaces as a preparetion for jsonrpc tests --- ethcore/src/client.rs | 73 +++++++++++++++++++++------------------ parity/main.rs | 2 +- rpc/src/v1/impls/eth.rs | 25 +++++++------- rpc/src/v1/impls/net.rs | 12 +++---- sync/src/lib.rs | 18 +++++++--- sync/src/tests/helpers.rs | 9 +++++ 6 files changed, 81 insertions(+), 58 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 8471666aa..374011f71 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -182,6 +182,13 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self) -> &Mutex>; + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error>; } #[derive(Default, Clone, Debug, Eq, PartialEq)] @@ -511,39 +518,6 @@ impl Client where V: Verifier { trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); *self.sealing_block.lock().unwrap() = Some(b); } - - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. 
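// Sketch of the consolidated wait_for_exit above: every exit source
// (Ctrl-C handler, panic handler, and so on) pokes one Condvar and the main
// thread parks on it. Std-only; the signal handler is simulated by a thread.
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
use std::time::Duration;

fn main() {
    let exit = Arc::new((Mutex::new(false), Condvar::new()));

    // Stand-in for the CtrlC::set_handler / PanicHandler::on_panic callbacks.
    let e = exit.clone();
    thread::spawn(move || {
        thread::sleep(Duration::from_millis(50));
        *e.0.lock().unwrap() = true;
        e.1.notify_all();
    });

    // Wait for any signal; the boolean guards against spurious wakeups,
    // which the bare wait in the patch simply tolerates.
    let mut fired = exit.0.lock().unwrap();
    while !*fired {
        fired = exit.1.wait(fired).unwrap();
    }
    println!("shutting down");
}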
- pub fn sealing_block(&self) -> &Mutex> { - if self.sealing_block.lock().unwrap().is_none() { - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); - // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. - self.prepare_sealing(); - } - &self.sealing_block - } - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let mut maybe_b = self.sealing_block.lock().unwrap(); - match *maybe_b { - Some(ref b) if b.hash() == pow_hash => {} - _ => { return Err(Error::PowHashInvalid); } - } - - let b = maybe_b.take(); - match b.unwrap().try_seal(self.engine.deref().deref(), seal) { - Err(old) => { - *maybe_b = Some(old); - Err(Error::PowInvalid) - } - Ok(sealed) => { - // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. - try!(self.import_block(sealed.rlp_bytes())); - Ok(()) - } - } - } } // TODO: need MinerService MinerIoHandler @@ -702,6 +676,39 @@ impl BlockChainClient for Client where V: Verifier { }) .collect() } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self) -> &Mutex> { + if self.sealing_block.lock().unwrap().is_none() { + self.sealing_enabled.store(true, atomic::Ordering::Relaxed); + // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. + self.prepare_sealing(); + } + &self.sealing_block + } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { + let mut maybe_b = self.sealing_block.lock().unwrap(); + match *maybe_b { + Some(ref b) if b.hash() == pow_hash => {} + _ => { return Err(Error::PowHashInvalid); } + } + + let b = maybe_b.take(); + match b.unwrap().try_seal(self.engine.deref().deref(), seal) { + Err(old) => { + *maybe_b = Some(old); + Err(Error::PowInvalid) + } + Ok(sealed) => { + // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. + try!(self.import_block(sealed.rlp_bytes())); + Ok(()) + } + } + } } impl MayPanic for Client { diff --git a/parity/main.rs b/parity/main.rs index ceb58e31e..1cd2970b4 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -48,7 +48,7 @@ use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; -use ethsync::{EthSync, SyncConfig}; +use ethsync::{EthSync, SyncConfig, SyncStatusProvider}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..abcb54ab7 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -17,7 +17,7 @@ //! Eth rpc implementation. 
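// The submit_seal logic being moved above rests on a useful Option pattern:
// take() the pending block out of the mutex, and put it back if sealing
// fails so a later attempt can retry. Sketch with stand-in types; this
// `try_seal` fails unless the provided seal matches a known value.
use std::sync::Mutex;

struct ClosedBlock { pow_hash: u64 }

impl ClosedBlock {
    // Returns the block back on failure, mirroring `Err(old)` above.
    fn try_seal(self, seal: u64) -> Result<u64, ClosedBlock> {
        if seal == 42 { Ok(self.pow_hash) } else { Err(self) }
    }
}

fn submit_seal(
    slot: &Mutex<Option<ClosedBlock>>,
    pow_hash: u64,
    seal: u64,
) -> Result<(), &'static str> {
    let mut maybe_b = slot.lock().unwrap();
    match *maybe_b {
        Some(ref b) if b.pow_hash == pow_hash => {}
        _ => return Err("PowHashInvalid"),
    }
    match maybe_b.take().unwrap().try_seal(seal) {
        Err(old) => {
            *maybe_b = Some(old); // restore the block for the next attempt
            Err("PowInvalid")
        }
        Ok(_sealed) => Ok(()),
    }
}

fn main() {
    let slot = Mutex::new(Some(ClosedBlock { pow_hash: 7 }));
    assert_eq!(submit_seal(&slot, 7, 0), Err("PowInvalid"));      // restored
    assert_eq!(submit_seal(&slot, 7, 42), Ok(()));                // sealed
    assert_eq!(submit_seal(&slot, 7, 42), Err("PowHashInvalid")); // slot empty
}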
use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; -use ethsync::{EthSync, SyncState}; +use ethsync::{SyncStatusProvider, SyncState}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -25,7 +25,6 @@ use util::rlp::encode; use ethcore::client::*; use ethcore::block::{IsBlock}; use ethcore::views::*; -//#[macro_use] extern crate log; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; use v1::traits::{Eth, EthFilter}; @@ -33,15 +32,15 @@ use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncIn use v1::helpers::{PollFilter, PollManager}; /// Eth rpc implementation. -pub struct EthClient { - client: Weak, - sync: Weak, +pub struct EthClient where C: BlockChainClient, S: SyncStatusProvider { + client: Weak, + sync: Weak, hashrates: RwLock>, } -impl EthClient { +impl EthClient where C: BlockChainClient, S: SyncStatusProvider { /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), @@ -95,7 +94,7 @@ impl EthClient { } } -impl Eth for EthClient { +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), @@ -256,14 +255,14 @@ impl Eth for EthClient { } /// Eth filter rpc implementation. -pub struct EthFilterClient { - client: Weak, +pub struct EthFilterClient where C: BlockChainClient { + client: Weak, polls: Mutex>, } -impl EthFilterClient { +impl EthFilterClient where C: BlockChainClient { /// Creates new Eth filter client. - pub fn new(client: &Arc) -> Self { + pub fn new(client: &Arc) -> Self { EthFilterClient { client: Arc::downgrade(client), polls: Mutex::new(PollManager::new()) @@ -271,7 +270,7 @@ impl EthFilterClient { } } -impl EthFilter for EthFilterClient { +impl EthFilter for EthFilterClient where C: BlockChainClient + 'static { fn new_filter(&self, params: Params) -> Result { from_params::<(Filter,)>(params) .and_then(|(filter,)| { diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 9e24caad2..a686ed66f 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -17,24 +17,24 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; use jsonrpc_core::*; -use ethsync::EthSync; +use ethsync::SyncStatusProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient { - sync: Weak +pub struct NetClient where S: SyncStatusProvider { + sync: Weak } -impl NetClient { +impl NetClient where S: SyncStatusProvider { /// Creates new NetClient. - pub fn new(sync: &Arc) -> Self { + pub fn new(sync: &Arc) -> Self { NetClient { sync: Arc::downgrade(sync) } } } -impl Net for NetClient { +impl Net for NetClient where S: SyncStatusProvider + 'static { fn version(&self, _: Params) -> Result { Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..427a58e15 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -93,6 +93,12 @@ impl Default for SyncConfig { } } +/// Current sync status +pub trait SyncStatusProvider: Send + Sync { + /// Get sync status + fn status(&self) -> SyncStatus; +} + /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
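// Making EthClient generic over BlockChainClient and SyncStatusProvider, as
// this commit does, lets the RPC tests substitute a TestBlockChainClient.
// A trimmed sketch of that shape; the traits and types here are stand-ins
// (the real struct holds Weak handles rather than owned values).
trait BlockChainClient { fn best_block(&self) -> u64; }
trait SyncStatusProvider { fn peers(&self) -> usize; }

struct EthClient<C: BlockChainClient, S: SyncStatusProvider> {
    client: C,
    sync: S,
}

impl<C: BlockChainClient, S: SyncStatusProvider> EthClient<C, S> {
    fn syncing(&self) -> bool {
        self.sync.peers() > 0 && self.client.best_block() == 0
    }
}

// Test doubles drop in without touching the RPC code.
struct TestChain;
impl BlockChainClient for TestChain { fn best_block(&self) -> u64 { 0 } }

struct TestSync;
impl SyncStatusProvider for TestSync { fn peers(&self) -> usize { 3 } }

fn main() {
    let eth = EthClient { client: TestChain, sync: TestSync };
    assert!(eth.syncing());
}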
TODO: this should evetually become an IPC endpoint @@ -114,11 +120,6 @@ impl EthSync { sync } - /// Get sync status - pub fn status(&self) -> SyncStatus { - self.sync.read().unwrap().status() - } - /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref())); @@ -130,6 +131,13 @@ impl EthSync { } } +impl SyncStatusProvider for EthSync { + /// Get sync status + fn status(&self) -> SyncStatus { + self.sync.read().unwrap().status() + } +} + impl NetworkProtocolHandler for EthSync { fn initialize(&self, io: &NetworkContext) { io.register_timer(0, 1000).expect("Error registering sync timer"); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index e170a4a85..e7d5cf57f 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -25,6 +25,7 @@ use ethcore::receipt::Receipt; use ethcore::transaction::LocalizedTransaction; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; +use ethcore::block::ClosedBlock; pub struct TestBlockChainClient { pub blocks: RwLock>, @@ -125,6 +126,14 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn sealing_block(&self) -> &Mutex> { + unimplemented!(); + } + + fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { + unimplemented!(); + } + fn block_header(&self, id: BlockId) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) } From 423dd7e0a967a808b769304a245f4316e5a2aa5b Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 18:04:13 +0100 Subject: [PATCH 053/222] updated jsonrpc-core and http-server libs --- Cargo.lock | 10 +++++----- parity/main.rs | 4 ++-- rpc/Cargo.toml | 4 ++-- rpc/src/lib.rs | 22 ++++++++++------------ 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..15845c806 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -228,8 +228,8 @@ dependencies = [ "ethcore 0.9.99", "ethcore-util 0.9.99", "ethsync 0.9.99", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -400,7 +400,7 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -411,11 +411,11 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "2.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/parity/main.rs b/parity/main.rs index adc3972e4..f28ef84c3 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -193,7 +193,7 @@ 
fn setup_log(init: &Option) { fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { use rpc::v1::*; - let mut server = rpc::HttpServer::new(1); + let server = rpc::RpcServer::new(); for api in apis.into_iter() { match api { "web3" => server.add_delegate(Web3Client::new().to_delegate()), @@ -207,7 +207,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom } } } - Some(server.start_async(url, cors_domain)) + Some(server.start_http(url, cors_domain, 1)) } #[cfg(not(feature = "rpc"))] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..f324aba10 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -12,8 +12,8 @@ build = "build.rs" log = "0.3" serde = "0.7.0" serde_json = "0.7.0" -jsonrpc-core = "1.2" -jsonrpc-http-server = "2.1" +jsonrpc-core = "2.0" +jsonrpc-http-server = "3.0" ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 97a3a5fe5..731ded8c4 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -37,35 +37,33 @@ use self::jsonrpc_core::{IoHandler, IoDelegate}; pub mod v1; /// Http server. -pub struct HttpServer { - handler: IoHandler, - threads: usize, +pub struct RpcServer { + handler: Arc, } -impl HttpServer { +impl RpcServer { /// Construct new http server object with given number of threads. - pub fn new(threads: usize) -> HttpServer { - HttpServer { - handler: IoHandler::new(), - threads: threads, + pub fn new() -> RpcServer { + RpcServer { + handler: Arc::new(IoHandler::new()), } } /// Add io delegate. - pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { + pub fn add_delegate(&self, delegate: IoDelegate) where D: Send + Sync + 'static { self.handler.add_delegate(delegate); } /// Start server asynchronously in new thread and returns panic handler. 
- pub fn start_async(self, addr: &str, cors_domain: &str) -> Arc { + pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc { let addr = addr.to_owned(); let cors_domain = cors_domain.to_owned(); let panic_handler = PanicHandler::new_in_arc(); let ph = panic_handler.clone(); - let server = jsonrpc_http_server::Server::new(self.handler, self.threads); + let server = jsonrpc_http_server::Server::new(self.handler.clone()); thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || { ph.catch_panic(move || { - server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain)); + server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads); }).unwrap() }).expect("Error while creating jsonrpc http thread"); panic_handler From c302fa9a4ebae4710ad98933539fbb4b6998b6f4 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 9 Mar 2016 18:37:44 +0100 Subject: [PATCH 054/222] Style --- util/src/journaldb.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 3309612c2..1d854924a 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -243,9 +243,10 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - match journal_overlay.backing_overlay.raw(&h) { - Some(&(ref d, rc)) if rc > 0 => canon_insertions.push((h.clone(), d.clone())), //TODO: optimizie this to avoid data copy - _ => () + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if rc > 0 { + canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy + } } } canon_deletions = journal.deletions; @@ -352,7 +353,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = self.transaction_overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -573,7 +574,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From 8a83e27d6a8f2f298e6b0dc44a60f9190e8e6c2a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 22:55:41 +0400 Subject: [PATCH 055/222] cfg-test for noop verifier --- ethcore/src/verification/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 260121989..fe1f406cc 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -17,9 +17,11 @@ pub mod verification; pub mod verifier; mod canon_verifier; +#[cfg(test)] mod noop_verifier; pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; +#[cfg(test)] pub use self::noop_verifier::NoopVerifier; From accc1db43fc46e3bd2ab425c778dcfaa843bdec8 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 23:39:36 +0400 Subject: [PATCH 056/222] chaning docopt config a bit --- parity/main.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 9a45980ef..92400728d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -71,7 +71,7 @@ Parity. Ethereum Client. Usage: parity daemon [options] [ --no-bootstrap | ... ] - parity account + parity account (new | list) parity [options] [ --no-bootstrap | ... 
] Protocol Options: @@ -129,9 +129,10 @@ Miscellaneous Options: struct Args { cmd_daemon: bool, cmd_account: bool, + cmd_new: bool, + cmd_list: bool, arg_pid_file: String, arg_enode: Vec, - arg_command: String, flag_chain: String, flag_testnet: bool, flag_datadir: String, @@ -342,17 +343,17 @@ impl Configuration { .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); } if self.args.cmd_account { - self.execute_account_cli(&self.args.arg_command); + self.execute_account_cli(); return; } self.execute_client(); } - fn execute_account_cli(&self, command: &str) { + fn execute_account_cli(&self) { use util::keys::store::SecretStore; use rpassword::read_password; let mut secret_store = SecretStore::new(); - if command == "new" { + if self.args.cmd_new { println!("Please note that password is NOT RECOVERABLE."); println!("Type password: "); let password = read_password().unwrap(); @@ -367,7 +368,7 @@ impl Configuration { println!("{:?}", new_address); return; } - if command == "list" { + if self.args.cmd_list { println!("Known addresses:"); for &(addr, _) in secret_store.accounts().unwrap().iter() { println!("{:?}", addr); From 8b042ac875f0abe9968f70d76b44773cd64c4350 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 21:55:23 +0100 Subject: [PATCH 057/222] blockchain import_route --- ethcore/src/blockchain/block_info.rs | 6 +- ethcore/src/blockchain/blockchain.rs | 55 +++++++++--- ethcore/src/blockchain/import_route.rs | 119 +++++++++++++++++++++++++ ethcore/src/blockchain/mod.rs | 2 + 4 files changed, 167 insertions(+), 15 deletions(-) create mode 100644 ethcore/src/blockchain/import_route.rs diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index ce639bfed..335bdbb4e 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -18,6 +18,7 @@ use util::numbers::{U256,H256}; use header::BlockNumber; /// Brief info about inserted block. +#[derive(Clone)] pub struct BlockInfo { /// Block hash. pub hash: H256, @@ -30,6 +31,7 @@ pub struct BlockInfo { } /// Describes location of newly inserted block. +#[derive(Clone)] pub enum BlockLocation { /// It's part of the canon chain. CanonChain, @@ -42,6 +44,8 @@ pub enum BlockLocation { /// Hash of the newest common ancestor with old canon chain. ancestor: H256, /// Hashes of the blocks between ancestor and this block. - route: Vec + route: Vec, + /// Hashes of the blocks which were invalidated. + old_route: Vec, } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..e57f7208a 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -28,7 +28,7 @@ use blockchain::best_block::BestBlock; use blockchain::bloom_indexer::BloomIndexer; use blockchain::tree_route::TreeRoute; use blockchain::update::ExtrasUpdate; -use blockchain::CacheSize; +use blockchain::{CacheSize, ImportRoute}; const BLOOM_INDEX_SIZE: usize = 16; const BLOOM_LEVELS: u8 = 3; @@ -414,14 +414,14 @@ impl BlockChain { /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. 
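// The reorg bookkeeping in this commit splits a tree route at `index`:
// hashes before the index were on the old canon chain (and are reported
// reversed, newest first), the rest become the new canon extension. Sketch
// with u64 standing in for H256:
struct TreeRoute {
    blocks: Vec<u64>, // old-chain hashes followed by new-chain hashes
    index: usize,     // how many of `blocks` belong to the old chain
}

fn split_route(route: TreeRoute) -> (Vec<u64>, Vec<u64>) {
    let old_route: Vec<u64> = route.blocks.iter().take(route.index).cloned().collect();
    let new_route: Vec<u64> = route.blocks.into_iter().skip(route.index).collect();
    // Old blocks come out newest-first, mirroring `old_route.into_iter().rev()`.
    (old_route.into_iter().rev().collect(), new_route)
}

fn main() {
    let route = TreeRoute { blocks: vec![3, 4, 1, 2], index: 2 };
    let (invalidated, validated) = split_route(route);
    assert_eq!(invalidated, vec![4, 3]); // blocks 4 and 3 leave the canon chain
    assert_eq!(validated, vec![1, 2]);   // blocks 1 and 2 join it
}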
- pub fn insert_block(&self, bytes: &[u8], receipts: Vec) { + pub fn insert_block(&self, bytes: &[u8], receipts: Vec) -> ImportRoute { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); let hash = header.sha3(); if self.is_known(&hash) { - return; + return ImportRoute::none(); } // store block in db @@ -435,8 +435,10 @@ impl BlockChain { block_receipts: self.prepare_block_receipts_update(receipts, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), - info: info + info: info.clone(), }); + + ImportRoute::from(info) } /// Applies extras update. @@ -549,9 +551,14 @@ impl BlockChain { match route.blocks.len() { 0 => BlockLocation::CanonChain, - _ => BlockLocation::BranchBecomingCanonChain { - ancestor: route.ancestor, - route: route.blocks.into_iter().skip(route.index).collect() + _ => { + let old_route = route.blocks.iter().take(route.index).cloned().collect::>(); + + BlockLocation::BranchBecomingCanonChain { + ancestor: route.ancestor, + route: route.blocks.into_iter().skip(route.index).collect(), + old_route: old_route.into_iter().rev().collect(), + } } } } else { @@ -572,7 +579,7 @@ impl BlockChain { BlockLocation::CanonChain => { block_hashes.insert(number, info.hash.clone()); }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; @@ -661,7 +668,7 @@ impl BlockChain { ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels()) .add_bloom(&header.log_bloom(), header.number() as usize) }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. 
} => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; @@ -825,7 +832,7 @@ mod tests { use rustc_serialize::hex::FromHex; use util::hash::*; use util::sha3::Hashable; - use blockchain::{BlockProvider, BlockChain, BlockChainConfig}; + use blockchain::{BlockProvider, BlockChain, BlockChainConfig, ImportRoute}; use tests::helpers::*; use devtools::*; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; @@ -943,10 +950,30 @@ mod tests { let temp = RandomTempPath::new(); let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path()); - bc.insert_block(&b1, vec![]); - bc.insert_block(&b2, vec![]); - bc.insert_block(&b3a, vec![]); - bc.insert_block(&b3b, vec![]); + let ir1 = bc.insert_block(&b1, vec![]); + let ir2 = bc.insert_block(&b2, vec![]); + let ir3b = bc.insert_block(&b3b, vec![]); + let ir3a = bc.insert_block(&b3a, vec![]); + + assert_eq!(ir1, ImportRoute { + validated_blocks: vec![b1_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir2, ImportRoute { + validated_blocks: vec![b2_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir3b, ImportRoute { + validated_blocks: vec![b3b_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir3a, ImportRoute { + validated_blocks: vec![b3a_hash], + invalidated_blocks: vec![b3b_hash], + }); assert_eq!(bc.best_block_hash(), best_block_hash); assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0); diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs new file mode 100644 index 000000000..10629f2cb --- /dev/null +++ b/ethcore/src/blockchain/import_route.rs @@ -0,0 +1,119 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Import route. + +use util::hash::H256; +use blockchain::block_info::{BlockInfo, BlockLocation}; + +/// Import route for newly inserted block. +#[derive(Debug, PartialEq)] +pub struct ImportRoute { + /// Blocks that were invalidated by new block. + pub invalidated_blocks: Vec, + /// Blocks that were validted by new block. + pub validated_blocks: Vec, +} + +impl ImportRoute { + pub fn none() -> Self { + ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![], + } + } +} + +impl From for ImportRoute { + fn from(info: BlockInfo) -> ImportRoute { + match info.location { + BlockLocation::CanonChain => ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![info.hash], + }, + BlockLocation::Branch => ImportRoute::none(), + BlockLocation::BranchBecomingCanonChain { mut route, old_route, .. 
} => { + route.push(info.hash); + ImportRoute { + invalidated_blocks: old_route, + validated_blocks: route, + } + } + } + } +} + +#[cfg(test)] +mod tests { + use util::hash::H256; + use util::numbers::U256; + use blockchain::block_info::{BlockInfo, BlockLocation}; + use blockchain::ImportRoute; + + #[test] + fn import_route_none() { + assert_eq!(ImportRoute::none(), ImportRoute { + validated_blocks: vec![], + invalidated_blocks: vec![], + }); + } + + #[test] + fn import_route_branch() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::Branch, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute::none()); + } + + #[test] + fn import_route_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::CanonChain, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![H256::from(U256::from(1))], + }); + } + + #[test] + fn import_route_branch_becoming_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(2)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::BranchBecomingCanonChain { + ancestor: H256::from(U256::from(0)), + route: vec![H256::from(U256::from(1))], + old_route: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + } + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + invalidated_blocks: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + validated_blocks: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + }); + } +} diff --git a/ethcore/src/blockchain/mod.rs b/ethcore/src/blockchain/mod.rs index b0679b563..6559d8364 100644 --- a/ethcore/src/blockchain/mod.rs +++ b/ethcore/src/blockchain/mod.rs @@ -25,7 +25,9 @@ mod tree_route; mod update; #[cfg(test)] mod generator; +mod import_route; pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig}; pub use self::cache::CacheSize; pub use self::tree_route::TreeRoute; +pub use self::import_route::ImportRoute; From d7e729a4eaee966d5ef4ea9b1ac57ac32a0714f9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 9 Mar 2016 23:55:56 +0100 Subject: [PATCH 058/222] Fixed sync handling large forks --- sync/src/chain.rs | 4 ++-- sync/src/range_collection.rs | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index fe1b559cd..14f6d6344 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -843,8 +843,8 @@ impl ChainSync { self.downloading_bodies.remove(&n); self.downloading_headers.remove(&n); } - self.headers.remove_tail(&start); - self.bodies.remove_tail(&start); + self.headers.remove_from(&start); + self.bodies.remove_from(&start); } /// Request headers from a peer by block hash diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..9bb5cc522 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -42,6 +42,8 @@ pub trait RangeCollection { fn remove_head(&mut self, start: &K); /// Remove all elements >= `start` in the range that contains `start` fn remove_tail(&mut self, start: &K); + /// Remove all elements >= `start` + fn remove_from(&mut self, start: &K); /// Remove all elements >= `tail` fn insert_item(&mut self, key: K, value: V); /// Get an iterator over ranges @@ -137,6 +139,28 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } } + /// Remove the 
element and all following it. + fn remove_from(&mut self, key: &K) { + match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) { + Ok(index) => { self.drain(.. index + 1); }, + Err(index) =>{ + let mut empty = false; + match self.get_mut(index) { + Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => { + v.truncate((*key - *k).to_usize()); + empty = v.is_empty(); + } + _ => {} + } + if empty { + self.drain(.. index + 1); + } else { + self.drain(.. index); + } + }, + } + } + /// Remove range elements up to key fn remove_head(&mut self, key: &K) { if *key == FromUsize::from_usize(0) { @@ -272,5 +296,17 @@ fn test_range() { assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); r.remove_tail(&2); assert_eq!(r.range_iter().next(), None); + + let mut r = ranges.clone(); + r.remove_from(&20); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal); + r.remove_from(&17); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal); + r.remove_from(&15); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal); + r.remove_from(&3); + assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); + r.remove_from(&2); + assert_eq!(r.range_iter().next(), None); } From f397fb210f3ff0695595a28d3dc5483f59b07848 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 00:11:35 +0100 Subject: [PATCH 059/222] fixed typo --- ethcore/src/blockchain/import_route.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs index 10629f2cb..b0d76ef6e 100644 --- a/ethcore/src/blockchain/import_route.rs +++ b/ethcore/src/blockchain/import_route.rs @@ -24,7 +24,7 @@ use blockchain::block_info::{BlockInfo, BlockLocation}; pub struct ImportRoute { /// Blocks that were invalidated by new block. pub invalidated_blocks: Vec, - /// Blocks that were validted by new block. + /// Blocks that were validated by new block. pub validated_blocks: Vec, } From 84a741d0f9ba09bff6a409c077ebacdc531fd561 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 00:21:07 +0100 Subject: [PATCH 060/222] Don't call mark_as_bad needlessly --- ethcore/src/block_queue.rs | 6 ++++++ ethcore/src/client.rs | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 8f1105b8b..c83542f12 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -320,6 +320,9 @@ impl BlockQueue { /// Mark given block and all its children as bad. Stops verification. 
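/// An empty `block_hashes` slice is a cheap no-op: the early return added
/// below avoids taking the verification and processing locks needlessly.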
pub fn mark_as_bad(&mut self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } let mut verification_lock = self.verification.lock().unwrap(); let mut processing = self.processing.write().unwrap(); @@ -345,6 +348,9 @@ impl BlockQueue { /// Mark given block as processed pub fn mark_as_good(&mut self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } let mut processing = self.processing.write().unwrap(); for hash in block_hashes { processing.remove(&hash); diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 8471666aa..2d9b2e3c5 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -404,8 +404,12 @@ impl Client where V: Verifier { { let mut block_queue = self.block_queue.write().unwrap(); - block_queue.mark_as_bad(&bad_blocks); - block_queue.mark_as_good(&good_blocks); + if !bad_blocks.is_empty() { + block_queue.mark_as_bad(&bad_blocks); + } + if !good_blocks.is_empty() { + block_queue.mark_as_good(&good_blocks); + } } { From 3c1888c26abd94107f20a4a42cabaa3f01fbef53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:26:04 +0100 Subject: [PATCH 061/222] Fixing deps --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7df0c2541..1dbe54c8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,14 +25,14 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } +number_prefix = "0.2" +rpassword = "0.1" [dev-dependencies] ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } ethcore-rpc = { path = "rpc", features = ["dev"] } -number_prefix = "0.2" -rpassword = "0.1" [features] default = ["rpc"] From 878e38c0cf922fbdd3b51b0f71892418181903ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:33:25 +0100 Subject: [PATCH 062/222] Fixing deps again --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1dbe54c8f..e797a3eac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } -number_prefix = "0.2" rpassword = "0.1" [dev-dependencies] From 9f77a85491b714ee03de491471c23b2714548cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:35:46 +0100 Subject: [PATCH 063/222] Fixing compilation on nightly --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e797a3eac..22d0f9288 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ rpc = ["ethcore-rpc"] dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "clippy", "dev"] +travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"] [[bin]] path = "parity/main.rs" From ca2cf8e591404245fed40cc1f8d81f8a6e67087e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 10:05:51 +0100 Subject: [PATCH 064/222] Lowering minimal gas price --- parity/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index 
b3a0224d8..4cf52728e 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -108,7 +108,7 @@ API and Console Options: --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). Sealing/Mining Options: - --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 50000000000]. + --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000]. --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. From 02b7e7698ad07fccb61aa5f51b31e47ad599c851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 10:09:55 +0100 Subject: [PATCH 065/222] Breaking couple of lines to keep number of characters below limit --- parity/main.rs | 37 +++++++++++++++++++++++++++---------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 4cf52728e..729f6aeed 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -247,12 +247,15 @@ impl Configuration { } fn author(&self) -> Address { - Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) + Address::from_str(&self.args.flag_author).unwrap_or_else(|_| { + die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author) + }) } fn gasprice(&self) -> U256 { - U256::from_dec_str(self.args.flag_gasprice).unwrap_or_else(|_| die("{}: Invalid gasprice given. Must be a - decimal unsigned 256-bit number.")) + U256::from_dec_str(self.args.flag_gasprice).unwrap_or_else(|_| { + die("{}: Invalid gasprice given. Must be a decimal unsigned 256-bit number.") + }) } fn extra_data(&self) -> Bytes { @@ -275,7 +278,9 @@ impl Configuration { "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), "morden" | "testnet" => ethereum::new_morden(), "olympic" => ethereum::new_olympic(), - f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()), + f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| { + die!("{}: Couldn't read chain specification file. 
Sure it exists?", f) + }).as_ref()), } } @@ -291,7 +296,9 @@ impl Configuration { if self.args.flag_no_bootstrap { Vec::new() } else { match self.args.arg_enode.len() { 0 => spec.nodes().clone(), - _ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s))).collect(), + _ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(|| { + die!("{}: Invalid node address format given for a boot node.", s) + })).collect(), } } } @@ -302,17 +309,23 @@ impl Configuration { let mut public_address = None; if let Some(ref a) = self.args.flag_address { - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --address", a))); + public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| { + die!("{}: Invalid listen/public address given with --address", a) + })); listen_address = public_address; } if listen_address.is_none() { - listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address))); + listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| { + die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address) + })); } if let Some(ref a) = self.args.flag_public_address { if public_address.is_some() { die!("Conflicting flags provided: --address and --public-address"); } - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --public-address", a))); + public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| { + die!("{}: Invalid listen/public address given with --public-address", a) + })); } (listen_address, public_address) } @@ -403,7 +416,7 @@ impl Configuration { self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) ); - SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); + SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. 
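// --rpcapi (geth-compatible) takes precedence; otherwise --jsonrpc-apis is used.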
let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); @@ -475,7 +488,11 @@ impl Informant { let report = client.report(); let sync_info = sync.status(); - if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { + if let (_, _, &Some(ref last_report)) = ( + self.chain_info.read().unwrap().deref(), + self.cache_info.read().unwrap().deref(), + self.report.read().unwrap().deref() + ) { println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, From 0a7cda09ffc9ec876599b23f2955763ab0ae6539 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 10:17:17 +0100 Subject: [PATCH 066/222] changed route name to enacted and retracted --- ethcore/src/blockchain/block_info.rs | 4 +-- ethcore/src/blockchain/blockchain.rs | 30 ++++++++++----------- ethcore/src/blockchain/import_route.rs | 36 +++++++++++++------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index 335bdbb4e..cf16a8834 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -44,8 +44,8 @@ pub enum BlockLocation { /// Hash of the newest common ancestor with old canon chain. ancestor: H256, /// Hashes of the blocks between ancestor and this block. - route: Vec, + enacted: Vec, /// Hashes of the blocks which were invalidated. - old_route: Vec, + retracted: Vec, } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e57f7208a..d67c1b7f1 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -552,12 +552,12 @@ impl BlockChain { match route.blocks.len() { 0 => BlockLocation::CanonChain, _ => { - let old_route = route.blocks.iter().take(route.index).cloned().collect::>(); + let retracted = route.blocks.iter().take(route.index).cloned().collect::>(); BlockLocation::BranchBecomingCanonChain { ancestor: route.ancestor, - route: route.blocks.into_iter().skip(route.index).collect(), - old_route: old_route.into_iter().rev().collect(), + enacted: route.blocks.into_iter().skip(route.index).collect(), + retracted: retracted.into_iter().rev().collect(), } } } @@ -579,11 +579,11 @@ impl BlockChain { BlockLocation::CanonChain => { block_hashes.insert(number, info.hash.clone()); }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - for (index, hash) in route.iter().cloned().enumerate() { + for (index, hash) in enacted.iter().cloned().enumerate() { block_hashes.insert(start_number + index as BlockNumber, hash); } @@ -668,11 +668,11 @@ impl BlockChain { ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels()) .add_bloom(&header.log_bloom(), header.number() as usize) }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. 
} => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - let mut blooms: Vec = route.iter() + let mut blooms: Vec = enacted.iter() .map(|hash| self.block(hash).unwrap()) .map(|bytes| BlockView::new(&bytes).header_view().log_bloom()) .collect(); @@ -956,23 +956,23 @@ mod tests { let ir3a = bc.insert_block(&b3a, vec![]); assert_eq!(ir1, ImportRoute { - validated_blocks: vec![b1_hash], - invalidated_blocks: vec![], + enacted: vec![b1_hash], + retracted: vec![], }); assert_eq!(ir2, ImportRoute { - validated_blocks: vec![b2_hash], - invalidated_blocks: vec![], + enacted: vec![b2_hash], + retracted: vec![], }); assert_eq!(ir3b, ImportRoute { - validated_blocks: vec![b3b_hash], - invalidated_blocks: vec![], + enacted: vec![b3b_hash], + retracted: vec![], }); assert_eq!(ir3a, ImportRoute { - validated_blocks: vec![b3a_hash], - invalidated_blocks: vec![b3b_hash], + enacted: vec![b3a_hash], + retracted: vec![b3b_hash], }); assert_eq!(bc.best_block_hash(), best_block_hash); diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs index b0d76ef6e..262b70899 100644 --- a/ethcore/src/blockchain/import_route.rs +++ b/ethcore/src/blockchain/import_route.rs @@ -23,16 +23,16 @@ use blockchain::block_info::{BlockInfo, BlockLocation}; #[derive(Debug, PartialEq)] pub struct ImportRoute { /// Blocks that were invalidated by new block. - pub invalidated_blocks: Vec, + pub retracted: Vec, /// Blocks that were validated by new block. - pub validated_blocks: Vec, + pub enacted: Vec, } impl ImportRoute { pub fn none() -> Self { ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![], + retracted: vec![], + enacted: vec![], } } } @@ -41,15 +41,15 @@ impl From for ImportRoute { fn from(info: BlockInfo) -> ImportRoute { match info.location { BlockLocation::CanonChain => ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![info.hash], + retracted: vec![], + enacted: vec![info.hash], }, BlockLocation::Branch => ImportRoute::none(), - BlockLocation::BranchBecomingCanonChain { mut route, old_route, .. } => { - route.push(info.hash); + BlockLocation::BranchBecomingCanonChain { mut enacted, retracted, .. 
} => { + enacted.push(info.hash); ImportRoute { - invalidated_blocks: old_route, - validated_blocks: route, + retracted: retracted, + enacted: enacted, } } } @@ -66,8 +66,8 @@ mod tests { #[test] fn import_route_none() { assert_eq!(ImportRoute::none(), ImportRoute { - validated_blocks: vec![], - invalidated_blocks: vec![], + enacted: vec![], + retracted: vec![], }); } @@ -93,8 +93,8 @@ mod tests { }; assert_eq!(ImportRoute::from(info), ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![H256::from(U256::from(1))], + retracted: vec![], + enacted: vec![H256::from(U256::from(1))], }); } @@ -106,14 +106,14 @@ mod tests { total_difficulty: U256::from(0), location: BlockLocation::BranchBecomingCanonChain { ancestor: H256::from(U256::from(0)), - route: vec![H256::from(U256::from(1))], - old_route: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], } }; assert_eq!(ImportRoute::from(info), ImportRoute { - invalidated_blocks: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], - validated_blocks: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], }); } } From a2046b429f9120a0532dc1da3fc4467fa20c6469 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 13:27:33 +0400 Subject: [PATCH 067/222] exposing only one func --- sync/src/chain.rs | 9 +++++++-- sync/src/lib.rs | 3 +-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 7294570fe..ea9a47da2 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -43,6 +43,7 @@ use io::SyncIo; use transaction_queue::TransactionQueue; use time; use super::SyncConfig; +use ethcore; known_heap_size!(0, PeerInfo, Header, HeaderId); @@ -1300,8 +1301,12 @@ impl ChainSync { // TODO [todr] propagate transactions? 
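// For now incoming transactions are only queued locally; rebroadcasting
// them to peers is left as future work per the TODO above.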
} - pub fn transaction_queue(&self) -> &Mutex { - return &self.transaction_queue; + /// Add transaction to the transaction queue + pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) + where T: Fn(&Address) -> U256 + { + let mut queue = self.transaction_queue.lock().unwrap(); + queue.add(transaction, fetch_nonce); } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index fdcf79749..c9eb792a1 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -135,8 +135,7 @@ impl EthSync { let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); let sync = self.sync.write().unwrap(); - let mut queue = sync.transaction_queue().lock().unwrap(); - queue.add(transaction, &nonce_fn); + sync.insert_transaction(transaction, &nonce_fn); } } From ff12b53ba69899dd4a700ff58720bdf6d29ca22c Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 10:40:16 +0100 Subject: [PATCH 068/222] Stop workers before stopping event loop --- util/src/io/service.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 83fa71b8a..8a34ee80a 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -153,7 +153,7 @@ struct UserTimer { pub struct IoManager where Message: Send + Sync { timers: Arc>>, handlers: Vec>>, - _workers: Vec, + workers: Vec, worker_channel: chase_lev::Worker>, work_ready: Arc, } @@ -180,7 +180,7 @@ impl IoManager where Message: Send + Sync + Clone + 'static { timers: Arc::new(RwLock::new(HashMap::new())), handlers: Vec::new(), worker_channel: worker, - _workers: workers, + workers: workers, work_ready: work_ready, }; try!(event_loop.run(&mut io)); @@ -230,7 +230,10 @@ impl Handler for IoManager where Message: Send + Clone + Sync fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { match msg { - IoMessage::Shutdown => event_loop.shutdown(), + IoMessage::Shutdown => { + self.workers.clear(); + event_loop.shutdown(); + }, IoMessage::AddHandler { handler } => { let handler_id = { self.handlers.push(handler.clone()); From 90ae7500da814df956e7b2fb228c1a15711c5886 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Thu, 10 Mar 2016 11:07:10 +0100 Subject: [PATCH 069/222] Update main.rs --- parity/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index 729f6aeed..745912028 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -108,7 +108,7 @@ API and Console Options: --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). Sealing/Mining Options: - --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000]. + --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000]. --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. 
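The change in patch 067 narrows ChainSync's public surface: instead of handing
out the transaction queue mutex, the sync object now accepts a signed
transaction together with a nonce-lookup closure. A minimal sketch of the
resulting calling pattern (illustrative only; `chain` stands in for the real
BlockChainClient handle, mirroring EthSync::insert_transaction in
sync/src/lib.rs above):

    fn queue_transaction(sync: &ChainSync, chain: &BlockChainClient, tx: SignedTransaction) {
        // The queue wants the next valid nonce per sender: the current
        // chain nonce plus one, exactly as the closure in sync/src/lib.rs.
        let nonce_fn = |a: &Address| chain.nonce(a) + U256::one();
        sync.insert_transaction(tx, &nonce_fn);
    }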
From 276768a82600045a2f95767d5d5c6dbb72873753 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:11:59 +0100 Subject: [PATCH 070/222] Failing test case for #656 --- sync/src/transaction_queue.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..38e70d1fc 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -541,6 +541,28 @@ mod test { assert_eq!(top[0], tx); } + #[test] + fn should_correctly_update_futures_when_removing() { + // given + let prev_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next2_nonce = |a: &Address| default_nonce(a) + U256::from(2); + + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + txq.add(tx.clone(), &prev_nonce); + txq.add(tx2.clone(), &prev_nonce); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx.hash(), &next2_nonce); + // should remove both transactions since they are not valid + + // then + assert_eq!(txq.status().pending, 0); + assert_eq!(txq.status().future, 0); + } + #[test] fn should_move_transactions_if_gap_filled() { // given From 0cf405527e80879a6e97fae68707b86ec67403be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:14:25 +0100 Subject: [PATCH 071/222] Fixing update height bug --- sync/src/transaction_queue.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 38e70d1fc..ac8debfc9 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -277,7 +277,12 @@ impl TransactionQueue { }; for k in all_nonces_from_sender { let order = self.future.drop(&sender, &k).unwrap(); - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + if k >= current_nonce { + self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + } else { + // Remove the transaction completely + self.by_hash.remove(&order.hash); + } } } From 6d0578e19c5b5442ccdb42d695fb6a70238cf6ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:16:54 +0100 Subject: [PATCH 072/222] Additional explanation for ordering of commit/insert_block --- ethcore/src/client.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 874fc9646..aaf5fd728 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -396,7 +396,8 @@ impl Client where V: Verifier { .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); - // And update the chain + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) self.chain.write().unwrap() .insert_block(&block.bytes, receipts); From 6681aaf76af521a4c67bfa4d85fbaedaa4029915 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 11:32:10 +0100 Subject: [PATCH 073/222] split client into few submodules --- ethcore/src/{ => client}/client.rs | 123 ++--------------------------- ethcore/src/client/config.rs | 31 ++++++++ ethcore/src/client/ids.rs | 44 +++++++++++ ethcore/src/client/mod.rs | 102 ++++++++++++++++++++++++ 4 files changed, 183 insertions(+), 117 deletions(-) rename ethcore/src/{ => client}/client.rs (85%) create mode 100644 ethcore/src/client/config.rs create mode 100644 ethcore/src/client/ids.rs create mode 100644 
ethcore/src/client/mod.rs diff --git a/ethcore/src/client.rs b/ethcore/src/client/client.rs similarity index 85% rename from ethcore/src/client.rs rename to ethcore/src/client/client.rs index b342cef15..2f9536b2e 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client/client.rs @@ -20,7 +20,6 @@ use std::marker::PhantomData; use std::sync::atomic::AtomicBool; use util::*; use util::panics::*; -use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; use header::{BlockNumber}; @@ -28,7 +27,6 @@ use state::State; use spec::Spec; use engine::Engine; use views::HeaderView; -use block_queue::BlockQueue; use service::{NetSyncMessage, SyncMessage}; use env_info::LastHashes; use verification::*; @@ -38,32 +36,9 @@ use extras::TransactionAddress; use filter::Filter; use log_entry::LocalizedLogEntry; use util::keys::store::SecretStore; -pub use block_queue::{BlockQueueConfig, BlockQueueInfo}; -pub use blockchain::{TreeRoute, BlockChainConfig, CacheSize as BlockChainCacheSize}; - -/// Uniquely identifies block. -#[derive(Debug, PartialEq, Clone)] -pub enum BlockId { - /// Block's sha3. - /// Querying by hash is always faster. - Hash(H256), - /// Block number within canon blockchain. - Number(BlockNumber), - /// Earliest block (genesis). - Earliest, - /// Latest mined block. - Latest -} - -/// Uniquely identifies transaction. -#[derive(Debug, PartialEq, Clone)] -pub enum TransactionId { - /// Transaction's sha3. - Hash(H256), - /// Block id and transaction index within this block. - /// Querying by block position is always faster. - Location(BlockId, usize) -} +use block_queue::{BlockQueue, BlockQueueInfo}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, CacheSize as BlockChainCacheSize}; +use client::{BlockId, TransactionId, ClientConfig, BlockChainClient}; /// General block status #[derive(Debug, Eq, PartialEq)] @@ -78,30 +53,6 @@ pub enum BlockStatus { Unknown, } -/// Client configuration. Includes configs for all sub-systems. -#[derive(Debug)] -pub struct ClientConfig { - /// Block queue configuration. - pub queue: BlockQueueConfig, - /// Blockchain configuration. - pub blockchain: BlockChainConfig, - /// Prefer journal rather than archive. - pub prefer_journal: bool, - /// The name of the client instance. - pub name: String, -} - -impl Default for ClientConfig { - fn default() -> ClientConfig { - ClientConfig { - queue: Default::default(), - blockchain: Default::default(), - prefer_journal: false, - name: Default::default(), - } - } -} - /// Information about the blockchain gathered together. #[derive(Debug)] pub struct BlockChainInfo { @@ -123,72 +74,8 @@ impl fmt::Display for BlockChainInfo { } } -/// Blockchain database client. Owns and manages a blockchain and a block queue. -pub trait BlockChainClient : Sync + Send { - /// Get raw block header data by block id. - fn block_header(&self, id: BlockId) -> Option; - - /// Get raw block body data by block id. - /// Block body is an RLP list of two items: uncles and transactions. - fn block_body(&self, id: BlockId) -> Option; - - /// Get raw block data by block header hash. - fn block(&self, id: BlockId) -> Option; - - /// Get block status by block header hash. - fn block_status(&self, id: BlockId) -> BlockStatus; - - /// Get block total difficulty. - fn block_total_difficulty(&self, id: BlockId) -> Option; - - /// Get address nonce. - fn nonce(&self, address: &Address) -> U256; - - /// Get block hash. - fn block_hash(&self, id: BlockId) -> Option; - - /// Get address code. 
- fn code(&self, address: &Address) -> Option; - - /// Get transaction with given hash. - fn transaction(&self, id: TransactionId) -> Option; - - /// Get a tree route between `from` and `to`. - /// See `BlockChain::tree_route`. - fn tree_route(&self, from: &H256, to: &H256) -> Option; - - /// Get latest state node - fn state_data(&self, hash: &H256) -> Option; - - /// Get raw block receipts data by block header hash. - fn block_receipts(&self, hash: &H256) -> Option; - - /// Import a block into the blockchain. - fn import_block(&self, bytes: Bytes) -> ImportResult; - - /// Get block queue information. - fn queue_info(&self) -> BlockQueueInfo; - - /// Clear block queue and abort all import activity. - fn clear_queue(&self); - - /// Get blockchain information. - fn chain_info(&self) -> BlockChainInfo; - - /// Get the best block header. - fn best_block_header(&self) -> Bytes { - self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap() - } - - /// Returns numbers of blocks containing given bloom. - fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; - - /// Returns logs matching given filter. - fn logs(&self, filter: Filter) -> Vec; -} - -#[derive(Default, Clone, Debug, Eq, PartialEq)] /// Report on the status of a client. +#[derive(Default, Clone, Debug, Eq, PartialEq)] pub struct ClientReport { /// How many blocks have been imported so far. pub blocks_imported: usize, @@ -679,6 +566,8 @@ impl BlockChainClient for Client where V: Verifier { } fn logs(&self, filter: Filter) -> Vec { + // TODO: lock blockchain only once + let mut blocks = filter.bloom_possibilities().iter() .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone())) .flat_map(|m| m) diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs new file mode 100644 index 000000000..484c8d0c6 --- /dev/null +++ b/ethcore/src/client/config.rs @@ -0,0 +1,31 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +pub use block_queue::BlockQueueConfig; +pub use blockchain::BlockChainConfig; + +/// Client configuration. Includes configs for all sub-systems. +#[derive(Debug, Default)] +pub struct ClientConfig { + /// Block queue configuration. + pub queue: BlockQueueConfig, + /// Blockchain configuration. + pub blockchain: BlockChainConfig, + /// Prefer journal rather than archive. + pub prefer_journal: bool, + /// The name of the client instance. + pub name: String, +} diff --git a/ethcore/src/client/ids.rs b/ethcore/src/client/ids.rs new file mode 100644 index 000000000..303657a76 --- /dev/null +++ b/ethcore/src/client/ids.rs @@ -0,0 +1,44 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Unique identifiers. + +use util::hash::H256; +use header::BlockNumber; + +/// Uniquely identifies block. +#[derive(Debug, PartialEq, Clone)] +pub enum BlockId { + /// Block's sha3. + /// Querying by hash is always faster. + Hash(H256), + /// Block number within canon blockchain. + Number(BlockNumber), + /// Earliest block (genesis). + Earliest, + /// Latest mined block. + Latest +} + +/// Uniquely identifies transaction. +#[derive(Debug, PartialEq, Clone)] +pub enum TransactionId { + /// Transaction's sha3. + Hash(H256), + /// Block id and transaction index within this block. + /// Querying by block position is always faster. + Location(BlockId, usize) +} diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs new file mode 100644 index 000000000..0daa17553 --- /dev/null +++ b/ethcore/src/client/mod.rs @@ -0,0 +1,102 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Blockchain database client. + +mod client; +mod config; +mod ids; + +pub use self::client::*; +pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig}; +pub use self::ids::{BlockId, TransactionId}; + +use util::bytes::Bytes; +use util::hash::{Address, H256, H2048}; +use util::numbers::U256; +use blockchain::TreeRoute; +use block_queue::BlockQueueInfo; +use header::BlockNumber; +use transaction::LocalizedTransaction; +use log_entry::LocalizedLogEntry; +use filter::Filter; +use error::ImportResult; + +/// Blockchain database client. Owns and manages a blockchain and a block queue. +pub trait BlockChainClient : Sync + Send { + /// Get raw block header data by block id. + fn block_header(&self, id: BlockId) -> Option; + + /// Get raw block body data by block id. + /// Block body is an RLP list of two items: uncles and transactions. + fn block_body(&self, id: BlockId) -> Option; + + /// Get raw block data by block header hash. + fn block(&self, id: BlockId) -> Option; + + /// Get block status by block header hash. + fn block_status(&self, id: BlockId) -> BlockStatus; + + /// Get block total difficulty. + fn block_total_difficulty(&self, id: BlockId) -> Option; + + /// Get address nonce. + fn nonce(&self, address: &Address) -> U256; + + /// Get block hash. + fn block_hash(&self, id: BlockId) -> Option; + + /// Get address code. 
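+ /// (`None` when no code is stored for the account.)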
+ fn code(&self, address: &Address) -> Option; + + /// Get transaction with given hash. + fn transaction(&self, id: TransactionId) -> Option; + + /// Get a tree route between `from` and `to`. + /// See `BlockChain::tree_route`. + fn tree_route(&self, from: &H256, to: &H256) -> Option; + + /// Get latest state node + fn state_data(&self, hash: &H256) -> Option; + + /// Get raw block receipts data by block header hash. + fn block_receipts(&self, hash: &H256) -> Option; + + /// Import a block into the blockchain. + fn import_block(&self, bytes: Bytes) -> ImportResult; + + /// Get block queue information. + fn queue_info(&self) -> BlockQueueInfo; + + /// Clear block queue and abort all import activity. + fn clear_queue(&self); + + /// Get blockchain information. + fn chain_info(&self) -> BlockChainInfo; + + /// Get the best block header. + fn best_block_header(&self) -> Bytes { + // TODO: lock blockchain only once + self.block_header(BlockId::Hash(self.chain_info().best_block_hash)).unwrap() + } + + /// Returns numbers of blocks containing given bloom. + fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option>; + + /// Returns logs matching given filter. + fn logs(&self, filter: Filter) -> Vec; +} + From eb1fab92024779aa91d4c83844677347b0b071e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 12:33:29 +0100 Subject: [PATCH 074/222] Adding clippy support to ethminer. --- miner/Cargo.toml | 4 ++++ miner/build.rs | 25 +++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 miner/build.rs diff --git a/miner/Cargo.toml b/miner/Cargo.toml index fb3f24210..713182563 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -5,6 +5,10 @@ license = "GPL-3.0" name = "ethminer" version = "0.9.99" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] ethcore-util = { path = "../util" } diff --git a/miner/build.rs b/miner/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/miner/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} From 9db4720162a2d46e81d1d595c0a052c0e2452f60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 14:06:47 +0100 Subject: [PATCH 075/222] Fixing clippy warnings. 
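Mostly mechanical changes: `Address` and `H256` are `Copy`, so dereferencing
replaces cloning, and `import_transactions` now returns the queue's `Result`
instead of swallowing it (tests `unwrap()` it; other call sites discard it
explicitly with `let _ =`). For example, clippy's `clone_on_copy` lint
prefers a plain dereference:

    // before
    chain.block(BlockId::Hash(hash.clone()))
    // after
    chain.block(BlockId::Hash(*hash))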
--- miner/src/miner.rs | 10 +++++----- miner/src/transaction_queue.rs | 28 ++++++++++++++-------------- parity/main.rs | 4 ++-- sync/src/chain.rs | 2 +- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 2c18f3a79..8e93defcf 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -31,7 +31,7 @@ pub trait MinerService { fn status(&self) -> MinerStatus; /// Imports transactions to transaction queue. - fn import_transactions(&self, transactions: Vec, fetch_nonce: T) + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256; /// Removes all transactions from the queue and restart mining operation. @@ -129,10 +129,10 @@ impl MinerService for Miner { } } - fn import_transactions(&self, transactions: Vec, fetch_nonce: T) + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(transactions, fetch_nonce); + transaction_queue.add_all(transactions, fetch_nonce) } fn prepare_sealing(&self, chain: &BlockChainClient) { @@ -180,7 +180,7 @@ impl MinerService for Miner { fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain - .block(BlockId::Hash(hash.clone())) + .block(BlockId::Hash(*hash)) // Client should send message after commit to db and inserting to chain. .expect("Expected in-chain blocks."); let block = BlockView::new(&block); @@ -202,7 +202,7 @@ impl MinerService for Miner { let _sender = tx.sender(); } let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); + let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs index 81825773f..f64bd7318 100644 --- a/miner/src/transaction_queue.rs +++ b/miner/src/transaction_queue.rs @@ -360,7 +360,7 @@ impl TransactionQueue { self.update_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current - self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender, current_nonce, current_nonce); return; } @@ -376,7 +376,7 @@ impl TransactionQueue { self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current. It should also update last_nonces. 
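// (`sender` is a `Copy` type, so it is passed by value below instead of cloned.)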
- self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender, current_nonce, current_nonce); return; } } @@ -391,7 +391,7 @@ impl TransactionQueue { for k in all_nonces_from_sender { let order = self.future.drop(&sender, &k).unwrap(); if k >= current_nonce { - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { // Remove the transaction completely self.by_hash.remove(&order.hash); @@ -411,7 +411,7 @@ impl TransactionQueue { // Goes to future or is removed let order = self.current.drop(&sender, &k).unwrap(); if k >= current_nonce { - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { self.by_hash.remove(&order.hash); } @@ -452,8 +452,8 @@ impl TransactionQueue { // remove also from priority and hash self.future.by_priority.remove(&order); // Put to current - let order = order.update_height(current_nonce.clone(), first_nonce); - self.current.insert(address.clone(), current_nonce, order); + let order = order.update_height(current_nonce, first_nonce); + self.current.insert(address, current_nonce, order); current_nonce = current_nonce + U256::one(); } } @@ -501,10 +501,10 @@ impl TransactionQueue { } let base_nonce = fetch_nonce(&address); - Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); - self.last_nonces.insert(address.clone(), nonce); + Self::replace_transaction(tx, base_nonce, &mut self.current, &mut self.by_hash); + self.last_nonces.insert(address, nonce); // But maybe there are some more items waiting in future? - self.move_matching_future_to_current(address.clone(), nonce + U256::one(), base_nonce); + self.move_matching_future_to_current(address, nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } @@ -518,7 +518,7 @@ impl TransactionQueue { let address = tx.sender(); let nonce = tx.nonce(); - by_hash.insert(hash.clone(), tx); + by_hash.insert(hash, tx); if let Some(old) = set.insert(address, nonce, order.clone()) { // There was already transaction in queue. Let's check which one should stay let old_fee = old.gas_price; @@ -642,7 +642,7 @@ mod test { txq.set_minimal_gas_price(tx.gas_price + U256::one()); // when - txq.add(tx, &default_nonce); + txq.add(tx, &default_nonce).unwrap_err(); // then let stats = txq.status(); @@ -722,8 +722,8 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); - txq.add(tx.clone(), &prev_nonce); - txq.add(tx2.clone(), &prev_nonce); + txq.add(tx.clone(), &prev_nonce).unwrap(); + txq.add(tx2.clone(), &prev_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when @@ -861,7 +861,7 @@ mod test { fn should_drop_transactions_with_old_nonces() { let mut txq = TransactionQueue::new(); let tx = new_tx(); - let last_nonce = tx.nonce.clone() + U256::one(); + let last_nonce = tx.nonce + U256::one(); let fetch_last_nonce = |_a: &Address| last_nonce; // when diff --git a/parity/main.rs b/parity/main.rs index 9dbc3e6be..c73f971d9 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -259,8 +259,8 @@ impl Configuration { } fn gasprice(&self) -> U256 { - U256::from_dec_str(self.args.flag_gasprice).unwrap_or_else(|_| { - die("{}: Invalid gasprice given. 
Must be a decimal unsigned 256-bit number.") + U256::from_dec_str(self.args.flag_gasprice.as_str()).unwrap_or_else(|_| { + die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", self.args.flag_gasprice) }) } diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 46eaebe4c..85f5d6510 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -936,7 +936,7 @@ impl ChainSync { } let chain = io.chain(); let fetch_nonce = |a: &Address| chain.nonce(a); - self.miner.import_transactions(transactions, fetch_nonce); + let _ = self.miner.import_transactions(transactions, fetch_nonce); Ok(()) } From 027f122aea154b14c7563386251ff0407b4d2969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 14:24:33 +0100 Subject: [PATCH 076/222] Removing get prefix from poll_info --- rpc/src/v1/helpers/poll_manager.rs | 18 +++++++++--------- rpc/src/v1/impls/eth.rs | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 36a6352c2..0297384d1 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -84,7 +84,7 @@ impl PollManager where T: Timer { } /// Returns number of block when last poll happend. - pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { + pub fn poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { self.polls.prune(); self.polls.get(id) } @@ -124,21 +124,21 @@ mod tests { *time.borrow_mut() = 10; indexer.update_poll(&0, 21); - assert_eq!(indexer.get_poll_info(&0).unwrap().filter, false); - assert_eq!(indexer.get_poll_info(&0).unwrap().block_number, 21); + assert_eq!(indexer.poll_info(&0).unwrap().filter, false); + assert_eq!(indexer.poll_info(&0).unwrap().block_number, 21); *time.borrow_mut() = 30; indexer.update_poll(&1, 23); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + assert_eq!(indexer.poll_info(&1).unwrap().filter, true); + assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); *time.borrow_mut() = 75; indexer.update_poll(&0, 30); - assert!(indexer.get_poll_info(&0).is_none()); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + assert!(indexer.poll_info(&0).is_none()); + assert_eq!(indexer.poll_info(&1).unwrap().filter, true); + assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); indexer.remove_poll(&1); - assert!(indexer.get_poll_info(&1).is_none()); + assert!(indexer.poll_info(&1).is_none()); } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..479bae95b 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -307,7 +307,7 @@ impl EthFilter for EthFilterClient { let client = take_weak!(self.client); from_params::<(Index,)>(params) .and_then(|(index,)| { - let info = self.polls.lock().unwrap().get_poll_info(&index.value()).cloned(); + let info = self.polls.lock().unwrap().poll_info(&index.value()).cloned(); match info { None => Ok(Value::Array(vec![] as Vec)), Some(info) => match info.filter { From 9ea3c0eba00efc5c7e13abc26ac2b7f7a5490ca4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 15:20:54 +0100 Subject: [PATCH 077/222] Fixing compilation on beta & stable --- miner/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 4fccc6d51..0cee4ef43 100644 --- 
a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -15,8 +15,8 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] //! Miner module //! Keeps track of transactions and mined block. From 9741d48496171b387732ca37a2b8f222fbc3983a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 15:35:36 +0100 Subject: [PATCH 078/222] Transaction data associated with polls. --- rpc/src/v1/helpers/poll_manager.rs | 104 +++++++++++++++++++++++++++-- 1 file changed, 100 insertions(+), 4 deletions(-) diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 36a6352c2..6c0862633 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -16,6 +16,8 @@ //! Indexes all rpc poll requests. +use util::hash::H256; +use std::collections::HashMap; use transient_hashmap::{TransientHashMap, Timer, StandardTimer}; /// Lifetime of poll (in seconds). @@ -43,7 +45,8 @@ impl Clone for PollInfo where F: Clone { /// Lazily garbage collects unused polls info. pub struct PollManager where T: Timer { polls: TransientHashMap, T>, - next_available_id: PollId + transactions_data: HashMap>, + next_available_id: PollId, } impl PollManager { @@ -57,15 +60,25 @@ impl PollManager where T: Timer { pub fn new_with_timer(timer: T) -> Self { PollManager { polls: TransientHashMap::new_with_timer(POLL_LIFETIME, timer), + transactions_data: HashMap::new(), next_available_id: 0, } } + fn prune(&mut self) { + self.polls.prune(); + // self.polls.prune() + // .into_iter() + // .map(|key| { + // self.transactions_data.remove(key); + // }); + } + /// Returns id which can be used for new poll. /// /// Stores information when last poll happend. pub fn create_poll(&mut self, filter: F, block: BlockNumber) -> PollId { - self.polls.prune(); + self.prune(); let id = self.next_available_id; self.next_available_id += 1; self.polls.insert(id, PollInfo { @@ -77,7 +90,7 @@ impl PollManager where T: Timer { /// Updates information when last poll happend. pub fn update_poll(&mut self, id: &PollId, block: BlockNumber) { - self.polls.prune(); + self.prune(); if let Some(info) = self.polls.get_mut(id) { info.block_number = block; } @@ -85,13 +98,27 @@ impl PollManager where T: Timer { /// Returns number of block when last poll happend. pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { - self.polls.prune(); + self.prune(); self.polls.get(id) } + pub fn set_poll_transactions(&mut self, id: &PollId, transactions: Vec) { + self.prune(); + if self.polls.get(id).is_some() { + self.transactions_data.insert(*id, transactions); + } + } + + /// Returns last transactions hashes for given poll. + pub fn poll_transactions(&mut self, id: &PollId) -> Option<&Vec> { + self.prune(); + self.transactions_data.get(id) + } + /// Removes poll info. 
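/// Any transaction hashes cached for the poll are dropped along with it.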
pub fn remove_poll(&mut self, id: &PollId) { self.polls.remove(id); + self.transactions_data.remove(id); } } @@ -100,6 +127,7 @@ mod tests { use std::cell::RefCell; use transient_hashmap::Timer; use v1::helpers::PollManager; + use util::hash::H256; struct TestTimer<'a> { time: &'a RefCell, @@ -141,4 +169,72 @@ mod tests { indexer.remove_poll(&1); assert!(indexer.get_poll_info(&1).is_none()); } + + #[test] + fn should_return_poll_transactions_hashes() { + // given + let mut indexer = PollManager::new(); + let poll_id = indexer.create_poll(false, 20); + assert!(indexer.poll_transactions(&poll_id).is_none()); + let transactions = vec![H256::from(1), H256::from(2)]; + + // when + indexer.set_poll_transactions(&poll_id, transactions.clone()); + + // then + let txs = indexer.poll_transactions(&poll_id); + assert_eq!(txs.unwrap(), &transactions); + } + + + #[test] + fn should_remove_transaction_data_when_poll_timed_out() { + // given + let time = RefCell::new(0); + let timer = TestTimer { + time: &time, + }; + let mut indexer = PollManager::new_with_timer(timer); + let poll_id = indexer.create_poll(false, 20); + let transactions = vec![H256::from(1), H256::from(2)]; + indexer.set_poll_transactions(&poll_id, transactions.clone()); + assert!(indexer.poll_transactions(&poll_id).is_some()); + + // when + *time.borrow_mut() = 75; + indexer.prune(); + + // then + assert!(indexer.poll_transactions(&poll_id).is_none()); + + } + + #[test] + fn should_remove_transaction_data_when_poll_is_removed() { + // given + let mut indexer = PollManager::new(); + let poll_id = indexer.create_poll(false, 20); + let transactions = vec![H256::from(1), H256::from(2)]; + + // when + indexer.set_poll_transactions(&poll_id, transactions.clone()); + assert!(indexer.poll_transactions(&poll_id).is_some()); + indexer.remove_poll(&poll_id); + + // then + assert!(indexer.poll_transactions(&poll_id).is_none()); + } + + #[test] + fn should_ignore_transactions_for_invalid_poll_id() { + // given + let mut indexer = PollManager::<()>::new(); + let transactions = vec![H256::from(1), H256::from(2)]; + + // when + indexer.set_poll_transactions(&5, transactions.clone()); + + // then + assert!(indexer.poll_transactions(&5).is_none()); + } } From c37370a8a777f503307d341381cd00f3fc27ff08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 16:00:55 +0100 Subject: [PATCH 079/222] PendingTransaction filter. --- miner/src/miner.rs | 8 ++++++++ miner/src/transaction_queue.rs | 26 ++++++++++++++++++++++++ parity/main.rs | 2 +- rpc/src/v1/helpers/poll_manager.rs | 32 +++++++++++++++++------------- rpc/src/v1/impls/eth.rs | 32 ++++++++++++++++++++++++------ 5 files changed, 79 insertions(+), 21 deletions(-) diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 8e93defcf..85dbc6bbc 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -34,6 +34,9 @@ pub trait MinerService { fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256; + /// Returns hashes of transactions currently in pending + fn pending_transactions_hashes(&self) -> Vec; + /// Removes all transactions from the queue and restart mining operation. 
fn clear_and_reset(&self, chain: &BlockChainClient); @@ -135,6 +138,11 @@ impl MinerService for Miner { transaction_queue.add_all(transactions, fetch_nonce) } + fn pending_transactions_hashes(&self) -> Vec { + let transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.pending_hashes() + } + fn prepare_sealing(&self, chain: &BlockChainClient) { let no_of_transactions = 128; let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs index f64bd7318..4379531b2 100644 --- a/miner/src/transaction_queue.rs +++ b/miner/src/transaction_queue.rs @@ -431,6 +431,14 @@ impl TransactionQueue { .collect() } + /// Returns hashes of all transactions from current, ordered by priority. + pub fn pending_hashes(&self) -> Vec { + self.current.by_priority + .iter() + .map(|t| t.hash) + .collect() + } + /// Removes all elements (in any state) from the queue pub fn clear(&mut self) { self.current.clear(); @@ -693,6 +701,24 @@ mod test { assert_eq!(top.len(), 2); } + #[test] + fn should_return_pending_hashes() { + // given + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + + // when + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); + + // then + let top = txq.pending_hashes(); + assert_eq!(top[0], tx.hash()); + assert_eq!(top[1], tx2.hash()); + assert_eq!(top.len(), 2); + } + #[test] fn should_put_transaction_to_futures_if_gap_detected() { // given diff --git a/parity/main.rs b/parity/main.rs index c73f971d9..d83fe680d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -209,7 +209,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, miner: Arc, "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), "eth" => { server.add_delegate(EthClient::new(&client, &sync, &miner).to_delegate()); - server.add_delegate(EthFilterClient::new(&client).to_delegate()); + server.add_delegate(EthFilterClient::new(&client, &miner).to_delegate()); } _ => { die!("{}: Invalid API name to be enabled.", api); diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 6c0862633..73b273a8f 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -102,15 +102,19 @@ impl PollManager where T: Timer { self.polls.get(id) } - pub fn set_poll_transactions(&mut self, id: &PollId, transactions: Vec) { + pub fn update_transactions(&mut self, id: &PollId, transactions: Vec) -> Option> { self.prune(); if self.polls.get(id).is_some() { - self.transactions_data.insert(*id, transactions); + self.transactions_data.insert(*id, transactions) + } else { + None } } + // Normal code always replaces transactions + #[cfg(test)] /// Returns last transactions hashes for given poll. 
- pub fn poll_transactions(&mut self, id: &PollId) -> Option<&Vec> { + pub fn transactions(&mut self, id: &PollId) -> Option<&Vec> { self.prune(); self.transactions_data.get(id) } @@ -175,14 +179,14 @@ mod tests { // given let mut indexer = PollManager::new(); let poll_id = indexer.create_poll(false, 20); - assert!(indexer.poll_transactions(&poll_id).is_none()); + assert!(indexer.transactions(&poll_id).is_none()); let transactions = vec![H256::from(1), H256::from(2)]; // when - indexer.set_poll_transactions(&poll_id, transactions.clone()); + indexer.update_transactions(&poll_id, transactions.clone()); // then - let txs = indexer.poll_transactions(&poll_id); + let txs = indexer.transactions(&poll_id); assert_eq!(txs.unwrap(), &transactions); } @@ -197,15 +201,15 @@ mod tests { let mut indexer = PollManager::new_with_timer(timer); let poll_id = indexer.create_poll(false, 20); let transactions = vec![H256::from(1), H256::from(2)]; - indexer.set_poll_transactions(&poll_id, transactions.clone()); - assert!(indexer.poll_transactions(&poll_id).is_some()); + indexer.update_transactions(&poll_id, transactions.clone()); + assert!(indexer.transactions(&poll_id).is_some()); // when *time.borrow_mut() = 75; indexer.prune(); // then - assert!(indexer.poll_transactions(&poll_id).is_none()); + assert!(indexer.transactions(&poll_id).is_none()); } @@ -217,12 +221,12 @@ mod tests { let transactions = vec![H256::from(1), H256::from(2)]; // when - indexer.set_poll_transactions(&poll_id, transactions.clone()); - assert!(indexer.poll_transactions(&poll_id).is_some()); + indexer.update_transactions(&poll_id, transactions.clone()); + assert!(indexer.transactions(&poll_id).is_some()); indexer.remove_poll(&poll_id); // then - assert!(indexer.poll_transactions(&poll_id).is_none()); + assert!(indexer.transactions(&poll_id).is_none()); } #[test] @@ -232,9 +236,9 @@ mod tests { let transactions = vec![H256::from(1), H256::from(2)]; // when - indexer.set_poll_transactions(&5, transactions.clone()); + indexer.update_transactions(&5, transactions.clone()); // then - assert!(indexer.poll_transactions(&5).is_none()); + assert!(indexer.transactions(&5).is_none()); } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index a9ee389f8..5c7df574d 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . //! Eth rpc implementation. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::sync::{Arc, Weak, Mutex, RwLock}; use std::ops::Deref; use ethsync::{EthSync, SyncState}; @@ -264,15 +264,17 @@ impl Eth for EthClient { /// Eth filter rpc implementation. pub struct EthFilterClient { client: Weak, + miner: Weak, polls: Mutex>, } impl EthFilterClient { /// Creates new Eth filter client. 
- pub fn new(client: &Arc<Client>) -> Self { + pub fn new(client: &Arc<Client>, miner: &Arc<Miner>) -> Self { EthFilterClient { client: Arc::downgrade(client), - polls: Mutex::new(PollManager::new()) + miner: Arc::downgrade(miner), + polls: Mutex::new(PollManager::new()), } } } @@ -302,7 +304,12 @@ impl EthFilter for EthFilterClient { match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::PendingTransaction, take_weak!(self.client).chain_info().best_block_number); + let best_block_number = take_weak!(self.client).chain_info().best_block_number; + let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); + + let id = polls.create_poll(PollFilter::PendingTransaction, best_block_number); + polls.update_transactions(&id, pending_transactions); + to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -330,8 +337,21 @@ impl EthFilter for EthFilterClient { to_value(&hashes) }, PollFilter::PendingTransaction => { - // TODO: fix implementation once TransactionQueue is merged - to_value(&vec![] as &Vec<H256>) + let poll_id = index.value(); + let mut polls = self.polls.lock().unwrap(); + + let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); + let previous_hashes = polls.update_transactions(&poll_id, current_hashes.clone()).unwrap(); + polls.update_poll(&poll_id, client.chain_info().best_block_number); + + // calculate diff: report only hashes that were not present at the previous poll + let previous_hashes_set = previous_hashes.into_iter().collect::<HashSet<H256>>(); + let diff = current_hashes + .into_iter() + .filter(|hash| !previous_hashes_set.contains(hash)) + .collect::<Vec<H256>>(); + + to_value(&diff) }, PollFilter::Logs(mut filter) => { filter.from_block = BlockId::Number(info.block_number); From 094ae4e9f9a77708c15751afa17c4835e1fb16a0 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 19:15:10 +0400 Subject: [PATCH 080/222] personal is back to the master ver --- rpc/src/v1/impls/personal.rs | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index a2788b9d9..48e1b1c6a 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -18,27 +18,28 @@ use std::sync::{Arc, Weak}; use jsonrpc_core::*; use v1::traits::Personal; +use util::keys::store::*; use util::Address; -use ethcore::client::Client; +use std::sync::RwLock; /// Account management (personal) rpc implementation. 
pub struct PersonalClient { - client: Weak, + secret_store: Weak>, } impl PersonalClient { /// Creates new PersonalClient - pub fn new(client: &Arc) -> Self { + pub fn new(store: &Arc>) -> Self { PersonalClient { - client: Arc::downgrade(client), + secret_store: Arc::downgrade(store), } } } impl Personal for PersonalClient { fn accounts(&self, _: Params) -> Result { - let client = take_weak!(self.client); - let store = client.secret_store().read().unwrap(); + let store_wk = take_weak!(self.secret_store); + let store = store_wk.read().unwrap(); match store.accounts() { Ok(account_list) => { Ok(Value::Array(account_list.iter() @@ -53,8 +54,8 @@ impl Personal for PersonalClient { fn new_account(&self, params: Params) -> Result { from_params::<(String, )>(params).and_then( |(pass, )| { - let client = take_weak!(self.client); - let mut store = client.secret_store().write().unwrap(); + let store_wk = take_weak!(self.secret_store); + let mut store = store_wk.write().unwrap(); match store.new_account(&pass) { Ok(address) => Ok(Value::String(format!("{:?}", address))), Err(_) => Err(Error::internal_error()) @@ -66,8 +67,8 @@ impl Personal for PersonalClient { fn unlock_account(&self, params: Params) -> Result { from_params::<(Address, String, u64)>(params).and_then( |(account, account_pass, _)|{ - let client = take_weak!(self.client); - let store = client.secret_store().read().unwrap(); + let store_wk = take_weak!(self.secret_store); + let store = store_wk.read().unwrap(); match store.unlock_account(&account, &account_pass) { Ok(_) => Ok(Value::Bool(true)), Err(_) => Ok(Value::Bool(false)), From 0eaf0a8db1827a005f5ae920736136ef33f4d70f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 16:40:15 +0100 Subject: [PATCH 081/222] Updating hook. --- hook.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hook.sh b/hook.sh index 113bf1838..25877868a 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer --features dev-clippy" > ./.git/hooks/pre-push +echo "#!/bin/sh\ncargo build --features dev-clippy && cargo test --no-run -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer --features dev-clippy" > ./.git/hooks/pre-push chmod +x ./.git/hooks/pre-push From c951dee7668e4d6ef5164230bdc1d41b77e4254e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Thu, 10 Mar 2016 17:09:34 +0100 Subject: [PATCH 082/222] --archive is default. --pruning is option. --- parity/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 68d45bc04..69650270a 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -79,7 +79,7 @@ Protocol Options: or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. - --archive Client should not prune the state/storage trie. + --pruning Client should prune the state/storage trie. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. 
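The flag polarity flips in this hunk: archive mode becomes the default and state pruning is opt-in via --pruning. A minimal sketch of the resulting wiring, assuming the names used in this patch (flag_pruning, prefer_journal); the condensed functions below are illustrative, not lines from the patch:

    // Old behaviour: prune unless --archive was given.
    fn prefer_journal_old(flag_archive: bool) -> bool {
        !flag_archive
    }

    // New behaviour: archive unless --pruning is given.
    fn prefer_journal_new(flag_pruning: bool) -> bool {
        flag_pruning
    }
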
@@ -140,7 +140,7 @@ struct Args { flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_archive: bool, + flag_pruning: bool, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, @@ -402,7 +402,7 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; } } - client_config.prefer_journal = !self.args.flag_archive; + client_config.prefer_journal = self.args.flag_pruning; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); From 5571503c224b5d1b185243d7f5e6f1f1bc3a6856 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 20:18:01 +0400 Subject: [PATCH 083/222] traitified secret store --- rpc/src/v1/impls/eth.rs | 16 +++++++++------- util/src/keys/store.rs | 22 ++++++++++++++++++---- 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 4a8461c45..97d248ef6 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -30,20 +30,23 @@ use ethcore::ethereum::denominations::shannon; use v1::traits::{Eth, EthFilter}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; use v1::helpers::{PollFilter, PollManager}; +use util::keys::store::AccountProvider; /// Eth rpc implementation. -pub struct EthClient where C: BlockChainClient, S: SyncStatusProvider { +pub struct EthClient where C: BlockChainClient, S: SyncStatusProvider, A: AccountProvider { client: Weak, sync: Weak, + accounts: Weak, hashrates: RwLock>, } -impl EthClient where C: BlockChainClient, S: SyncStatusProvider { +impl EthClient where C: BlockChainClient, S: SyncStatusProvider, A: AccountProvider { /// Creates new EthClient. 
- pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, accounts: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), + accounts: Arc::downgrade(accounts), hashrates: RwLock::new(HashMap::new()), } } @@ -94,7 +97,7 @@ impl EthClient where C: BlockChainClient, S: SyncStatusProvider { } } -impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static { +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static, A: AccountProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), @@ -256,9 +259,8 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncS fn send_transaction(&self, params: Params) -> Result { from_params::<(TransactionRequest, )>(params) .and_then(|(transaction_request, )| { - let client = take_weak!(self.client); - let store = client.secret_store().read().unwrap(); - match store.account_secret(&transaction_request.from) { + let accounts = take_weak!(self.accounts); + match accounts.account_secret(&transaction_request.from) { Ok(secret) => { let sync = take_weak!(self.sync); let (transaction, _) = transaction_request.to_eth(); diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index dcc165259..9ea00cbba 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -78,6 +78,18 @@ struct AccountUnlock { expires: DateTime, } +/// Basic account management trait +pub trait AccountProvider : Send + Sync { + /// Unlocks account with the password provided + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>; + /// Creates account + fn new_account(&mut self, pass: &str) -> Result; + /// Returns secret for unlocked account + fn account_secret(&self, account: &Address) -> Result; + /// Returns secret for unlocked account + fn sign(&self, account: &Address, message: &H256) -> Result; +} + impl SecretStore { /// new instance of Secret Store in default home directory pub fn new() -> SecretStore { @@ -144,9 +156,11 @@ impl SecretStore { unlocks: RwLock::new(HashMap::new()), } } +} +impl AccountProvider for SecretStore { /// Unlocks account for use - pub fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { let secret_id = try!(self.account(&account).ok_or(EncryptedHashMapError::UnknownIdentifier)); let secret = try!(self.get(&secret_id, pass)); { @@ -160,7 +174,7 @@ impl SecretStore { } /// Creates new account - pub fn new_account(&mut self, pass: &str) -> Result { + fn new_account(&mut self, pass: &str) -> Result { let secret = H256::random(); let key_id = H128::random(); self.insert(key_id.clone(), secret, pass); @@ -173,7 +187,7 @@ impl SecretStore { } /// Signs message with unlocked account - pub fn sign(&self, account: &Address, message: &H256) -> Result { + fn sign(&self, account: &Address, message: &H256) -> Result { let read_lock = self.unlocks.read().unwrap(); let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); match crypto::KeyPair::from_secret(unlock.secret) { @@ -186,7 +200,7 @@ impl SecretStore { } /// Returns secret for unlocked account - pub fn account_secret(&self, account: &Address) -> Result { + fn account_secret(&self, account: &Address) -> Result { let read_lock = 
self.unlocks.read().unwrap(); let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); Ok(unlock.secret as crypto::Secret) From 25a63611f856ab0896b114b6388461c174bf9905 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 20:32:17 +0400 Subject: [PATCH 084/222] extend sync status interface to sync provider --- parity/main.rs | 2 +- rpc/src/v1/impls/eth.rs | 8 ++++---- rpc/src/v1/impls/net.rs | 8 ++++---- sync/src/lib.rs | 20 +++++++++++--------- 4 files changed, 20 insertions(+), 18 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 8814da44b..d2b9b3567 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -49,7 +49,7 @@ use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; -use ethsync::{EthSync, SyncConfig, SyncStatusProvider}; +use ethsync::{EthSync, SyncConfig, SyncProvider}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 91bd0cce3..a067b48fb 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -17,7 +17,7 @@ //! Eth rpc implementation. use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; -use ethsync::{SyncStatusProvider, SyncState}; +use ethsync::{SyncProvider, SyncState}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -32,13 +32,13 @@ use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncIn use v1::helpers::{PollFilter, PollManager}; /// Eth rpc implementation. -pub struct EthClient where C: BlockChainClient, S: SyncStatusProvider { +pub struct EthClient where C: BlockChainClient, S: SyncProvider { client: Weak, sync: Weak, hashrates: RwLock>, } -impl EthClient where C: BlockChainClient, S: SyncStatusProvider { +impl EthClient where C: BlockChainClient, S: SyncProvider { /// Creates new EthClient. pub fn new(client: &Arc, sync: &Arc) -> Self { EthClient { @@ -94,7 +94,7 @@ impl EthClient where C: BlockChainClient, S: SyncStatusProvider { } } -impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static { +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index a686ed66f..5e67bf252 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -17,15 +17,15 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; use jsonrpc_core::*; -use ethsync::SyncStatusProvider; +use ethsync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient where S: SyncStatusProvider { +pub struct NetClient where S: SyncProvider { sync: Weak } -impl NetClient where S: SyncStatusProvider { +impl NetClient where S: SyncProvider { /// Creates new NetClient. 
pub fn new(sync: &Arc) -> Self { NetClient { @@ -34,7 +34,7 @@ impl NetClient where S: SyncStatusProvider { } } -impl Net for NetClient where S: SyncStatusProvider + 'static { +impl Net for NetClient where S: SyncProvider + 'static { fn version(&self, _: Params) -> Result { Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 5a0ba79c5..3b79e5614 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -95,9 +95,11 @@ impl Default for SyncConfig { } /// Current sync status -pub trait SyncStatusProvider: Send + Sync { +pub trait SyncProvider: Send + Sync { /// Get sync status fn status(&self) -> SyncStatus; + /// Insert transaction in the sync transaction queue + fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction); } /// Ethereum network protocol handler @@ -130,9 +132,16 @@ impl EthSync { pub fn restart(&mut self, io: &mut NetworkContext) { self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref())); } +} + +impl SyncProvider for EthSync { + /// Get sync status + fn status(&self) -> SyncStatus { + self.sync.read().unwrap().status() + } /// Insert transaction in transaction queue - pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) { + fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) { use util::numbers::*; let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); @@ -141,13 +150,6 @@ impl EthSync { } } -impl SyncStatusProvider for EthSync { - /// Get sync status - fn status(&self) -> SyncStatus { - self.sync.read().unwrap().status() - } -} - impl NetworkProtocolHandler for EthSync { fn initialize(&self, io: &NetworkContext) { io.register_timer(0, 1000).expect("Error registering sync timer"); From 84a48142defadd8b48654299c5d4d532f992261d Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Thu, 10 Mar 2016 19:50:04 +0100 Subject: [PATCH 085/222] Add more geth options. --- parity/main.rs | 39 +++++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 16 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 68d45bc04..cced0ed0f 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -70,9 +70,9 @@ Parity. Ethereum Client. Copyright 2015, 2016 Ethcore (UK) Limited Usage: - parity daemon [options] [ --no-bootstrap | ... ] + parity daemon [options] parity account (new | list) - parity [options] [ --no-bootstrap | ... ] + parity [options] Protocol Options: --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. --archive Client should not prune the state/storage trie. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. Networking Options: - --no-bootstrap Don't bother trying to connect to any nodes initially. - --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304]. - --public-address URL Specify the IP/port on which peers may connect. - --address URL Equivalent to --listen-address URL --public-address URL. - --peers NUM Try to maintain that many peers [default: 25]. + --no-bootstrap Don't bother trying to connect to standard bootnodes. + --bootnodes NODES Specify additional comma-separated bootnodes. --no-discovery Disable new peer discovery. - --no-upnp Disable trying to figure out the correct public adderss over UPnP. + --peers NUM Try to maintain that many peers [default: 25]. + --port PORT Override the port for the node to listen on, supersedes --address. + --nat METHOD Specify method to use for determining public address. 
Must be one of: any, none, + upnp, extip:(IP) [default: upnp]. --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. API and Console Options: @@ -101,16 +101,11 @@ API and Console Options: --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited list of API names. Possible names are web3, eth and net. [default: web3,eth,net]. - --rpc Equivalent to --jsonrpc (geth-compatible). - --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). - --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). - --rpcapi APIS Equivalent to --jsonrpc-apis APIS (geth-compatible). - --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). Sealing/Mining Options: --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. + --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. Memory Footprint Options: --cache-pref-size BYTES Specify the preferred size of the blockchain cache in bytes [default: 16384]. @@ -119,6 +114,18 @@ Memory Footprint Options: --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with other cache options (geth-compatible). +Geth-Compatibility Options + --rpc Equivalent to --jsonrpc. + --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. + --rpcport PORT Equivalent to --jsonrpc-port PORT. + --rpcapi APIS Equivalent to --jsonrpc-apis APIS. + --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. + --maxpeers COUNT Equivalent to --peers COUNT. + --nodekey KEY Equivalent to --node-key KEY. + --nodiscover Equivalent to --no-discovery. + --etherbase ADDRESS Equivalent to --author ADDRESS. + --extradata STRING Equivalent to --extra-data STRING. + Miscellaneous Options: -l --logging LOGGING Specify the logging level. -v --version Show information about version. 
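The --nat METHOD value introduced above has a small grammar (any, none, upnp, extip:(IP)). A sketch of how such a string could be decoded; the NatMethod enum and parse_nat helper are assumptions for illustration, not part of this patch:

    // Hypothetical decoder for --nat values such as "extip:1.2.3.4".
    enum NatMethod { Any, None, Upnp, ExtIp(String) }

    fn parse_nat(s: &str) -> Option<NatMethod> {
        match s {
            "any" => Some(NatMethod::Any),
            "none" => Some(NatMethod::None),
            "upnp" => Some(NatMethod::Upnp),
            // The extip form carries an explicit external IP after the colon.
            _ if s.starts_with("extip:") => Some(NatMethod::ExtIp(s["extip:".len()..].to_string())),
            _ => None, // unknown method; the CLI would reject it with a usage error
        }
    }
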
@@ -145,7 +152,7 @@ struct Args { flag_listen_address: String, flag_public_address: Option, flag_address: Option, - flag_peers: usize, + flag_maxpeers: usize, flag_no_discovery: bool, flag_no_upnp: bool, flag_node_key: Option, @@ -323,7 +330,7 @@ impl Configuration { ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_peers as u32; + ret.ideal_peers = self.args.flag_maxpeers as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); From a2dea3885b393da47dad0127f22773b7fcae00bc Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 23:09:45 +0400 Subject: [PATCH 086/222] refactoring to AccountService --- parity/main.rs | 10 ++----- rpc/src/v1/impls/personal.rs | 18 +++++------- util/src/keys/store.rs | 53 +++++++++++++++++++++++++++++++----- 3 files changed, 56 insertions(+), 25 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 6d1c08162..b6ed5cba3 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -196,7 +196,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { +fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { use rpc::v1::*; let server = rpc::RpcServer::new(); @@ -416,11 +416,7 @@ impl Configuration { let sync = EthSync::register(service.network(), sync_config, client); // Secret Store - let secret_store = Arc::new(SecretStore::new()); - { - let import_ref = Arc::make_mut(&mut secret_store); - import_ref.try_import_existing(); - } + let account_service = Arc::new(AccountService::new()); // Setup rpc if self.args.flag_jsonrpc || self.args.flag_rpc { @@ -432,7 +428,7 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - let server_handler = setup_rpc_server(service.client(), sync.clone(), secret_store.clone(), &url, cors, apis.split(",").collect()); + let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect()); if let Some(handler) = server_handler { panic_handler.forward_from(handler.deref()); } diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 48e1b1c6a..7b79ceae7 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -20,30 +20,28 @@ use jsonrpc_core::*; use v1::traits::Personal; use util::keys::store::*; use util::Address; -use std::sync::RwLock; /// Account management (personal) rpc implementation. 
pub struct PersonalClient { - secret_store: Weak>, + accounts: Weak, } impl PersonalClient { /// Creates new PersonalClient - pub fn new(store: &Arc>) -> Self { + pub fn new(store: &Arc) -> Self { PersonalClient { - secret_store: Arc::downgrade(store), + accounts: Arc::downgrade(store), } } } impl Personal for PersonalClient { fn accounts(&self, _: Params) -> Result { - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.accounts() { Ok(account_list) => { Ok(Value::Array(account_list.iter() - .map(|&(account, _)| Value::String(format!("{:?}", account))) + .map(|&account| Value::String(format!("{:?}", account))) .collect::>()) ) } @@ -54,8 +52,7 @@ impl Personal for PersonalClient { fn new_account(&self, params: Params) -> Result { from_params::<(String, )>(params).and_then( |(pass, )| { - let store_wk = take_weak!(self.secret_store); - let mut store = store_wk.write().unwrap(); + let store = take_weak!(self.accounts); match store.new_account(&pass) { Ok(address) => Ok(Value::String(format!("{:?}", address))), Err(_) => Err(Error::internal_error()) @@ -67,8 +64,7 @@ impl Personal for PersonalClient { fn unlock_account(&self, params: Params) -> Result { from_params::<(Address, String, u64)>(params).and_then( |(account, account_pass, _)|{ - let store_wk = take_weak!(self.secret_store); - let store = store_wk.read().unwrap(); + let store = take_weak!(self.accounts); match store.unlock_account(&account, &account_pass) { Ok(_) => Ok(Value::Bool(true)), Err(_) => Ok(Value::Bool(false)), diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 9ea00cbba..ea97cc80e 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -80,16 +80,57 @@ struct AccountUnlock { /// Basic account management trait pub trait AccountProvider : Send + Sync { + /// Lists all accounts + fn accounts(&self) -> Result, ::std::io::Error>; /// Unlocks account with the password provided fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError>; /// Creates account - fn new_account(&mut self, pass: &str) -> Result; + fn new_account(&self, pass: &str) -> Result; /// Returns secret for unlocked account fn account_secret(&self, account: &Address) -> Result; /// Returns secret for unlocked account fn sign(&self, account: &Address, message: &H256) -> Result; } +/// Thread-safe accounts management +pub struct AccountService { + secret_store: RwLock, +} + +impl AccountProvider for AccountService { + /// Lists all accounts + fn accounts(&self) -> Result, ::std::io::Error> { + Ok(try!(self.secret_store.read().unwrap().accounts()).iter().map(|&(addr, _)| addr).collect::>()) + } + /// Unlocks account with the password provided + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + self.secret_store.read().unwrap().unlock_account(account, pass) + } + /// Creates account + fn new_account(&self, pass: &str) -> Result { + self.secret_store.write().unwrap().new_account(pass) + } + /// Returns secret for unlocked account + fn account_secret(&self, account: &Address) -> Result { + self.secret_store.read().unwrap().account_secret(account) + } + /// Returns secret for unlocked account + fn sign(&self, account: &Address, message: &H256) -> Result { + self.secret_store.read().unwrap().sign(account, message) + } +} + +impl AccountService { + /// New account service with the default location + pub fn new() -> AccountService { + let secret_store = 
RwLock::new(SecretStore::new()); + secret_store.write().unwrap().try_import_existing(); + AccountService { + secret_store: secret_store + } + } +} + impl SecretStore { /// new instance of Secret Store in default home directory pub fn new() -> SecretStore { @@ -156,11 +197,9 @@ impl SecretStore { unlocks: RwLock::new(HashMap::new()), } } -} -impl AccountProvider for SecretStore { /// Unlocks account for use - fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + pub fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { let secret_id = try!(self.account(&account).ok_or(EncryptedHashMapError::UnknownIdentifier)); let secret = try!(self.get(&secret_id, pass)); { @@ -174,7 +213,7 @@ impl AccountProvider for SecretStore { } /// Creates new account - fn new_account(&mut self, pass: &str) -> Result { + pub fn new_account(&mut self, pass: &str) -> Result { let secret = H256::random(); let key_id = H128::random(); self.insert(key_id.clone(), secret, pass); @@ -187,7 +226,7 @@ impl AccountProvider for SecretStore { } /// Signs message with unlocked account - fn sign(&self, account: &Address, message: &H256) -> Result { + pub fn sign(&self, account: &Address, message: &H256) -> Result { let read_lock = self.unlocks.read().unwrap(); let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); match crypto::KeyPair::from_secret(unlock.secret) { @@ -200,7 +239,7 @@ impl AccountProvider for SecretStore { } /// Returns secret for unlocked account - fn account_secret(&self, account: &Address) -> Result { + pub fn account_secret(&self, account: &Address) -> Result { let read_lock = self.unlocks.read().unwrap(); let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); Ok(unlock.secret as crypto::Secret) From c5c8851b5097e27ccadc43b579dd8f605b9c9733 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 20:27:50 +0100 Subject: [PATCH 087/222] moved TestBlockChainClient to ethcore --- ethcore/src/client/mod.rs | 2 + ethcore/src/client/test_client.rs | 322 ++++++++++++++++++++++++++++++ ethcore/src/lib.rs | 2 +- sync/src/tests/chain.rs | 2 +- sync/src/tests/helpers.rs | 292 +-------------------------- 5 files changed, 327 insertions(+), 293 deletions(-) create mode 100644 ethcore/src/client/test_client.rs diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index 0daa17553..58a21f151 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -19,10 +19,12 @@ mod client; mod config; mod ids; +mod test_client; pub use self::client::*; pub use self::config::{ClientConfig, BlockQueueConfig, BlockChainConfig}; pub use self::ids::{BlockId, TransactionId}; +pub use self::test_client::{TestBlockChainClient, EachBlockWith}; use util::bytes::Bytes; use util::hash::{Address, H256, H2048}; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs new file mode 100644 index 000000000..4ca30dcd5 --- /dev/null +++ b/ethcore/src/client/test_client.rs @@ -0,0 +1,322 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//use std::mem; +//use std::ops::{Deref, DerefMut}; +//use std::collections::HashMap; +//use rustc_serialize::hex::FromHex; +//use util::rlp; +//use util::rlp::*; +//use util::bytes::Bytes; +//use util::hash::{FixedHash, Address, H256, H2048}; +//use util::numbers::{Uint, U256}; +//use util::crypto::KeyPair; +//use util::sha3::Hashable; +use util::*; +//use std::sync::RwLock; +use transaction::{Transaction, LocalizedTransaction, Action}; +use blockchain::TreeRoute; +use client::{BlockChainClient, BlockChainInfo, BlockStatus, BlockId, TransactionId}; +use header::{Header as BlockHeader, BlockNumber}; +use filter::Filter; +use log_entry::LocalizedLogEntry; +use receipt::Receipt; +use error::ImportResult; +use block_queue::BlockQueueInfo; + +pub struct TestBlockChainClient { + pub blocks: RwLock>, + pub numbers: RwLock>, + pub genesis_hash: H256, + pub last_hash: RwLock, + pub difficulty: RwLock, +} + +#[derive(Clone)] +pub enum EachBlockWith { + Nothing, + Uncle, + Transaction, + UncleAndTransaction +} + +impl TestBlockChainClient { + pub fn new() -> TestBlockChainClient { + + let mut client = TestBlockChainClient { + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), + genesis_hash: H256::new(), + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(From::from(0)), + }; + client.add_blocks(1, EachBlockWith::Nothing); // add genesis block + client.genesis_hash = client.last_hash.read().unwrap().clone(); + client + } + + pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { + let len = self.numbers.read().unwrap().len(); + for n in len..(len + count) { + let mut header = BlockHeader::new(); + header.difficulty = From::from(n); + header.parent_hash = self.last_hash.read().unwrap().clone(); + header.number = n as BlockNumber; + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + uncles + }, + _ => RlpStream::new_list(0) + }; + let txs = match with { + EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { + let mut txs = RlpStream::new_list(1); + let keypair = KeyPair::create().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero() + }; + let signed_tx = tx.sign(&keypair.secret()); + txs.append(&signed_tx); + txs.out() + }, + _ => rlp::NULL_RLP.to_vec() + }; + + let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&txs, 1); + rlp.append_raw(uncles.as_raw(), 1); + self.import_block(rlp.as_raw().to_vec()).unwrap(); + } + } + + pub fn corrupt_block(&mut self, n: BlockNumber) { + let hash = self.block_hash(BlockId::Number(n)).unwrap(); + let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); + header.parent_hash = H256::new(); + 
let mut rlp = RlpStream::new_list(3); + rlp.append(&header); + rlp.append_raw(&rlp::NULL_RLP, 1); + rlp.append_raw(&rlp::NULL_RLP, 1); + self.blocks.write().unwrap().insert(hash, rlp.out()); + } + + pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { + let blocks_read = self.numbers.read().unwrap(); + let index = blocks_read.len() - delta; + blocks_read[&index].clone() + } + + fn block_hash(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(hash) => Some(hash), + BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), + BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), + BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() + } + } +} + +impl BlockChainClient for TestBlockChainClient { + fn block_total_difficulty(&self, _id: BlockId) -> Option { + Some(U256::zero()) + } + + fn block_hash(&self, _id: BlockId) -> Option { + unimplemented!(); + } + + fn nonce(&self, _address: &Address) -> U256 { + U256::zero() + } + + fn code(&self, _address: &Address) -> Option { + unimplemented!(); + } + + fn transaction(&self, _id: TransactionId) -> Option { + unimplemented!(); + } + + fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { + unimplemented!(); + } + + fn logs(&self, _filter: Filter) -> Vec { + unimplemented!(); + } + + fn block_header(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) + } + + fn block_body(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { + let mut stream = RlpStream::new_list(2); + stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); + stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); + stream.out() + })) + } + + fn block(&self, id: BlockId) -> Option { + self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) + } + + fn block_status(&self, id: BlockId) -> BlockStatus { + match id { + BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, + BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, + _ => BlockStatus::Unknown + } + } + + // works only if blocks are one after another 1 -> 2 -> 3 + fn tree_route(&self, from: &H256, to: &H256) -> Option { + Some(TreeRoute { + ancestor: H256::new(), + index: 0, + blocks: { + let numbers_read = self.numbers.read().unwrap(); + let mut adding = false; + + let mut blocks = Vec::new(); + for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { + if hash == to { + if adding { + blocks.push(hash.clone()); + } + adding = false; + break; + } + if hash == from { + adding = true; + } + if adding { + blocks.push(hash.clone()); + } + } + if adding { Vec::new() } else { blocks } + } + }) + } + + // TODO: returns just hashes instead of node state rlp(?) + fn state_data(&self, hash: &H256) -> Option { + // starts with 'f' ? + if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let mut rlp = RlpStream::new(); + rlp.append(&hash.clone()); + return Some(rlp.out()); + } + None + } + + fn block_receipts(&self, hash: &H256) -> Option { + // starts with 'f' ? 
+ if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { + let receipt = Receipt::new( + H256::zero(), + U256::zero(), + vec![]); + let mut rlp = RlpStream::new(); + rlp.append(&receipt); + return Some(rlp.out()); + } + None + } + + fn import_block(&self, b: Bytes) -> ImportResult { + let header = Rlp::new(&b).val_at::(0); + let h = header.hash(); + let number: usize = header.number as usize; + if number > self.blocks.read().unwrap().len() { + panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); + } + if number > 0 { + match self.blocks.read().unwrap().get(&header.parent_hash) { + Some(parent) => { + let parent = Rlp::new(parent).val_at::(0); + if parent.number != (header.number - 1) { + panic!("Unexpected block parent"); + } + }, + None => { + panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); + } + } + } + let len = self.numbers.read().unwrap().len(); + if number == len { + { + let mut difficulty = self.difficulty.write().unwrap(); + *difficulty.deref_mut() = *difficulty.deref() + header.difficulty; + } + mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); + self.blocks.write().unwrap().insert(h.clone(), b); + self.numbers.write().unwrap().insert(number, h.clone()); + let mut parent_hash = header.parent_hash; + if number > 0 { + let mut n = number - 1; + while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { + *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); + n -= 1; + parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; + } + } + } + else { + self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); + } + Ok(h) + } + + fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { + verified_queue_size: 0, + unverified_queue_size: 0, + verifying_queue_size: 0, + max_queue_size: 0, + max_mem_use: 0, + mem_used: 0, + } + } + + fn clear_queue(&self) { + } + + fn chain_info(&self) -> BlockChainInfo { + BlockChainInfo { + total_difficulty: *self.difficulty.read().unwrap(), + pending_total_difficulty: *self.difficulty.read().unwrap(), + genesis_hash: self.genesis_hash.clone(), + best_block_hash: self.last_hash.read().unwrap().clone(), + best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, + } + } +} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..0ff5c1903 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -86,6 +86,7 @@ extern crate crossbeam; #[cfg(feature = "jit" )] extern crate evmjit; pub mod block; +pub mod block_queue; pub mod client; pub mod error; pub mod ethereum; @@ -119,7 +120,6 @@ mod substate; mod executive; mod externalities; mod verification; -mod block_queue; mod blockchain; #[cfg(test)] diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index 855aa79a6..eebbdb164 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use util::*; -use ethcore::client::{BlockChainClient, BlockId}; +use ethcore::client::{BlockChainClient, BlockId, EachBlockWith}; use io::SyncIo; use chain::{SyncState}; use super::helpers::*; diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index d01dba0b2..ca4ae5158 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -15,300 +15,10 @@ // along with Parity. If not, see . 
use util::*; -use ethcore::client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo, TransactionId, BlockId, BlockQueueInfo}; -use ethcore::header::{Header as BlockHeader, BlockNumber}; -use ethcore::error::*; +use ethcore::client::{TestBlockChainClient, BlockChainClient}; use io::SyncIo; use chain::ChainSync; use ::SyncConfig; -use ethcore::receipt::Receipt; -use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; -use ethcore::filter::Filter; -use ethcore::log_entry::LocalizedLogEntry; - -pub struct TestBlockChainClient { - pub blocks: RwLock>, - pub numbers: RwLock>, - pub genesis_hash: H256, - pub last_hash: RwLock, - pub difficulty: RwLock, -} - -#[derive(Clone)] -pub enum EachBlockWith { - Nothing, - Uncle, - Transaction, - UncleAndTransaction -} - -impl TestBlockChainClient { - pub fn new() -> TestBlockChainClient { - - let mut client = TestBlockChainClient { - blocks: RwLock::new(HashMap::new()), - numbers: RwLock::new(HashMap::new()), - genesis_hash: H256::new(), - last_hash: RwLock::new(H256::new()), - difficulty: RwLock::new(From::from(0)), - }; - client.add_blocks(1, EachBlockWith::Nothing); // add genesis block - client.genesis_hash = client.last_hash.read().unwrap().clone(); - client - } - - pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { - let len = self.numbers.read().unwrap().len(); - for n in len..(len + count) { - let mut header = BlockHeader::new(); - header.difficulty = From::from(n); - header.parent_hash = self.last_hash.read().unwrap().clone(); - header.number = n as BlockNumber; - let uncles = match with { - EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { - let mut uncles = RlpStream::new_list(1); - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - uncles - }, - _ => RlpStream::new_list(0) - }; - let txs = match with { - EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { - let mut txs = RlpStream::new_list(1); - let keypair = KeyPair::create().unwrap(); - let tx = Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::one(), - nonce: U256::zero() - }; - let signed_tx = tx.sign(&keypair.secret()); - txs.append(&signed_tx); - txs.out() - }, - _ => rlp::NULL_RLP.to_vec() - }; - - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&txs, 1); - rlp.append_raw(uncles.as_raw(), 1); - self.import_block(rlp.as_raw().to_vec()).unwrap(); - } - } - - pub fn corrupt_block(&mut self, n: BlockNumber) { - let hash = self.block_hash(BlockId::Number(n)).unwrap(); - let mut header: BlockHeader = decode(&self.block_header(BlockId::Number(n)).unwrap()); - header.parent_hash = H256::new(); - let mut rlp = RlpStream::new_list(3); - rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); - rlp.append_raw(&rlp::NULL_RLP, 1); - self.blocks.write().unwrap().insert(hash, rlp.out()); - } - - pub fn block_hash_delta_minus(&mut self, delta: usize) -> H256 { - let blocks_read = self.numbers.read().unwrap(); - let index = blocks_read.len() - delta; - blocks_read[&index].clone() - } - - fn block_hash(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(hash) => Some(hash), - BlockId::Number(n) => self.numbers.read().unwrap().get(&(n as usize)).cloned(), - 
BlockId::Earliest => self.numbers.read().unwrap().get(&0).cloned(), - BlockId::Latest => self.numbers.read().unwrap().get(&(self.numbers.read().unwrap().len() - 1)).cloned() - } - } -} - -impl BlockChainClient for TestBlockChainClient { - fn block_total_difficulty(&self, _id: BlockId) -> Option { - Some(U256::zero()) - } - - fn block_hash(&self, _id: BlockId) -> Option { - unimplemented!(); - } - - fn nonce(&self, _address: &Address) -> U256 { - U256::zero() - } - - fn code(&self, _address: &Address) -> Option { - unimplemented!(); - } - - fn transaction(&self, _id: TransactionId) -> Option { - unimplemented!(); - } - - fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option> { - unimplemented!(); - } - - fn logs(&self, _filter: Filter) -> Vec { - unimplemented!(); - } - - fn block_header(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) - } - - fn block_body(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| { - let mut stream = RlpStream::new_list(2); - stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); - stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); - stream.out() - })) - } - - fn block(&self, id: BlockId) -> Option { - self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).cloned()) - } - - fn block_status(&self, id: BlockId) -> BlockStatus { - match id { - BlockId::Number(number) if (number as usize) < self.blocks.read().unwrap().len() => BlockStatus::InChain, - BlockId::Hash(ref hash) if self.blocks.read().unwrap().get(hash).is_some() => BlockStatus::InChain, - _ => BlockStatus::Unknown - } - } - - // works only if blocks are one after another 1 -> 2 -> 3 - fn tree_route(&self, from: &H256, to: &H256) -> Option { - Some(TreeRoute { - ancestor: H256::new(), - index: 0, - blocks: { - let numbers_read = self.numbers.read().unwrap(); - let mut adding = false; - - let mut blocks = Vec::new(); - for (_, hash) in numbers_read.iter().sort_by(|tuple1, tuple2| tuple1.0.cmp(tuple2.0)) { - if hash == to { - if adding { - blocks.push(hash.clone()); - } - adding = false; - break; - } - if hash == from { - adding = true; - } - if adding { - blocks.push(hash.clone()); - } - } - if adding { Vec::new() } else { blocks } - } - }) - } - - // TODO: returns just hashes instead of node state rlp(?) - fn state_data(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let mut rlp = RlpStream::new(); - rlp.append(&hash.clone()); - return Some(rlp.out()); - } - None - } - - fn block_receipts(&self, hash: &H256) -> Option { - // starts with 'f' ? - if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let receipt = Receipt::new( - H256::zero(), - U256::zero(), - vec![]); - let mut rlp = RlpStream::new(); - rlp.append(&receipt); - return Some(rlp.out()); - } - None - } - - fn import_block(&self, b: Bytes) -> ImportResult { - let header = Rlp::new(&b).val_at::(0); - let h = header.hash(); - let number: usize = header.number as usize; - if number > self.blocks.read().unwrap().len() { - panic!("Unexpected block number. 
Expected {}, got {}", self.blocks.read().unwrap().len(), number); - } - if number > 0 { - match self.blocks.read().unwrap().get(&header.parent_hash) { - Some(parent) => { - let parent = Rlp::new(parent).val_at::(0); - if parent.number != (header.number - 1) { - panic!("Unexpected block parent"); - } - }, - None => { - panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); - } - } - } - let len = self.numbers.read().unwrap().len(); - if number == len { - { - let mut difficulty = self.difficulty.write().unwrap(); - *difficulty.deref_mut() = *difficulty.deref() + header.difficulty; - } - mem::replace(self.last_hash.write().unwrap().deref_mut(), h.clone()); - self.blocks.write().unwrap().insert(h.clone(), b); - self.numbers.write().unwrap().insert(number, h.clone()); - let mut parent_hash = header.parent_hash; - if number > 0 { - let mut n = number - 1; - while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { - *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); - n -= 1; - parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; - } - } - } - else { - self.blocks.write().unwrap().insert(h.clone(), b.to_vec()); - } - Ok(h) - } - - fn queue_info(&self) -> BlockQueueInfo { - BlockQueueInfo { - verified_queue_size: 0, - unverified_queue_size: 0, - verifying_queue_size: 0, - max_queue_size: 0, - max_mem_use: 0, - mem_used: 0, - } - } - - fn clear_queue(&self) { - } - - fn chain_info(&self) -> BlockChainInfo { - BlockChainInfo { - total_difficulty: *self.difficulty.read().unwrap(), - pending_total_difficulty: *self.difficulty.read().unwrap(), - genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.read().unwrap().clone(), - best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, - } - } -} pub struct TestIo<'p> { pub chain: &'p mut TestBlockChainClient, From 5f37f6edb442aa41097f7c014d2a0a08b20cb1f0 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 21:01:17 +0100 Subject: [PATCH 088/222] Correct cache update order --- ethcore/src/blockchain/blockchain.rs | 52 ++++++++++++++-------------- 1 file changed, 26 insertions(+), 26 deletions(-) diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 8e56cdc5f..4ebd111d9 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -452,7 +452,32 @@ impl BlockChain { let batch = DBTransaction::new(); batch.put(b"best", &update.info.hash).unwrap(); - // These cached values must be updated atomically + { + let mut write_details = self.block_details.write().unwrap(); + for (hash, details) in update.block_details.into_iter() { + batch.put_extras(&hash, &details); + self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone())); + write_details.insert(hash, details); + } + } + + { + let mut write_receipts = self.block_receipts.write().unwrap(); + for (hash, receipt) in &update.block_receipts { + batch.put_extras(hash, receipt); + write_receipts.remove(hash); + } + } + + { + let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); + for (bloom_hash, blocks_bloom) in &update.blocks_blooms { + batch.put_extras(bloom_hash, blocks_bloom); + write_blocks_blooms.remove(bloom_hash); + } + } + + // These cached values must be updated last and together { let mut best_block = self.best_block.write().unwrap(); let mut write_hashes = self.block_hashes.write().unwrap(); @@ -481,31 +506,6 @@ impl BlockChain { } } - { - let mut write_details = 
self.block_details.write().unwrap(); - for (hash, details) in update.block_details.into_iter() { - batch.put_extras(&hash, &details); - self.note_used(CacheID::Extras(ExtrasIndex::BlockDetails, hash.clone())); - write_details.insert(hash, details); - } - } - - { - let mut write_receipts = self.block_receipts.write().unwrap(); - for (hash, receipt) in &update.block_receipts { - batch.put_extras(hash, receipt); - write_receipts.remove(hash); - } - } - - { - let mut write_blocks_blooms = self.blocks_blooms.write().unwrap(); - for (bloom_hash, blocks_bloom) in &update.blocks_blooms { - batch.put_extras(bloom_hash, blocks_bloom); - write_blocks_blooms.remove(bloom_hash); - } - } - // update extras database self.extras_db.write(batch).unwrap(); } From 06a3abd01e0cd6f2435510c1133c34e87c48beff Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 21:15:43 +0100 Subject: [PATCH 089/222] Removed unused return type --- util/src/journaldb.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 97ae077b2..4f2cdeb31 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -153,7 +153,7 @@ impl JournalDB { } /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { let have_journal_overlay = self.journal_overlay.is_some(); if have_journal_overlay { self.commit_with_overlay(now, id, end) @@ -183,16 +183,16 @@ impl JournalDB { } /// Just commit the transaction overlay into the backing DB. - fn commit_without_overlay(&mut self) -> Result { + fn commit_without_overlay(&mut self) -> Result<(), UtilError> { let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); + Self::batch_overlay_insertions(&mut self.transaction_overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) + Ok(()) } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit_with_overlay(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result<(), UtilError> { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut journal_overlay = self.journal_overlay.as_mut().unwrap().write().unwrap(); @@ -274,7 +274,7 @@ impl JournalDB { journal_overlay.journal.remove(&end_era); } try!(self.backing.write(batch)); - Ok(0 as u32) + Ok(()) } fn payload(&self, key: &H256) -> Option { From 29916edb91092d95a5a08c10075bea2bb7098a3f Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Thu, 10 Mar 2016 21:36:45 +0100 Subject: [PATCH 090/222] More geth compatibility. --- parity/main.rs | 101 +++++++++++++++++++++++++------------------------ 1 file changed, 52 insertions(+), 49 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index cced0ed0f..402017f3c 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -37,7 +37,7 @@ extern crate rpassword; #[cfg(feature = "rpc")] extern crate ethcore_rpc as rpc; -use std::net::{SocketAddr}; +use std::net::{SocketAddr, IpAddr}; use std::env; use std::process::exit; use std::path::PathBuf; @@ -77,21 +77,19 @@ Usage: Protocol Options: --chain CHAIN Specify the blockchain type. 
CHAIN may be either a JSON chain specification file or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. - --testnet Equivalent to --chain testnet (geth-compatible). - --networkid INDEX Override the network identifier from the chain we are on. - --archive Client should not prune the state/storage trie. - -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. + --archive Client should not prune the state/storage trie. Networking Options: - --no-bootstrap Don't bother trying to connect to standard bootnodes. - --bootnodes NODES Specify additional comma-separated bootnodes. - --no-discovery Disable new peer discovery. + --port PORT Override the port on which the node should listen [default: 30303]. --peers NUM Try to maintain that many peers [default: 25]. - --port PORT Override the port for the node to listen on, supercedes --address. --nat METHOD Specify method to use for determining public address. Must be one of: any, none, - upnp, extip:(IP) [default: upnp]. + upnp, extip:(IP) [default: any]. + --bootnodes NODES Specify additional comma-separated bootnodes. + --no-bootstrap Don't bother trying to connect to standard bootnodes. + --no-discovery Disable new peer discovery. --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. API and Console Options: @@ -115,6 +113,9 @@ Memory Footprint Options: other cache options (geth-compatible). Geth-Compatibility Options + --datadir PATH Equivalent to --db-path PATH. + --testnet Equivalent to --chain testnet. + --networkid INDEX Override the network identifier from the chain we are on. --rpc Equivalent to --jsonrpc. --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. --rpcport PORT Equivalent to --jsonrpc-port PORT. @@ -139,22 +140,18 @@ struct Args { cmd_new: bool, cmd_list: bool, arg_pid_file: String, - arg_enode: Vec, flag_chain: String, - flag_testnet: bool, - flag_datadir: String, - flag_networkid: Option, + flag_db_path: String, flag_identity: String, flag_cache: Option, flag_keys_path: String, flag_archive: bool, + flag_bootnodes: Option, flag_no_bootstrap: bool, - flag_listen_address: String, - flag_public_address: Option, - flag_address: Option, - flag_maxpeers: usize, + flag_port: u16, + flag_peers: usize, flag_no_discovery: bool, - flag_no_upnp: bool, + flag_nat: String, flag_node_key: Option, flag_cache_pref_size: usize, flag_cache_max_size: usize, @@ -164,15 +161,24 @@ struct Args { flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, flag_jsonrpc_apis: String, + flag_logging: Option, + flag_version: bool, + // geth-compatibility... 
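The geth-compatibility aliases that follow obey one rule used throughout this patch: when both spellings of an option are given, the geth-style flag wins and the parity-native flag is the fallback. A minimal sketch of that precedence (simplified field types, illustrative rather than the real Args struct):

```
// Sketch: geth-style aliases take precedence over parity-native flags,
// mirroring the unwrap_or fallbacks in Configuration below.
struct Args {
    flag_db_path: String,         // parity-native --db-path
    flag_datadir: Option<String>, // geth-compatible --datadir
}

impl Args {
    fn path(&self) -> String {
        self.flag_datadir.as_ref().unwrap_or(&self.flag_db_path).clone()
    }
}

fn main() {
    let defaults = Args { flag_db_path: "$HOME/.parity".into(), flag_datadir: None };
    assert_eq!(defaults.path(), "$HOME/.parity");

    let geth_style = Args { flag_db_path: "$HOME/.parity".into(), flag_datadir: Some("/data/geth".into()) };
    assert_eq!(geth_style.path(), "/data/geth");
}
```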
+ flag_nodekey: Option, + flag_nodiscover: bool, + flag_maxpeers: Option, + flag_author: String, + flag_extra_data: Option, + flag_datadir: Option, + flag_extradata: Option, + flag_etherbase: Option, flag_rpc: bool, flag_rpcaddr: Option, flag_rpcport: Option, flag_rpccorsdomain: Option, flag_rpcapi: Option, - flag_logging: Option, - flag_version: bool, - flag_author: String, - flag_extra_data: Option, + flag_testnet: bool, + flag_networkid: Option, } fn setup_log(init: &Option) { @@ -252,15 +258,17 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + let d = self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path); + d.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { - Address::from_str(&self.args.flag_author).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) + let d = self.args.flag_etherbase.as_ref().unwrap_or(&self.args.flag_author); + Address::from_str(d).unwrap_or_else(|_| die!("{}: Invalid address for --author. Must be 40 hex characters, without the 0x at the beginning.", self.args.flag_author)) } fn extra_data(&self) -> Bytes { - match self.args.flag_extra_data { + match self.args.flag_extradata.as_ref().or(self.args.flag_extra_data.as_ref()) { Some(ref x) if x.len() <= 32 => x.as_bytes().to_owned(), None => version_data(), Some(ref x) => { die!("{}: Extra data must be at most 32 characters.", x); } @@ -292,45 +300,40 @@ impl Configuration { } fn init_nodes(&self, spec: &Spec) -> Vec { - if self.args.flag_no_bootstrap { Vec::new() } else { - match self.args.arg_enode.len() { - 0 => spec.nodes().clone(), - _ => self.args.arg_enode.iter().map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s))).collect(), - } + let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; + if let Some(ref x) = self.args.flag_bootnodes { + r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s)))); } + r } #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { - let mut listen_address = None; - let mut public_address = None; + let listen_address = Some(SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), self.args.flag_port)); + + let host = if self.args.flag_nat.starts_with("extip:") { + &self.args.flag_nat[6..] 
+ } else { + "127.0.0.1" + }; + let public_address = Some(SocketAddr::new( + IpAddr::from_str(&host).unwrap_or_else(|_| die!("{}: Invalid host given with --net extip:", host)), + self.args.flag_port + )); - if let Some(ref a) = self.args.flag_address { - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --address", a))); - listen_address = public_address; - } - if listen_address.is_none() { - listen_address = Some(SocketAddr::from_str(self.args.flag_listen_address.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --listen-address", self.args.flag_listen_address))); - } - if let Some(ref a) = self.args.flag_public_address { - if public_address.is_some() { - die!("Conflicting flags provided: --address and --public-address"); - } - public_address = Some(SocketAddr::from_str(a.as_ref()).unwrap_or_else(|_| die!("{}: Invalid listen/public address given with --public-address", a))); - } (listen_address, public_address) } fn net_settings(&self, spec: &Spec) -> NetworkConfiguration { let mut ret = NetworkConfiguration::new(); - ret.nat_enabled = !self.args.flag_no_upnp; + ret.nat_enabled = self.args.flag_nat == "any" || self.args.flag_nat == "upnp"; ret.boot_nodes = self.init_nodes(spec); let (listen, public) = self.net_addresses(); ret.listen_address = listen; ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); - ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_maxpeers as u32; + ret.discovery_enabled = !self.args.flag_no_discovery && !self.args.flag_nodiscover; + ret.ideal_peers = self.args.flag_maxpeers.unwrap_or(self.args.flag_peers) as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); From d9c462a3d3bd5a15b5c8dc6c8424b85b4179349c Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 10:05:27 +0100 Subject: [PATCH 091/222] Use proper listen address. Tidyups. --- parity/main.rs | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index d6c3516d0..732e94f02 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -302,25 +302,21 @@ impl Configuration { fn init_nodes(&self, spec: &Spec) -> Vec { let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; if let Some(ref x) = self.args.flag_bootnodes { - r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(||die!("{}: Invalid node address format given for a boot node.", s)))); + r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s)))); } r } #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { - let listen_address = Some(SocketAddr::new(IpAddr::from_str("127.0.0.1").unwrap(), self.args.flag_port)); - - let host = if self.args.flag_nat.starts_with("extip:") { - &self.args.flag_nat[6..] 
+ let listen_address = Some(SocketAddr::new(IpAddr::from_str("0.0.0.0").unwrap(), self.args.flag_port)); + let public_address = if self.args.flag_nat.starts_with("extip:") { + let host = &self.args.flag_nat[6..]; + let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); + Some(SocketAddr::new(host, self.args.flag_port)) } else { - "127.0.0.1" + listen_address.clone() }; - let public_address = Some(SocketAddr::new( - IpAddr::from_str(&host).unwrap_or_else(|_| die!("{}: Invalid host given with --net extip:", host)), - self.args.flag_port - )); - (listen_address, public_address) } From 34a120e1270288ffafc3518f8a4fb681ee5cfdf2 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 10:17:20 +0100 Subject: [PATCH 092/222] net tests --- rpc/src/v1/impls/net.rs | 4 +- rpc/src/v1/mod.rs | 3 +- rpc/src/v1/tests/helpers/mod.rs | 19 ++++++++ rpc/src/v1/tests/helpers/sync_provider.rs | 57 +++++++++++++++++++++++ rpc/src/v1/tests/mod.rs | 21 ++++++++- rpc/src/v1/tests/net.rs | 52 +++++++++++++++++++++ sync/src/chain.rs | 1 + 7 files changed, 153 insertions(+), 4 deletions(-) create mode 100644 rpc/src/v1/tests/helpers/mod.rs create mode 100644 rpc/src/v1/tests/helpers/sync_provider.rs create mode 100644 rpc/src/v1/tests/net.rs diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 5e67bf252..1918073e3 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -36,10 +36,10 @@ impl NetClient where S: SyncProvider { impl Net for NetClient where S: SyncProvider + 'static { fn version(&self, _: Params) -> Result { - Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) + Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())) } fn peer_count(&self, _params: Params) -> Result { - Ok(Value::U64(take_weak!(self.sync).status().num_peers as u64)) + Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) } } diff --git a/rpc/src/v1/mod.rs b/rpc/src/v1/mod.rs index 104a8b3f0..b82a20e89 100644 --- a/rpc/src/v1/mod.rs +++ b/rpc/src/v1/mod.rs @@ -21,9 +21,10 @@ pub mod traits; mod impls; mod types; +mod helpers; + #[cfg(test)] mod tests; -mod helpers; pub use self::traits::{Web3, Eth, EthFilter, Personal, Net}; pub use self::impls::*; diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs new file mode 100644 index 000000000..501bfb2d3 --- /dev/null +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -0,0 +1,19 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +mod sync_provider; + +pub use self::sync_provider::{Config, TestSyncProvider}; diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs new file mode 100644 index 000000000..b6d9241dd --- /dev/null +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -0,0 +1,57 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. 
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use ethcore::transaction::SignedTransaction; +use ethsync::{SyncProvider, SyncStatus, SyncState}; + +pub struct Config { + pub protocol_version: u8, + pub num_peers: usize, +} + +pub struct TestSyncProvider { + status: SyncStatus, +} + +impl TestSyncProvider { + pub fn new(config: Config) -> Self { + TestSyncProvider { + status: SyncStatus { + state: SyncState::NotSynced, + protocol_version: config.protocol_version, + start_block_number: 0, + last_imported_block_number: None, + highest_block_number: None, + blocks_total: 0, + blocks_received: 0, + num_peers: config.num_peers, + num_active_peers: 0, + mem_used: 0, + }, + } + } +} + +impl SyncProvider for TestSyncProvider { + fn status(&self) -> SyncStatus { + self.status.clone() + } + + fn insert_transaction(&self, _transaction: SignedTransaction) { + unimplemented!() + } +} + diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index bdf4567b6..5ef74987c 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -1 +1,20 @@ -//TODO: load custom blockchain state and test +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//!TODO: load custom blockchain state and test + +mod net; +mod helpers; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs new file mode 100644 index 000000000..f6c4ae2e9 --- /dev/null +++ b/rpc/src/v1/tests/net.rs @@ -0,0 +1,52 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
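With this helper in place, every test below builds its provider from the same two-field `Config`, and the RPC layer renders the returned numbers as JSON strings. The 0x-prefixed hex encoding used for `net_peerCount` can be checked in isolation; a standalone sketch, not code from the patch:

```
// Sketch: the 0x-prefixed hex encoding the Net RPC uses for peer counts.
fn encode_peer_count(num_peers: u64) -> String {
    format!("0x{:x}", num_peers)
}

fn main() {
    // 120 peers, as configured in TestSyncProvider, serializes to "0x78",
    // the value the rpc_net_peer_count test below expects.
    assert_eq!(encode_peer_count(120), "0x78");
    assert_eq!(encode_peer_count(0), "0x0");
}
```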
+ +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::{Net, NetClient}; +use v1::tests::helpers::{Config, TestSyncProvider}; + +#[test] +fn rpc_net_version() { + let sync = Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_string())); +} + +#[test] +fn rpc_net_peer_count() { + let sync = Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_string())); +} diff --git a/sync/src/chain.rs b/sync/src/chain.rs index da3908a1e..da25c72de 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -120,6 +120,7 @@ pub enum SyncState { } /// Syncing status and statistics +#[derive(Clone)] pub struct SyncStatus { /// State pub state: SyncState, From 3bbdc03d0cbebe315a059ac7762db386805868f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:17:51 +0100 Subject: [PATCH 093/222] Fixing doctest. --- sync/src/lib.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index e0158a564..d54acaf8a 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -31,18 +31,21 @@ //! extern crate ethcore_util as util; //! extern crate ethcore; //! extern crate ethsync; +//! extern crate ethminer; //! use std::env; //! use std::sync::Arc; //! use util::network::{NetworkService, NetworkConfiguration}; //! use ethcore::client::{Client, ClientConfig}; //! use ethsync::{EthSync, SyncConfig}; +//! use ethminer::Miner; //! use ethcore::ethereum; //! //! fn main() { //! let mut service = NetworkService::start(NetworkConfiguration::new()).unwrap(); //! let dir = env::temp_dir(); //! let client = Client::new(ClientConfig::default(), ethereum::new_frontier(), &dir, service.io().channel()).unwrap(); -//! EthSync::register(&mut service, SyncConfig::default(), client); +//! let miner = Miner::new(); +//! EthSync::register(&mut service, SyncConfig::default(), client, miner); //! } //! 
``` From 8e52510dbb45642105f3fb4dab5e6fc166225fff Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 10:21:25 +0100 Subject: [PATCH 094/222] implemented net_listening method --- rpc/src/v1/impls/net.rs | 5 +++++ rpc/src/v1/tests/net.rs | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 1918073e3..e52fc0bd4 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -42,4 +42,9 @@ impl Net for NetClient where S: SyncProvider + 'static { fn peer_count(&self, _params: Params) -> Result { Ok(Value::String(format!("0x{:x}", take_weak!(self.sync).status().num_peers as u64).to_owned())) } + + fn is_listening(&self, _: Params) -> Result { + // right now (11 march 2016), we are always listening for incoming connections + Ok(Value::Bool(true)) + } } diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs index f6c4ae2e9..9cb0bd189 100644 --- a/rpc/src/v1/tests/net.rs +++ b/rpc/src/v1/tests/net.rs @@ -50,3 +50,19 @@ fn rpc_net_peer_count() { assert_eq!(io.handle_request(request), Some(response.to_string())); } + +#[test] +fn rpc_net_listening() { + let sync = Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })); + let net = NetClient::new(&sync).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(net); + + let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_string())); +} From a6d268db1618dea7bb2c6189a443777cb0760438 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 10:30:13 +0100 Subject: [PATCH 095/222] fixed missing reexport --- ethcore/src/blockchain/mod.rs | 2 +- ethcore/src/client/client.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/ethcore/src/blockchain/mod.rs b/ethcore/src/blockchain/mod.rs index 6559d8364..29a4ee684 100644 --- a/ethcore/src/blockchain/mod.rs +++ b/ethcore/src/blockchain/mod.rs @@ -23,9 +23,9 @@ mod bloom_indexer; mod cache; mod tree_route; mod update; +mod import_route; #[cfg(test)] mod generator; -mod import_route; pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig}; pub use self::cache::CacheSize; diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4ef8bb029..40f86f7a2 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -37,8 +37,9 @@ use filter::Filter; use log_entry::LocalizedLogEntry; use util::keys::store::SecretStore; use block_queue::{BlockQueue, BlockQueueInfo}; -use blockchain::{BlockChain, BlockProvider, TreeRoute, CacheSize as BlockChainCacheSize}; +use blockchain::{BlockChain, BlockProvider, TreeRoute}; use client::{BlockId, TransactionId, ClientConfig, BlockChainClient}; +pub use blockchain::CacheSize as BlockChainCacheSize; /// General block status #[derive(Debug, Eq, PartialEq)] From 3a4a7ac822289181229c16af58690e63e3b98b62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:35:26 +0100 Subject: [PATCH 096/222] Bumping clippy version --- Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- util/bigint/Cargo.toml | 1 - 6 files changed, 5 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 22d0f9288..70e4cbc34 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } 
fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index fbfe175d7..b5c4a7636 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -21,7 +21,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" } num_cpus = "0.2" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2ce430e51..918160be9 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,7 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 46baa8a83..8412d3700 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -14,7 +14,7 @@ rustc_version = "0.1" [dependencies] ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } log = "0.3" env_logger = "0.3" time = "0.1.34" diff --git a/util/Cargo.toml b/util/Cargo.toml index 0ce27ec2b..93b475538 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -27,7 +27,7 @@ crossbeam = "0.2" slab = "0.1" sha3 = { path = "sha3" } serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } +clippy = { version = "0.0.49", optional = true } json-tests = { path = "json-tests" } rustc_version = "0.1.0" igd = "0.4.2" diff --git a/util/bigint/Cargo.toml b/util/bigint/Cargo.toml index 377391eeb..1bd2b994e 100644 --- a/util/bigint/Cargo.toml +++ b/util/bigint/Cargo.toml @@ -15,7 +15,6 @@ rustc-serialize = "0.3" arrayvec = "0.3" rand = "0.3.12" serde = "0.7.0" -clippy = { version = "0.0.44", optional = true } heapsize = "0.3" [features] From 8709dd28f884742756ae4bbba9bd628708f271bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 10:57:58 +0100 Subject: [PATCH 097/222] Fixing clippy warnings --- Cargo.lock | 30 ++++++++++++------------------ ethcore/src/extras.rs | 14 ++++++++++---- ethcore/src/lib.rs | 2 ++ ethcore/src/substate.rs | 14 ++++++++++---- sync/src/chain.rs | 2 +- sync/src/lib.rs | 4 +++- sync/src/transaction_queue.rs | 8 ++++++++ util/src/keys/store.rs | 10 ++++++++-- util/src/kvdb.rs | 7 +++++++ util/src/lib.rs | 2 ++ util/src/memorydb.rs | 11 +++++++++-- util/src/network/discovery.rs | 9 ++++++++- util/src/network/host.rs | 9 ++++++++- util/src/panics.rs | 11 +++++++++-- util/src/rlp/rlpstream.rs | 17 +++++++++++++++-- util/src/table.rs | 13 ++++++++++++- util/src/trie/journal.rs | 7 +++++++ 17 files changed, 131 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f583a8747..e33e0288c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 
(git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +94,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.44" +version = "0.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +207,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -234,7 +234,7 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -258,7 +258,7 @@ dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -273,7 +273,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -290,7 +290,7 @@ dependencies = [ name = "ethsync" version = "0.9.99" dependencies = [ - "clippy 0.0.44 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -469,11 +469,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.2.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "pkg-config 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -597,11 +596,6 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "pkg-config" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" - [[package]] name = "primal" version = 
"0.2.3" @@ -697,11 +691,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.1" -source = "git+https://github.com/arkpar/rust-rocksdb.git#2156621f583bda95c1c07e89e79e4019f75158ee" +version = "0.4.2" +source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", + "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] diff --git a/ethcore/src/extras.rs b/ethcore/src/extras.rs index f4759b040..a7c82c37c 100644 --- a/ethcore/src/extras.rs +++ b/ethcore/src/extras.rs @@ -35,13 +35,13 @@ pub enum ExtrasIndex { BlocksBlooms = 4, /// Block receipts index BlockReceipts = 5, -} +} /// trait used to write Extras data to db pub trait ExtrasWritable { /// Write extra data to db fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable; } @@ -60,9 +60,9 @@ pub trait ExtrasReadable { impl ExtrasWritable for DBTransaction { fn put_extras(&self, hash: &K, value: &T) where - T: ExtrasIndexable + Encodable, + T: ExtrasIndexable + Encodable, K: ExtrasSliceConvertable { - + self.put(&hash.to_extras_slice(T::extras_index()), &encode(value)).unwrap() } } @@ -215,6 +215,12 @@ pub struct BlocksBlooms { pub blooms: [H2048; 16], } +impl Default for BlocksBlooms { + fn default() -> Self { + BlocksBlooms::new() + } +} + impl BlocksBlooms { pub fn new() -> Self { BlocksBlooms { blooms: unsafe { ::std::mem::zeroed() }} diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..2209df7dc 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -25,6 +25,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore library //! diff --git a/ethcore/src/substate.rs b/ethcore/src/substate.rs index 374397ca7..57e35ad2e 100644 --- a/ethcore/src/substate.rs +++ b/ethcore/src/substate.rs @@ -31,6 +31,12 @@ pub struct Substate { pub contracts_created: Vec
} +impl Default for Substate { + fn default() -> Self { + Substate::new() + } +} + impl Substate { /// Creates new substate. pub fn new() -> Self { @@ -67,8 +73,8 @@ mod tests { let mut sub_state = Substate::new(); sub_state.contracts_created.push(address_from_u64(1u64)); sub_state.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state.sstore_clears_count = x!(5); @@ -77,8 +83,8 @@ mod tests { let mut sub_state_2 = Substate::new(); sub_state_2.contracts_created.push(address_from_u64(2u64)); sub_state_2.logs.push(LogEntry { - address: address_from_u64(1u64), - topics: vec![], + address: address_from_u64(1u64), + topics: vec![], data: vec![] }); sub_state_2.sstore_clears_count = x!(7); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index da3908a1e..a22f23eb8 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1307,7 +1307,7 @@ impl ChainSync { where T: Fn(&Address) -> U256 { let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce); + let _ = queue.add(transaction, fetch_nonce); } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..6cb98521e 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -17,9 +17,10 @@ #![warn(missing_docs)] #![cfg_attr(all(nightly, feature="dev"), feature(plugin))] #![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] - // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: @@ -172,6 +173,7 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } + #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 243939a4c..45dc0e299 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -79,6 +79,7 @@ //! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. //! 
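The `current`/`future` split described in the module comment above is easiest to see in a toy model: `current` holds a gapless run of nonces starting at the account nonce, while `future` parks anything with a gap and promotes it once the gap closes. A simplified sketch of that invariant (hypothetical types, not the queue's real API):

```
use std::collections::BTreeMap;

// Toy model of one sender's queues; payloads are placeholders.
struct SenderQueue {
    account_nonce: u64,
    current: BTreeMap<u64, &'static str>, // gapless run starting at account_nonce
    future: BTreeMap<u64, &'static str>,  // gapped nonces wait here
}

impl SenderQueue {
    fn insert(&mut self, nonce: u64, tx: &'static str) {
        let next = self.account_nonce + self.current.len() as u64;
        if nonce == next {
            self.current.insert(nonce, tx);
            // A new consecutive nonce may unlock queued future transactions.
            let mut n = nonce + 1;
            while let Some(t) = self.future.remove(&n) {
                self.current.insert(n, t);
                n += 1;
            }
        } else if nonce > next {
            self.future.insert(nonce, tx);
        }
        // nonce < next: already known; the real queue replaces by gas price.
    }
}

fn main() {
    let mut q = SenderQueue { account_nonce: 0, current: BTreeMap::new(), future: BTreeMap::new() };
    q.insert(0, "a");
    q.insert(2, "c"); // gap at nonce 1 keeps this in `future`
    assert_eq!(q.future.len(), 1);
    q.insert(1, "b"); // closes the gap and promotes nonce 2
    assert_eq!(q.current.len(), 3);
    assert!(q.future.is_empty());
}
```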
+use std::default::Default; use std::cmp::{Ordering}; use std::collections::{HashMap, BTreeSet}; use util::numbers::{Uint, U256}; @@ -102,6 +103,7 @@ struct TransactionOrder { hash: H256, } + impl TransactionOrder { fn for_transaction(tx: &VerifiedTransaction, base_nonce: U256) -> Self { TransactionOrder { @@ -253,6 +255,12 @@ pub struct TransactionQueue { last_nonces: HashMap, } +impl Default for TransactionQueue { + fn default() -> Self { + TransactionQueue::new() + } +} + impl TransactionQueue { /// Creates new instance of this Queue pub fn new() -> Self { diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index dcc165259..7e12703ea 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -78,9 +78,15 @@ struct AccountUnlock { expires: DateTime, } +impl Default for SecretStore { + fn default() -> Self { + SecretStore::new() + } +} + impl SecretStore { /// new instance of Secret Store in default home directory - pub fn new() -> SecretStore { + pub fn new() -> Self { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); @@ -89,7 +95,7 @@ impl SecretStore { } /// new instance of Secret Store in specific directory - pub fn new_in(path: &Path) -> SecretStore { + pub fn new_in(path: &Path) -> Self { SecretStore { directory: KeyDirectory::new(path), unlocks: RwLock::new(HashMap::new()), diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index a2fa2215a..df5c2c448 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -16,6 +16,7 @@ //! Key-Value store abstraction with RocksDB backend. +use std::default::Default; use rocksdb::{DB, Writable, WriteBatch, IteratorMode, DBVector, DBIterator, IndexType, Options, DBCompactionStyle, BlockBasedOptions, Direction}; @@ -24,6 +25,12 @@ pub struct DBTransaction { batch: WriteBatch, } +impl Default for DBTransaction { + fn default() -> Self { + DBTransaction::new() + } +} + impl DBTransaction { /// Create new transaction. pub fn new() -> DBTransaction { diff --git a/util/src/lib.rs b/util/src/lib.rs index 59d66a325..bdb4be6e4 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -27,6 +27,8 @@ #![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. #![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +// In most cases it expresses function flow better +#![cfg_attr(all(nightly, feature="dev"), allow(if_not_else))] //! Ethcore-util library //! diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..66fa32055 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -24,6 +24,7 @@ use hashdb::*; use heapsize::*; use std::mem; use std::collections::HashMap; +use std::default::Default; #[derive(Debug,Clone)] /// Reference-counted memory-based HashDB implementation. @@ -32,7 +33,7 @@ use std::collections::HashMap; /// with `kill()`, check for existance with `exists()` and lookup a hash to derive /// the data with `lookup()`. Clear with `clear()` and purge the portions of the data /// that have no references with `purge()`. -/// +/// /// # Example /// ```rust /// extern crate ethcore_util; @@ -74,6 +75,12 @@ pub struct MemoryDB { static_null_rlp: (Bytes, i32), } +impl Default for MemoryDB { + fn default() -> Self { + MemoryDB::new() + } +} + impl MemoryDB { /// Create a new instance of the memory DB. pub fn new() -> MemoryDB { @@ -133,7 +140,7 @@ impl MemoryDB { /// Denote than an existing value has the given key. 
Used when a key gets removed without /// a prior insert and thus has a negative reference with no value. - /// + /// /// May safely be called even if the key's value is known, in which case it will be a no-op. pub fn denote(&self, key: &H256, value: Bytes) -> &(Bytes, i32) { if self.raw(key) == None { diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 644af22af..2e7c51cb0 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -19,6 +19,7 @@ use std::net::SocketAddr; use std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; use std::cmp; +use std::default::Default; use mio::*; use mio::udp::*; use sha3::*; @@ -62,8 +63,14 @@ struct NodeBucket { nodes: VecDeque, //sorted by last active } +impl Default for NodeBucket { + fn default() -> Self { + NodeBucket::new() + } +} + impl NodeBucket { - fn new() -> NodeBucket { + fn new() -> Self { NodeBucket { nodes: VecDeque::new() } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 2d1af55ba..3db94131a 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -23,6 +23,7 @@ use std::ops::*; use std::cmp::min; use std::path::{Path, PathBuf}; use std::io::{Read, Write}; +use std::default::Default; use std::fs; use mio::*; use mio::tcp::*; @@ -75,9 +76,15 @@ pub struct NetworkConfiguration { pub ideal_peers: u32, } +impl Default for NetworkConfiguration { + fn default() -> Self { + NetworkConfiguration::new() + } +} + impl NetworkConfiguration { /// Create a new instance of default settings. - pub fn new() -> NetworkConfiguration { + pub fn new() -> Self { NetworkConfiguration { config_path: None, listen_address: None, diff --git a/util/src/panics.rs b/util/src/panics.rs index 70ce0bc33..ab25eae57 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -19,6 +19,7 @@ use std::thread; use std::ops::DerefMut; use std::sync::{Arc, Mutex}; +use std::default::Default; /// Thread-safe closure for handling possible panics pub trait OnPanicListener: Send + Sync + 'static { @@ -56,14 +57,20 @@ pub struct PanicHandler { listeners: Mutex>> } +impl Default for PanicHandler { + fn default() -> Self { + PanicHandler::new() + } +} + impl PanicHandler { /// Creates new `PanicHandler` wrapped in `Arc` - pub fn new_in_arc() -> Arc { + pub fn new_in_arc() -> Arc { Arc::new(Self::new()) } /// Creates new `PanicHandler` - pub fn new() -> PanicHandler { + pub fn new() -> Self { PanicHandler { listeners: Mutex::new(vec![]) } diff --git a/util/src/rlp/rlpstream.rs b/util/src/rlp/rlpstream.rs index ba70e7b2b..7bf3d3cdd 100644 --- a/util/src/rlp/rlpstream.rs +++ b/util/src/rlp/rlpstream.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
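The `Default` implementations added across these files all share one shape: `default()` simply forwards to the existing `new()`, which satisfies clippy's `new_without_default` lint without duplicating construction logic. A minimal standalone sketch of the pattern:

```
// Sketch: keep new() as the canonical constructor and let Default delegate,
// the same shape as the NodeBucket/NetworkConfiguration/PanicHandler hunks.
#[derive(Debug, PartialEq)]
struct Bucket {
    slots: Vec<u8>,
}

impl Bucket {
    fn new() -> Self {
        Bucket { slots: Vec::new() }
    }
}

impl Default for Bucket {
    fn default() -> Self {
        Bucket::new()
    }
}

fn main() {
    assert_eq!(Bucket::new(), Bucket::default());
}
```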
use std::ops::Deref; +use std::default::Default; use elastic_array::*; use rlp::bytes::{ToBytes, VecLike}; use rlp::{Stream, Encoder, Encodable}; @@ -44,6 +45,12 @@ pub struct RlpStream { finished_list: bool, } +impl Default for RlpStream { + fn default() -> Self { + RlpStream::new() + } +} + impl Stream for RlpStream { fn new() -> Self { RlpStream { @@ -190,8 +197,14 @@ struct BasicEncoder { bytes: ElasticArray1024, } +impl Default for BasicEncoder { + fn default() -> Self { + BasicEncoder::new() + } +} + impl BasicEncoder { - fn new() -> BasicEncoder { + fn new() -> Self { BasicEncoder { bytes: ElasticArray1024::new() } } @@ -222,7 +235,7 @@ impl Encoder for BasicEncoder { // just 0 0 => self.bytes.push(0x80u8), // byte is its own encoding if < 0x80 - 1 => { + 1 => { value.to_bytes(&mut self.bytes); let len = self.bytes.len(); let last_byte = self.bytes[len - 1]; diff --git a/util/src/table.rs b/util/src/table.rs index e41209608..5ba572289 100644 --- a/util/src/table.rs +++ b/util/src/table.rs @@ -16,6 +16,7 @@ //! A collection associating pair of keys (row and column) with a single value. +use std::default::Default; use std::hash::Hash; use std::collections::HashMap; @@ -30,11 +31,21 @@ pub struct Table map: HashMap>, } +impl Default for Table + where Row: Eq + Hash + Clone, + Col: Eq + Hash { + fn default() -> Self { + Table::new() + } +} + +// There is default but clippy does not detect it? +#[allow(new_without_default)] impl Table where Row: Eq + Hash + Clone, Col: Eq + Hash { /// Creates new Table - pub fn new() -> Table { + pub fn new() -> Self { Table { map: HashMap::new(), } diff --git a/util/src/trie/journal.rs b/util/src/trie/journal.rs index db16a313d..4ffd7cf5c 100644 --- a/util/src/trie/journal.rs +++ b/util/src/trie/journal.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::default::Default; use sha3::*; use hash::H256; use bytes::*; @@ -39,6 +40,12 @@ pub struct Score { #[derive(Debug)] pub struct Journal (Vec); +impl Default for Journal { + fn default() -> Self { + Journal::new() + } +} + impl Journal { /// Create a new, empty, object. 
pub fn new() -> Journal { Journal(vec![]) }

From 18939462c392a3c175b69dc6002454c7baeb55ca Mon Sep 17 00:00:00 2001
From: debris
Date: Fri, 11 Mar 2016 11:03:43 +0100
Subject: [PATCH 098/222] sync_provider function

---
 rpc/src/v1/tests/net.rs | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs
index 9cb0bd189..792e469d8 100644
--- a/rpc/src/v1/tests/net.rs
+++ b/rpc/src/v1/tests/net.rs
@@ -19,12 +19,16 @@ use jsonrpc_core::IoHandler;
 use v1::{Net, NetClient};
 use v1::tests::helpers::{Config, TestSyncProvider};

-#[test]
-fn rpc_net_version() {
-	let sync = Arc::new(TestSyncProvider::new(Config {
+fn sync_provider() -> Arc {
+	Arc::new(TestSyncProvider::new(Config {
 		protocol_version: 65,
 		num_peers: 120,
-	}));
+	}))
+}
+
+#[test]
+fn rpc_net_version() {
+	let sync = sync_provider();
 	let net = NetClient::new(&sync).to_delegate();
 	let io = IoHandler::new();
 	io.add_delegate(net);
@@ -37,10 +41,7 @@

 #[test]
 fn rpc_net_peer_count() {
-	let sync = Arc::new(TestSyncProvider::new(Config {
-		protocol_version: 65,
-		num_peers: 120,
-	}));
+	let sync = sync_provider();
 	let net = NetClient::new(&sync).to_delegate();
 	let io = IoHandler::new();
 	io.add_delegate(net);
@@ -53,10 +54,7 @@

 #[test]
 fn rpc_net_listening() {
-	let sync = Arc::new(TestSyncProvider::new(Config {
-		protocol_version: 65,
-		num_peers: 120,
-	}));
+	let sync = sync_provider();
 	let net = NetClient::new(&sync).to_delegate();
 	let io = IoHandler::new();
 	io.add_delegate(net);

From a8a21da9ba7942615a6bbf031fc0ac78e9bc83f9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Fri, 11 Mar 2016 11:05:20 +0100
Subject: [PATCH 099/222] Updating hook and removing running clippy from dev-dependencies

---
 Cargo.toml | 6 ------
 hook.sh | 13 +++++++++++--
 2 files changed, 11 insertions(+), 8 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 22d0f9288..563dd2a69 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -27,12 +27,6 @@ ethcore-devtools = { path = "devtools" }
 ethcore-rpc = { path = "rpc", optional = true }
 rpassword = "0.1"

-[dev-dependencies]
-ethcore = { path = "ethcore", features = ["dev"] }
-ethcore-util = { path = "util", features = ["dev"] }
-ethsync = { path = "sync", features = ["dev"] }
-ethcore-rpc = { path = "rpc", features = ["dev"] }
-
 [features]
 default = ["rpc"]
 rpc = ["ethcore-rpc"]
diff --git a/hook.sh b/hook.sh
index 354fddd5d..aa6ed7415 100755
--- a/hook.sh
+++ b/hook.sh
@@ -1,3 +1,12 @@
 #!/bin/sh
-echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push
-chmod +x ./.git/hooks/pre-push
+FILE=./.git/hooks/pre-push
+echo "#!/bin/sh\n" > $FILE
+# Exit on any error
+echo "set -e" >> $FILE
+# Run release build
+echo "cargo build --release --features dev-clippy" >> $FILE
+# Build tests
+echo "cargo test --no-run --features dev-clippy \\" >> $FILE
+echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" >> $FILE
+echo "" >> $FILE
+chmod +x $FILE

From d84e008e00b23d64d6bee1920cc545b434f5348d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?=
Date: Fri, 11 Mar 2016 11:16:49 +0100
Subject: [PATCH 100/222] Removing superfluous check for nightly

---
 Cargo.toml | 5 ++---
 cargo.sh | 2 --
 ethcore/Cargo.toml | 6 +-----
 ethcore/build.rs | 25 -------------------------
 ethcore/src/basic_types.rs | 2 +-
 ethcore/src/block.rs | 2 +-
 ethcore/src/block_queue.rs | 2 +-
ethcore/src/blockchain/blockchain.rs | 4 ++-- ethcore/src/ethereum/ethash.rs | 2 +- ethcore/src/evm/interpreter.rs | 6 +++--- ethcore/src/externalities.rs | 2 +- ethcore/src/lib.rs | 10 +++++----- ethcore/src/service.rs | 2 +- ethcore/src/spec.rs | 2 +- ethcore/src/state.rs | 2 +- ethcore/src/transaction.rs | 2 +- hook.sh | 4 ++-- parity/main.rs | 6 +++--- rpc/Cargo.toml | 3 +-- rpc/build.rs | 7 ------- sync/Cargo.toml | 6 +----- sync/build.rs | 25 ------------------------- sync/src/chain.rs | 6 +++--- sync/src/lib.rs | 6 +++--- sync/src/range_collection.rs | 4 ++-- util/Cargo.toml | 3 +-- util/bigint/src/uint.rs | 6 +++--- util/build.rs | 5 ----- util/src/hash.rs | 4 ++-- util/src/lib.rs | 12 ++++++------ util/src/network/discovery.rs | 2 +- util/src/network/host.rs | 4 ++-- util/src/panics.rs | 2 +- util/src/trie/triedb.rs | 2 +- util/src/trie/triedbmut.rs | 4 ++-- 35 files changed, 56 insertions(+), 131 deletions(-) delete mode 100755 cargo.sh delete mode 100644 ethcore/build.rs delete mode 100644 sync/build.rs diff --git a/Cargo.toml b/Cargo.toml index 563dd2a69..4daabf669 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,10 +30,9 @@ rpassword = "0.1" [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] -dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"] +travis-nightly = ["ethcore/json-tests", "dev"] [[bin]] path = "parity/main.rs" diff --git a/cargo.sh b/cargo.sh deleted file mode 100755 index 6870ab385..000000000 --- a/cargo.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -cargo "$@" --features dev-clippy diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index fbfe175d7..c3a3d32dc 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -5,10 +5,6 @@ license = "GPL-3.0" name = "ethcore" version = "0.9.99" authors = ["Ethcore "] -build = "build.rs" - -[build-dependencies] -rustc_version = "0.1" [dependencies] log = "0.3" @@ -31,5 +27,5 @@ jit = ["evmjit"] evm-debug = [] json-tests = [] test-heavy = [] -dev = [] +dev = ["clippy"] default = [] diff --git a/ethcore/build.rs b/ethcore/build.rs deleted file mode 100644 index 41b9a1b3e..000000000 --- a/ethcore/build.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
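Dropping these build scripts works because the gating no longer needs a toolchain probe: the `dev` feature now implies clippy directly, so attributes can key on the feature alone, as the crate-root hunks below show. A runnable sketch of feature-keyed lint gating (the feature name and lint are illustrative):

```
// Sketch: with `--features dev` the allow attribute is applied; on a plain
// build the cfg_attr strips it entirely, so no nightly detection is needed.
#![cfg_attr(feature = "dev", allow(needless_range_loop))]

fn main() {
    let xs = [1, 2, 3];
    let mut sum = 0;
    for i in 0..xs.len() {
        sum += xs[i]; // the loop clippy would flag without the allow
    }
    assert_eq!(sum, 6);
}
```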
- -extern crate rustc_version; - -use rustc_version::{version_meta, Channel}; - -fn main() { - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } -} diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs index 9cba8b3a0..5f6515c0d 100644 --- a/ethcore/src/basic_types.rs +++ b/ethcore/src/basic_types.rs @@ -24,7 +24,7 @@ pub type LogBloom = H2048; /// Constant 2048-bit datum for 0. Often used as a default. pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); -#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] +#[cfg_attr(feature="dev", allow(enum_variant_names))] /// Semantic boolean for when a seal/signature is included. pub enum Seal { /// The seal/signature is included. diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index b3894db94..9ecd58e0a 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,7 +16,7 @@ //! Blockchain block. -#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> +#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> use common::*; use engine::*; diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 3dfb98e8a..c83542f12 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -121,7 +121,7 @@ struct QueueSignal { } impl QueueSignal { - #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))] + #[cfg_attr(feature="dev", allow(bool_comparison))] fn set(&self) { if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message"); diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 8c21532c8..d67c1b7f1 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -891,7 +891,7 @@ mod tests { } #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn test_find_uncles() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); @@ -929,7 +929,7 @@ mod tests { } #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn test_small_fork() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index b0c0e4a9f..f9810b964 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -202,7 +202,7 @@ impl Engine for Ethash { } } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self +#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index fb8d19357..7491321cb 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -243,7 +243,7 @@ struct CodeReader<'a> { code: &'a Bytes } -#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))] +#[cfg_attr(feature="dev", allow(len_without_is_empty))] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. 
Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> { } } -#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] +#[cfg_attr(feature="dev", allow(enum_variant_names))] enum InstructionCost { Gas(U256), GasMem(U256, U256), @@ -347,7 +347,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index a1f5763ea..598921580 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or_else(|| vec![]) } - #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] + #[cfg_attr(feature="dev", allow(match_ref_pats))] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result { match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 469364eb3..938da02a0 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,16 +15,16 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] // Clippy config // TODO [todr] not really sure -#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] +#![cfg_attr(feature="dev", allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] +#![cfg_attr(feature="dev", allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +#![cfg_attr(feature="dev", allow(clone_on_copy))] //! Ethcore library //! diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index bd15ee501..6daf0d7b6 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -117,7 +117,7 @@ impl IoHandler for ClientIoHandler { } } - #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] + #[cfg_attr(feature="dev", allow(single_match))] fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { if let UserMessage(ref message) = *net_message { match *message { diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 774024351..2208350cc 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -99,7 +99,7 @@ pub struct Spec { genesis_state: PodState, } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) +#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 7c1064abf..c13678c38 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -224,7 +224,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. 
- #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] + #[cfg_attr(feature="dev", allow(match_ref_pats))] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index 733e5ac6b..a51824494 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -80,7 +80,7 @@ impl Transaction { } impl FromJson for SignedTransaction { - #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))] + #[cfg_attr(feature="dev", allow(single_char_pattern))] fn from_json(json: &Json) -> SignedTransaction { let t = Transaction { nonce: xjson!(&json["nonce"]), diff --git a/hook.sh b/hook.sh index aa6ed7415..9780541fe 100755 --- a/hook.sh +++ b/hook.sh @@ -4,9 +4,9 @@ echo "#!/bin/sh\n" > $FILE # Exit on any error echo "set -e" >> $FILE # Run release build -echo "cargo build --release --features dev-clippy" >> $FILE +echo "cargo build --release --features dev" >> $FILE # Build tests -echo "cargo test --no-run --features dev-clippy \\" >> $FILE +echo "cargo test --no-run --features dev \\" >> $FILE echo " -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" >> $FILE echo "" >> $FILE chmod +x $FILE diff --git a/parity/main.rs b/parity/main.rs index efff52e4e..c38c2d515 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -17,8 +17,8 @@ //! Ethcore client application. #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -293,7 +293,7 @@ impl Configuration { } } - #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] + #[cfg_attr(feature="dev", allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { let mut listen_address = None; let mut public_address = None; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 2ce430e51..f324aba10 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -26,9 +26,8 @@ serde_macros = { version = "0.7.0", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } syntex = "0.29.0" -rustc_version = "0.1" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/build.rs b/rpc/build.rs index 3806f6fe5..659bc35eb 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -14,10 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate rustc_version; - -use rustc_version::{version_meta, Channel}; - #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; @@ -46,7 +42,4 @@ mod inner { fn main() { inner::main(); - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 46baa8a83..0097cd47e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,13 +4,9 @@ name = "ethsync" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore . 
- -extern crate rustc_version; - -use rustc_version::{version_meta, Channel}; - -fn main() { - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } -} diff --git a/sync/src/chain.rs b/sync/src/chain.rs index da3908a1e..7789fb004 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -275,7 +275,7 @@ impl ChainSync { } - #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Rest sync. Clear all downloaded data but keep the queue fn reset(&mut self) { self.downloading_headers.clear(); @@ -343,7 +343,7 @@ impl ChainSync { Ok(()) } - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -470,7 +470,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..3ef4b4150 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +#![cfg_attr(feature="dev", allow(clone_on_copy))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index 826a67121..664d7c7a3 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -42,7 +42,7 @@ pub trait RangeCollection { fn remove_head(&mut self, start: &K); /// Remove all elements >= `start` in the range that contains `start` fn remove_tail(&mut self, start: &K); - /// Remove all elements >= `start` + /// Remove all elements >= `start` fn remove_from(&mut self, start: &K); /// Remove all elements >= `tail` fn insert_item(&mut self, key: K, value: V); @@ -231,7 +231,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } #[test] -#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] +#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn test_range() { use std::cmp::{Ordering}; diff --git a/util/Cargo.toml b/util/Cargo.toml index 0ce27ec2b..74e4d7226 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -40,8 +40,7 @@ chrono = "0.2" [features] default = [] -dev = [] +dev = ["clippy"] [build-dependencies] vergen = "*" -rustc_version = "0.1" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 959df0944..32abdb5d5 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -1103,7 +1103,7 @@ macro_rules! 
construct_uint { } } - #[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. + #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok. impl Hash for $name { fn hash(&self, state: &mut H) where H: Hasher { unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } @@ -1485,7 +1485,7 @@ mod tests { } #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] + #[cfg_attr(feature="dev", allow(eq_op))] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -2032,7 +2032,7 @@ mod tests { #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index 0b9b233e0..b0b64a380 100644 --- a/util/build.rs +++ b/util/build.rs @@ -14,15 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -extern crate rustc_version; extern crate vergen; use vergen::*; -use rustc_version::{version_meta, Channel}; fn main() { vergen(OutputFns::all()).unwrap(); - if let Channel::Nightly = version_meta().channel { - println!("cargo:rustc-cfg=nightly"); - } } diff --git a/util/src/hash.rs b/util/src/hash.rs index 4eb96b53e..73fa33b47 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -305,7 +305,7 @@ macro_rules! impl_hash { } impl Copy for $from {} - #[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] + #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))] impl Clone for $from { fn clone(&self) -> $from { unsafe { @@ -637,7 +637,7 @@ mod tests { use std::str::FromStr; #[test] - #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] + #[cfg_attr(feature="dev", allow(eq_op))] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/lib.rs b/util/src/lib.rs index 59d66a325..a50ba8da4 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] -#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] +#![cfg_attr(feature="dev", feature(plugin))] +#![cfg_attr(feature="dev", plugin(clippy))] // Clippy settings // TODO [todr] not really sure -#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] +#![cfg_attr(feature="dev", allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] +#![cfg_attr(feature="dev", allow(match_bool))] // We use that to be more explicit about handled cases -#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] +#![cfg_attr(feature="dev", allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] +#![cfg_attr(feature="dev", allow(clone_on_copy))] //! Ethcore-util library //! 
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 644af22af..4f3384894 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -243,7 +243,7 @@ impl Discovery { self.send_to(packet, address.clone()); } - #[cfg_attr(all(nightly, feature="dev"), allow(map_clone))] + #[cfg_attr(feature="dev", allow(map_clone))] fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec { let mut found: BTreeMap> = BTreeMap::new(); let mut count = 0; diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 2d1af55ba..ece24a1d1 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -507,7 +507,7 @@ impl Host where Message: Send + Sync + Clone { debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count()); } - #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] + #[cfg_attr(feature="dev", allow(single_match))] fn connect_peer(&self, id: &NodeId, io: &IoContext>) { if self.have_session(id) { @@ -542,7 +542,7 @@ impl Host where Message: Send + Sync + Clone { self.create_connection(socket, Some(id), io); } - #[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))] + #[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))] fn create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext>) { let nonce = self.info.write().unwrap().next_nonce(); let mut handshakes = self.handshakes.write().unwrap(); diff --git a/util/src/panics.rs b/util/src/panics.rs index 70ce0bc33..05d266b8b 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -71,7 +71,7 @@ impl PanicHandler { /// Invoke closure and catch any possible panics. /// In case of panic notifies all listeners about it. - #[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] + #[cfg_attr(feature="dev", allow(deprecated))] pub fn catch_panic(&self, g: G) -> thread::Result where G: FnOnce() -> R + Send + 'static { let _guard = PanicGuard { handler: self }; let result = g(); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 182b87063..06076d273 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -54,7 +54,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] +#[cfg_attr(feature="dev", allow(wrong_self_convention))] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 3d5c366e5..3d75fa3e1 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -66,7 +66,7 @@ enum MaybeChanged<'a> { Changed(Bytes), } -#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] +#[cfg_attr(feature="dev", allow(wrong_self_convention))] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. @@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> { } } - #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node. 
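With the build-script channel detection gone, the manifests carry the whole story. Enabling the lints end to end is now just the command the regenerated hook emits:

cargo build --release --features dev

and each crate forwards the feature downward, as quoted from the hunks above:

[features]   # rpc/Cargo.toml
dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]

[features]   # util/Cargo.toml
dev = ["clippy"]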
From eb8e92c37f7ffca670c2c9790f3a585141ab6c40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 11:18:38 +0100 Subject: [PATCH 101/222] Cargo.lock --- Cargo.lock | 3 --- 1 file changed, 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f583a8747..627fbfa69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -219,7 +219,6 @@ dependencies = [ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -243,7 +242,6 @@ dependencies = [ "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -299,7 +297,6 @@ dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] From 55a14b3aaf31e8b1f7b14d4eb75032a1fa815561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 11:40:12 +0100 Subject: [PATCH 102/222] Fixing transaction queue test --- Cargo.lock | 1 + miner/src/lib.rs | 1 + miner/src/transaction_queue.rs | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3b49a974c..8b24df187 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -299,6 +299,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 0cee4ef43..20b5dd7d3 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -62,5 +62,6 @@ extern crate rayon; mod miner; mod transaction_queue; +pub use transaction_queue::TransactionQueue; pub use miner::{Miner, MinerService}; diff --git a/miner/src/transaction_queue.rs b/miner/src/transaction_queue.rs index f64bd7318..3d5c38b0c 100644 --- a/miner/src/transaction_queue.rs +++ b/miner/src/transaction_queue.rs @@ -28,13 +28,13 @@ //! ```rust //! extern crate ethcore_util as util; //! extern crate ethcore; -//! extern crate ethsync; +//! extern crate ethminer; //! extern crate rustc_serialize; //! //! use util::crypto::KeyPair; //! use util::hash::Address; //! use util::numbers::{Uint, U256}; -//! use ethsync::TransactionQueue; +//! use ethminer::TransactionQueue; //! use ethcore::transaction::*; //! use rustc_serialize::hex::FromHex; //! 
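The doctest fix above is the visible edge of the queue's move from ethsync into the new ethminer crate: the `pub use` added to miner/src/lib.rs is what lets the short `ethminer::TransactionQueue` path resolve at all. A compile-level sketch of what the re-export buys; the `new()` constructor here is assumed for illustration, since the hunk only shows the imports:

extern crate ethminer;

// Without `pub use transaction_queue::TransactionQueue;` in miner's
// lib.rs, this import would have to reach into a private module and fail.
use ethminer::TransactionQueue;

fn main() {
    let txq = TransactionQueue::new(); // assumed constructor, see above
    let _ = txq;
}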
From 68a13973a4c9e6151c77465d7798b68ed3fec75f Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 11:42:24 +0100 Subject: [PATCH 103/222] fixed ethcore-rpc tests build after merge --- rpc/src/v1/tests/helpers/sync_provider.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index b6d9241dd..a3711d949 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -40,6 +40,7 @@ impl TestSyncProvider { num_peers: config.num_peers, num_active_peers: 0, mem_used: 0, + transaction_queue_pending: 0, }, } } From 8f54c24e47032a52bdbc9bd48cb2f16c7c9aa888 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 11:52:11 +0100 Subject: [PATCH 104/222] Merged changes from jdb_option1, keep LATEST_ERA from decreasing --- ethcore/src/account_db.rs | 9 +- util/src/journaldb.rs | 347 +++++++++++++++++++++++++++++++++++--- util/src/memorydb.rs | 1 + 3 files changed, 323 insertions(+), 34 deletions(-) diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index e7f1b2bad..026e813f5 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -13,17 +13,14 @@ pub struct AccountDB<'db> { #[inline] fn combine_key<'a>(address: &'a H256, key: &'a H256) -> H256 { - let mut addr_hash = address.sha3(); - // preserve 96 bits of original key for db lookup - addr_hash[0..12].clone_from_slice(&[0u8; 12]); - &addr_hash ^ key + address ^ key } impl<'db> AccountDB<'db> { pub fn new(db: &'db HashDB, address: &Address) -> AccountDB<'db> { AccountDB { db: db, - address: x!(address.clone()), + address: x!(address), } } } @@ -70,7 +67,7 @@ impl<'db> AccountDBMut<'db> { pub fn new(db: &'db mut HashDB, address: &Address) -> AccountDBMut<'db> { AccountDBMut { db: db, - address: x!(address.clone()), + address: x!(address), } } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 4f2cdeb31..8bff08a77 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -64,11 +64,14 @@ pub struct JournalDB { journal_overlay: Option>>, } +#[derive(PartialEq)] struct JournalOverlay { backing_overlay: MemoryDB, - journal: HashMap> + journal: HashMap>, + latest_era: u64, } +#[derive(PartialEq)] struct JournalEntry { id: H256, insertions: Vec, @@ -220,7 +223,10 @@ impl JournalDB { k.append(&index); k.append(&&PADDING[..]); try!(batch.put(&k.drain(), r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + if now >= journal_overlay.latest_era { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + journal_overlay.latest_era = now; + } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); } @@ -243,7 +249,7 @@ impl JournalDB { { if canon_id == journal.id { for h in &journal.insertions { - if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { + if let Some(&(ref d, rc)) = journal_overlay.backing_overlay.raw(h) { if rc > 0 { canon_insertions.push((h.clone(), d.clone())); //TODO: optimize this to avoid data copy } @@ -253,17 +259,17 @@ impl JournalDB { } overlay_deletions.append(&mut journal.insertions); } - index +=1; + index += 1; } // apply canon inserts first for (k, v) in canon_insertions { try!(batch.put(&k, &v)); } - // clean the overlay + // update the overlay for k in overlay_deletions { journal_overlay.backing_overlay.kill(&k); } - // apply removes + // apply canon deletions for k in canon_deletions { if 
!journal_overlay.backing_overlay.exists(&k) { try!(batch.delete(&k)); @@ -277,6 +283,13 @@ impl JournalDB { Ok(()) } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_overlay(&self.backing); + let journal_overlay = self.journal_overlay.as_ref().unwrap().read().unwrap(); + *journal_overlay == reconstructed + } + fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } @@ -285,8 +298,10 @@ impl JournalDB { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; + let mut latest_era = 0; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + latest_era = decode::(&val); + let mut era = latest_era; loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -296,7 +311,7 @@ impl JournalDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); + trace!("read_overlay: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let id: H256 = rlp.val_at(0); let insertions = rlp.at(1); @@ -323,7 +338,7 @@ impl JournalDB { } } trace!("Recovered {} overlay entries, {} journal entries", count, journal.len()); - JournalOverlay { backing_overlay: overlay, journal: journal } + JournalOverlay { backing_overlay: overlay, journal: journal, latest_era: latest_era } } /// Returns heap memory size used @@ -396,6 +411,7 @@ mod tests { use common::*; use super::*; use hashdb::*; + use log::init_log; #[test] fn insert_same_in_fork() { @@ -404,17 +420,25 @@ mod tests { let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&x)); } @@ -425,15 +449,20 @@ mod tests { let mut jdb = JournalDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.remove(&h); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&h)); } @@ -445,6 +474,7 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); @@ -452,6 +482,7 @@ mod tests { 
jdb.remove(&bar); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); @@ -459,17 +490,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); @@ -483,21 +517,25 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); @@ -510,35 +548,113 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.exists(&foo)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = JournalDB::new_temp(); + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, 
&b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } #[test] fn reopen() { @@ -552,6 +668,7 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); foo }; @@ -559,6 +676,7 @@ mod tests { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); } { @@ -566,41 +684,210 @@ mod tests { assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } #[test] - fn reopen_remove() { + fn insert_delete_insert_delete_insert_expunge() { + init_log(); let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // foo is ancient history. 
+ // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } - jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
+ + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. 
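// (All of these history-window tests exercise the same contract: roughly,
// commit(now, id, end) journals the writes of block `id` at era `now`, and
// `end = Some((era, hash))` names the block that has just fallen out of
// history -- its journalled insertions are flushed to the backing store and
// its deletions finally applied, which is why a key removed before the
// horizon only stops existing once the horizon moves past that removal.)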
+ jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -611,18 +898,22 @@ mod tests { let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 9cd018935..dd8de7fd8 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -69,6 +69,7 @@ use std::collections::HashMap; /// assert!(!m.exists(&k)); /// } /// ``` +#[derive(PartialEq)] pub struct MemoryDB { data: HashMap, static_null_rlp: (Bytes, i32), From dd2fb4df67307c4ce8e632d7ab009937422888af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 12:31:42 +0100 Subject: [PATCH 105/222] Storing BlockNumber & transactions directly in enum --- rpc/src/v1/helpers/poll_filter.rs | 9 +- rpc/src/v1/helpers/poll_manager.rs | 183 ++++++----------------------- rpc/src/v1/impls/eth.rs | 45 ++++--- 3 files changed, 59 insertions(+), 178 deletions(-) diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 465290270..f9ed6230c 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -1,10 +1,13 @@ //! Helper type with all filter possibilities. +use util::hash::H256; use ethcore::filter::Filter; +pub type BlockNumber = u64; + #[derive(Clone)] pub enum PollFilter { - Block, - PendingTransaction, - Logs(Filter) + Block(BlockNumber), + PendingTransaction(Vec), + Logs(BlockNumber, Filter) } diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 765410567..9735d7d5d 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -16,36 +16,18 @@ //! Indexes all rpc poll requests. 
-use util::hash::H256; -use std::collections::HashMap; use transient_hashmap::{TransientHashMap, Timer, StandardTimer}; /// Lifetime of poll (in seconds). const POLL_LIFETIME: u64 = 60; pub type PollId = usize; -pub type BlockNumber = u64; - -pub struct PollInfo { - pub filter: F, - pub block_number: BlockNumber -} - -impl Clone for PollInfo where F: Clone { - fn clone(&self) -> Self { - PollInfo { - filter: self.filter.clone(), - block_number: self.block_number.clone() - } - } -} /// Indexes all poll requests. /// /// Lazily garbage collects unused polls info. pub struct PollManager where T: Timer { - polls: TransientHashMap, T>, - transactions_data: HashMap>, + polls: TransientHashMap, next_available_id: PollId, } @@ -57,188 +39,89 @@ impl PollManager { } impl PollManager where T: Timer { + pub fn new_with_timer(timer: T) -> Self { PollManager { polls: TransientHashMap::new_with_timer(POLL_LIFETIME, timer), - transactions_data: HashMap::new(), next_available_id: 0, } } - fn prune(&mut self) { - self.polls.prune(); - // self.polls.prune() - // .into_iter() - // .map(|key| { - // self.transactions_data.remove(key); - // }); - } - /// Returns id which can be used for new poll. /// /// Stores information when last poll happend. - pub fn create_poll(&mut self, filter: F, block: BlockNumber) -> PollId { - self.prune(); + pub fn create_poll(&mut self, filter: F) -> PollId { + self.polls.prune(); + let id = self.next_available_id; + self.polls.insert(id, filter); + self.next_available_id += 1; - self.polls.insert(id, PollInfo { - filter: filter, - block_number: block, - }); id } - /// Updates information when last poll happend. - pub fn update_poll(&mut self, id: &PollId, block: BlockNumber) { - self.prune(); - if let Some(info) = self.polls.get_mut(id) { - info.block_number = block; - } - } - - /// Returns number of block when last poll happend. - pub fn poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { - self.prune(); + // Implementation is always using `poll_mut` + #[cfg(test)] + /// Get a reference to stored poll filter + pub fn poll(&mut self, id: &PollId) -> Option<&F> { + self.polls.prune(); self.polls.get(id) } - pub fn update_transactions(&mut self, id: &PollId, transactions: Vec) -> Option> { - self.prune(); - if self.polls.get(id).is_some() { - self.transactions_data.insert(*id, transactions) - } else { - None - } - } - - // Normal code always replaces transactions - #[cfg(test)] - /// Returns last transactions hashes for given poll. - pub fn transactions(&mut self, id: &PollId) -> Option<&Vec> { - self.prune(); - self.transactions_data.get(id) + /// Get a mutable reference to stored poll filter + pub fn poll_mut(&mut self, id: &PollId) -> Option<&mut F> { + self.polls.prune(); + self.polls.get_mut(id) } /// Removes poll info. 
pub fn remove_poll(&mut self, id: &PollId) { self.polls.remove(id); - self.transactions_data.remove(id); } } #[cfg(test)] mod tests { - use std::cell::RefCell; + use std::cell::Cell; use transient_hashmap::Timer; use v1::helpers::PollManager; - use util::hash::H256; struct TestTimer<'a> { - time: &'a RefCell, + time: &'a Cell, } impl<'a> Timer for TestTimer<'a> { fn get_time(&self) -> i64 { - *self.time.borrow() + self.time.get() } } #[test] fn test_poll_indexer() { - let time = RefCell::new(0); + let time = Cell::new(0); let timer = TestTimer { time: &time, }; let mut indexer = PollManager::new_with_timer(timer); - assert_eq!(indexer.create_poll(false, 20), 0); - assert_eq!(indexer.create_poll(true, 20), 1); + assert_eq!(indexer.create_poll(20), 0); + assert_eq!(indexer.create_poll(20), 1); - *time.borrow_mut() = 10; - indexer.update_poll(&0, 21); - assert_eq!(indexer.poll_info(&0).unwrap().filter, false); - assert_eq!(indexer.poll_info(&0).unwrap().block_number, 21); + time.set(10); + *indexer.poll_mut(&0).unwrap() = 21; + assert_eq!(*indexer.poll(&0).unwrap(), 21); + assert_eq!(*indexer.poll(&1).unwrap(), 20); - *time.borrow_mut() = 30; - indexer.update_poll(&1, 23); - assert_eq!(indexer.poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); + time.set(30); + *indexer.poll_mut(&1).unwrap() = 23; + assert_eq!(*indexer.poll(&1).unwrap(), 23); - *time.borrow_mut() = 75; - indexer.update_poll(&0, 30); - assert!(indexer.poll_info(&0).is_none()); - assert_eq!(indexer.poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); + time.set(75); + assert!(indexer.poll(&0).is_none()); + assert_eq!(*indexer.poll(&1).unwrap(), 23); indexer.remove_poll(&1); - assert!(indexer.poll_info(&1).is_none()); + assert!(indexer.poll(&1).is_none()); } - #[test] - fn should_return_poll_transactions_hashes() { - // given - let mut indexer = PollManager::new(); - let poll_id = indexer.create_poll(false, 20); - assert!(indexer.transactions(&poll_id).is_none()); - let transactions = vec![H256::from(1), H256::from(2)]; - - // when - indexer.update_transactions(&poll_id, transactions.clone()); - - // then - let txs = indexer.transactions(&poll_id); - assert_eq!(txs.unwrap(), &transactions); - } - - - #[test] - fn should_remove_transaction_data_when_poll_timed_out() { - // given - let time = RefCell::new(0); - let timer = TestTimer { - time: &time, - }; - let mut indexer = PollManager::new_with_timer(timer); - let poll_id = indexer.create_poll(false, 20); - let transactions = vec![H256::from(1), H256::from(2)]; - indexer.update_transactions(&poll_id, transactions.clone()); - assert!(indexer.transactions(&poll_id).is_some()); - - // when - *time.borrow_mut() = 75; - indexer.prune(); - - // then - assert!(indexer.transactions(&poll_id).is_none()); - - } - - #[test] - fn should_remove_transaction_data_when_poll_is_removed() { - // given - let mut indexer = PollManager::new(); - let poll_id = indexer.create_poll(false, 20); - let transactions = vec![H256::from(1), H256::from(2)]; - - // when - indexer.update_transactions(&poll_id, transactions.clone()); - assert!(indexer.transactions(&poll_id).is_some()); - indexer.remove_poll(&poll_id); - - // then - assert!(indexer.transactions(&poll_id).is_none()); - } - - #[test] - fn should_ignore_transactions_for_invalid_poll_id() { - // given - let mut indexer = PollManager::<()>::new(); - let transactions = vec![H256::from(1), H256::from(2)]; - - // when - indexer.update_transactions(&5, 
transactions.clone()); - - // then - assert!(indexer.transactions(&5).is_none()); - } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 211c46304..9f81caa90 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -301,7 +301,8 @@ impl EthFilter for EthFilterClient from_params::<(Filter,)>(params) .and_then(|(filter,)| { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Logs(filter.into()), take_weak!(self.client).chain_info().best_block_number); + let block_number = take_weak!(self.client).chain_info().best_block_number; + let id = polls.create_poll(PollFilter::Logs(block_number, filter.into())); to_value(&U256::from(id)) }) } @@ -310,7 +311,7 @@ impl EthFilter for EthFilterClient match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let id = polls.create_poll(PollFilter::Block, take_weak!(self.client).chain_info().best_block_number); + let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number)); to_value(&U256::from(id)) }, _ => Err(Error::invalid_params()) @@ -321,11 +322,8 @@ impl EthFilter for EthFilterClient match params { Params::None => { let mut polls = self.polls.lock().unwrap(); - let best_block_number = take_weak!(self.client).chain_info().best_block_number; let pending_transactions = take_weak!(self.miner).pending_transactions_hashes(); - - let id = polls.create_poll(PollFilter::PendingTransaction, best_block_number); - polls.update_transactions(&id, pending_transactions); + let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions)); to_value(&U256::from(id)) }, @@ -337,50 +335,47 @@ impl EthFilter for EthFilterClient let client = take_weak!(self.client); from_params::<(Index,)>(params) .and_then(|(index,)| { - let info = self.polls.lock().unwrap().poll_info(&index.value()).cloned(); - match info { + let mut polls = self.polls.lock().unwrap(); + match polls.poll_mut(&index.value()) { None => Ok(Value::Array(vec![] as Vec)), - Some(info) => match info.filter { - PollFilter::Block => { + Some(filter) => match *filter { + PollFilter::Block(ref mut block_number) => { // + 1, cause we want to return hashes including current block hash. 
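// (the cursor is then advanced to this same value below via
// `*block_number = current_number;`, so the next poll resumes just past
// the current tip and no block hash is ever reported twice)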
let current_number = client.chain_info().best_block_number + 1; - let hashes = (info.block_number..current_number).into_iter() + let hashes = (*block_number..current_number).into_iter() .map(BlockId::Number) .filter_map(|id| client.block_hash(id)) .collect::>(); - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&hashes) }, - PollFilter::PendingTransaction => { - let poll_id = index.value(); - let mut polls = self.polls.lock().unwrap(); - + PollFilter::PendingTransaction(ref mut previous_hashes) => { let current_hashes = take_weak!(self.miner).pending_transactions_hashes(); - let previous_hashes = polls.update_transactions(&poll_id, current_hashes.clone()).unwrap(); - polls.update_poll(&poll_id, client.chain_info().best_block_number); - // calculate diff - let previous_hashes_set = previous_hashes.into_iter().collect::>(); + let previous_hashes_set = previous_hashes.into_iter().map(|h| h.clone()).collect::>(); let diff = current_hashes - .into_iter() + .iter() .filter(|hash| previous_hashes_set.contains(&hash)) + .cloned() .collect::>(); + *previous_hashes = current_hashes; + to_value(&diff) }, - PollFilter::Logs(mut filter) => { - filter.from_block = BlockId::Number(info.block_number); + PollFilter::Logs(ref mut block_number, ref mut filter) => { + filter.from_block = BlockId::Number(*block_number); filter.to_block = BlockId::Latest; - let logs = client.logs(filter) + let logs = client.logs(filter.clone()) .into_iter() .map(From::from) .collect::>(); let current_number = client.chain_info().best_block_number; - self.polls.lock().unwrap().update_poll(&index.value(), current_number); + *block_number = current_number; to_value(&logs) } } From 190630cc6bd3ed4d076f6b6490a431ee4b5180ec Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 12:31:45 +0100 Subject: [PATCH 106/222] separated transaction_request to its own submodule, added basic tests for it --- rpc/src/v1/impls/eth.rs | 3 +- rpc/src/v1/types/mod.rs.in | 3 +- rpc/src/v1/types/transaction.rs | 32 +------- rpc/src/v1/types/transaction_request.rs | 101 ++++++++++++++++++++++++ 4 files changed, 106 insertions(+), 33 deletions(-) create mode 100644 rpc/src/v1/types/transaction_request.rs diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 102d0da61..38e363624 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -27,6 +27,7 @@ use ethcore::block::{IsBlock}; use ethcore::views::*; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; +use ethcore::transaction::Transaction as EthTransaction; use v1::traits::{Eth, EthFilter}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; use v1::helpers::{PollFilter, PollManager}; @@ -274,7 +275,7 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: match accounts.account_secret(&transaction_request.from) { Ok(secret) => { let sync = take_weak!(self.sync); - let (transaction, _) = transaction_request.to_eth(); + let transaction: EthTransaction = transaction_request.into(); let signed_transaction = transaction.sign(&secret); let hash = signed_transaction.hash(); sync.insert_transaction(signed_transaction); diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index 2b2390ecb..ebc3bc0ff 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -23,6 +23,7 @@ mod log; mod optionals; mod sync; mod transaction; +mod 
transaction_request; pub use self::block::{Block, BlockTransactions}; pub use self::block_number::BlockNumber; @@ -33,5 +34,5 @@ pub use self::log::Log; pub use self::optionals::OptionalValue; pub use self::sync::{SyncStatus, SyncInfo}; pub use self::transaction::Transaction; -pub use self::transaction::TransactionRequest; +pub use self::transaction_request::TransactionRequest; diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 17b42cfcf..0518a58ea 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -17,8 +17,7 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; -use serde::{Deserializer, Error}; -use ethcore; +use serde::Error; #[derive(Debug, Default, Serialize)] pub struct Transaction { @@ -39,35 +38,6 @@ pub struct Transaction { pub input: Bytes } -#[derive(Debug, Default, Serialize, Deserialize)] -pub struct TransactionRequest { - pub from: Address, - pub to: Option
, - #[serde(rename="gasPrice")] - pub gas_price: Option, - pub gas: Option, - pub value: Option, - pub data: Bytes, - pub nonce: Option, -} - -impl TransactionRequest { - /// maps transaction request to the transaction that can be signed and inserted - pub fn to_eth(self) -> (ethcore::transaction::Transaction, Address) { - (ethcore::transaction::Transaction { - nonce: self.nonce.unwrap_or(U256::zero()), - action: match self.to { - None => ethcore::transaction::Action::Create, - Some(addr) => ethcore::transaction::Action::Call(addr) - }, - gas: self.gas.unwrap_or(U256::zero()), - gas_price: self.gas_price.unwrap_or(U256::zero()), - value: self.value.unwrap_or(U256::zero()), - data: self.data.to_vec() - }, self.from) - } -} - impl From for Transaction { fn from(t: LocalizedTransaction) -> Transaction { Transaction { diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs new file mode 100644 index 000000000..a61b11c25 --- /dev/null +++ b/rpc/src/v1/types/transaction_request.rs @@ -0,0 +1,101 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::hash::Address; +use util::numbers::{Uint, U256}; +use ethcore::transaction::{Action, Transaction}; +use v1::types::Bytes; + +#[derive(Debug, Default, Deserialize)] +pub struct TransactionRequest { + pub from: Address, + pub to: Option
, + #[serde(rename="gasPrice")] + pub gas_price: Option, + pub gas: Option, + pub value: Option, + pub data: Bytes, + pub nonce: Option, +} + +impl Into for TransactionRequest { + fn into(self) -> Transaction { + Transaction { + nonce: self.nonce.unwrap_or(U256::zero()), + action: match self.to { + None => Action::Create, + Some(addr) => Action::Call(addr) + }, + gas: self.gas.unwrap_or(U256::zero()), + gas_price: self.gas_price.unwrap_or(U256::zero()), + value: self.value.unwrap_or(U256::zero()), + data: self.data.to_vec() + } + } +} + +#[cfg(test)] +mod tests { + use util::numbers::{Uint, U256}; + use util::hash::Address; + use ethcore::transaction::{Transaction, Action}; + use v1::types::Bytes; + use super::*; + + #[test] + fn transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: Some(Address::from(10)), + gas_price: Some(U256::from(20)), + gas: Some(U256::from(10_000)), + value: Some(U256::from(1)), + data: Bytes::new(vec![10, 20]), + nonce: Some(U256::from(12)), + }; + + assert_eq!(Transaction { + nonce: U256::from(12), + action: Action::Call(Address::from(10)), + gas: U256::from(10_000), + gas_price: U256::from(20), + value: U256::from(1), + data: vec![10, 20], + }, tr.into()); + } + + #[test] + fn empty_transaction_request_into_transaction() { + let tr = TransactionRequest { + from: Address::default(), + to: None, + gas_price: None, + gas: None, + value: None, + data: Bytes::new(vec![]), + nonce: None, + }; + + assert_eq!(Transaction { + nonce: U256::zero(), + action: Action::Create, + gas: U256::zero(), + gas_price: U256::zero(), + value: U256::zero(), + data: vec![], + }, tr.into()); + } +} From ed0047725cbe4a027bd6c75bf592728cf1292efb Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 15:49:49 +0400 Subject: [PATCH 107/222] adding cli extension --- parity/main.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index b6ed5cba3..5f6d50027 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -101,7 +101,7 @@ API and Console Options: --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible name are web3, eth and net. [default: web3,eth,net]. + list of API name. Possible name are web3, eth and net. [default: web3,eth,net,personal]. --rpc Equivalent to --jsonrpc (geth-compatible). --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). @@ -208,6 +208,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, secret_store: Arc server.add_delegate(PersonalClient::new(&secret_store).to_delegate()), _ => { die!("{}: Invalid API name to be enabled.", api); } From 70ee6aa94219bac3cc22dbc9dac037b76e535c15 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 15:50:13 +0400 Subject: [PATCH 108/222] refactoring to use generic account provider as web3 svc --- rpc/src/v1/impls/personal.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 7b79ceae7..ce200244c 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -22,20 +22,20 @@ use util::keys::store::*; use util::Address; /// Account management (personal) rpc implementation. 
-pub struct PersonalClient {
-	accounts: Weak<AccountService>,
+pub struct PersonalClient<A> where A: AccountProvider {
+	accounts: Weak<A>,
 }

-impl PersonalClient {
+impl<A> PersonalClient<A> where A: AccountProvider {
 	/// Creates new PersonalClient
-	pub fn new(store: &Arc<AccountService>) -> Self {
+	pub fn new(store: &Arc<A>) -> Self {
 		PersonalClient {
 			accounts: Arc::downgrade(store),
 		}
 	}
 }

-impl Personal for PersonalClient {
+impl<A> Personal for PersonalClient<A> where A: AccountProvider + 'static {
 	fn accounts(&self, _: Params) -> Result<Value, Error> {
 		let store = take_weak!(self.accounts);
 		match store.accounts() {

From 756f96413045e63a3dccf801f992c46ed22f7dfb Mon Sep 17 00:00:00 2001
From: Gav Wood
Date: Fri, 11 Mar 2016 12:54:48 +0100
Subject: [PATCH 109/222] JournalDB -> Box<JournalDB>, and it's a trait.

---
 ethcore/src/block.rs         |  16 ++---
 ethcore/src/client.rs        |  12 ++--
 ethcore/src/state.rs         |  26 +++----
 ethcore/src/tests/helpers.rs |   8 +--
 util/src/hashdb.rs           |  15 ++++-
 util/src/journaldb.rs        | 127 +++++++++++++++++++----------------
 util/src/overlaydb.rs        |   1 +
 7 files changed, 116 insertions(+), 89 deletions(-)

diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index b3894db94..ea9e91781 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -171,7 +171,7 @@ pub struct SealedBlock {

 impl<'x> OpenBlock<'x> {
 	/// Create a new OpenBlock ready for transaction pushing.
-	pub fn new(engine: &'x Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
+	pub fn new(engine: &'x Engine, db: Box<JournalDB>, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self {
 		let mut r = OpenBlock {
 			block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())),
 			engine: engine,
@@ -317,7 +317,7 @@ impl ClosedBlock {
 	}

 	/// Drop this object and return the underlying database.
-	pub fn drain(self) -> JournalDB { self.block.state.drop().1 }
+	pub fn drain(self) -> Box<JournalDB> { self.block.state.drop().1 }
 }

 impl SealedBlock {
@@ -331,7 +331,7 @@ impl SealedBlock {
 	}

 	/// Drop this object and return the underlying database.
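The personal.rs hunk above swaps a concrete store for any A: AccountProvider, held through a Weak pointer so the RPC handler never keeps the store alive on its own. A small sketch of the pattern under those assumptions (struct and method names here are illustrative, and a Vec<String> stands in for the real account list):

    use std::sync::{Arc, Weak};

    trait AccountProvider { fn accounts(&self) -> Vec<String>; }

    struct PersonalRpc<A> where A: AccountProvider {
        accounts: Weak<A>, // weak: the RPC object must not extend the store's lifetime
    }

    impl<A> PersonalRpc<A> where A: AccountProvider {
        fn new(store: &Arc<A>) -> Self {
            PersonalRpc { accounts: Arc::downgrade(store) }
        }

        fn list(&self) -> Option<Vec<String>> {
            // upgrade() returns None once the owning Arc is gone, which is the
            // failure case the take_weak! macro in the real code guards against.
            self.accounts.upgrade().map(|s| s.accounts())
        }
    }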
- pub fn drain(self) -> JournalDB { self.block.state.drop().1 } + pub fn drain(self) -> Box { self.block.state.drop().1 } } impl IsBlock for SealedBlock { @@ -339,10 +339,10 @@ impl IsBlock for SealedBlock { } /// Enact the block given by block header, transactions and uncles -pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { { if ::log::max_log_level() >= ::log::LogLevel::Trace { - let s = State::from_existing(db.clone(), parent.state_root().clone(), engine.account_start_nonce()); + let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce()); trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); } } @@ -357,20 +357,20 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let block = BlockView::new(block_bytes); let header = block.header(); enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let view = BlockView::new(&block.bytes); enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
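With the journal now moved into enact() as a boxed trait object, the trace-only copy above comes from spawn() rather than clone(): Clone is not object-safe, so a boxed trait needs an explicit method returning a fresh box. A minimal sketch of that clone-into-box idiom with illustrative names (the real spawn() shares the backing store but starts with an empty overlay):

    trait Journal {
        fn put(&mut self, value: &[u8]);
        // Clone cannot be required of a trait object, so "clone into a box"
        // is exposed as an ordinary method instead.
        fn spawn(&self) -> Box<Journal>;
    }

    struct MemJournal { data: Vec<Vec<u8>> }

    impl Journal for MemJournal {
        fn put(&mut self, value: &[u8]) { self.data.push(value.to_vec()); }
        // Fresh, empty copy; in this toy version nothing is shared.
        fn spawn(&self) -> Box<Journal> { Box::new(MemJournal { data: Vec::new() }) }
    }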
Seal the block aferwards -pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: JournalDB, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let header = BlockView::new(block_bytes).header_view(); Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal()))) } diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index b342cef15..7ccf094d2 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -214,7 +214,7 @@ impl ClientReport { pub struct Client where V: Verifier { chain: Arc>, engine: Arc>, - state_db: Mutex, + state_db: Mutex>, block_queue: RwLock, report: RwLock, import_lock: Mutex<()>, @@ -253,8 +253,8 @@ impl Client where V: Verifier { state_path.push("state"); let engine = Arc::new(try!(spec.to_engine())); - let mut state_db = JournalDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal); - if state_db.is_empty() && engine.spec().ensure_db_good(&mut state_db) { + let mut state_db = Box::new(OptionOneDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal)); + if state_db.is_empty() && engine.spec().ensure_db_good(state_db.deref_mut()) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } @@ -336,7 +336,7 @@ impl Client where V: Verifier { // Enact Verified Block let parent = chain_has_parent.unwrap(); let last_hashes = self.build_last_hashes(header.parent_hash.clone()); - let db = self.state_db.lock().unwrap().clone(); + let db = self.state_db.lock().unwrap().spawn(); let enact_result = enact_verified(&block, engine, db, &parent, last_hashes); if let Err(e) = enact_result { @@ -438,7 +438,7 @@ impl Client where V: Verifier { /// Get a copy of the best block's state. pub fn state(&self) -> State { - State::from_existing(self.state_db.lock().unwrap().clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) + State::from_existing(self.state_db.lock().unwrap().spawn(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce()) } /// Get info on the cache. @@ -507,7 +507,7 @@ impl Client where V: Verifier { let h = self.chain.read().unwrap().best_block_hash(); let mut b = OpenBlock::new( self.engine.deref().deref(), - self.state_db.lock().unwrap().clone(), + self.state_db.lock().unwrap().spawn(), match self.chain.read().unwrap().block_header(&h) { Some(ref x) => x, None => {return;} }, self.build_last_hashes(h.clone()), self.author(), diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 7c1064abf..9a3d1791d 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -31,7 +31,7 @@ pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. 
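The Client::new hunk above seeds the genesis state only when the state database is brand new, then commits it as era 0 under the genesis header hash; a reopened database skips the write entirely. A sketch of that bootstrap guard, with a simplified trait and a u64 id standing in for the boxed JournalDB and H256:

    trait StateDb {
        fn is_empty(&self) -> bool;
        fn insert_genesis(&mut self);
        fn commit(&mut self, era: u64, id: u64) -> Result<u32, String>;
    }

    // Runs exactly once per fresh database.
    fn ensure_genesis<D: StateDb>(db: &mut D, genesis_id: u64) {
        if db.is_empty() {
            db.insert_genesis();
            db.commit(0, genesis_id).expect("Error committing genesis state to state DB");
        }
    }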
pub struct State { - db: JournalDB, + db: Box, root: H256, cache: RefCell>>, snapshots: RefCell>>>>, @@ -41,11 +41,11 @@ pub struct State { impl State { /// Creates new state with empty state root #[cfg(test)] - pub fn new(mut db: JournalDB, account_start_nonce: U256) -> State { + pub fn new(mut db: Box, account_start_nonce: U256) -> State { let mut root = H256::new(); { // init trie and reset root too null - let _ = SecTrieDBMut::new(&mut db, &mut root); + let _ = SecTrieDBMut::new(db.deref_mut(), &mut root); } State { @@ -58,10 +58,10 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: JournalDB, root: H256, account_start_nonce: U256) -> State { + pub fn from_existing(db: Box, root: H256, account_start_nonce: U256) -> State { { // trie should panic! if root does not exist - let _ = SecTrieDB::new(&db, &root); + let _ = SecTrieDB::new(db.as_hashdb(), &root); } State { @@ -126,7 +126,7 @@ impl State { } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, JournalDB) { + pub fn drop(self) -> (H256, Box) { (self.root, self.db) } @@ -148,7 +148,7 @@ impl State { /// Determine whether an account exists. pub fn exists(&self, a: &Address) -> bool { - self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(&self.db, &self.root).contains(&a) + self.cache.borrow().get(&a).unwrap_or(&None).is_some() || SecTrieDB::new(self.db.as_hashdb(), &self.root).contains(&a) } /// Get the balance of account `a`. @@ -163,7 +163,7 @@ impl State { /// Mutate storage of account `address` so that it is `value` for `key`. pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { - self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(&self.db, address), key)) + self.get(address, false).as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::new(self.db.as_hashdb(), address), key)) } /// Mutate storage of account `a` so that it is `value` for `key`. @@ -253,7 +253,7 @@ impl State { /// Commits our cached account changes into the trie. 
pub fn commit(&mut self) { assert!(self.snapshots.borrow().is_empty()); - Self::commit_into(&mut self.db, &mut self.root, self.cache.borrow_mut().deref_mut()); + Self::commit_into(self.db.as_hashdb_mut(), &mut self.root, self.cache.borrow_mut().deref_mut()); } #[cfg(test)] @@ -285,11 +285,11 @@ impl State { fn get<'a>(&'a self, a: &Address, require_code: bool) -> &'a Option { let have_key = self.cache.borrow().contains_key(a); if !have_key { - self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp)) } if require_code { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { - account.cache_code(&AccountDB::new(&self.db, a)); + account.cache_code(&AccountDB::new(self.db.as_hashdb(), a)); } } unsafe { ::std::mem::transmute(self.cache.borrow().get(a).unwrap()) } @@ -305,7 +305,7 @@ impl State { fn require_or_from<'a, F: FnOnce() -> Account, G: FnOnce(&mut Account)>(&self, a: &Address, require_code: bool, default: F, not_default: G) -> &'a mut Account { let have_key = self.cache.borrow().contains_key(a); if !have_key { - self.insert_cache(a, SecTrieDB::new(&self.db, &self.root).get(&a).map(Account::from_rlp)) + self.insert_cache(a, SecTrieDB::new(self.db.as_hashdb(), &self.root).get(&a).map(Account::from_rlp)) } else { self.note_cache(a); } @@ -318,7 +318,7 @@ impl State { unsafe { ::std::mem::transmute(self.cache.borrow_mut().get_mut(a).unwrap().as_mut().map(|account| { if require_code { - account.cache_code(&AccountDB::new(&self.db, a)); + account.cache_code(&AccountDB::new(self.db.as_hashdb(), a)); } account }).unwrap()) } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index bb9a44614..7b99f68d5 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -250,9 +250,9 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { } } -pub fn get_temp_journal_db() -> GuardedTempResult { +pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = JournalDB::new(temp.as_str()); + let journal_db = Box::new(OptionOneDB::new(temp.as_str())); GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -268,8 +268,8 @@ pub fn get_temp_state() -> GuardedTempResult { } } -pub fn get_temp_journal_db_in(path: &Path) -> JournalDB { - JournalDB::new(path.to_str().unwrap()) +pub fn get_temp_journal_db_in(path: &Path) -> Box { + Box::new(OptionOneDB::new(path.to_str().unwrap())) } pub fn get_temp_state_in(path: &Path) -> State { diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 4d8cbaba1..e622c4b99 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -20,7 +20,7 @@ use bytes::*; use std::collections::HashMap; /// Trait modelling datastore keyed by a 32-byte Keccak hash. -pub trait HashDB { +pub trait HashDB : AsHashDB { /// Get the keys in the database together with number of underlying references. fn keys(&self) -> HashMap; @@ -111,3 +111,16 @@ pub trait HashDB { /// ``` fn remove(&mut self, key: &H256) { self.kill(key) } } + +/// Upcast trait. +pub trait AsHashDB { + /// Perform upcast to HashDB for anything that derives from HashDB. + fn as_hashdb(&self) -> &HashDB; + /// Perform mutable upcast to HashDB for anything that derives from HashDB. 
+ fn as_hashdb_mut(&mut self) -> &mut HashDB; +} + +impl AsHashDB for T { + fn as_hashdb(&self) -> &HashDB { self } + fn as_hashdb_mut(&mut self) -> &mut HashDB { self } +} diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 35ad83fa0..23fd08011 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -24,6 +24,22 @@ use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; +/// A HashDB which can manage a short-term journal potentially containing many forks of mutually +/// exclusive actions. +pub trait JournalDB : HashDB { + /// Return a copy of ourself, in a box. + fn spawn(&self) -> Box; + + /// Returns heap memory size used + fn mem_used(&self) -> usize; + + /// Check if this database has any commits + fn is_empty(&self) -> bool; + + /// Commit all recent insert operations. + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; +} + /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// @@ -31,22 +47,12 @@ use std::env; /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. -pub struct JournalDB { +pub struct OptionOneDB { overlay: MemoryDB, backing: Arc, counters: Option>>>, } -impl Clone for JournalDB { - fn clone(&self) -> JournalDB { - JournalDB { - overlay: MemoryDB::new(), - backing: self.backing.clone(), - counters: self.counters.clone(), - } - } -} - // all keys must be at least 12 bytes const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; @@ -56,14 +62,14 @@ const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl JournalDB { +impl OptionOneDB { /// Create a new instance from file - pub fn new(path: &str) -> JournalDB { + pub fn new(path: &str) -> OptionOneDB { Self::from_prefs(path, true) } /// Create a new instance from file - pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { + pub fn from_prefs(path: &str, prefer_journal: bool) -> OptionOneDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -83,11 +89,11 @@ impl JournalDB { } let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) + Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))) } else { None }; - JournalDB { + OptionOneDB { overlay: MemoryDB::new(), backing: Arc::new(backing), counters: counters, @@ -96,27 +102,12 @@ impl JournalDB { /// Create a new instance with an anonymous temporary database. #[cfg(test)] - pub fn new_temp() -> JournalDB { + fn new_temp() -> OptionOneDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) } - /// Check if this database has any commits - pub fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() - } - - /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) - } else { - self.commit_without_counters() - } - } - /// Drain the overlay and place it into a batch for the DB. 
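The AsHashDB trait added to hashdb.rs above is the standard upcast workaround: even though HashDB is a supertrait, a &JournalDB trait object cannot be converted to &HashDB directly, so a blanket impl supplies the conversion for every implementor. The same trick in miniature, with Base/AsBase/Sub as illustrative names:

    trait Base { fn get(&self) -> u32; }

    // Upcast trait, mirroring AsHashDB.
    trait AsBase {
        fn as_base(&self) -> &Base;
        fn as_base_mut(&mut self) -> &mut Base;
    }

    // Blanket impl: anything implementing Base can hand out a &Base view of itself.
    impl<T: Base> AsBase for T {
        fn as_base(&self) -> &Base { self }
        fn as_base_mut(&mut self) -> &mut Base { self }
    }

    // A sub-trait that requires the upcast: any &Sub object can now yield a &Base.
    trait Sub: Base + AsBase { fn extra(&self) -> u32; }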
fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { let mut inserts = 0usize; @@ -339,11 +330,11 @@ impl JournalDB { try!(batch.delete(&last)); index += 1; } - trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } try!(self.backing.write(batch)); -// trace!("JournalDB::commit() deleted {} nodes", deletes); +// trace!("OptionOneDB::commit() deleted {} nodes", deletes); Ok(0) } @@ -379,17 +370,9 @@ impl JournalDB { trace!("Recovered {} counters", counters.len()); counters } +} - /// Returns heap memory size used - pub fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.counters { - Some(ref c) => c.read().unwrap().heap_size_of_children(), - None => 0 - } - } - } - -impl HashDB for JournalDB { +impl HashDB for OptionOneDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -434,6 +417,36 @@ impl HashDB for JournalDB { } } +impl JournalDB for OptionOneDB { + fn spawn(&self) -> Box { + Box::new(OptionOneDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + counters: self.counters.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + let have_counters = self.counters.is_some(); + if have_counters { + self.commit_with_counters(now, id, end) + } else { + self.commit_without_counters() + } + } +} + #[cfg(test)] mod tests { use common::*; @@ -443,7 +456,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -465,7 +478,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.exists(&h)); @@ -483,7 +496,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -521,7 +534,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -549,7 +562,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -568,7 +581,7 @@ mod tests { #[test] fn fork_same_key() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); jdb.commit(0, &b"0".sha3(), None).unwrap(); let foo = jdb.insert(b"foo"); @@ -590,7 +603,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -599,13 +612,13 @@ mod tests { }; { - let mut jdb = 
JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -619,7 +632,7 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -633,7 +646,7 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); assert!(jdb.exists(&foo)); @@ -648,7 +661,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -663,7 +676,7 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 3c80f4148..f14677d05 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -36,6 +36,7 @@ use kvdb::{Database}; /// /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. 
+#[derive(Clone)] pub struct OverlayDB { overlay: MemoryDB, backing: Arc, From 99c5794929e0c14b524f949fcc16a5ecbda88fee Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 16:00:30 +0400 Subject: [PATCH 110/222] fix warning for transaction_queue.add usage --- sync/src/chain.rs | 4 ++-- sync/src/lib.rs | 3 ++- sync/src/transaction_queue.rs | 4 ++-- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 866838bec..d65685dfe 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1306,11 +1306,11 @@ impl ChainSync { } /// Add transaction to the transaction queue - pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) + pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { let mut queue = self.transaction_queue.lock().unwrap(); - queue.add(transaction, fetch_nonce); + queue.add(transaction, fetch_nonce) } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 3b79e5614..a416c8829 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -146,7 +146,8 @@ impl SyncProvider for EthSync { let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); let sync = self.sync.write().unwrap(); - sync.insert_transaction(transaction, &nonce_fn); + sync.insert_transaction(transaction, &nonce_fn).unwrap_or_else( + |e| warn!(target: "sync", "Error inserting transaction to queue: {:?}", e)); } } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 243939a4c..618eb6a0b 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -684,8 +684,8 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); - txq.add(tx.clone(), &prev_nonce); - txq.add(tx2.clone(), &prev_nonce); + txq.add(tx.clone(), &prev_nonce).unwrap(); + txq.add(tx2.clone(), &prev_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when From d71c5d4c17f26484fb15511d155e02f7a27eab3b Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 13:19:10 +0100 Subject: [PATCH 111/222] Place Sync/Send in trait. --- ethcore/src/block.rs | 14 +++++++------- ethcore/src/client/client.rs | 2 +- ethcore/src/state.rs | 8 ++++---- ethcore/src/tests/helpers.rs | 4 ++-- util/src/journaldb.rs | 6 +++--- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index ea9e91781..ab5086273 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -171,7 +171,7 @@ pub struct SealedBlock { impl<'x> OpenBlock<'x> { /// Create a new OpenBlock ready for transaction pushing. - pub fn new(engine: &'x Engine, db: Box, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { + pub fn new(engine: &'x Engine, db: Box>, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { let mut r = OpenBlock { block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, @@ -317,7 +317,7 @@ impl ClosedBlock { } /// Drop this object and return the underlieing database. - pub fn drain(self) -> Box { self.block.state.drop().1 } + pub fn drain(self) -> Box> { self.block.state.drop().1 } } impl SealedBlock { @@ -331,7 +331,7 @@ impl SealedBlock { } /// Drop this object and return the underlieing database. 
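Patch 110 above stops discarding the queue's Result: insert_transaction now propagates it, the tests unwrap it, and the single call site in lib.rs downgrades a failure to a warning. A sketch of that propagate-then-log-at-the-boundary shape, with the error type and the log call simplified:

    #[derive(Debug)]
    enum QueueError { InvalidNonce }

    // Inner layers return the error to their caller...
    fn queue_add(nonce: u64, expected: u64) -> Result<(), QueueError> {
        if nonce < expected { Err(QueueError::InvalidNonce) } else { Ok(()) }
    }

    fn insert_transaction(nonce: u64, expected: u64) -> Result<(), QueueError> {
        queue_add(nonce, expected) // returned, not dropped, so no unused-result warning
    }

    // ...and only the outermost caller decides the failure is non-fatal.
    fn on_new_transaction(nonce: u64, expected: u64) {
        insert_transaction(nonce, expected).unwrap_or_else(
            |e| println!("warn: error inserting transaction to queue: {:?}", e)); // stands in for warn!
    }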
- pub fn drain(self) -> Box { self.block.state.drop().1 } + pub fn drain(self) -> Box> { self.block.state.drop().1 } } impl IsBlock for SealedBlock { @@ -339,7 +339,7 @@ impl IsBlock for SealedBlock { } /// Enact the block given by block header, transactions and uncles -pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { { if ::log::max_log_level() >= ::log::LogLevel::Trace { let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce()); @@ -357,20 +357,20 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { let block = BlockView::new(block_bytes); let header = block.header(); enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { let view = BlockView::new(&block.bytes); enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards -pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { let header = BlockView::new(block_bytes).header_view(); Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal()))) } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index e2fe7bb90..ea0919bcc 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -101,7 +101,7 @@ impl ClientReport { pub struct Client where V: Verifier { chain: Arc, engine: Arc>, - state_db: Mutex>, + state_db: Mutex>>, block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 9a3d1791d..222b88643 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -31,7 +31,7 @@ pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. 
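Patch 111 moves the Sync + Send bounds into the JournalDB trait definition itself, so a boxed journal can cross a thread boundary without the bounds being repeated at every use site. A compact illustration with an assumed Job trait:

    use std::thread;

    // With Send + Sync as supertraits, every Box<Job> is automatically
    // transferable between threads.
    trait Job: Send + Sync {
        fn run(&self);
    }

    struct Print;
    impl Job for Print {
        fn run(&self) { println!("running"); }
    }

    fn run_on_worker(job: Box<Job>) {
        thread::spawn(move || job.run()).join().unwrap();
    }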
pub struct State { - db: Box, + db: Box>, root: H256, cache: RefCell>>, snapshots: RefCell>>>>, @@ -41,7 +41,7 @@ pub struct State { impl State { /// Creates new state with empty state root #[cfg(test)] - pub fn new(mut db: Box, account_start_nonce: U256) -> State { + pub fn new(mut db: Box>, account_start_nonce: U256) -> State { let mut root = H256::new(); { // init trie and reset root too null @@ -58,7 +58,7 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: Box, root: H256, account_start_nonce: U256) -> State { + pub fn from_existing(db: Box>, root: H256, account_start_nonce: U256) -> State { { // trie should panic! if root does not exist let _ = SecTrieDB::new(db.as_hashdb(), &root); @@ -126,7 +126,7 @@ impl State { } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, Box) { + pub fn drop(self) -> (H256, Box>) { (self.root, self.db) } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 7b99f68d5..85e311e82 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -250,7 +250,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { } } -pub fn get_temp_journal_db() -> GuardedTempResult> { +pub fn get_temp_journal_db() -> GuardedTempResult>> { let temp = RandomTempPath::new(); let journal_db = Box::new(OptionOneDB::new(temp.as_str())); GuardedTempResult { @@ -268,7 +268,7 @@ pub fn get_temp_state() -> GuardedTempResult { } } -pub fn get_temp_journal_db_in(path: &Path) -> Box { +pub fn get_temp_journal_db_in(path: &Path) -> Box> { Box::new(OptionOneDB::new(path.to_str().unwrap())) } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 23fd08011..c6243ace0 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -26,9 +26,9 @@ use std::env; /// A HashDB which can manage a short-term journal potentially containing many forks of mutually /// exclusive actions. -pub trait JournalDB : HashDB { +pub trait JournalDB : HashDB + Sync + Send { /// Return a copy of ourself, in a box. - fn spawn(&self) -> Box; + fn spawn(&self) -> Box>; /// Returns heap memory size used fn mem_used(&self) -> usize; @@ -418,7 +418,7 @@ impl HashDB for OptionOneDB { } impl JournalDB for OptionOneDB { - fn spawn(&self) -> Box { + fn spawn(&self) -> Box> { Box::new(OptionOneDB { overlay: MemoryDB::new(), backing: self.backing.clone(), From 2a856a13f04271fbe78576858aedca3b2c4b659c Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 13:21:53 +0100 Subject: [PATCH 112/222] Obvious typo fix. --- ethcore/src/block.rs | 14 +++++++------- ethcore/src/client/client.rs | 2 +- ethcore/src/state.rs | 8 ++++---- ethcore/src/tests/helpers.rs | 4 ++-- util/src/journaldb.rs | 4 ++-- 5 files changed, 16 insertions(+), 16 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index ab5086273..f3a4feaf0 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -171,7 +171,7 @@ pub struct SealedBlock { impl<'x> OpenBlock<'x> { /// Create a new OpenBlock ready for transaction pushing. 
- pub fn new(engine: &'x Engine, db: Box>, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { + pub fn new(engine: &'x Engine, db: Box, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { let mut r = OpenBlock { block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, @@ -317,7 +317,7 @@ impl ClosedBlock { } /// Drop this object and return the underlieing database. - pub fn drain(self) -> Box> { self.block.state.drop().1 } + pub fn drain(self) -> Box { self.block.state.drop().1 } } impl SealedBlock { @@ -331,7 +331,7 @@ impl SealedBlock { } /// Drop this object and return the underlieing database. - pub fn drain(self) -> Box> { self.block.state.drop().1 } + pub fn drain(self) -> Box { self.block.state.drop().1 } } impl IsBlock for SealedBlock { @@ -339,7 +339,7 @@ impl IsBlock for SealedBlock { } /// Enact the block given by block header, transactions and uncles -pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Header], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { { if ::log::max_log_level() >= ::log::LogLevel::Trace { let s = State::from_existing(db.spawn(), parent.state_root().clone(), engine.account_start_nonce()); @@ -357,20 +357,20 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_bytes(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let block = BlockView::new(block_bytes); let header = block.header(); enact(&header, &block.transactions(), &block.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_verified(block: &PreverifiedBlock, engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let view = BlockView::new(&block.bytes); enact(&block.header, &block.transactions, &view.uncles(), engine, db, parent, last_hashes) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards -pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box>, parent: &Header, last_hashes: LastHashes) -> Result { +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: Box, parent: &Header, last_hashes: LastHashes) -> Result { let header = BlockView::new(block_bytes).header_view(); Ok(try!(try!(enact_bytes(block_bytes, engine, db, parent, last_hashes)).seal(engine, header.seal()))) } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index ea0919bcc..3c8a28380 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -101,7 +101,7 @@ impl ClientReport { pub struct Client where V: Verifier { chain: Arc, engine: Arc>, - state_db: Mutex>>, + state_db: Mutex>, block_queue: BlockQueue, report: RwLock, import_lock: Mutex<()>, diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 222b88643..519debcc1 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -31,7 +31,7 @@ pub type ApplyResult = Result; /// Representation of the entire state of all accounts in the system. pub struct State { - db: Box>, + db: Box, root: H256, cache: RefCell>>, snapshots: RefCell>>>>, @@ -41,7 +41,7 @@ pub struct State { impl State { /// Creates new state with empty state root #[cfg(test)] - pub fn new(mut db: Box>, account_start_nonce: U256) -> State { + pub fn new(mut db: Box, account_start_nonce: U256) -> State { let mut root = H256::new(); { // init trie and reset root too null @@ -58,7 +58,7 @@ impl State { } /// Creates new state with existing state root - pub fn from_existing(db: Box>, root: H256, account_start_nonce: U256) -> State { + pub fn from_existing(db: Box, root: H256, account_start_nonce: U256) -> State { { // trie should panic! if root does not exist let _ = SecTrieDB::new(db.as_hashdb(), &root); @@ -126,7 +126,7 @@ impl State { } /// Destroy the current object and return root and database. - pub fn drop(self) -> (H256, Box>) { + pub fn drop(self) -> (H256, Box) { (self.root, self.db) } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 85e311e82..0bb6b5015 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -250,7 +250,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { } } -pub fn get_temp_journal_db() -> GuardedTempResult>> { +pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); let journal_db = Box::new(OptionOneDB::new(temp.as_str())); GuardedTempResult { @@ -268,7 +268,7 @@ pub fn get_temp_state() -> GuardedTempResult { } } -pub fn get_temp_journal_db_in(path: &Path) -> Box> { +pub fn get_temp_journal_db_in(path: &Path) -> Box { Box::new(OptionOneDB::new(path.to_str().unwrap())) } diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index c6243ace0..e7a670a08 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -28,7 +28,7 @@ use std::env; /// exclusive actions. pub trait JournalDB : HashDB + Sync + Send { /// Return a copy of ourself, in a box. - fn spawn(&self) -> Box>; + fn spawn(&self) -> Box; /// Returns heap memory size used fn mem_used(&self) -> usize; @@ -418,7 +418,7 @@ impl HashDB for OptionOneDB { } impl JournalDB for OptionOneDB { - fn spawn(&self) -> Box> { + fn spawn(&self) -> Box { Box::new(OptionOneDB { overlay: MemoryDB::new(), backing: self.backing.clone(), From 4771fdf0fb448d72e3b386dee78b308cd40f1c45 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 13:50:39 +0100 Subject: [PATCH 113/222] Rearrange journaldb infrastructure. 
--- ethcore/src/block_queue.rs | 1 + ethcore/src/client/client.rs | 10 +- util/src/journaldb/archivedb.rs | 387 ++++++++++++++++++ util/src/journaldb/mod.rs | 33 ++ .../optiononedb.rs} | 279 +++++-------- util/src/journaldb/traits.rs | 37 ++ 6 files changed, 571 insertions(+), 176 deletions(-) create mode 100644 util/src/journaldb/archivedb.rs create mode 100644 util/src/journaldb/mod.rs rename util/src/{journaldb.rs => journaldb/optiononedb.rs} (88%) create mode 100644 util/src/journaldb/traits.rs diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 4e335f705..8b7143b0b 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -412,6 +412,7 @@ impl BlockQueue { } } + /// Optimise memory footprint of the heap fields. pub fn collect_garbage(&self) { { self.verification.unverified.lock().unwrap().shrink_to_fit(); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3c8a28380..70a3eb92a 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -139,8 +139,14 @@ impl Client where V: Verifier { state_path.push("state"); let engine = Arc::new(try!(spec.to_engine())); - let mut state_db = Box::new(OptionOneDB::from_prefs(state_path.to_str().unwrap(), config.prefer_journal)); - if state_db.is_empty() && engine.spec().ensure_db_good(state_db.deref_mut()) { + let state_path_str = state_path.to_str().unwrap(); + let mut state_db = if config.prefer_journal { + new_optiononedb(state_path_str) + } else { + new_archivedb(state_path_str) + }; + + if state_db.is_empty() && engine.spec().ensure_db_good(state_db.as_hashdb_mut()) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); } diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs new file mode 100644 index 000000000..28cc4130a --- /dev/null +++ b/util/src/journaldb/archivedb.rs @@ -0,0 +1,387 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use rlp::*; +use hashdb::*; +use memorydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. 
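The doc comment above describes the overlay/backing split: writes accumulate in a MemoryDB with reference counts and only reach the disk on commit(). A toy model of that drain loop, where HashMaps replace MemoryDB and the key-value store, and, like the ArchiveDB commit shown below, negative counts are tallied but never applied to the backing:

    use std::collections::HashMap;

    struct ToyOverlayDb {
        overlay: HashMap<u64, (Vec<u8>, i32)>, // key -> (value, refcount delta)
        backing: HashMap<u64, Vec<u8>>,        // stands in for the on-disk database
    }

    impl ToyOverlayDb {
        fn insert(&mut self, key: u64, value: Vec<u8>) {
            let entry = self.overlay.entry(key).or_insert((Vec::new(), 0));
            entry.0 = value;
            entry.1 += 1;
        }

        fn remove(&mut self, key: u64) {
            self.overlay.entry(key).or_insert((Vec::new(), 0)).1 -= 1;
        }

        // Drain the overlay: positive counts become writes; removals are
        // only counted here, since an archive keeps old data.
        fn commit(&mut self) -> u32 {
            let mut ops = 0;
            for (key, (value, rc)) in self.overlay.drain() {
                if rc > 0 { self.backing.insert(key, value); ops += 1; }
                if rc < 0 { ops += 1; }
            }
            ops
        }
    }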
+pub struct ArchiveDB { + overlay: MemoryDB, + backing: Arc, +} + +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 259; + +impl ArchiveDB { + /// Create a new instance from file + pub fn new(path: &str) -> ArchiveDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + ArchiveDB { + overlay: MemoryDB::new(), + backing: Arc::new(backing), + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> ArchiveDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } + + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + } +} + +impl HashDB for ArchiveDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for ArchiveDB { + fn spawn(&self) -> Box { + Box::new(ArchiveDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result { + let batch = DBTransaction::new(); + let mut inserts = 0usize; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc > 0 { + assert!(rc == 1); + batch.put(&key.bytes(), &value).expect("Low-level database error. 
Some issue with your hard disk?"); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + deletes += 1; + } + } + try!(self.backing.write(batch)); + Ok((inserts + deletes) as u32) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use hashdb::*; + + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + + #[test] + fn long_history() { + // history is 3 + let mut jdb = ArchiveDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn overwrite() { + // history is 1 + let mut jdb = ArchiveDB::new_temp(); + + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + jdb.insert(b"foo"); + assert!(jdb.exists(&foo)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_same_key() { 
+ // history is 1 + let mut jdb = ArchiveDB::new_temp(); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + } + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + } + } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. + + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + foo + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + } + } +} diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs new file mode 100644 index 000000000..fdb825d51 --- /dev/null +++ b/util/src/journaldb/mod.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! JournalDB interface and implementation. 
+ +use common::*; + +/// Export the journaldb module. +pub mod traits; +mod archivedb; +mod optiononedb; + +/// Export the JournalDB trait. +pub use self::traits::JournalDB; + +/// Create a new JournalDB trait object which is an ArchiveDB. +pub fn new_archivedb(path: &str) -> Box { Box::new(archivedb::ArchiveDB::new(path)) } + +/// Create a new JournalDB trait object which is an OptionOneDB. +pub fn new_optiononedb(path: &str) -> Box { Box::new(optiononedb::OptionOneDB::new(path)) } diff --git a/util/src/journaldb.rs b/util/src/journaldb/optiononedb.rs similarity index 88% rename from util/src/journaldb.rs rename to util/src/journaldb/optiononedb.rs index e7a670a08..58bb88277 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -20,26 +20,11 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; +use super::traits::JournalDB; use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; -/// A HashDB which can manage a short-term journal potentially containing many forks of mutually -/// exclusive actions. -pub trait JournalDB : HashDB + Sync + Send { - /// Return a copy of ourself, in a box. - fn spawn(&self) -> Box; - - /// Returns heap memory size used - fn mem_used(&self) -> usize; - - /// Check if this database has any commits - fn is_empty(&self) -> bool; - - /// Commit all recent insert operations. - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; -} - /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// @@ -56,43 +41,28 @@ pub struct OptionOneDB { // all keys must be at least 12 bytes const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; - const DB_VERSION : u32 = 3; -const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; - const PADDING : [u8; 10] = [ 0u8; 10 ]; impl OptionOneDB { /// Create a new instance from file pub fn new(path: &str) -> OptionOneDB { - Self::from_prefs(path, true) - } - - /// Create a new instance from file - pub fn from_prefs(path: &str, prefer_journal: bool) -> OptionOneDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; let backing = Database::open(&opts, path).unwrap_or_else(|e| { panic!("Error opening state db: {}", e); }); - let with_journal; if !backing.is_empty() { match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => { with_journal = true; }, - Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; }, + Ok(Some(DB_VERSION)) => {}, v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) } } else { - backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); - with_journal = prefer_journal; + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - - let counters = if with_journal { - Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))) - } else { - None - }; + + let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); OptionOneDB { overlay: MemoryDB::new(), backing: Arc::new(backing), @@ -108,34 +78,6 @@ impl OptionOneDB { Self::new(dir.to_str().unwrap()) } - /// Drain the overlay and place it into a batch for the DB. 
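The new mod.rs above deliberately exports only the trait plus the new_archivedb/new_optiononedb constructors, keeping the concrete types private so callers choose an implementation by configuration alone. A sketch of that factory shape (Journal, Archive and OptionOne are illustrative stand-ins, not the crate's types):

    trait Journal { fn name(&self) -> &'static str; }

    struct Archive;
    impl Journal for Archive { fn name(&self) -> &'static str { "archive" } }

    struct OptionOne;
    impl Journal for OptionOne { fn name(&self) -> &'static str { "option-one" } }

    fn new_archive() -> Box<Journal> { Box::new(Archive) }
    fn new_option_one() -> Box<Journal> { Box::new(OptionOne) }

    // Call sites stay implementation-agnostic, as in Client::new above:
    fn open(prefer_journal: bool) -> Box<Journal> {
        if prefer_journal { new_option_one() } else { new_archive() }
    }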
- fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; - for i in overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc > 0 { - assert!(rc == 1); - batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); - inserts += 1; - } - if rc < 0 { - assert!(rc == -1); - deletes += 1; - } - } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes - } - - /// Just commit the overlay into the backing DB. - fn commit_without_counters(&mut self) -> Result { - let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); - try!(self.backing.write(batch)); - Ok(ret as u32) - } - fn morph_key(key: &H256, index: u8) -> Bytes { let mut ret = key.bytes().to_owned(); ret.push(index); @@ -217,9 +159,106 @@ impl OptionOneDB { } } - /// Commit all recent insert operations and historical removals from the old era - /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn payload(&self, key: &H256) -> Option { + self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) + } + + fn read_counters(db: &Database) -> HashMap { + let mut counters = HashMap::new(); + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val); + loop { + let mut index = 0usize; + while let Some(rlp_data) = db.get({ + let mut r = RlpStream::new_list(3); + r.append(&era); + r.append(&index); + r.append(&&PADDING[..]); + &r.drain() + }).expect("Low-level database error.") { + trace!("read_counters: era={}, index={}", era, index); + let rlp = Rlp::new(&rlp_data); + let inserts: Vec = rlp.val_at(1); + Self::replay_keys(&inserts, db, &mut counters); + index += 1; + }; + if index == 0 || era == 0 { + break; + } + era -= 1; + } + } + trace!("Recovered {} counters", counters.len()); + counters + } +} + +impl HashDB for OptionOneDB { + fn keys(&self) -> HashMap { + let mut ret: HashMap = HashMap::new(); + for (key, _) in self.backing.iter() { + let h = H256::from_slice(key.deref()); + ret.insert(h, 1); + } + + for (key, refs) in self.overlay.keys().into_iter() { + let refs = *ret.get(&key).unwrap_or(&0) + refs; + ret.insert(key, refs); + } + ret + } + + fn lookup(&self, key: &H256) -> Option<&[u8]> { + let k = self.overlay.raw(key); + match k { + Some(&(ref d, rc)) if rc > 0 => Some(d), + _ => { + if let Some(x) = self.payload(key) { + Some(&self.overlay.denote(key, x).0) + } + else { + None + } + } + } + } + + fn exists(&self, key: &H256) -> bool { + self.lookup(key).is_some() + } + + fn insert(&mut self, value: &[u8]) -> H256 { + self.overlay.insert(value) + } + fn emplace(&mut self, key: H256, value: Bytes) { + self.overlay.emplace(key, value); + } + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); + } +} + +impl JournalDB for OptionOneDB { + fn spawn(&self) -> Box { + Box::new(OptionOneDB { + overlay: MemoryDB::new(), + backing: self.backing.clone(), + counters: self.counters.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + + fn is_empty(&self) -> bool { + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, 
H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -337,114 +376,6 @@ impl OptionOneDB { // trace!("OptionOneDB::commit() deleted {} nodes", deletes); Ok(0) } - - fn payload(&self, key: &H256) -> Option { - self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) - } - - fn read_counters(db: &Database) -> HashMap { - let mut counters = HashMap::new(); - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); - loop { - let mut index = 0usize; - while let Some(rlp_data) = db.get({ - let mut r = RlpStream::new_list(3); - r.append(&era); - r.append(&index); - r.append(&&PADDING[..]); - &r.drain() - }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); - let rlp = Rlp::new(&rlp_data); - let inserts: Vec = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut counters); - index += 1; - }; - if index == 0 || era == 0 { - break; - } - era -= 1; - } - } - trace!("Recovered {} counters", counters.len()); - counters - } -} - -impl HashDB for OptionOneDB { - fn keys(&self) -> HashMap { - let mut ret: HashMap = HashMap::new(); - for (key, _) in self.backing.iter() { - let h = H256::from_slice(key.deref()); - ret.insert(h, 1); - } - - for (key, refs) in self.overlay.keys().into_iter() { - let refs = *ret.get(&key).unwrap_or(&0) + refs; - ret.insert(key, refs); - } - ret - } - - fn lookup(&self, key: &H256) -> Option<&[u8]> { - let k = self.overlay.raw(key); - match k { - Some(&(ref d, rc)) if rc > 0 => Some(d), - _ => { - if let Some(x) = self.payload(key) { - Some(&self.overlay.denote(key, x).0) - } - else { - None - } - } - } - } - - fn exists(&self, key: &H256) -> bool { - self.lookup(key).is_some() - } - - fn insert(&mut self, value: &[u8]) -> H256 { - self.overlay.insert(value) - } - fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); - } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); - } -} - -impl JournalDB for OptionOneDB { - fn spawn(&self) -> Box { - Box::new(OptionOneDB { - overlay: MemoryDB::new(), - backing: self.backing.clone(), - counters: self.counters.clone(), - }) - } - - fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.counters { - Some(ref c) => c.read().unwrap().heap_size_of_children(), - None => 0 - } - } - - fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() - } - - fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) - } else { - self.commit_without_counters() - } - } } #[cfg(test)] diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs new file mode 100644 index 000000000..25e132339 --- /dev/null +++ b/util/src/journaldb/traits.rs @@ -0,0 +1,37 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed HashDB implementation. + +use common::*; +use hashdb::*; + +/// A HashDB which can manage a short-term journal potentially containing many forks of mutually +/// exclusive actions. +pub trait JournalDB : HashDB + Send + Sync { + /// Return a copy of ourself, in a box. + fn spawn(&self) -> Box; + + /// Returns heap memory size used + fn mem_used(&self) -> usize; + + /// Check if this database has any commits + fn is_empty(&self) -> bool; + + /// Commit all recent insert operations and canonical historical commits' removals from the + /// old era to the backing database, reverting any non-canonical historical commit's inserts. + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; +} From ecd33a60931be9616245984149a46aba12457f36 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 13:54:52 +0100 Subject: [PATCH 114/222] fixed U256 and transaction request deserialization, added tests for transaction request --- rpc/src/v1/types/bytes.rs | 2 +- rpc/src/v1/types/transaction_request.rs | 64 ++++++++++++++++++++----- util/bigint/src/uint.rs | 8 +--- util/src/hash.rs | 2 +- 4 files changed, 55 insertions(+), 21 deletions(-) diff --git a/rpc/src/v1/types/bytes.rs b/rpc/src/v1/types/bytes.rs index 466fbebde..0b14c30e8 100644 --- a/rpc/src/v1/types/bytes.rs +++ b/rpc/src/v1/types/bytes.rs @@ -20,7 +20,7 @@ use serde::de::Visitor; use util::common::FromHex; /// Wrapper structure around vector of bytes. -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct Bytes(Vec); impl Bytes { diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs index a61b11c25..d40402ab5 100644 --- a/rpc/src/v1/types/transaction_request.rs +++ b/rpc/src/v1/types/transaction_request.rs @@ -19,7 +19,7 @@ use util::numbers::{Uint, U256}; use ethcore::transaction::{Action, Transaction}; use v1::types::Bytes; -#[derive(Debug, Default, Deserialize)] +#[derive(Debug, Default, PartialEq, Deserialize)] pub struct TransactionRequest { pub from: Address, pub to: Option
, @@ -27,28 +27,26 @@ pub struct TransactionRequest { pub gas_price: Option, pub gas: Option, pub value: Option, - pub data: Bytes, + pub data: Option, pub nonce: Option, } impl Into for TransactionRequest { fn into(self) -> Transaction { Transaction { - nonce: self.nonce.unwrap_or(U256::zero()), - action: match self.to { - None => Action::Create, - Some(addr) => Action::Call(addr) - }, - gas: self.gas.unwrap_or(U256::zero()), - gas_price: self.gas_price.unwrap_or(U256::zero()), - value: self.value.unwrap_or(U256::zero()), - data: self.data.to_vec() + nonce: self.nonce.unwrap_or_else(U256::zero), + action: self.to.map_or(Action::Create, Action::Call), + gas: self.gas.unwrap_or_else(U256::zero), + gas_price: self.gas_price.unwrap_or_else(U256::zero), + value: self.value.unwrap_or_else(U256::zero), + data: self.data.map_or_else(Vec::new, |d| d.to_vec()), } } } #[cfg(test)] mod tests { + use serde_json; use util::numbers::{Uint, U256}; use util::hash::Address; use ethcore::transaction::{Transaction, Action}; @@ -63,7 +61,7 @@ mod tests { gas_price: Some(U256::from(20)), gas: Some(U256::from(10_000)), value: Some(U256::from(1)), - data: Bytes::new(vec![10, 20]), + data: Some(Bytes::new(vec![10, 20])), nonce: Some(U256::from(12)), }; @@ -85,7 +83,7 @@ mod tests { gas_price: None, gas: None, value: None, - data: Bytes::new(vec![]), + data: None, nonce: None, }; @@ -98,4 +96,44 @@ mod tests { data: vec![], }, tr.into()); } + + #[test] + fn transaction_request_deserialize() { + let s = r#"{ + "from":"0x0000000000000000000000000000000000000001", + "to":"0x0000000000000000000000000000000000000002", + "gasPrice":"0x1", + "gas":"0x2", + "value":"0x3", + "data":"0x123456", + "nonce":"0x4" + }"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: Some(Address::from(2)), + gas_price: Some(U256::from(1)), + gas: Some(U256::from(2)), + value: Some(U256::from(3)), + data: Some(Bytes::new(vec![0x12, 0x34, 0x56])), + nonce: Some(U256::from(4)), + }); + } + + #[test] + fn transaction_request_deserialize_empty() { + let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#; + let deserialized: TransactionRequest = serde_json::from_str(s).unwrap(); + + assert_eq!(deserialized, TransactionRequest { + from: Address::from(1), + to: None, + gas_price: None, + gas: None, + value: None, + data: None, + nonce: None, + }); + } } diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 959df0944..47888fd88 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -39,7 +39,6 @@ use std::fmt; use std::cmp; -use std::mem; use std::str::{FromStr}; use std::convert::From; use std::hash::{Hash, Hasher}; @@ -788,14 +787,11 @@ macro_rules! construct_uint { fn visit_str(&mut self, value: &str) -> Result where E: serde::Error { // 0x + len - if value.len() != 2 + $n_words / 8 { + if value.len() > 2 + $n_words * 16 { return Err(serde::Error::custom("Invalid length.")); } - match $name::from_str(&value[2..]) { - Ok(val) => Ok(val), - Err(_) => { return Err(serde::Error::custom("Invalid length.")); } - } + $name::from_str(&value[2..]).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { diff --git a/util/src/hash.rs b/util/src/hash.rs index 4eb96b53e..3dc15116d 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -257,7 +257,7 @@ macro_rules! 
impl_hash { return Err(serde::Error::custom("Invalid length.")); } - value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid valid hex.")) + value[2..].from_hex().map(|ref v| $from::from_slice(v)).map_err(|_| serde::Error::custom("Invalid hex value.")) } fn visit_string(&mut self, value: String) -> Result where E: serde::Error { From 51cfd4b0ea03a7a22ae0c0034a4ac0c8093a05c6 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 13:58:11 +0100 Subject: [PATCH 115/222] Remove unneeded clone. --- util/src/overlaydb.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index f14677d05..7c9b6b04b 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -36,7 +36,7 @@ use kvdb::{Database}; /// /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. -#[derive(Clone)] +//#[derive(Clone)] pub struct OverlayDB { overlay: MemoryDB, backing: Arc, From 38d470f3bcc584a3f0f29e3670ebf10e2a2ddad9 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 14:45:19 +0100 Subject: [PATCH 116/222] Reorganise command line options into more general engine. --- ethcore/src/client/client.rs | 9 ++---- ethcore/src/client/config.rs | 5 ++-- parity/main.rs | 17 ++++++++---- util/src/journaldb/mod.rs | 53 +++++++++++++++++++++++++++++++++--- util/src/lib.rs | 2 +- 5 files changed, 68 insertions(+), 18 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 70a3eb92a..1148a0140 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -131,7 +131,8 @@ impl Client where V: Verifier { let mut dir = path.to_path_buf(); dir.push(H64::from(spec.genesis_header().hash()).hex()); //TODO: sec/fat: pruned/full versioning - dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, if config.prefer_journal { "pruned" } else { "archive" })); + // version here is a bit useless now, since it's controlled only be the pruning algo. + dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning)); let path = dir.as_path(); let gb = spec.genesis_block(); let chain = Arc::new(BlockChain::new(config.blockchain, &gb, path)); @@ -140,11 +141,7 @@ impl Client where V: Verifier { let engine = Arc::new(try!(spec.to_engine())); let state_path_str = state_path.to_str().unwrap(); - let mut state_db = if config.prefer_journal { - new_optiononedb(state_path_str) - } else { - new_archivedb(state_path_str) - }; + let mut state_db = journaldb::new(state_path_str, config.pruning); if state_db.is_empty() && engine.spec().ensure_db_good(state_db.as_hashdb_mut()) { state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 484c8d0c6..89e95ea06 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -16,6 +16,7 @@ pub use block_queue::BlockQueueConfig; pub use blockchain::BlockChainConfig; +use util::journaldb; /// Client configuration. Includes configs for all sub-systems. #[derive(Debug, Default)] @@ -24,8 +25,8 @@ pub struct ClientConfig { pub queue: BlockQueueConfig, /// Blockchain configuration. pub blockchain: BlockChainConfig, - /// Prefer journal rather than archive. - pub prefer_journal: bool, + /// The JournalDB ("pruning") algorithm to use. + pub pruning: journaldb::Algorithm, /// The name of the client instance. 
pub name: String, } diff --git a/parity/main.rs b/parity/main.rs index b6ed5cba3..bf1e24203 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -80,7 +80,8 @@ Protocol Options: or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. - --pruning Client should prune the state/storage trie. + --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, + light (experimental) [default: archive]. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. @@ -101,7 +102,7 @@ API and Console Options: --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited - list of API name. Possible name are web3, eth and net. [default: web3,eth,net]. + list of API name. Possible names are web3, eth and net. [default: web3,eth,net]. --rpc Equivalent to --jsonrpc (geth-compatible). --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). @@ -141,7 +142,7 @@ struct Args { flag_identity: String, flag_cache: Option, flag_keys_path: String, - flag_pruning: bool, + flag_pruning: String, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, @@ -403,7 +404,13 @@ impl Configuration { client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; } } - client_config.prefer_journal = self.args.flag_pruning; + client_config.pruning = match self.args.flag_pruning.as_str() { + "archive" => journaldb::Algorithm::Archive, + "pruned" => journaldb::Algorithm::EarlyMerge, +// "fast" => journaldb::Algorithm::OverlayRecent, // TODO: @arkpar uncomment this once option 2 is merged. +// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged. + _ => { die!("{}: Invalid pruning method given.", self.args.flag_pruning); } + }; client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); @@ -424,7 +431,7 @@ impl Configuration { self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) ); - SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); + SocketAddr::from_str(&url).unwrap_or_else(|_| die!("{}: Invalid JSONRPC listen host/port given.", url)); let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index fdb825d51..0cee7dd8d 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -26,8 +26,53 @@ mod optiononedb; /// Export the JournalDB trait. pub use self::traits::JournalDB; -/// Create a new JournalDB trait object which is an ArchiveDB. 
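// The factory that replaces these per-algorithm constructors, in use (a sketch;
// `Algorithm` and `journaldb::new` are introduced just below, and the path and
// values here are made up for illustration):
fn open_pruned(path: &str) -> Box<JournalDB> {
	let mut db = journaldb::new(path, journaldb::Algorithm::EarlyMerge);
	let key = db.insert(b"dog");                // staged in the memory overlay
	db.commit(0, &b"0".sha3(), None).unwrap();  // journalled under era 0
	db.remove(&key);                            // latent: resolved when era 0 expires
	db
}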
-pub fn new_archivedb(path: &str) -> Box { Box::new(archivedb::ArchiveDB::new(path)) } +/// A journal database algorithm. +#[derive(Debug)] +pub enum Algorithm { + /// Keep all keys forever. + Archive, -/// Create a new JournalDB trait object which is an OptionOneDB. -pub fn new_optiononedb(path: &str) -> Box { Box::new(optiononedb::OptionOneDB::new(path)) } + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into backing database, journal retains knowledge of whether backing DB key is + /// ancient or recent. Non-canon inserts get explicitly reverted and removed from backing DB. + EarlyMerge, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// Inserts go into memory overlay, which is tried for key fetches. Memory overlay gets + /// flushed in backing only at end of recent history. + OverlayRecent, + + /// Ancient and recent history maintained separately; recent history lasts for particular + /// number of blocks. + /// + /// References are counted in disk-backed DB. + RefCounted, +} + +impl Default for Algorithm { + fn default() -> Algorithm { Algorithm::Archive } +} + +impl fmt::Display for Algorithm { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", match self { + &Algorithm::Archive => "archive", + &Algorithm::EarlyMerge => "earlymerge", + &Algorithm::OverlayRecent => "overlayrecent", + &Algorithm::RefCounted => "refcounted", + }) + } +} + +/// Create a new JournalDB trait object. +pub fn new(path: &str, algorithm: Algorithm) -> Box { + match algorithm { + Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), + Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)), + _ => unimplemented!(), + } +} diff --git a/util/src/lib.rs b/util/src/lib.rs index 59d66a325..de9934f36 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -154,7 +154,7 @@ pub use rlp::*; pub use hashdb::*; pub use memorydb::*; pub use overlaydb::*; -pub use journaldb::*; +pub use journaldb::JournalDB; pub use math::*; pub use crypto::*; pub use triehash::*; From 8ae103087ddc87b3f61f1c0b21dcbb5ce5919fd5 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 15:07:43 +0100 Subject: [PATCH 117/222] Fixups for new API. 
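
The `Display` impl above is not just cosmetic: `client.rs` now derives the state directory from it via `format!("v{}-sec-{}", CLIENT_DB_VER_STR, config.pruning)`, so each pruning algorithm gets its own database path. A minimal check of that naming (illustrative; `CLIENT_DB_VER_STR` is "5.1" at this point in the series):

#[test]
fn pruning_algorithm_names_db_dir() {
	// the strings come straight from the Display impl above
	assert_eq!(format!("v{}-sec-{}", "5.1", Algorithm::Archive), "v5.1-sec-archive");
	assert_eq!(format!("v{}-sec-{}", "5.1", Algorithm::EarlyMerge), "v5.1-sec-earlymerge");
}
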
--- ethcore/src/tests/helpers.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 56ea6b1d3..dc3068560 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -252,7 +252,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db: Box = Box::new(OptionOneDB::new(temp.as_str())); + let journal_db = journaldb::new(temp.as_str(), journaldb::Algorithm::EarlyMerge); GuardedTempResult { _temp: temp, result: Some(journal_db) @@ -269,7 +269,7 @@ pub fn get_temp_state() -> GuardedTempResult { } pub fn get_temp_journal_db_in(path: &Path) -> Box { - Box::new(OptionOneDB::new(path.to_str().unwrap())) + journaldb::new(path.to_str().unwrap(), journaldb::Algorithm::EarlyMerge) } pub fn get_temp_state_in(path: &Path) -> State { From 197ea7f7d6bc89337215ad72702ac34734f0f71f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 14:48:30 +0100 Subject: [PATCH 118/222] Using miner in rpc instead of sync --- miner/src/lib.rs | 43 +++++++++++++++++++++++++++++++++++- miner/src/miner.rs | 49 ++++++++--------------------------------- parity/main.rs | 1 + rpc/src/lib.rs | 2 ++ rpc/src/v1/impls/eth.rs | 18 ++++++++++----- 5 files changed, 67 insertions(+), 46 deletions(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 20b5dd7d3..135a15df5 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -63,5 +63,46 @@ mod miner; mod transaction_queue; pub use transaction_queue::TransactionQueue; -pub use miner::{Miner, MinerService}; +pub use miner::{Miner}; +use std::sync::Mutex; +use util::{H256, U256, Address, Bytes}; +use ethcore::client::{BlockChainClient}; +use ethcore::block::{ClosedBlock}; +use ethcore::error::{Error}; +use ethcore::transaction::SignedTransaction; + +/// Miner client API +pub trait MinerService : Send + Sync { + + /// Returns miner's status. + fn status(&self) -> MinerStatus; + + /// Imports transactions to transaction queue. + fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> + where T: Fn(&Address) -> U256; + + /// Removes all transactions from the queue and restart mining operation. + fn clear_and_reset(&self, chain: &BlockChainClient); + + /// called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], retracted: &[H256]); + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, chain: &BlockChainClient); + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex>; + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. 
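///
/// A submission sketch (illustrative only, not part of the patch; for Ethash
/// the `seal` vector would carry the mix-hash and nonce found by the miner):
///
///     if let Err(e) = miner.submit_seal(chain, pow_hash, seal) {
///         warn!("Seal rejected: {:?}", e);
///     }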
+ fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error>; +} + +/// Mining status +pub struct MinerStatus { + /// Number of transactions in queue with state `pending` (ready to be included in block) + pub transaction_queue_pending: usize, + /// Number of transactions in queue with state `future` (not yet ready to be included in block) + pub transaction_queue_future: usize, +} diff --git a/miner/src/miner.rs b/miner/src/miner.rs index d2e839101..623af33a0 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -14,50 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::*; -use std::sync::atomic::AtomicBool; use rayon::prelude::*; +use std::sync::{Mutex, RwLock, Arc}; +use std::sync::atomic; +use std::sync::atomic::AtomicBool; + +use util::{H256, U256, Address, Bytes}; use ethcore::views::{BlockView}; use ethcore::client::{BlockChainClient, BlockId}; -use ethcore::block::*; -use ethcore::error::*; +use ethcore::block::{ClosedBlock}; +use ethcore::error::{Error}; use ethcore::transaction::SignedTransaction; -use transaction_queue::{TransactionQueue}; -/// Miner client API -pub trait MinerService : Send + Sync { - - /// Returns miner's status. - fn status(&self) -> MinerStatus; - - /// Imports transactions to transaction queue. - fn import_transactions(&self, transactions: Vec, fetch_nonce: T) -> Result<(), Error> - where T: Fn(&Address) -> U256; - - /// Removes all transactions from the queue and restart mining operation. - fn clear_and_reset(&self, chain: &BlockChainClient); - - /// called when blocks are imported to chain, updates transactions queue. - fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]); - - /// New chain head event. Restart mining operation. - fn prepare_sealing(&self, chain: &BlockChainClient); - - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - fn sealing_block(&self, chain: &BlockChainClient) -> &Mutex>; - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - fn submit_seal(&self, chain: &BlockChainClient, pow_hash: H256, seal: Vec) -> Result<(), Error>; -} - -/// Mining status -pub struct MinerStatus { - /// Number of transactions in queue with state `pending` (ready to be included in block) - pub transaction_queue_pending: usize, - /// Number of transactions in queue with state `future` (not yet ready to be included in block) - pub transaction_queue_future: usize, -} +use super::{MinerService, MinerStatus, TransactionQueue}; /// Keeps track of transactions using priority queue and holds currently mined block. 
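///
/// Usage sketch (illustrative; assumes `client` implements `BlockChainClient`
/// and `signed_tx` is a valid `SignedTransaction` with a current nonce):
///
///     let miner = Miner::default();
///     miner.import_transactions(vec![signed_tx], |a| client.nonce(a)).unwrap();
///     let pending = miner.status().transaction_queue_pending; // 1, once queued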
pub struct Miner { @@ -76,7 +45,7 @@ impl Default for Miner { transaction_queue: Mutex::new(TransactionQueue::new()), sealing_enabled: AtomicBool::new(false), sealing_block: Mutex::new(None), - author: RwLock::new(Address::new()), + author: RwLock::new(Address::default()), extra_data: RwLock::new(Vec::new()), } } diff --git a/parity/main.rs b/parity/main.rs index d26908f8a..f8a45b01e 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -171,6 +171,7 @@ struct Args { flag_nodekey: Option, flag_nodiscover: bool, flag_maxpeers: Option, + flag_gasprice: String, flag_author: String, flag_extra_data: Option, flag_datadir: Option, diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 103bef546..3096a45c9 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -19,6 +19,8 @@ #![cfg_attr(feature="nightly", feature(custom_derive, custom_attribute, plugin))] #![cfg_attr(feature="nightly", plugin(serde_macros, clippy))] +#[macro_use] +extern crate log; extern crate rustc_serialize; extern crate serde; extern crate serde_json; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 9c0f37bc1..c4d649d5a 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -53,7 +53,7 @@ impl EthClient A: AccountProvider, M: MinerService { /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc, miner: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc, accounts: &Arc, miner: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), @@ -189,7 +189,7 @@ impl Eth for EthClient fn block_transaction_count_by_number(&self, params: Params) -> Result { from_params::<(BlockNumber,)>(params) .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending), + BlockNumber::Pending => to_value(&take_weak!(self.miner).status().transaction_queue_pending), _ => match take_weak!(self.client).block(block_number.into()) { Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), None => Ok(Value::Null) @@ -292,12 +292,20 @@ impl Eth for EthClient let accounts = take_weak!(self.accounts); match accounts.account_secret(&transaction_request.from) { Ok(secret) => { - let sync = take_weak!(self.sync); + let miner = take_weak!(self.miner); + let client = take_weak!(self.client); let (transaction, _) = transaction_request.to_eth(); let signed_transaction = transaction.sign(&secret); let hash = signed_transaction.hash(); - sync.insert_transaction(signed_transaction); - to_value(&hash) + + let import = miner.import_transactions(vec![signed_transaction], |a: &Address| client.nonce(a)); + match import { + Ok(_) => to_value(&hash), + Err(e) => { + warn!("Error sending transaction: {:?}", e); + to_value(&U256::zero()) + } + } }, Err(_) => { to_value(&U256::zero()) } } From 36ff65d05078c81208eae39adbd470e2d2ea5029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 14:52:47 +0100 Subject: [PATCH 119/222] Fixing warnings --- parity/main.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index f8a45b01e..e4ce36144 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -338,7 +338,7 @@ impl Configuration { let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); Some(SocketAddr::new(host, self.args.flag_port)) } else { - listen_address.clone() + listen_address }; (listen_address, public_address) } @@ -379,9 +379,9 @@ impl Configuration { fn sync_config(&self, 
spec: &Spec) -> SyncConfig { let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| { + sync_config.network_id = self.args.flag_networkid.as_ref().map_or(spec.network_id(), |id| { U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id)) - }).unwrap_or(spec.network_id()); + }); sync_config } @@ -425,7 +425,7 @@ impl Configuration { } if self.args.cmd_list { println!("Known addresses:"); - for &(addr, _) in secret_store.accounts().unwrap().iter() { + for &(addr, _) in &secret_store.accounts().unwrap() { println!("{:?}", addr); } } From b458452f0e03acf44eda70b238c804afb2b62e8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 14:55:56 +0100 Subject: [PATCH 120/222] TestSyncProvider fixes --- rpc/src/v1/tests/helpers/sync_provider.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index a3711d949..631752dfc 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use ethcore::transaction::SignedTransaction; use ethsync::{SyncProvider, SyncStatus, SyncState}; pub struct Config { @@ -40,7 +39,6 @@ impl TestSyncProvider { num_peers: config.num_peers, num_active_peers: 0, mem_used: 0, - transaction_queue_pending: 0, }, } } @@ -50,9 +48,5 @@ impl SyncProvider for TestSyncProvider { fn status(&self) -> SyncStatus { self.status.clone() } - - fn insert_transaction(&self, _transaction: SignedTransaction) { - unimplemented!() - } } From 5499f4530c0034451e9642fc6bba2eb67cdb321e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Fri, 11 Mar 2016 15:01:15 +0100 Subject: [PATCH 121/222] Fix tests. 
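
The fixups below all swap `ensure_db_good(&mut db)` for `ensure_db_good(db.as_hashdb_mut())`: the test helpers now hand back a `Box<JournalDB>`, and Rust does not implicitly upcast a `&mut JournalDB` trait object to the `&mut HashDB` that `ensure_db_good` expects, so an explicit accessor is needed. The shape of the problem, reduced to hypothetical traits (not from the codebase):

trait Base { fn touch(&mut self) {} }
trait Derived: Base {
	// explicit view, in the same spirit as the as_hashdb_mut() calls below
	fn as_base_mut(&mut self) -> &mut Base;
}

fn needs_base(db: &mut Base) { db.touch() }

fn fixed(db: &mut Box<Derived>) {
	// needs_base(&mut **db); // rejected: no implicit trait-object upcast
	needs_base(db.as_base_mut());
}
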
--- ethcore/src/block.rs | 6 +++--- ethcore/src/ethereum/ethash.rs | 4 ++-- ethcore/src/ethereum/mod.rs | 2 +- ethcore/src/state.rs | 2 +- ethcore/src/tests/helpers.rs | 2 +- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index f3a4feaf0..4f23cf0a0 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -389,7 +389,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let b = b.close(); @@ -404,14 +404,14 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, vec![genesis_header.hash()]).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index b0c0e4a9f..a97002a2a 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -298,7 +298,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let b = b.close(); @@ -311,7 +311,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); let mut uncle = Header::new(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 0d1dcd8d5..8c2ae6b37 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -61,7 +61,7 @@ mod tests { let genesis_header = engine.spec().genesis_header(); let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); - engine.spec().ensure_db_good(&mut db); + engine.spec().ensure_db_good(db.as_hashdb_mut()); let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce()); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000001")), U256::from(1u64)); assert_eq!(s.balance(&address_from_hex("0000000000000000000000000000000000000002")), U256::from(1u64)); diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index 519debcc1..60149d3eb 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -45,7 +45,7 @@ impl State { let mut root = H256::new(); { // init trie and reset root too null - let _ = 
SecTrieDBMut::new(db.deref_mut(), &mut root); + let _ = SecTrieDBMut::new(db.as_hashdb_mut(), &mut root); } State { diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 0bb6b5015..56ea6b1d3 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -252,7 +252,7 @@ pub fn generate_dummy_empty_blockchain() -> GuardedTempResult { pub fn get_temp_journal_db() -> GuardedTempResult> { let temp = RandomTempPath::new(); - let journal_db = Box::new(OptionOneDB::new(temp.as_str())); + let journal_db: Box = Box::new(OptionOneDB::new(temp.as_str())); GuardedTempResult { _temp: temp, result: Some(journal_db) From 04af38bb0de889f33145bd36c22e6e67f82c4e25 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 18:54:28 +0400 Subject: [PATCH 122/222] fix test compilation --- util/src/journaldb/archivedb.rs | 1 + util/src/journaldb/optiononedb.rs | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 28cc4130a..e7da1b737 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -166,6 +166,7 @@ mod tests { use common::*; use super::*; use hashdb::*; + use journaldb::traits::JournalDB; #[test] fn insert_same_in_fork() { diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index 58bb88277..dfa7c8ec1 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -61,7 +61,7 @@ impl OptionOneDB { } else { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - + let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); OptionOneDB { overlay: MemoryDB::new(), @@ -383,6 +383,7 @@ mod tests { use common::*; use super::*; use hashdb::*; + use journaldb::traits::JournalDB; #[test] fn insert_same_in_fork() { From 0dbe6684ad42ecd1a681b6430bcb2205a995ea7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 15:58:12 +0100 Subject: [PATCH 123/222] adding std::mem --- util/bigint/src/uint.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 3997d2e66..3dfb9dd45 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -36,6 +36,7 @@ //! The functions here are designed to be fast. //! +use std::mem; use std::fmt; use std::cmp; From 179569f9f810b9b9d6352653eb892afb87b2df2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 16:01:18 +0100 Subject: [PATCH 124/222] Adding std::mem back --- util/bigint/src/uint.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index 3997d2e66..c18ed839c 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -36,6 +36,9 @@ //! The functions here are designed to be fast. //! 
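// Patch 123 imports `std::mem` unconditionally; patch 124 re-lands the import
// behind a cfg gate. The likely reason: `mem` is only referenced from the
// x86_64 inline-asm implementations, so the unconditional import triggers an
// unused-import warning on every other build configuration.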
+ +#[cfg(all(asm_available, target_arch="x86_64"))] +use std::mem; use std::fmt; use std::cmp; From 89dbc2ac25e8ce6eec482143869a160932e50e82 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 18:08:29 +0300 Subject: [PATCH 125/222] [ci skip] update readme to exclude beta spec (stable is ok) --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index 4fd2a53cc..47a27e30e 100644 --- a/README.md +++ b/README.md @@ -34,9 +34,6 @@ Then, download and build Parity: git clone https://github.com/ethcore/parity cd parity -# parity should be built with rust beta -multirust override beta - # build in release mode cargo build --release ``` From c6ba378b6b4c4e3117cac3d758e09732c81c131b Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 16:17:09 +0100 Subject: [PATCH 126/222] rpc web3 tests --- rpc/src/v1/tests/mod.rs | 1 + rpc/src/v1/tests/net.rs | 6 +++--- rpc/src/v1/tests/web3.rs | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 37 insertions(+), 3 deletions(-) create mode 100644 rpc/src/v1/tests/web3.rs diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 5ef74987c..3a38ced15 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -17,4 +17,5 @@ //!TODO: load custom blockchain state and test mod net; +mod web3; mod helpers; diff --git a/rpc/src/v1/tests/net.rs b/rpc/src/v1/tests/net.rs index 792e469d8..e24045ca6 100644 --- a/rpc/src/v1/tests/net.rs +++ b/rpc/src/v1/tests/net.rs @@ -36,7 +36,7 @@ fn rpc_net_version() { let request = r#"{"jsonrpc": "2.0", "method": "net_version", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -49,7 +49,7 @@ fn rpc_net_peer_count() { let request = r#"{"jsonrpc": "2.0", "method": "net_peerCount", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"0x78","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } #[test] @@ -62,5 +62,5 @@ fn rpc_net_listening() { let request = r#"{"jsonrpc": "2.0", "method": "net_listening", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_string())); + assert_eq!(io.handle_request(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/tests/web3.rs b/rpc/src/v1/tests/web3.rs new file mode 100644 index 000000000..c717d361a --- /dev/null +++ b/rpc/src/v1/tests/web3.rs @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use jsonrpc_core::IoHandler; +use util::version; +use v1::{Web3, Web3Client}; + +#[test] +fn rpc_web3_version() { + let web3 = Web3Client::new().to_delegate(); + let io = IoHandler::new(); + io.add_delegate(web3); + + let v = version().to_owned().replace("Parity/", "Parity//"); + + let request = r#"{"jsonrpc": "2.0", "method": "web3_clientVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"VER","id":1}"#.to_owned().replace("VER", v.as_ref()); + + assert_eq!(io.handle_request(request), Some(response)); +} From 1e40997ff74b25c03a07fcb38e0c89799410bce1 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 22:15:56 +0400 Subject: [PATCH 127/222] state query for archive jdb --- util/src/journaldb/archivedb.rs | 23 +++++++++++++++++++++++ util/src/journaldb/traits.rs | 5 +++++ 2 files changed, 28 insertions(+) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index e7da1b737..c8c29e765 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -159,6 +159,10 @@ impl JournalDB for ArchiveDB { try!(self.backing.write(batch)); Ok((inserts + deletes) as u32) } + + fn state(&self, id: &H256) -> Option { + self.backing.get_by_prefix(&id.bytes()[0..12]).and_then(|b| Some(b.to_vec())) + } } #[cfg(test)] @@ -385,4 +389,23 @@ mod tests { assert!(jdb.exists(&foo)); } } + + #[test] + fn returns_state() { + let temp = ::devtools::RandomTempPath::new(); + + let key = { + let mut jdb = ArchiveDB::new(temp.as_str()); + let key = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + key + }; + + { + let jdb = ArchiveDB::new(temp.as_str()); + let state = jdb.state(&key); + assert!(state.is_some()); + } + + } } diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs index 25e132339..017c24330 100644 --- a/util/src/journaldb/traits.rs +++ b/util/src/journaldb/traits.rs @@ -34,4 +34,9 @@ pub trait JournalDB : HashDB + Send + Sync { /// Commit all recent insert operations and canonical historical commits' removals from the /// old era to the backing database, reverting any non-canonical historical commit's inserts. 
fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; + + /// State data query + fn state(&self, _id: &H256) -> Option { + None + } } From 90e20cbcad54787eaf239bef016756a884b03f62 Mon Sep 17 00:00:00 2001 From: debris Date: Fri, 11 Mar 2016 20:08:01 +0100 Subject: [PATCH 128/222] additional (failing) sstore test --- util/src/keys/store.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index ea97cc80e..cbef7b5f9 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -381,6 +381,7 @@ mod tests { use super::*; use devtools::*; use common::*; + use crypto::KeyPair; #[test] fn can_insert() { @@ -555,4 +556,15 @@ mod tests { let accounts = sstore.accounts().unwrap(); assert_eq!(30, accounts.len()); } + + #[test] + fn validate_generated_addresses() { + let temp = RandomTempPath::create_dir(); + let mut sstore = SecretStore::new_test(&temp); + let addr = sstore.new_account("test").unwrap(); + let _ok = sstore.unlock_account(&addr, "test").unwrap(); + let secret = sstore.account_secret(&addr).unwrap(); + let kp = KeyPair::from_secret(secret).unwrap(); + assert_eq!(Address::from(kp.public().sha3()), addr); + } } From b1327a045fa39e5e9552797a5f137de383206517 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 22:47:12 +0400 Subject: [PATCH 129/222] fixed new account generation --- util/src/keys/store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index cbef7b5f9..cd5fa8427 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -214,12 +214,12 @@ impl SecretStore { /// Creates new account pub fn new_account(&mut self, pass: &str) -> Result { - let secret = H256::random(); + let key_pair = crypto::KeyPair::create().expect("Error creating key-pair. 
Something wrong with crypto libraries?"); + let address = Address::from(key_pair.public().sha3()); let key_id = H128::random(); - self.insert(key_id.clone(), secret, pass); + self.insert(key_id.clone(), key_pair.secret().clone(), pass); let mut key_file = self.directory.get(&key_id).expect("the key was just inserted"); - let address = Address::random(); key_file.account = Some(address); try!(self.directory.save(key_file)); Ok(address) From e970dd4530cfc20b0af1e86b49a83bcff62f3ec6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 23:09:14 +0400 Subject: [PATCH 130/222] client state data func --- ethcore/src/client/client.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 1148a0140..8abf3a526 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -466,8 +466,8 @@ impl BlockChainClient for Client where V: Verifier { } } - fn state_data(&self, _hash: &H256) -> Option { - None + fn state_data(&self, hash: &H256) -> Option { + self.state_db.lock().unwrap().state(hash) } fn block_receipts(&self, _hash: &H256) -> Option { From da6f6d57cdc4753c8253866a1fedb84a9ee2f232 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 23:24:44 +0400 Subject: [PATCH 131/222] state data query to client --- ethcore/src/tests/client.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 001d1729b..4818ef69e 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -35,6 +35,19 @@ fn imports_from_empty() { client.flush_queue(); } +#[test] +fn returns_state_root_basic() { + let client_result = generate_dummy_client(6); + let client = client_result.reference(); + + let test_spec = get_test_spec(); + let test_engine = test_spec.to_engine().unwrap(); + let state_root = test_engine.spec().genesis_header().state_root; + + assert!(client.state_data(&state_root).is_some()); + +} + #[test] fn imports_good_block() { let dir = RandomTempPath::new(); From 349584772b6b6bc2c4078ef0fc67d88f023248a6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 23:34:18 +0400 Subject: [PATCH 132/222] redundant lines --- ethcore/src/tests/client.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 4818ef69e..43b426560 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -39,13 +39,11 @@ fn imports_from_empty() { fn returns_state_root_basic() { let client_result = generate_dummy_client(6); let client = client_result.reference(); - let test_spec = get_test_spec(); let test_engine = test_spec.to_engine().unwrap(); let state_root = test_engine.spec().genesis_header().state_root; assert!(client.state_data(&state_root).is_some()); - } #[test] From 19f23f8445bb34daf8ea3eebd9fa6fb733d8329a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 23:37:47 +0400 Subject: [PATCH 133/222] increasing history to be useful for geth fast sync --- ethcore/src/client/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 8abf3a526..77cf105ed 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -115,7 +115,7 @@ pub struct Client where V: Verifier { verifier: PhantomData, } -const HISTORY: u64 = 1000; +const HISTORY: u64 = 1200; const CLIENT_DB_VER_STR: &'static str = "5.1"; impl Client { From 
fb51ac0d95504d94b46908ccb7bdc1cf7c59670f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 11 Mar 2016 23:33:01 +0100 Subject: [PATCH 134/222] blockchain receipts rlp generation --- ethcore/src/client/client.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 77cf105ed..4642a0103 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -470,8 +470,12 @@ impl BlockChainClient for Client where V: Verifier { self.state_db.lock().unwrap().state(hash) } - fn block_receipts(&self, _hash: &H256) -> Option { - None + fn block_receipts(&self, hash: &H256) -> Option { + self.chain.block_receipts(hash).and_then(|receipts| { + let mut rlp = RlpStream::new(); + rlp.append(&receipts); + Some(rlp.out()) + }) } fn import_block(&self, bytes: Bytes) -> ImportResult { From 7cfe1d258bebcdf45cad0624b3cd858396464dd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 11 Mar 2016 23:49:32 +0100 Subject: [PATCH 135/222] Adding more detailed logging --- util/src/rlp/rlpin.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/src/rlp/rlpin.rs b/util/src/rlp/rlpin.rs index d58fa95e8..9d3fcb2fa 100644 --- a/util/src/rlp/rlpin.rs +++ b/util/src/rlp/rlpin.rs @@ -24,7 +24,7 @@ impl<'a> From> for Rlp<'a> { } /// Data-oriented view onto trusted rlp-slice. -/// +/// /// Unlikely to `UntrustedRlp` doesn't bother you with error /// handling. It assumes that you know what you are doing. #[derive(Debug)] @@ -44,7 +44,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view { type Data = &'a [u8]; type Item = Rlp<'a>; type Iter = RlpIterator<'a, 'view>; - + /// Create a new instance of `Rlp` fn new(bytes: &'a [u8]) -> Rlp<'a> { Rlp { @@ -116,7 +116,7 @@ impl<'a, 'view> View<'a, 'view> for Rlp<'a> where 'a: 'view { impl <'a, 'view> Rlp<'a> where 'a: 'view { fn view_as_val(r: &R) -> T where R: View<'a, 'view>, T: RlpDecodable { let res: Result = r.as_val(); - res.unwrap_or_else(|_| panic!()) + res.unwrap_or_else(|e| panic!("DecodeError: {}", e)) } /// Decode into an object From 12e1abdfb70e3cd2a662cbcbb4b44bbbe3c56f41 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:51:17 +0100 Subject: [PATCH 136/222] Port fixes to new infrastrtcutre. --- util/src/journaldb/optiononedb.rs | 572 +++++++++++++++++++++++++----- 1 file changed, 492 insertions(+), 80 deletions(-) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index dfa7c8ec1..b51d0819d 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -25,6 +25,34 @@ use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; +#[derive(Clone, PartialEq, Eq)] +struct RefInfo { + queue_refs: usize, + in_archive: bool, +} + +impl HeapSizeOf for RefInfo { + fn heap_size_of_children(&self) -> usize { 0 } +} + +impl fmt::Display for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +impl fmt::Debug for RefInfo { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}+{}", self.queue_refs, if self.in_archive {1} else {0}) + } +} + +#[derive(Clone, PartialEq, Eq)] +enum RemoveFrom { + Queue, + Archive, +} + /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. 
/// @@ -35,7 +63,7 @@ use std::env; pub struct OptionOneDB { overlay: MemoryDB, backing: Arc, - counters: Option>>>, + refs: Option>>>, } // all keys must be at least 12 bytes @@ -62,11 +90,11 @@ impl OptionOneDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); + let refs = Some(Arc::new(RwLock::new(OptionOneDB::read_refs(&backing)))); OptionOneDB { overlay: MemoryDB::new(), backing: Arc::new(backing), - counters: counters, + refs: refs, } } @@ -91,11 +119,14 @@ impl OptionOneDB { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, refs: &mut HashMap, batch: &DBTransaction, trace: bool) { for &(ref h, ref d) in inserts { - if let Some(c) = counters.get_mut(h) { + if let Some(c) = refs.get_mut(h) { // already counting. increment. - *c += 1; + c.queue_refs += 1; + if trace { + trace!(target: "jdb.fine", " insert({}): In queue: Incrementing refs to {}", h, c.queue_refs); + } continue; } @@ -103,7 +134,10 @@ impl OptionOneDB { if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { // already in the backing DB. start counting, and remember it was already in. Self::set_already_in(batch, &h); - counters.insert(h.clone(), 1); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: true}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, in DB: Recording and inserting into queue", h); + } continue; } @@ -111,60 +145,104 @@ impl OptionOneDB { //Self::reset_already_in(&h); assert!(!Self::is_already_in(backing, &h)); batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: false}); + if trace { + trace!(target: "jdb.fine", " insert({}): New to queue, not in DB: Inserting into queue and DB", h); + } } } - fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { - trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); + fn replay_keys(inserts: &[H256], backing: &Database, refs: &mut HashMap) { + trace!(target: "jdb.fine", "replay_keys: inserts={:?}, refs={:?}", inserts, refs); for h in inserts { - if let Some(c) = counters.get_mut(h) { + if let Some(c) = refs.get_mut(h) { // already counting. increment. - *c += 1; + c.queue_refs += 1; continue; } // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in. 
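// An illustrative reduction (not part of the patch) of the insert-side
// bookkeeping described above; `in_backing` stands in for the already-in check:
fn on_insert(entry: Option<RefInfo>, in_backing: bool) -> RefInfo {
	match entry {
		// the queue already counts this key: bump its refcount
		Some(mut info) => { info.queue_refs += 1; info }
		// first journal entry for the key: record whether the backing DB had it
		None => RefInfo { queue_refs: 1, in_archive: in_backing },
	}
}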
- if Self::is_already_in(backing, h) { - trace!("replace_keys: Key {} was already in!", h); - counters.insert(h.clone(), 1); - } + refs.insert(h.clone(), RefInfo{queue_refs: 1, in_archive: Self::is_already_in(backing, h)}); } - trace!("replay_keys: (end) counters={:?}", counters); + trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { - for h in deletes.into_iter() { - let mut n: Option = None; - if let Some(c) = counters.get_mut(&h) { - if *c > 1 { - *c -= 1; + fn kill_keys(deletes: &Vec, refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + // with a kill on {queue_refs: 1, in_archive: true}, we have two options: + // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) + // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) + // (the latter option would then mean removing the RefInfo, since it would no longer be counted in the queue.) + // both are valid, but we switch between them depending on context. + // All inserts in queue (i.e. those which may yet be reverted) have an entry in refs. + for h in deletes.iter() { + let mut n: Option = None; + if let Some(c) = refs.get_mut(h) { + if c.in_archive && from == RemoveFrom::Archive { + c.in_archive = false; + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Reducing to queue only and recording", h); + } + continue; + } else if c.queue_refs > 1 { + c.queue_refs -= 1; + if trace { + trace!(target: "jdb.fine", " kill({}): In queue > 1 refs: Decrementing ref count to {}", h, c.queue_refs); + } continue; } else { - n = Some(*c); + n = Some(c.clone()); } } match n { - Some(i) if i == 1 => { - counters.remove(&h); - Self::reset_already_in(batch, &h); + Some(RefInfo{queue_refs: 1, in_archive: true}) => { + refs.remove(h); + Self::reset_already_in(batch, h); + if trace { + trace!(target: "jdb.fine", " kill({}): In archive, 1 in queue: Removing from queue and leaving in archive", h); + } + } + Some(RefInfo{queue_refs: 1, in_archive: false}) => { + refs.remove(h); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in archive, only 1 ref in queue: Removing from queue and DB", h); + } } None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(&h.bytes()).expect("Low-level database error. 
Some issue with your hard disk?"); + if trace { + trace!(target: "jdb.fine", " kill({}): Not in queue - MUST BE IN ARCHIVE: Removing from DB", h); + } } - _ => panic!("Invalid value in counters: {:?}", n), + _ => panic!("Invalid value in refs: {:?}", n), } } } + #[cfg(test)] + fn can_reconstruct_refs(&self) -> bool { + let reconstructed = Self::read_refs(&self.backing); + let refs = self.refs.as_ref().unwrap().write().unwrap(); + if *refs != reconstructed { + let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); + warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); + false + } else { + true + } + } + fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut counters = HashMap::new(); + fn read_refs(db: &Database) -> HashMap { + let mut refs = HashMap::new(); if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val); loop { @@ -176,10 +254,9 @@ impl OptionOneDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - trace!("read_counters: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); - Self::replay_keys(&inserts, db, &mut counters); + Self::replay_keys(&inserts, db, &mut refs); index += 1; }; if index == 0 || era == 0 { @@ -188,10 +265,9 @@ impl OptionOneDB { era -= 1; } } - trace!("Recovered {} counters", counters.len()); - counters + refs } -} + } impl HashDB for OptionOneDB { fn keys(&self) -> HashMap { @@ -243,23 +319,23 @@ impl JournalDB for OptionOneDB { Box::new(OptionOneDB { overlay: MemoryDB::new(), backing: self.backing.clone(), - counters: self.counters.clone(), + refs: self.refs.clone(), }) } - fn mem_used(&self) -> usize { - self.overlay.mem_used() + match self.counters { - Some(ref c) => c.read().unwrap().heap_size_of_children(), - None => 0 - } - } - fn is_empty(&self) -> bool { self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() } + fn mem_used(&self) -> usize { + self.overlay.mem_used() + match self.refs { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } + } + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -304,9 +380,9 @@ impl JournalDB for OptionOneDB { // // record new commit's details. 
- trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let mut refs = self.refs.as_ref().unwrap().write().unwrap(); let batch = DBTransaction::new(); + let trace = false; { let mut index = 0usize; let mut last; @@ -323,6 +399,11 @@ impl JournalDB for OptionOneDB { } let drained = self.overlay.drain(); + + if trace { + trace!(target: "jdb", "commit: #{} ({}), end era: {:?}", now, id, end); + } + let removes: Vec = drained .iter() .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) @@ -332,6 +413,9 @@ impl JournalDB for OptionOneDB { .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) .collect(); + + // TODO: check all removes are in the db. + let mut r = RlpStream::new_list(3); r.append(id); @@ -344,7 +428,12 @@ impl JournalDB for OptionOneDB { r.begin_list(inserts.len()); inserts.iter().foreach(|&(k, _)| {r.append(&k);}); r.append(&removes); - Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); + Self::insert_keys(&inserts, &self.backing, &mut refs, &batch, trace); + if trace { + let ins = inserts.iter().map(|&(k, _)| k).collect::>(); + trace!(target: "jdb.ops", " Inserts: {:?}", ins); + trace!(target: "jdb.ops", " Deletes: {:?}", removes); + } try!(batch.put(&last, r.as_raw())); try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); } @@ -363,17 +452,64 @@ impl JournalDB for OptionOneDB { })) { let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); - let deletes: Vec = rlp.val_at(2); - // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical - Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); + + if canon_id == rlp.val_at(0) { + // Collect keys to be removed. Canon block - remove the (enacted) deletes. + let deletes: Vec = rlp.val_at(2); + if trace { + trace!(target: "jdb.ops", " Expunging: {:?}", deletes); + } + Self::kill_keys(&deletes, &mut refs, &batch, RemoveFrom::Archive, trace); + + if trace { + trace!(target: "jdb.ops", " Finalising: {:?}", inserts); + } + for k in inserts.iter() { + match refs.get(k).cloned() { + None => { + // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert + // already expunged from the queue (which is allowed since the key is in the archive). + // leave well alone. + } + Some( RefInfo{queue_refs: 1, in_archive: false} ) => { + // just delete the refs entry. + refs.remove(k); + } + Some( RefInfo{queue_refs: x, in_archive: false} ) => { + // must set already in; , + Self::set_already_in(&batch, k); + refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); + } + Some( RefInfo{queue_refs: _, in_archive: true} ) => { + // Invalid! Reinserted the same key twice. + warn!("Key {} inserted twice into same fork.", k); + } + } + } + } else { + // Collect keys to be removed. Non-canon block - remove the (reverted) inserts. 
+ if trace { + trace!(target: "jdb.ops", " Reverting: {:?}", inserts); + } + Self::kill_keys(&inserts, &mut refs, &batch, RemoveFrom::Queue, trace); + } + try!(batch.delete(&last)); index += 1; } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + if trace { + trace!(target: "jdb", "delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + } } try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); + + // Comment out for now. TODO: automatically enable in tests. + + if trace { + trace!(target: "jdb", "OK: {:?}", refs.clone()); + } + Ok(0) } } @@ -383,26 +519,34 @@ mod tests { use common::*; use super::*; use hashdb::*; - use journaldb::traits::JournalDB; + use log::init_log; #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&x); jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); let x = jdb.insert(b"X"); jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&x)); } @@ -410,29 +554,35 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.remove(&h); jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&h)); jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&h)); } #[test] fn complex() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); @@ -440,6 +590,7 @@ mod tests { jdb.remove(&bar); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); @@ -447,17 +598,20 @@ mod tests { let foo = jdb.insert(b"foo"); jdb.remove(&baz); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.remove(&foo); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); assert!(!jdb.exists(&bar)); assert!(!jdb.exists(&baz)); @@ -466,26 +620,30 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); assert!(jdb.exists(&baz)); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); @@ -494,39 +652,117 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = JournalDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); assert!(jdb.exists(&foo)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); jdb.commit(3, &b"2".sha3(), Some((0, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } #[test] - fn fork_same_key() { - // history is 1 - let mut jdb = OptionOneDB::new_temp(); + fn fork_same_key_one() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.insert(b"foo"); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); } + #[test] + fn fork_same_key_other() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(1, &b"1c".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + assert!(jdb.exists(&foo)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + 
assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + } + + #[test] + fn fork_ins_del_ins() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(2, &b"2b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3a".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3b".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(4, &b"4a".sha3(), Some((2, b"2a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5a".sha3(), Some((3, b"3a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } #[test] fn reopen() { @@ -535,81 +771,257 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); foo }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } #[test] - fn reopen_remove() { + fn insert_delete_insert_delete_insert_expunge() { + init_log(); let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn forked_insert_delete_insert_delete_insert_expunge() { + init_log(); + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(1, &b"1a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + 
jdb.remove(&foo); + jdb.commit(1, &b"1b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(2, &b"2b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&foo); + jdb.commit(3, &b"3b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(4, &b"4b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // expunge foo + jdb.commit(5, &b"5".sha3(), Some((1, b"1a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn broken_assert() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); // BROKEN + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(!jdb.exists(&foo)); + } + + #[test] + fn reopen_test() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 4 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(3, &b"3".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(4, &b"4".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + // foo is ancient history. + + jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(5, &b"5".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.remove(&foo); + jdb.remove(&bar); + jdb.commit(6, &b"6".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.insert(b"foo"); + jdb.insert(b"bar"); + jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + } + + #[test] + fn reopen_remove_three() { + init_log(); + + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + + let foo = b"foo".sha3(); + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 - let foo = jdb.insert(b"foo"); + jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); // foo is ancient history. 
- jdb.insert(b"foo"); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - foo - }; - - { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); - jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); + + jdb.insert(b"foo"); + jdb.commit(3, &b"3".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); - jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); - jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); + jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + assert!(jdb.exists(&foo)); + + // incantation to reopen the db + }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + + jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } } + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&foo); let baz = jdb.insert(b"baz"); jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); jdb.remove(&bar); jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); (foo, bar, baz) }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); assert!(!jdb.exists(&baz)); assert!(!jdb.exists(&bar)); From e6a273f3a796f3353985fdf34a32d26838e3c5a9 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:53:54 +0100 Subject: [PATCH 137/222] Fix tests. 
--- util/src/journaldb/optiononedb.rs | 42 +++++++++++++++---------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index b51d0819d..567f620b1 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -524,7 +524,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -554,7 +554,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -577,7 +577,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -620,7 +620,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -652,7 +652,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalDB::new_temp(); + let mut jdb = OptionOneDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -677,7 +677,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -705,7 +705,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -733,7 +733,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -771,7 +771,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -781,14 +781,14 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -803,7 +803,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -832,7 +832,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = 
OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -881,7 +881,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -912,7 +912,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -952,7 +952,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -973,7 +973,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -981,14 +981,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -1001,7 +1001,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -1019,7 +1019,7 @@ mod tests { }; { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); + let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); From 874393ba06dbce3c91ab59afb3b110c37c1b1896 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 09:57:57 +0100 Subject: [PATCH 138/222] Fix tests, --- util/src/journaldb/archivedb.rs | 2 +- util/src/journaldb/optiononedb.rs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index e7da1b737..a8b9c1f74 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -364,7 +364,7 @@ mod tests { fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let (foo, bar, baz) = { + let (foo, _, _) = { let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/optiononedb.rs index 567f620b1..e44c337db 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/optiononedb.rs @@ -518,6 +518,7 @@ impl JournalDB for OptionOneDB { mod tests { use common::*; use super::*; + use super::super::traits::JournalDB; use hashdb::*; use log::init_log; 
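The one-line `use super::super::traits::JournalDB;` fix above works because Rust only resolves a trait method when the trait is in scope: OptionOneDB implements JournalDB, but without the import the test calls to `commit` do not compile. A standalone illustration of the rule (toy names, not project code):

	mod db {
	    pub trait Journal { fn commit_era(&mut self, _era: u64) {} }
	    pub struct Db;
	    impl Journal for Db {}
	}

	fn main() {
	    use db::Journal; // without this line, `commit_era` fails to resolve
	    let mut d = db::Db;
	    d.commit_era(0);
	}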
From e10457d2352bc8a18c266f79d2ab224d62821836 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 09:59:57 +0100 Subject: [PATCH 139/222] Bumping clippy --- Cargo.lock | 12 ++++++------ Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f26ac923..ce508c937 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,7 +2,7 @@ name = "parity" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", @@ -94,7 +94,7 @@ dependencies = [ [[package]] name = "clippy" -version = "0.0.49" +version = "0.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -207,7 +207,7 @@ dependencies = [ name = "ethcore" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", @@ -233,7 +233,7 @@ dependencies = [ name = "ethcore-rpc" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 0.9.99", "ethcore 0.9.99", "ethcore-util 0.9.99", @@ -256,7 +256,7 @@ dependencies = [ "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -288,7 +288,7 @@ dependencies = [ name = "ethsync" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", diff --git a/Cargo.toml b/Cargo.toml index 77d1e57ae..efe794d5b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 5ef83842f..be4212f5d 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -17,7 +17,7 @@ ethcore-util = { path = "../util" } evmjit = { path = "../evmjit", optional = true } ethash = { path = "../ethash" 
} num_cpus = "0.2" -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } crossbeam = "0.1.5" lazy_static = "0.1" ethcore-devtools = { path = "../devtools" } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index a1f154ca8..900b10548 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,7 +18,7 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index cf0027368..6022beb9c 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -10,7 +10,7 @@ authors = ["Ethcore Date: Sat, 12 Mar 2016 10:07:55 +0100 Subject: [PATCH 140/222] Fixing warnings --- ethcore/src/client/test_client.rs | 8 +++++++- util/src/keys/store.rs | 8 +++++++- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 207f1090f..9b311081c 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -55,9 +55,15 @@ pub enum EachBlockWith { UncleAndTransaction } +impl Default for TestBlockChainClient { + fn default() -> Self { + TestBlockChainClient::new() + } +} + impl TestBlockChainClient { /// Creates new test client. - pub fn new() -> TestBlockChainClient { + pub fn new() -> Self { let mut client = TestBlockChainClient { blocks: RwLock::new(HashMap::new()), diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 6a5efc87d..37786be8b 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -120,9 +120,15 @@ impl AccountProvider for AccountService { } } +impl Default for AccountService { + fn default() -> Self { + AccountService::new() + } +} + impl AccountService { /// New account service with the default location - pub fn new() -> AccountService { + pub fn new() -> Self { let secret_store = RwLock::new(SecretStore::new()); secret_store.write().unwrap().try_import_existing(); AccountService { From 4b6e1dd4d2e4e62efa92db028cb77bfbdbfb51c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 10:07:55 +0100 Subject: [PATCH 141/222] Fixing warnings --- ethcore/src/block_queue.rs | 2 +- ethcore/src/chainfilter/tests.rs | 8 +++++- ethcore/src/client/test_client.rs | 8 +++++- ethcore/src/externalities.rs | 8 +++++- ethcore/src/verification/verification.rs | 8 +++++- parity/main.rs | 17 +++++++----- util/src/journaldb/archivedb.rs | 2 +- util/src/keys/store.rs | 10 +++++-- util/src/network/connection.rs | 34 ++++++++++++++++++------ 9 files changed, 74 insertions(+), 23 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 50db23dfe..042df1dc1 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -523,7 +523,7 @@ mod tests { let engine = spec.to_engine().unwrap(); let mut config = BlockQueueConfig::default(); config.max_mem_use = super::MIN_MEM_LIMIT; // empty queue uses about 15000 - let mut queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); + let queue = BlockQueue::new(config, Arc::new(engine), IoChannel::disconnected()); assert!(!queue.queue_info().is_full()); let mut blocks = get_good_dummy_block_seq(50); for b in blocks.drain(..) 
{ diff --git a/ethcore/src/chainfilter/tests.rs b/ethcore/src/chainfilter/tests.rs index 08af44720..7dac29f11 100644 --- a/ethcore/src/chainfilter/tests.rs +++ b/ethcore/src/chainfilter/tests.rs @@ -28,9 +28,15 @@ pub struct MemoryCache { blooms: HashMap, } +impl Default for MemoryCache { + fn default() -> Self { + MemoryCache::new() + } +} + impl MemoryCache { /// Default constructor for MemoryCache - pub fn new() -> MemoryCache { + pub fn new() -> Self { MemoryCache { blooms: HashMap::new() } } diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 207f1090f..9b311081c 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -55,9 +55,15 @@ pub enum EachBlockWith { UncleAndTransaction } +impl Default for TestBlockChainClient { + fn default() -> Self { + TestBlockChainClient::new() + } +} + impl TestBlockChainClient { /// Creates new test client. - pub fn new() -> TestBlockChainClient { + pub fn new() -> Self { let mut client = TestBlockChainClient { blocks: RwLock::new(HashMap::new()), diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 598921580..d37bc20fb 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -301,8 +301,14 @@ mod tests { env_info: EnvInfo } + impl Default for TestSetup { + fn default() -> Self { + TestSetup::new() + } + } + impl TestSetup { - fn new() -> TestSetup { + fn new() -> Self { TestSetup { state: get_temp_state(), engine: get_test_spec().to_engine().unwrap(), diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index ed3db3791..60cbed56c 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -255,8 +255,14 @@ mod tests { numbers: HashMap, } + impl Default for TestBlockChain { + fn default() -> Self { + TestBlockChain::new() + } + } + impl TestBlockChain { - pub fn new() -> TestBlockChain { + pub fn new() -> Self { TestBlockChain { blocks: HashMap::new(), numbers: HashMap::new(), diff --git a/parity/main.rs b/parity/main.rs index 840921dce..85e29a0ae 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -315,7 +315,7 @@ impl Configuration { fn init_nodes(&self, spec: &Spec) -> Vec { let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; if let Some(ref x) = self.args.flag_bootnodes { - r.extend(x.split(",").map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s)))); + r.extend(x.split(',').map(|s| Self::normalize_enode(s).unwrap_or_else(|| die!("{}: Invalid node address format given for a boot node.", s)))); } r } @@ -328,7 +328,7 @@ impl Configuration { let host = IpAddr::from_str(host).unwrap_or_else(|_| die!("Invalid host given with `--nat extip:{}`", host)); Some(SocketAddr::new(host, self.args.flag_port)) } else { - listen_address.clone() + listen_address }; (listen_address, public_address) } @@ -389,7 +389,7 @@ impl Configuration { } if self.args.cmd_list { println!("Known addresses:"); - for &(addr, _) in secret_store.accounts().unwrap().iter() { + for &(addr, _) in &secret_store.accounts().unwrap() { println!("{:?}", addr); } } @@ -407,7 +407,11 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", 
id))).unwrap_or(spec.network_id()); + sync_config.network_id = self.args.flag_networkid.as_ref().map_or(spec.network_id(), |id| { + U256::from_str(id).unwrap_or_else(|_| { + die!("{}: Invalid index given with --networkid", id) + }) + }); // Build client let mut client_config = ClientConfig::default(); @@ -422,8 +426,7 @@ impl Configuration { } } client_config.pruning = match self.args.flag_pruning.as_str() { - "" => journaldb::Algorithm::Archive, - "archive" => journaldb::Algorithm::Archive, + "" | "archive" => journaldb::Algorithm::Archive, "pruned" => journaldb::Algorithm::EarlyMerge, // "fast" => journaldb::Algorithm::OverlayRecent, // TODO: @arkpar uncomment this once option 2 is merged. // "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged. @@ -453,7 +456,7 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(",").collect()); + let server_handler = setup_rpc_server(service.client(), sync.clone(), account_service.clone(), &url, cors, apis.split(',').collect()); if let Some(handler) = server_handler { panic_handler.forward_from(handler.deref()); } diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index e7da1b737..f92058f92 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -364,7 +364,7 @@ mod tests { fn reopen_fork() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let (foo, bar, baz) = { + let (foo, _bar, _baz) = { let mut jdb = ArchiveDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 6a5efc87d..d514863bb 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -120,9 +120,15 @@ impl AccountProvider for AccountService { } } +impl Default for AccountService { + fn default() -> Self { + AccountService::new() + } +} + impl AccountService { /// New account service with the default location - pub fn new() -> AccountService { + pub fn new() -> Self { let secret_store = RwLock::new(SecretStore::new()); secret_store.write().unwrap().try_import_existing(); AccountService { @@ -568,7 +574,7 @@ mod tests { let temp = RandomTempPath::create_dir(); let mut sstore = SecretStore::new_test(&temp); let addr = sstore.new_account("test").unwrap(); - let _ok = sstore.unlock_account(&addr, "test").unwrap(); + sstore.unlock_account(&addr, "test").unwrap(); let secret = sstore.account_secret(&addr).unwrap(); let kp = KeyPair::from_secret(secret).unwrap(); assert_eq!(Address::from(kp.public().sha3()), addr); diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index fe65be6d1..a560c1a91 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -160,12 +160,12 @@ impl Connection { } } - /// Get socket token + /// Get socket token pub fn token(&self) -> StreamToken { self.token } - /// Replace socket token + /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.token = token; } @@ -261,13 +261,13 @@ pub struct EncryptedConnection { } impl EncryptedConnection { - - /// Get socket token + + /// Get socket token pub fn token(&self) -> StreamToken { self.connection.token } - /// Replace socket token 
+ /// Replace socket token pub fn set_token(&mut self, token: StreamToken) { self.connection.set_token(token); } @@ -513,8 +513,14 @@ mod tests { buf_size: usize, } + impl Default for TestSocket { + fn default() -> Self { + TestSocket::new() + } + } + impl TestSocket { - fn new() -> TestSocket { + fn new() -> Self { TestSocket { read_buffer: vec![], write_buffer: vec![], @@ -593,8 +599,14 @@ mod tests { type TestConnection = GenericConnection; + impl Default for TestConnection { + fn default() -> Self { + TestConnection::new() + } + } + impl TestConnection { - pub fn new() -> TestConnection { + pub fn new() -> Self { TestConnection { token: 999998888usize, socket: TestSocket::new(), @@ -609,8 +621,14 @@ mod tests { type TestBrokenConnection = GenericConnection; + impl Default for TestBrokenConnection { + fn default() -> Self { + TestBrokenConnection::new() + } + } + impl TestBrokenConnection { - pub fn new() -> TestBrokenConnection { + pub fn new() -> Self { TestBrokenConnection { token: 999998888usize, socket: TestBrokenSocket { error: "test broken socket".to_owned() }, From 9424d530627ba0d185bb505a737f4b4121c526b6 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 10:37:27 +0100 Subject: [PATCH 142/222] Update lib.rs --- miner/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 10da070a4..9c2ad9ba5 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -88,7 +88,7 @@ pub trait MinerService : Send + Sync { /// Removes all transactions from the queue and restart mining operation. fn clear_and_reset(&self, chain: &BlockChainClient); - /// called when blocks are imported to chain, updates transactions queue. + /// Called when blocks are imported to chain, updates transactions queue. fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], retracted: &[H256]); /// New chain head event. Restart mining operation. From c6dc6c0c41c45ee4279952120fb4112c55cd9932 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 10:41:35 +0100 Subject: [PATCH 143/222] One more warning --- util/src/journaldb/overlay.rs | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/util/src/journaldb/overlay.rs b/util/src/journaldb/overlay.rs index e91709041..76eff9aa1 100644 --- a/util/src/journaldb/overlay.rs +++ b/util/src/journaldb/overlay.rs @@ -33,14 +33,14 @@ use super::JournalDB; /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. /// -/// There are two memory overlays: -/// - Transaction overlay contains current transaction data. It is merged with with history +/// There are two memory overlays: +/// - Transaction overlay contains current transaction data. It is merged with with history /// overlay on each `commit()` -/// - History overlay contains all data inserted during the history period. When the node +/// - History overlay contains all data inserted during the history period. When the node /// in the overlay becomes ancient it is written to disk on `commit()` /// -/// There is also a journal maintained in memory and on the disk as well which lists insertions -/// and removals for each commit during the history period. This is used to track +/// There is also a journal maintained in memory and on the disk as well which lists insertions +/// and removals for each commit during the history period. 
This is used to track /// data nodes that go out of history scope and must be written to disk. /// /// Commit workflow: @@ -50,12 +50,12 @@ use super::JournalDB; /// 3. Clear the transaction overlay. /// 4. For a canonical journal record that becomes ancient inserts its insertions into the disk DB /// 5. For each journal record that goes out of the history scope (becomes ancient) remove its -/// insertions from the history overlay, decreasing the reference counter and removing entry if +/// insertions from the history overlay, decreasing the reference counter and removing entry if /// if reaches zero. -/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if +/// 6. For a canonical journal record that becomes ancient delete its removals from the disk only if /// the removed key is not present in the history overlay. /// 7. Delete ancient record from memory and disk. -/// +/// pub struct JournalOverlayDB { transaction_overlay: MemoryDB, backing: Arc, @@ -223,7 +223,7 @@ impl JournalDB for JournalOverlayDB { let mut tx = self.transaction_overlay.drain(); let inserted_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c > 0 { Some(k.clone()) } else { None }).collect(); let removed_keys: Vec<_> = tx.iter().filter_map(|(k, &(_, c))| if c < 0 { Some(k.clone()) } else { None }).collect(); - // Increase counter for each inserted key no matter if the block is canonical or not. + // Increase counter for each inserted key no matter if the block is canonical or not. let insertions = tx.drain().filter_map(|(k, (v, c))| if c > 0 { Some((k, v)) } else { None }); r.append(id); r.begin_list(inserted_keys.len()); @@ -236,7 +236,7 @@ impl JournalDB for JournalOverlayDB { r.append(&removed_keys); let mut k = RlpStream::new_list(3); - let index = journal_overlay.journal.get(&now).map(|j| j.len()).unwrap_or(0); + let index = journal_overlay.journal.get(&now).map_or(0, |j| j.len()); k.append(&now); k.append(&index); k.append(&&PADDING[..]); @@ -345,14 +345,14 @@ impl HashDB for JournalOverlayDB { self.lookup(key).is_some() } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H256 { self.transaction_overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.transaction_overlay.emplace(key, value); + self.transaction_overlay.emplace(key, value); } - fn kill(&mut self, key: &H256) { - self.transaction_overlay.kill(key); + fn kill(&mut self, key: &H256) { + self.transaction_overlay.kill(key); } } @@ -749,7 +749,7 @@ mod tests { assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } - + #[test] fn reopen_test() { let mut dir = ::std::env::temp_dir(); @@ -784,7 +784,7 @@ mod tests { jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } - + #[test] fn reopen_remove_three() { init_log(); @@ -838,7 +838,7 @@ mod tests { assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); From 65dadcc2a2bfe9c5d382b2d2716fee857318cf68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 10:44:24 +0100 Subject: [PATCH 144/222] Adding todos --- ethcore/src/client/client.rs | 5 ++--- ethcore/src/client/mod.rs | 2 ++ 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4341e1898..1a88af951 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -350,16 +350,15 @@ impl Client where V: Verifier { } } - -// 
TODO: need MinerService MinerIoHandler - impl BlockChainClient for Client where V: Verifier { + // TODO [todr] Should be moved to miner crate eventually. fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { block.try_seal(self.engine.deref().deref(), seal) } + // TODO [todr] Should be moved to miner crate eventually. fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { let engine = self.engine.deref().deref(); let h = self.chain.best_block_hash(); diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index d97f0d8b9..c13cfeee1 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -102,9 +102,11 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec; + // TODO [todr] Should be moved to miner crate eventually. /// Returns ClosedBlock prepared for sealing. fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option; + // TODO [todr] Should be moved to miner crate eventually. /// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error. fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result; From e1c0177932efef0b75c58e2e9ce3058eeb2a40cd Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 10:44:48 +0100 Subject: [PATCH 145/222] Update main.rs --- parity/main.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 9e6b50e08..1350aca45 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -248,8 +248,14 @@ fn setup_rpc_server( #[cfg(not(feature = "rpc"))] fn setup_rpc_server( - _client: Arc, _sync: Arc, _secret_store: Arc, _miner: Arc, - _url: &str, _cors_domain: &str, _apis: Vec<&str>) -> Option> { + _client: Arc, + _sync: Arc, + _secret_store: Arc, + _miner: Arc, + _url: &str, + _cors_domain: &str, + _apis: Vec<&str> +) -> Option> { None } From d7039b72e237fb739dc98e344aa06ebf8cd49028 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 10:48:28 +0100 Subject: [PATCH 146/222] Update archivedb.rs --- util/src/journaldb/archivedb.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index c8c29e765..8db239cff 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -305,7 +305,6 @@ mod tests { assert!(jdb.exists(&foo)); } - #[test] fn reopen() { let mut dir = ::std::env::temp_dir(); @@ -364,6 +363,7 @@ mod tests { jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); } } + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); @@ -406,6 +406,5 @@ mod tests { let state = jdb.state(&key); assert!(state.is_some()); } - } } From 82a881005740f2e1ddc2f862b2f617b3e5773597 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:19:42 +0100 Subject: [PATCH 147/222] Rename into something that is a little more descriptive. 
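Since the modules are renamed here, the factory in util/src/journaldb/mod.rs must dispatch on the new paths; the intended end state is roughly as follows (RefCounted still being unimplemented at this point):

	pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
	    match algorithm {
	        Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
	        Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)),
	        Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)),
	        _ => unimplemented!(),
	    }
	}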
--- .../{optiononedb.rs => earlymergedb.rs} | 48 +++++++------- util/src/journaldb/mod.rs | 8 +-- .../{overlay.rs => overlayrecentdb.rs} | 66 +++++++++---------- 3 files changed, 61 insertions(+), 61 deletions(-) rename util/src/journaldb/{optiononedb.rs => earlymergedb.rs} (94%) rename util/src/journaldb/{overlay.rs => overlayrecentdb.rs} (93%) diff --git a/util/src/journaldb/optiononedb.rs b/util/src/journaldb/earlymergedb.rs similarity index 94% rename from util/src/journaldb/optiononedb.rs rename to util/src/journaldb/earlymergedb.rs index dfa7c8ec1..48083f113 100644 --- a/util/src/journaldb/optiononedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -32,7 +32,7 @@ use std::env; /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. -pub struct OptionOneDB { +pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, counters: Option>>>, @@ -44,9 +44,9 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl OptionOneDB { +impl EarlyMergeDB { /// Create a new instance from file - pub fn new(path: &str) -> OptionOneDB { + pub fn new(path: &str) -> EarlyMergeDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -62,8 +62,8 @@ impl OptionOneDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let counters = Some(Arc::new(RwLock::new(OptionOneDB::read_counters(&backing)))); - OptionOneDB { + let counters = Some(Arc::new(RwLock::new(EarlyMergeDB::read_counters(&backing)))); + EarlyMergeDB { overlay: MemoryDB::new(), backing: Arc::new(backing), counters: counters, @@ -72,7 +72,7 @@ impl OptionOneDB { /// Create a new instance with an anonymous temporary database. 
#[cfg(test)] - fn new_temp() -> OptionOneDB { + fn new_temp() -> EarlyMergeDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -193,7 +193,7 @@ impl OptionOneDB { } } -impl HashDB for OptionOneDB { +impl HashDB for EarlyMergeDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -238,9 +238,9 @@ impl HashDB for OptionOneDB { } } -impl JournalDB for OptionOneDB { +impl JournalDB for EarlyMergeDB { fn spawn(&self) -> Box { - Box::new(OptionOneDB { + Box::new(EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), counters: self.counters.clone(), @@ -369,11 +369,11 @@ impl JournalDB for OptionOneDB { try!(batch.delete(&last)); index += 1; } - trace!("OptionOneDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); + trace!("EarlyMergeDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } try!(self.backing.write(batch)); -// trace!("OptionOneDB::commit() deleted {} nodes", deletes); +// trace!("EarlyMergeDB::commit() deleted {} nodes", deletes); Ok(0) } } @@ -388,7 +388,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -410,7 +410,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.exists(&h)); @@ -428,7 +428,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -466,7 +466,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -494,7 +494,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -513,7 +513,7 @@ mod tests { #[test] fn fork_same_key() { // history is 1 - let mut jdb = OptionOneDB::new_temp(); + let mut jdb = EarlyMergeDB::new_temp(); jdb.commit(0, &b"0".sha3(), None).unwrap(); let foo = jdb.insert(b"foo"); @@ -535,7 +535,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -544,13 +544,13 @@ mod tests { }; { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); } { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -564,7 +564,7 @@ mod tests { dir.push(H32::random().hex()); let foo = { - let mut jdb = OptionOneDB::new(dir.to_str().unwrap()); + let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ 
-578,7 +578,7 @@ mod tests {
 	};
 	{
-		let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
+		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap());
 		jdb.remove(&foo);
 		jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap();
 		assert!(jdb.exists(&foo));
@@ -593,7 +593,7 @@ mod tests {
 	let mut dir = ::std::env::temp_dir();
 	dir.push(H32::random().hex());
 	let (foo, bar, baz) = {
-		let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
+		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap());
 		// history is 1
 		let foo = jdb.insert(b"foo");
 		let bar = jdb.insert(b"bar");
@@ -608,7 +608,7 @@ mod tests {
 	};
 	{
-		let mut jdb = OptionOneDB::new(dir.to_str().unwrap());
+		let mut jdb = EarlyMergeDB::new(dir.to_str().unwrap());
 		jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap();
 		assert!(jdb.exists(&foo));
 		assert!(!jdb.exists(&baz));
diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs
index cf8e7d392..724e61dfb 100644
--- a/util/src/journaldb/mod.rs
+++ b/util/src/journaldb/mod.rs
@@ -21,8 +21,8 @@ use common::*;
 /// Export the journaldb module.
 pub mod traits;
 mod archivedb;
-mod optiononedb;
-mod overlay;
+mod earlymergedb;
+mod overlayrecentdb;
 /// Export the JournalDB trait.
 pub use self::traits::JournalDB;
@@ -73,8 +73,8 @@ impl fmt::Display for Algorithm {
 pub fn new(path: &str, algorithm: Algorithm) -> Box<JournalDB> {
 	match algorithm {
 		Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)),
-		Algorithm::EarlyMerge => Box::new(optiononedb::OptionOneDB::new(path)),
-		Algorithm::OverlayRecent => Box::new(overlay::JournalOverlayDB::new(path)),
+		Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)),
+		Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)),
 		_ => unimplemented!(),
 	}
 }
diff --git a/util/src/journaldb/overlay.rs b/util/src/journaldb/overlayrecentdb.rs
similarity index 93%
rename from util/src/journaldb/overlay.rs
rename to util/src/journaldb/overlayrecentdb.rs
index e91709041..8dd4d1752 100644
--- a/util/src/journaldb/overlay.rs
+++ b/util/src/journaldb/overlayrecentdb.rs
@@ -56,7 +56,7 @@ use super::JournalDB;
 /// the removed key is not present in the history overlay.
 /// 7. Delete ancient record from memory and disk. 
/// -pub struct JournalOverlayDB { +pub struct OverlayRecentDB { transaction_overlay: MemoryDB, backing: Arc, journal_overlay: Arc>, @@ -82,9 +82,9 @@ impl HeapSizeOf for JournalEntry { } } -impl Clone for JournalOverlayDB { - fn clone(&self) -> JournalOverlayDB { - JournalOverlayDB { +impl Clone for OverlayRecentDB { + fn clone(&self) -> OverlayRecentDB { + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: self.backing.clone(), journal_overlay: self.journal_overlay.clone(), @@ -98,14 +98,14 @@ const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 const DB_VERSION : u32 = 0x200 + 3; const PADDING : [u8; 10] = [ 0u8; 10 ]; -impl JournalOverlayDB { +impl OverlayRecentDB { /// Create a new instance from file - pub fn new(path: &str) -> JournalOverlayDB { + pub fn new(path: &str) -> OverlayRecentDB { Self::from_prefs(path) } /// Create a new instance from file - pub fn from_prefs(path: &str) -> JournalOverlayDB { + pub fn from_prefs(path: &str) -> OverlayRecentDB { let opts = DatabaseConfig { prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix }; @@ -121,8 +121,8 @@ impl JournalOverlayDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let journal_overlay = Arc::new(RwLock::new(JournalOverlayDB::read_overlay(&backing))); - JournalOverlayDB { + let journal_overlay = Arc::new(RwLock::new(OverlayRecentDB::read_overlay(&backing))); + OverlayRecentDB { transaction_overlay: MemoryDB::new(), backing: Arc::new(backing), journal_overlay: journal_overlay, @@ -131,7 +131,7 @@ impl JournalOverlayDB { /// Create a new instance with an anonymous temporary database. #[cfg(test)] - pub fn new_temp() -> JournalOverlayDB { + pub fn new_temp() -> OverlayRecentDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); Self::new(dir.to_str().unwrap()) @@ -196,7 +196,7 @@ impl JournalOverlayDB { } } -impl JournalDB for JournalOverlayDB { +impl JournalDB for OverlayRecentDB { fn spawn(&self) -> Box { Box::new(self.clone()) } @@ -303,7 +303,7 @@ impl JournalDB for JournalOverlayDB { } -impl HashDB for JournalOverlayDB { +impl HashDB for OverlayRecentDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { @@ -367,7 +367,7 @@ mod tests { #[test] fn insert_same_in_fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let x = jdb.insert(b"X"); jdb.commit(1, &b"1".sha3(), None).unwrap(); @@ -397,7 +397,7 @@ mod tests { #[test] fn long_history() { // history is 3 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let h = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -420,7 +420,7 @@ mod tests { #[test] fn complex() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -463,7 +463,7 @@ mod tests { #[test] fn fork() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -495,7 +495,7 @@ mod tests { #[test] fn overwrite() { // history is 1 - let mut jdb = JournalOverlayDB::new_temp(); + let mut jdb = OverlayRecentDB::new_temp(); let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -520,7 +520,7 @@ mod tests { let mut dir = 
::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -548,7 +548,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -576,7 +576,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(0, &b"0".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -614,7 +614,7 @@ mod tests { let bar = H256::random(); let foo = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.emplace(bar.clone(), b"bar".to_vec()); @@ -624,14 +624,14 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); @@ -646,7 +646,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -675,7 +675,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); @@ -724,7 +724,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); @@ -755,7 +755,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 4 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -795,7 +795,7 @@ mod tests { let foo = b"foo".sha3(); { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); @@ -816,7 +816,7 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.remove(&foo); jdb.commit(4, &b"4".sha3(), Some((2, b"2".sha3()))).unwrap(); @@ -824,14 +824,14 @@ mod tests { assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = 
JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(5, &b"5".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); // incantation to reopen the db - }; { let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + }; { let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(6, &b"6".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); @@ -844,7 +844,7 @@ mod tests { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); let (foo, bar, baz) = { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); // history is 1 let foo = jdb.insert(b"foo"); let bar = jdb.insert(b"bar"); @@ -862,7 +862,7 @@ mod tests { }; { - let mut jdb = JournalOverlayDB::new(dir.to_str().unwrap()); + let mut jdb = OverlayRecentDB::new(dir.to_str().unwrap()); jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); assert!(jdb.exists(&foo)); From b03679e1a6e8f29dee36b0bfe3a4c701a7f154e4 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 11:22:02 +0100 Subject: [PATCH 148/222] Fix typos. --- util/src/journaldb/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index 724e61dfb..cf5278368 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -73,8 +73,8 @@ impl fmt::Display for Algorithm { pub fn new(path: &str, algorithm: Algorithm) -> Box { match algorithm { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), - Algorithm::EarlyMerge => Box::new(optiononedb::EarlyMergeDB::new(path)), - Algorithm::OverlayRecent => Box::new(optiononedb::OverlayRecentDB::new(path)), + Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), + Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), _ => unimplemented!(), } } From 98bae098bea3b5a6db3fd891eba4e54ce8e3645e Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 12:10:55 +0100 Subject: [PATCH 149/222] Update cargo lock. 
--- Cargo.lock | 88 +++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7f26ac923..8e3a4b168 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -31,7 +31,7 @@ dependencies = [ [[package]] name = "arrayvec" -version = "0.3.15" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "nodrop 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -43,14 +43,14 @@ name = "aster" version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "bigint" version = "0.1.0" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -65,7 +65,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "bitflags" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -85,7 +85,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "chrono" -version = "0.2.19" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num 0.1.31 (registry+https://github.com/rust-lang/crates.io-index)", @@ -117,7 +117,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -136,7 +136,7 @@ version = "1.1.1" source = "git+https://github.com/tomusdrw/rust-ctrlc.git#f4927770f89eca80ec250911eea3adcbf579ac48" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -145,7 +145,7 @@ name = "daemonize" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -161,7 +161,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -177,7 +177,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 
(registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -185,7 +185,7 @@ name = "eth-secp256k1" version = "0.5.4" source = "git+https://github.com/ethcore/rust-secp256k1#283a0677d8327536be58a87e0494d7e0e7b1d1d8" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "gcc 0.3.25 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.1.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -253,9 +253,9 @@ dependencies = [ name = "ethcore-util" version = "0.9.99" dependencies = [ - "arrayvec 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)", + "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "chrono 0.2.19 (registry+https://github.com/rust-lang/crates.io-index)", + "chrono 0.2.20 (registry+https://github.com/rust-lang/crates.io-index)", "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -264,10 +264,10 @@ dependencies = [ "ethcore-devtools 0.9.99", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "itertools 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", "json-tests 0.1.0", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -304,7 +304,7 @@ dependencies = [ name = "fdlimit" version = "0.1.0" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -314,7 +314,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "glob" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -328,7 +328,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -381,7 +381,7 @@ dependencies = [ "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 0.5.7 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -391,21 +391,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.54 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "itertools" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "json-tests" version = "0.1.0" dependencies = [ - "glob 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)", + "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -461,7 +461,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "libc" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -469,7 +469,7 @@ name = "librocksdb-sys" version = "0.2.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -477,7 +477,7 @@ name = "log" version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -490,7 +490,7 @@ name = "memchr" version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -536,7 +536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -560,7 +560,7 @@ dependencies = [ [[package]] name = "nom" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -577,7 +577,7 @@ name = "num_cpus" version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -639,7 +639,7 @@ name = "quasi" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -649,7 +649,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aster 0.13.1 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 
(registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -657,7 +657,7 @@ name = "rand" version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -672,7 +672,7 @@ dependencies = [ [[package]] name = "regex" -version = "0.1.54" +version = "0.1.55" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -691,7 +691,7 @@ name = "rocksdb" version = "0.4.2" source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", ] @@ -701,7 +701,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -741,7 +741,7 @@ name = "semver" version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "nom 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -766,7 +766,7 @@ dependencies = [ "quasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "quasi_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "syntex 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -809,16 +809,16 @@ name = "syntex" version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "syntex_syntax 0.29.0 (registry+https://github.com/rust-lang/crates.io-index)", + "syntex_syntax 0.29.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "syntex_syntax" -version = "0.29.0" +version = "0.29.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "bitflags 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -844,7 +844,7 @@ name = "termios" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 
(registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -853,7 +853,7 @@ version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -918,7 +918,7 @@ dependencies = [ [[package]] name = "url" -version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -975,7 +975,7 @@ name = "xml-rs" version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "bitflags 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "bitflags 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] From 19b6c74675cbf96cb0118cb13e5279860d182876 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 13:39:17 +0100 Subject: [PATCH 150/222] Two more warnings --- ethcore/src/verification/noop_verifier.rs | 1 + parity/main.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/ethcore/src/verification/noop_verifier.rs b/ethcore/src/verification/noop_verifier.rs index ae2a153fe..20c15c3f1 100644 --- a/ethcore/src/verification/noop_verifier.rs +++ b/ethcore/src/verification/noop_verifier.rs @@ -20,6 +20,7 @@ use error::Error; use header::Header; use super::Verifier; +#[allow(dead_code)] pub struct NoopVerifier; impl Verifier for NoopVerifier { diff --git a/parity/main.rs b/parity/main.rs index 3770fe04d..b16801ad5 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -394,6 +394,7 @@ impl Configuration { } } + #[cfg_attr(feature="dev", allow(useless_format))] fn execute_client(&self) { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); From 451a5d78e35da7da15c44945375083322fe6b013 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 12 Mar 2016 13:40:39 +0100 Subject: [PATCH 151/222] Removing unused (?) 
serde::Error --- rpc/src/v1/types/transaction.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 0518a58ea..232cf0bf3 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -17,7 +17,6 @@ use util::numbers::*; use ethcore::transaction::{LocalizedTransaction, Action}; use v1::types::{Bytes, OptionalValue}; -use serde::Error; #[derive(Debug, Default, Serialize)] pub struct Transaction { From 7ad7996144cc3f3cbf982f0d78af25be2f1c2846 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 18:50:29 +0100 Subject: [PATCH 152/222] Fixed common block detection --- sync/src/chain.rs | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 4a2d941a7..5ba2e8773 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -377,10 +377,14 @@ impl ChainSync { let hash = info.hash(); match io.chain().block_status(BlockId::Hash(hash.clone())) { BlockStatus::InChain => { - self.have_common_block = true; - self.last_imported_block = Some(number); - self.last_imported_hash = Some(hash.clone()); - trace!(target: "sync", "Found common header {} ({})", number, hash); + if !self.have_common_block { + self.have_common_block = true; + self.last_imported_block = Some(number); + self.last_imported_hash = Some(hash.clone()); + trace!(target: "sync", "Found common header {} ({})", number, hash); + } else { + trace!(target: "sync", "Header already in chain {} ({})", number, hash); + } }, _ => { if self.have_common_block { From 1fe575bf7bbdf276c1f3f8242d278fc1a573a099 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 20:17:36 +0100 Subject: [PATCH 153/222] Download bodies for validated hash chain only --- sync/src/chain.rs | 19 +++---------------- sync/src/range_collection.rs | 11 ++++++++--- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 5ba2e8773..657ef6a78 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -660,10 +660,7 @@ impl ChainSync { let mut needed_numbers: Vec = Vec::new(); if self.have_common_block && !self.headers.is_empty() && self.headers.range_iter().next().unwrap().0 == self.current_base_block() + 1 { - for (start, ref items) in self.headers.range_iter() { - if needed_bodies.len() >= MAX_BODIES_TO_REQUEST { - break; - } + if let Some((start, ref items)) = self.headers.range_iter().next() { let mut index: BlockNumber = 0; while index != items.len() as BlockNumber && needed_bodies.len() < MAX_BODIES_TO_REQUEST { let block = start + index; @@ -848,18 +845,8 @@ impl ChainSync { /// Remove downloaded bocks/headers starting from specified number. /// Used to recover from an error and re-download parts of the chain detected as bad. 
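The common-block fix in the hunk above only latches the sync state the first time an already-known header is encountered; subsequent hits from overlapping header responses just log. A hedged distillation of that guard, with simplified types in place of the real ChainSync fields:

// Simplified sync state; in the real code these live on ChainSync.
struct SyncState {
    have_common_block: bool,
    last_imported_block: Option<u64>,
}

impl SyncState {
    // Called for each downloaded header that turns out to already be in our
    // chain. Only the first such header establishes the common ancestor;
    // later duplicates must not drag last_imported_block around.
    fn on_known_header(&mut self, number: u64) {
        if !self.have_common_block {
            self.have_common_block = true;
            self.last_imported_block = Some(number);
        }
    }
}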
fn remove_downloaded_blocks(&mut self, start: BlockNumber) { - for n in self.headers.get_tail(&start) { - if let Some(ref header_data) = self.headers.find_item(&n) { - let header_to_delete = HeaderView::new(&header_data.data); - let header_id = HeaderId { - transactions_root: header_to_delete.transactions_root(), - uncles: header_to_delete.uncles_hash() - }; - self.header_ids.remove(&header_id); - } - self.downloading_bodies.remove(&n); - self.downloading_headers.remove(&n); - } + self.downloading_bodies.clear(); + self.downloading_headers.clear(); self.headers.remove_from(&start); self.bodies.remove_from(&start); } diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index 664d7c7a3..0628df401 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -300,12 +300,17 @@ fn test_range() { let mut r = ranges.clone(); r.remove_from(&20); assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal); - r.remove_from(&17); - assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal); - r.remove_from(&15); + r.remove_from(&18); + assert!(!r.have_item(&18)); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q'][..])]), Ordering::Equal); + r.remove_from(&16); + assert!(!r.have_item(&16)); assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal); r.remove_from(&3); assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); + r.remove_from(&1); + assert_eq!(r.range_iter().next(), None); + let mut r = ranges.clone(); r.remove_from(&2); assert_eq!(r.range_iter().next(), None); } From 16618094f5211ad26d83e2ef2c7cb57420c48388 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 11 Mar 2016 21:23:36 +0100 Subject: [PATCH 154/222] Cleanup header_ids --- sync/src/chain.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 657ef6a78..a30db0423 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -377,10 +377,12 @@ impl ChainSync { let hash = info.hash(); match io.chain().block_status(BlockId::Hash(hash.clone())) { BlockStatus::InChain => { - if !self.have_common_block { - self.have_common_block = true; + if self.current_base_block() < number { self.last_imported_block = Some(number); self.last_imported_hash = Some(hash.clone()); + } + if !self.have_common_block { + self.have_common_block = true; trace!(target: "sync", "Found common header {} ({})", number, hash); } else { trace!(target: "sync", "Header already in chain {} ({})", number, hash); @@ -845,8 +847,12 @@ impl ChainSync { /// Remove downloaded bocks/headers starting from specified number. /// Used to recover from an error and re-download parts of the chain detected as bad. 
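The body-download change above stops walking every downloaded header range and requests bodies only from the first contiguous run, and only when that run directly extends the current base block, i.e. the one stretch whose hashes are already validated against the chain. A std-only sketch of that selection, with a sorted Vec of (start, items) runs standing in for the RangeCollection (assumed shape, not the real type):

const MAX_BODIES_TO_REQUEST: usize = 256;

// `ranges` is sorted by start; each element is a contiguous run of
// downloaded header blobs. Returns block numbers whose bodies to fetch.
fn needed_bodies(ranges: &[(u64, Vec<Vec<u8>>)], base: u64) -> Vec<u64> {
    let mut needed = Vec::new();
    // Only the first range counts, and only if it starts right after the tip.
    if let Some(&(start, ref items)) = ranges.first() {
        if start == base + 1 {
            for i in 0..items.len().min(MAX_BODIES_TO_REQUEST) {
                needed.push(start + i as u64);
            }
        }
    }
    needed
}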
fn remove_downloaded_blocks(&mut self, start: BlockNumber) { - self.downloading_bodies.clear(); - self.downloading_headers.clear(); + let ids = self.header_ids.drain().filter(|&(_, v)| v < start).collect(); + self.header_ids = ids; + let hdrs = self.downloading_headers.drain().filter(|v| *v < start).collect(); + self.downloading_headers = hdrs; + let bodies = self.downloading_bodies.drain().filter(|v| *v < start).collect(); + self.downloading_bodies = bodies; self.headers.remove_from(&start); self.bodies.remove_from(&start); } From 1e23a4c888c9feab5c8343e1f2c49a953f560cb8 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 12 Mar 2016 14:54:44 +0100 Subject: [PATCH 155/222] Don't redownload queued blocks on restart --- sync/src/chain.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a30db0423..84ca5500a 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -298,8 +298,6 @@ impl ChainSync { /// Restart sync pub fn restart(&mut self, io: &mut SyncIo) { self.reset(); - self.last_imported_block = None; - self.last_imported_hash = None; self.starting_block = 0; self.highest_block = None; self.have_common_block = false; @@ -366,7 +364,7 @@ impl ChainSync { for i in 0..item_count { let info: BlockHeader = try!(r.val_at(i)); let number = BlockNumber::from(info.number); - if number <= self.current_base_block() || self.headers.have_item(&number) { + if (number <= self.current_base_block() && self.have_common_block) || self.headers.have_item(&number) { trace!(target: "sync", "Skipping existing block header"); continue; } @@ -376,8 +374,8 @@ impl ChainSync { } let hash = info.hash(); match io.chain().block_status(BlockId::Hash(hash.clone())) { - BlockStatus::InChain => { - if self.current_base_block() < number { + BlockStatus::InChain | BlockStatus::Queued => { + if !self.have_common_block || self.current_base_block() < number { self.last_imported_block = Some(number); self.last_imported_hash = Some(hash.clone()); } @@ -706,7 +704,10 @@ impl ChainSync { if !self.have_common_block { // download backwards until common block is found 1 header at a time let chain_info = io.chain().chain_info(); - start = chain_info.best_block_number; + start = match self.last_imported_block { + Some(n) => n, + None => chain_info.best_block_number, + }; if !self.headers.is_empty() { start = min(start, self.headers.range_iter().next().unwrap().0 - 1); } From 1ca7c35c19adffe3775d137698d8da67362e093f Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 12 Mar 2016 17:30:46 +0100 Subject: [PATCH 156/222] Fix latest era marker --- util/src/journaldb/earlymergedb.rs | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 83be31fb4..f411691d9 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -64,6 +64,7 @@ pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, refs: Option>>>, + latest_era: u64, } // all keys must be at least 12 bytes @@ -90,11 +91,13 @@ impl EarlyMergeDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - let refs = Some(Arc::new(RwLock::new(EarlyMergeDB::read_refs(&backing)))); + let (latest_era, refs) = EarlyMergeDB::read_refs(&backing); + let refs = Some(Arc::new(RwLock::new(refs))); EarlyMergeDB { overlay: MemoryDB::new(), backing: Arc::new(backing), refs: refs, + latest_era: latest_era, } } @@ -225,9 +228,9 @@ impl EarlyMergeDB { 
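The header_ids cleanup above replaces the wholesale clear() with a rebuild that keeps every entry strictly below the restart point, so state for blocks that are not being re-downloaded survives. The drain().filter().collect() dance in the diff has the same effect as retain(), which is the more direct modern form; a sketch under that assumption:

use std::collections::{HashMap, HashSet};

// `header_ids` maps some header identity to a block number;
// `downloading` is a set of block numbers with requests in flight.
fn remove_from(header_ids: &mut HashMap<u64, u64>,
               downloading: &mut HashSet<u64>,
               start: u64) {
    // Keep only entries strictly below the first re-downloaded block.
    header_ids.retain(|_, number| *number < start);
    downloading.retain(|&number| number < start);
}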
#[cfg(test)] fn can_reconstruct_refs(&self) -> bool { - let reconstructed = Self::read_refs(&self.backing); + let (latest_era, reconstructed) = Self::read_refs(&self.backing); let refs = self.refs.as_ref().unwrap().write().unwrap(); - if *refs != reconstructed { + if *refs != reconstructed || latest_era != self.latest_era { let clean_refs = refs.iter().filter_map(|(k, v)| if reconstructed.get(k) == Some(v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); let clean_recon = reconstructed.into_iter().filter_map(|(k, v)| if refs.get(&k) == Some(&v) {None} else {Some((k.clone(), v.clone()))}).collect::>(); warn!(target: "jdb", "mem: {:?} != log: {:?}", clean_refs, clean_recon); @@ -241,10 +244,12 @@ impl EarlyMergeDB { self.backing.get(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_refs(db: &Database) -> HashMap { + fn read_refs(db: &Database) -> (u64, HashMap) { let mut refs = HashMap::new(); + let mut latest_era = 0u64; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + latest_era = decode::(&val); + let mut era = latest_era; loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -265,7 +270,7 @@ impl EarlyMergeDB { era -= 1; } } - refs + (latest_era, refs) } } @@ -320,6 +325,7 @@ impl JournalDB for EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), refs: self.refs.clone(), + latest_era: self.latest_era, }) } @@ -435,7 +441,10 @@ impl JournalDB for EarlyMergeDB { trace!(target: "jdb.ops", " Deletes: {:?}", removes); } try!(batch.put(&last, r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + if now >= self.latest_era { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = now; + } } // apply old commits' details From 0b4355d549d30a03287543d95b2a482ccbe1687b Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 12 Mar 2016 19:03:33 +0100 Subject: [PATCH 157/222] rocksdb version bump --- Cargo.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8e3a4b168..1c58cd431 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -271,7 +271,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rocksdb 0.4.2 (git+https://github.com/arkpar/rust-rocksdb.git)", + "rocksdb 0.4.3 (git+https://github.com/arkpar/rust-rocksdb.git)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -466,8 +466,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "librocksdb-sys" -version = "0.2.2" -source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" +version = "0.2.3" +source = "git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -688,11 +688,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "rocksdb" -version = "0.4.2" -source = "git+https://github.com/arkpar/rust-rocksdb.git#a4f89fea20ee3ae92b692df65d56426a5c0b6fd5" +version = "0.4.3" +source = 
"git+https://github.com/arkpar/rust-rocksdb.git#ebb602fc74b4067f9f51310bdc0401b8e59b7156" dependencies = [ "libc 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "librocksdb-sys 0.2.2 (git+https://github.com/arkpar/rust-rocksdb.git)", + "librocksdb-sys 0.2.3 (git+https://github.com/arkpar/rust-rocksdb.git)", ] [[package]] From 89986ec0e0d38ca3385cdc6cfeecaa6c2115ff61 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 19:19:16 +0100 Subject: [PATCH 158/222] Update main.rs [noci] --- parity/main.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 1350aca45..a22e4f763 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -224,8 +224,14 @@ fn setup_log(init: &Option) { #[cfg(feature = "rpc")] fn setup_rpc_server( - client: Arc, sync: Arc, secret_store: Arc, miner: Arc, - url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { + client: Arc, + sync: Arc, + secret_store: Arc, + miner: Arc, + url: &str, + cors_domain: &str, + apis: Vec<&str> +) -> Option> { use rpc::v1::*; let server = rpc::RpcServer::new(); From 707f67c6b4b588e43af86adad2f64d9299d8e3e8 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 12 Mar 2016 19:19:45 +0100 Subject: [PATCH 159/222] Optional last era --- util/src/journaldb/earlymergedb.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index f411691d9..0931d42d1 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -64,7 +64,7 @@ pub struct EarlyMergeDB { overlay: MemoryDB, backing: Arc, refs: Option>>>, - latest_era: u64, + latest_era: Option, } // all keys must be at least 12 bytes @@ -244,12 +244,12 @@ impl EarlyMergeDB { self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_refs(db: &Database) -> (u64, HashMap) { + fn read_refs(db: &Database) -> (Option, HashMap) { let mut refs = HashMap::new(); - let mut latest_era = 0u64; + let mut latest_era = None; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - latest_era = decode::(&val); - let mut era = latest_era; + let mut era = decode::(&val); + latest_era = Some(era); loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -325,7 +325,7 @@ impl JournalDB for EarlyMergeDB { overlay: MemoryDB::new(), backing: self.backing.clone(), refs: self.refs.clone(), - latest_era: self.latest_era, + latest_era: self.latest_era.clone(), }) } @@ -441,9 +441,9 @@ impl JournalDB for EarlyMergeDB { trace!(target: "jdb.ops", " Deletes: {:?}", removes); } try!(batch.put(&last, r.as_raw())); - if now >= self.latest_era { + if self.latest_era.map_or(true, |e| now > e) { try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); - self.latest_era = now; + self.latest_era = Some(now); } } From a9a1c80fac13732e9cd642822f768e2884663aac Mon Sep 17 00:00:00 2001 From: debris Date: Sat, 12 Mar 2016 19:21:08 +0100 Subject: [PATCH 160/222] implemented eth_accounts, fixed personal_accounts, added test account provider, tests for eth_accounts --- rpc/src/v1/impls/eth.rs | 8 ++ rpc/src/v1/impls/personal.rs | 7 +- rpc/src/v1/tests/eth.rs | 59 ++++++++++++++ rpc/src/v1/tests/helpers/account_provider.rs | 84 ++++++++++++++++++++ rpc/src/v1/tests/helpers/mod.rs | 2 + rpc/src/v1/tests/mod.rs | 1 + 6 files changed, 155 insertions(+), 6 deletions(-) create mode 100644 rpc/src/v1/tests/eth.rs create mode 100644 rpc/src/v1/tests/helpers/account_provider.rs diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 38e363624..47b5471b3 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -155,6 +155,14 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: } } + fn accounts(&self, _: Params) -> Result { + let store = take_weak!(self.accounts); + match store.accounts() { + Ok(account_list) => to_value(&account_list), + Err(_) => Err(Error::internal_error()) + } + } + fn block_number(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.client).chain_info().best_block_number)), diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index ce200244c..0cd3f0040 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -39,12 +39,7 @@ impl Personal for PersonalClient where A: AccountProvider + 'static { fn accounts(&self, _: Params) -> Result { let store = take_weak!(self.accounts); match store.accounts() { - Ok(account_list) => { - Ok(Value::Array(account_list.iter() - .map(|&account| Value::String(format!("{:?}", account))) - .collect::>()) - ) - } + Ok(account_list) => to_value(&account_list), Err(_) => Err(Error::internal_error()) } } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs new file mode 100644 index 000000000..c36a9d172 --- /dev/null +++ b/rpc/src/v1/tests/eth.rs @@ -0,0 +1,59 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
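Making latest_era an Option, as in the hunks just above, distinguishes "no journal written yet" from era 0: the LATEST_ERA marker is written on the very first commit (map_or's None branch) and afterwards only when the era actually advances. A minimal sketch of that guard, with a Vec of puts standing in for the database write batch:

struct JournalSketch {
    latest_era: Option<u64>,
    // Stand-in for a database write batch: (key, value) puts.
    batch: Vec<(&'static str, u64)>,
}

impl JournalSketch {
    fn commit(&mut self, now: u64) {
        // None => first ever commit, always record the marker.
        // Some(e) => rewrite it only when we move past the stored era.
        if self.latest_era.map_or(true, |e| now > e) {
            self.batch.push(("latest_era", now));
            self.latest_era = Some(now);
        }
    }
}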
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::HashMap; +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use util::hash::{Address}; +use ethcore::client::{TestBlockChainClient, EachBlockWith}; +use v1::{Eth, EthClient}; +use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config}; + +fn blockchain_client() -> Arc { + let mut client = TestBlockChainClient::new(); + client.add_blocks(10, EachBlockWith::Nothing); + Arc::new(client) +} + +fn accounts_provider() -> Arc { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn sync_provider() -> Arc { + Arc::new(TestSyncProvider::new(Config { + protocol_version: 65, + num_peers: 120, + })) +} + +#[test] +fn rpc_eth_accounts() { + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + + let eth = EthClient::new(&client, &sync, &ap).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(eth); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/tests/helpers/account_provider.rs b/rpc/src/v1/tests/helpers/account_provider.rs new file mode 100644 index 000000000..66f085f74 --- /dev/null +++ b/rpc/src/v1/tests/helpers/account_provider.rs @@ -0,0 +1,84 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::RwLock; +use std::collections::HashMap; +use std::io; +use util::hash::{Address, H256}; +use util::crypto::{Secret, Signature}; +use util::keys::store::{AccountProvider, SigningError, EncryptedHashMapError}; + +/// Account mock. +#[derive(Clone)] +pub struct TestAccount { + /// True if account is unlocked. + pub unlocked: bool, + /// Account's password. + pub password: String, +} + +impl TestAccount { + pub fn new(password: &str) -> Self { + TestAccount { + unlocked: false, + password: password.to_owned(), + } + } +} + +/// Test account provider. +pub struct TestAccountProvider { + accounts: RwLock>, +} + +impl TestAccountProvider { + /// Basic constructor. 
+ pub fn new(accounts: HashMap) -> Self { + TestAccountProvider { + accounts: RwLock::new(accounts), + } + } +} + +impl AccountProvider for TestAccountProvider { + fn accounts(&self) -> Result, io::Error> { + Ok(self.accounts.read().unwrap().keys().cloned().collect()) + } + + fn unlock_account(&self, account: &Address, pass: &str) -> Result<(), EncryptedHashMapError> { + match self.accounts.write().unwrap().get_mut(account) { + Some(ref mut acc) if acc.password == pass => { + acc.unlocked = true; + Ok(()) + }, + Some(_) => Err(EncryptedHashMapError::InvalidPassword), + None => Err(EncryptedHashMapError::UnknownIdentifier), + } + } + + fn new_account(&self, _pass: &str) -> Result { + unimplemented!() + } + fn account_secret(&self, _account: &Address) -> Result { + unimplemented!() + } + + fn sign(&self, _account: &Address, _message: &H256) -> Result { + unimplemented!() + } + +} + diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index 501bfb2d3..3bd74bab7 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +mod account_provider; mod sync_provider; +pub use self::account_provider::{TestAccount, TestAccountProvider}; pub use self::sync_provider::{Config, TestSyncProvider}; diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 3a38ced15..3374bad36 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -16,6 +16,7 @@ //!TODO: load custom blockchain state and test +mod eth; mod net; mod web3; mod helpers; From e85a2f3804dea8387181008ba86d1b3456bde555 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 19:22:38 +0100 Subject: [PATCH 161/222] Update main.rs [noci] --- parity/main.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index a22e4f763..93fed03aa 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -506,7 +506,9 @@ impl Configuration { sync.clone(), account_service.clone(), miner.clone(), - &url, cors, apis.split(',').collect() + &url, + cors, + apis.split(',').collect() ); if let Some(handler) = server_handler { panic_handler.forward_from(handler.deref()); From 0684abd345aa9405d59bda17bae868d319d48329 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 12 Mar 2016 19:23:17 +0100 Subject: [PATCH 162/222] fixed to return receipts grouped by requested block --- ethcore/src/client/test_client.rs | 5 +++-- sync/src/chain.rs | 20 ++++++++++++-------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 207f1090f..76e13fe9f 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -24,6 +24,7 @@ use header::{Header as BlockHeader, BlockNumber}; use filter::Filter; use log_entry::LocalizedLogEntry; use receipt::Receipt; +use extras::BlockReceipts; use error::{ImportResult, Error}; use block_queue::BlockQueueInfo; use block::ClosedBlock; @@ -254,10 +255,10 @@ impl BlockChainClient for TestBlockChainClient { fn block_receipts(&self, hash: &H256) -> Option { // starts with 'f' ? 
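The receipts change that follows groups receipts per requested block hash (one RLP-encoded BlockReceipts list per header, rather than a flat stream) and caps the reply both by header count and by accumulated byte size. A simplified assembly loop under assumed types, with the byte count taken before append drains the buffer:

const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 16;
const MAX_RECEIPTS_TO_SEND: usize = 1024;

// `lookup` maps a block hash (u64 stand-in) to its RLP-encoded
// BlockReceipts, if known. Returns (blocks answered, response payload).
fn assemble_receipts(hashes: &[u64],
                     lookup: impl Fn(u64) -> Option<Vec<u8>>) -> (usize, Vec<u8>) {
    let mut data = Vec::new();
    let mut added_headers = 0;
    let mut added_receipts = 0;
    for &hash in hashes.iter().take(MAX_RECEIPTS_HEADERS_TO_SEND) {
        if let Some(mut bytes) = lookup(hash) {
            added_receipts += bytes.len(); // measure before append empties `bytes`
            data.append(&mut bytes);
            added_headers += 1;
            if added_receipts > MAX_RECEIPTS_TO_SEND { break; }
        }
    }
    (added_headers, data)
}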
if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") { - let receipt = Receipt::new( + let receipt = BlockReceipts::new(vec![Receipt::new( H256::zero(), U256::zero(), - vec![]); + vec![])]); let mut rlp = RlpStream::new(); rlp.append(&receipt); return Some(rlp.out()); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 4a2d941a7..ae4744902 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -66,6 +66,7 @@ const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; const MAX_RECEIPTS_TO_SEND: usize = 1024; +const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 16; const MAX_HEADERS_TO_REQUEST: usize = 512; const MAX_BODIES_TO_REQUEST: usize = 256; const MIN_PEERS_PROPAGATION: usize = 4; @@ -1060,17 +1061,20 @@ impl ChainSync { debug!(target: "sync", "Empty GetReceipts request, ignoring."); return Ok(None); } - count = min(count, MAX_RECEIPTS_TO_SEND); - let mut added = 0usize; + count = min(count, MAX_RECEIPTS_HEADERS_TO_SEND); + let mut added_headers = 0usize; + let mut added_receipts = 0usize; let mut data = Bytes::new(); for i in 0..count { - if let Some(mut hdr) = io.chain().block_receipts(&try!(rlp.val_at::(i))) { - data.append(&mut hdr); - added += 1; + if let Some(mut receipts_bytes) = io.chain().block_receipts(&try!(rlp.val_at::(i))) { + data.append(&mut receipts_bytes); + added_receipts += receipts_bytes.len(); + added_headers += 1; + if added_receipts > MAX_RECEIPTS_TO_SEND { break; } } } - let mut rlp_result = RlpStream::new_list(added); - rlp_result.append_raw(&data, added); + let mut rlp_result = RlpStream::new_list(added_headers); + rlp_result.append_raw(&data, added_headers); Ok(Some((RECEIPTS_PACKET, rlp_result))) } @@ -1396,7 +1400,7 @@ mod tests { assert!(rlp_result.is_some()); // the length of two rlp-encoded receipts - assert_eq!(597, rlp_result.unwrap().1.out().len()); + assert_eq!(603, rlp_result.unwrap().1.out().len()); let mut sync = dummy_sync_with_peer(H256::new()); io.sender = Some(2usize); From e7574d451675592f10fa23d0cf8afeb2ba758345 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 12 Mar 2016 19:29:18 +0100 Subject: [PATCH 163/222] Update lib.rs --- sync/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index bcf8fbcd1..0c7abd1d0 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -167,7 +167,7 @@ impl NetworkProtocolHandler for EthSync { #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted} => { + SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); }, From e09de6ea3d8877cb8926dff11b9cef644ae07506 Mon Sep 17 00:00:00 2001 From: debris Date: Sat, 12 Mar 2016 19:51:24 +0100 Subject: [PATCH 164/222] added missing eth_getBalance rpc method and tests for it --- ethcore/src/client/client.rs | 4 ++++ ethcore/src/client/mod.rs | 3 +++ ethcore/src/client/test_client.rs | 11 +++++++++++ rpc/src/v1/impls/eth.rs | 5 +++++ rpc/src/v1/tests/eth.rs | 25 ++++++++++++++++++++++++- rpc/src/v1/traits/eth.rs | 2 +- 6 files changed, 48 insertions(+), 2 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 4e8c34b33..a860dd752 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -449,6 
+449,10 @@ impl BlockChainClient for Client where V: Verifier { self.state().code(address) } + fn balance(&self, address: &Address) -> U256 { + self.state().balance(address) + } + fn transaction(&self, id: TransactionId) -> Option { match id { TransactionId::Hash(ref hash) => self.chain.transaction_address(hash), diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index afdfb200a..af2c6ac14 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -66,6 +66,9 @@ pub trait BlockChainClient : Sync + Send { /// Get address code. fn code(&self, address: &Address) -> Option; + /// Get address balance. + fn balance(&self, address: &Address) -> U256; + /// Get transaction with given hash. fn transaction(&self, id: TransactionId) -> Option; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 207f1090f..cf1352bc8 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -40,6 +40,8 @@ pub struct TestBlockChainClient { pub last_hash: RwLock, /// Difficulty. pub difficulty: RwLock, + /// Balances. + pub balances: RwLock>, } #[derive(Clone)] @@ -65,12 +67,17 @@ impl TestBlockChainClient { genesis_hash: H256::new(), last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), + balances: RwLock::new(HashMap::new()), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } + pub fn set_balance(&mut self, address: Address, balance: U256) { + self.balances.write().unwrap().insert(address, balance); + } + /// Add blocks to test client. pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); @@ -165,6 +172,10 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn balance(&self, address: &Address) -> U256 { + self.balances.read().unwrap().get(address).cloned().unwrap_or_else(U256::zero) + } + fn transaction(&self, _id: TransactionId) -> Option { unimplemented!(); } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 47b5471b3..f5159f55f 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -170,6 +170,11 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: } } + fn balance(&self, params: Params) -> Result { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).balance(&address))) + } + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index c36a9d172..320d583d1 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -17,7 +17,8 @@ use std::collections::HashMap; use std::sync::Arc; use jsonrpc_core::IoHandler; -use util::hash::{Address}; +use util::hash::Address; +use util::numbers::U256; use ethcore::client::{TestBlockChainClient, EachBlockWith}; use v1::{Eth, EthClient}; use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config}; @@ -25,6 +26,7 @@ use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Con fn blockchain_client() -> Arc { let mut client = TestBlockChainClient::new(); client.add_blocks(10, EachBlockWith::Nothing); + client.set_balance(Address::from(1), U256::from(5)); Arc::new(client) } @@ -57,3 +59,24 @@ fn rpc_eth_accounts() { 
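The eth_getBalance handler above decodes an (address, block number) pair and, for now, ignores the block parameter and answers from latest state, with absent accounts reading as zero. Reduced to its essence with a HashMap standing in for client state (stand-in types, no jsonrpc plumbing):

use std::collections::HashMap;

type Address = u64; // stand-in for the 160-bit address type
type U256 = u128;   // stand-in for the 256-bit balance type

struct TestClient {
    balances: HashMap<Address, U256>,
}

impl TestClient {
    // Mirrors the balance query: unknown accounts read as zero.
    fn balance(&self, address: &Address) -> U256 {
        self.balances.get(address).cloned().unwrap_or(0)
    }
}

fn main() {
    let mut client = TestClient { balances: HashMap::new() };
    client.balances.insert(1, 5);
    assert_eq!(client.balance(&1), 5);
    assert_eq!(client.balance(&2), 0); // absent account defaults to zero
}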
assert_eq!(io.handle_request(request), Some(response.to_owned())); } + +#[test] +fn rpc_eth_balance() { + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + + let eth = EthClient::new(&client, &sync, &ap).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(eth); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBalance", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x05","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 8c24dd38c..7d33cb63f 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -130,7 +130,7 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_gasPrice", Eth::gas_price); delegate.add_method("eth_accounts", Eth::accounts); delegate.add_method("eth_blockNumber", Eth::block_number); - delegate.add_method("eth_balance", Eth::balance); + delegate.add_method("eth_getBalance", Eth::balance); delegate.add_method("eth_getStorageAt", Eth::storage_at); delegate.add_method("eth_getTransactionCount", Eth::transaction_count); delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); From 361280a9bebb9b799effaf0689a26d7b2020a217 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 12 Mar 2016 19:52:37 +0100 Subject: [PATCH 165/222] Limit incoming connections --- util/src/network/host.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/util/src/network/host.rs b/util/src/network/host.rs index f63c75e9f..57aae51d7 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -541,7 +541,7 @@ impl Host where Message: Send + Sync + Clone { match TcpStream::connect(&address) { Ok(socket) => socket, Err(e) => { - warn!("Can't connect to address {:?}: {:?}", address, e); + debug!("Can't connect to address {:?}: {:?}", address, e); return; } } @@ -695,6 +695,14 @@ impl Host where Message: Send + Sync + Clone { return; } }; + if !originated { + let session_count = sessions.count(); + let ideal_peers = { self.info.read().unwrap().deref().config.ideal_peers }; + if session_count >= ideal_peers as usize { + session.disconnect(DisconnectReason::TooManyPeers); + return; + } + } let result = sessions.insert_with(move |session_token| { session.set_token(session_token); io.deregister_stream(token).expect("Error deleting handshake registration"); From 49dd6661998689a7e373b98f706eb9bb14307252 Mon Sep 17 00:00:00 2001 From: debris Date: Sat, 12 Mar 2016 20:06:55 +0100 Subject: [PATCH 166/222] EthTester --- rpc/src/v1/tests/eth.rs | 44 ++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 320d583d1..8c61c2ed9 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -44,32 +44,40 @@ fn sync_provider() -> Arc { })) } +struct EthTester { + client: Arc, + sync: Arc, + accounts_provider: Arc, + pub io: IoHandler, +} + +impl Default for EthTester { + fn default() -> Self { + let client = blockchain_client(); + let sync = sync_provider(); + let ap = accounts_provider(); + let eth = EthClient::new(&client, &sync, &ap).to_delegate(); + let io = IoHandler::new(); + io.add_delegate(eth); + EthTester { + client: client, + sync: sync, + accounts_provider: ap, + io: io + } + } +} + #[test] fn rpc_eth_accounts() { - let client = 
blockchain_client(); - let sync = sync_provider(); - let ap = accounts_provider(); - - let eth = EthClient::new(&client, &sync, &ap).to_delegate(); - let io = IoHandler::new(); - io.add_delegate(eth); - let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_owned())); + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } #[test] fn rpc_eth_balance() { - let client = blockchain_client(); - let sync = sync_provider(); - let ap = accounts_provider(); - - let eth = EthClient::new(&client, &sync, &ap).to_delegate(); - let io = IoHandler::new(); - io.add_delegate(eth); - let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", @@ -78,5 +86,5 @@ fn rpc_eth_balance() { }"#; let response = r#"{"jsonrpc":"2.0","result":"0x05","id":1}"#; - assert_eq!(io.handle_request(request), Some(response.to_owned())); + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } From 0a6fea1b77c58420d0014118786961770dcce713 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 13 Mar 2016 10:33:55 +0100 Subject: [PATCH 167/222] Silenced some non-important warnings --- sync/src/chain.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 84ca5500a..332b78d83 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -592,7 +592,7 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); if let Err(e) = self.send_status(io) { - warn!(target:"sync", "Error sending status request: {:?}", e); + debug!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } } @@ -1093,7 +1093,7 @@ impl ChainSync { let rlp = UntrustedRlp::new(data); if packet_id != STATUS_PACKET && !self.peers.contains_key(&peer) { - warn!(target:"sync", "Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); + debug!(target:"sync", "Unexpected packet from unregistered peer: {}:{}", peer, io.peer_info(peer)); return; } let result = match packet_id { From 0f21779ec4d5ea156437c0142cc21d5995f5e876 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 11:06:57 +0100 Subject: [PATCH 168/222] make heavy --- util/src/keys/geth_import.rs | 1 + util/src/keys/store.rs | 2 ++ 2 files changed, 3 insertions(+) diff --git a/util/src/keys/geth_import.rs b/util/src/keys/geth_import.rs index dbd9f0fe0..6c684c37d 100644 --- a/util/src/keys/geth_import.rs +++ b/util/src/keys/geth_import.rs @@ -161,6 +161,7 @@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_decrypt_with_imported() { use keys::store::EncryptedHashMap; diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 6a5efc87d..f80d91123 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -363,6 +363,7 @@ mod vector_tests { #[test] + #[cfg(feature="heavy-tests")] fn mac_vector() { let password = "testpassword"; let salt = H256::from_str("ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd").unwrap(); @@ -464,6 +465,7 @@ mod tests { } #[test] + #[cfg(feature="heavy-tests")] fn can_get() { let temp = RandomTempPath::create_dir(); let key_id = { From ff51d0fa676d1ebeca0648afc44e9bcbdd413dc5 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 13 Mar 2016 11:50:09 +0100 Subject: [PATCH 169/222] Additional tests --- 
util/src/journaldb/earlymergedb.rs | 20 +++++++++++++++++ util/src/journaldb/overlayrecentdb.rs | 32 ++++++++++++++++++++++----- 2 files changed, 46 insertions(+), 6 deletions(-) diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 0931d42d1..64c7fa69d 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -561,6 +561,26 @@ mod tests { assert!(jdb.exists(&x)); } + #[test] + fn insert_older_era() { + let mut jdb = EarlyMergeDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"1".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } + #[test] fn long_history() { // history is 3 diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 8dd4d1752..36e8b68ad 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -66,7 +66,7 @@ pub struct OverlayRecentDB { struct JournalOverlay { backing_overlay: MemoryDB, journal: HashMap>, - latest_era: u64, + latest_era: Option, } #[derive(PartialEq)] @@ -152,10 +152,10 @@ impl OverlayRecentDB { let mut journal = HashMap::new(); let mut overlay = MemoryDB::new(); let mut count = 0; - let mut latest_era = 0; + let mut latest_era = None; if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - latest_era = decode::(&val); - let mut era = latest_era; + let mut era = decode::(&val); + latest_era = Some(era); loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -241,9 +241,9 @@ impl JournalDB for OverlayRecentDB { k.append(&index); k.append(&&PADDING[..]); try!(batch.put(&k.drain(), r.as_raw())); - if now >= journal_overlay.latest_era { + if journal_overlay.latest_era.map_or(true, |e| now > e) { try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); - journal_overlay.latest_era = now; + journal_overlay.latest_era = Some(now); } journal_overlay.journal.entry(now).or_insert_with(Vec::new).push(JournalEntry { id: id.clone(), insertions: inserted_keys, deletions: removed_keys }); } @@ -870,4 +870,24 @@ mod tests { assert!(!jdb.exists(&bar)); } } + + #[test] + fn insert_older_era() { + let mut jdb = OverlayRecentDB::new_temp(); + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0a".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + let bar = jdb.insert(b"bar"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0a".sha3()))).unwrap(); + assert!(jdb.can_reconstruct_refs()); + + jdb.remove(&bar); + jdb.commit(0, &b"0b".sha3(), None).unwrap(); + assert!(jdb.can_reconstruct_refs()); + jdb.commit(2, &b"1".sha3(), Some((1, b"1".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + } } From 113161863060661f1b2afdf183f6bcfbb0706527 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 13 Mar 2016 11:55:48 +0100 Subject: [PATCH 170/222] Fixed test --- util/src/journaldb/earlymergedb.rs | 2 +- util/src/journaldb/overlayrecentdb.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 64c7fa69d..7f0f50da2 100644 --- a/util/src/journaldb/earlymergedb.rs +++ 
b/util/src/journaldb/earlymergedb.rs @@ -575,7 +575,7 @@ mod tests { jdb.remove(&bar); jdb.commit(0, &b"0b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"1".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 36e8b68ad..7eb5266b0 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -885,7 +885,7 @@ mod tests { jdb.remove(&bar); jdb.commit(0, &b"0b".sha3(), None).unwrap(); assert!(jdb.can_reconstruct_refs()); - jdb.commit(2, &b"1".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); assert!(jdb.exists(&foo)); assert!(jdb.exists(&bar)); From 487ba9b08aacd81224324d3b374433de50a7d5da Mon Sep 17 00:00:00 2001 From: debris Date: Sun, 13 Mar 2016 12:09:30 +0100 Subject: [PATCH 171/222] implemented eth_storageAt rpc method, added more tests for rpc --- ethcore/src/client/client.rs | 4 ++ ethcore/src/client/mod.rs | 3 ++ ethcore/src/client/test_client.rs | 11 +++++ rpc/src/v1/impls/eth.rs | 8 +++- rpc/src/v1/tests/eth.rs | 76 ++++++++++++++++++++++++++++--- 5 files changed, 94 insertions(+), 8 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index a860dd752..174142f7a 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -453,6 +453,10 @@ impl BlockChainClient for Client where V: Verifier { self.state().balance(address) } + fn storage_at(&self, address: &Address, position: &H256) -> H256 { + self.state().storage_at(address, position) + } + fn transaction(&self, id: TransactionId) -> Option { match id { TransactionId::Hash(ref hash) => self.chain.transaction_address(hash), diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index af2c6ac14..f07a1f7c3 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -69,6 +69,9 @@ pub trait BlockChainClient : Sync + Send { /// Get address balance. fn balance(&self, address: &Address) -> U256; + /// Get value of the storage at given position. + fn storage_at(&self, address: &Address, position: &H256) -> H256; + /// Get transaction with given hash. fn transaction(&self, id: TransactionId) -> Option; diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 997f159d2..0dd4359ce 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -42,6 +42,8 @@ pub struct TestBlockChainClient { pub difficulty: RwLock, /// Balances. pub balances: RwLock>, + /// Storage. + pub storage: RwLock>, } #[derive(Clone)] @@ -74,6 +76,7 @@ impl TestBlockChainClient { last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), balances: RwLock::new(HashMap::new()), + storage: RwLock::new(HashMap::new()), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); @@ -84,6 +87,10 @@ impl TestBlockChainClient { self.balances.write().unwrap().insert(address, balance); } + pub fn set_storage(&mut self, address: Address, position: H256, value: H256) { + self.storage.write().unwrap().insert((address, position), value); + } + /// Add blocks to test client. 
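// A standalone sketch, with simplified stand-in types, of the tuple-keyed
// map used for test storage above: keying a HashMap by (address, position)
// is enough to emulate per-account storage slots in tests, and unset slots
// read back as the zero value, like a real trie lookup would.
use std::collections::HashMap;

type Addr = u8;  // stand-in for util::hash::Address
type Word = u64; // stand-in for util::hash::H256

#[derive(Default)]
struct TestStorage { storage: HashMap<(Addr, Word), Word> }

impl TestStorage {
    fn set_storage(&mut self, address: Addr, position: Word, value: Word) {
        self.storage.insert((address, position), value);
    }
    fn storage_at(&self, address: &Addr, position: &Word) -> Word {
        self.storage.get(&(*address, *position)).cloned().unwrap_or_default()
    }
}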
pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); @@ -182,6 +189,10 @@ impl BlockChainClient for TestBlockChainClient { self.balances.read().unwrap().get(address).cloned().unwrap_or_else(U256::zero) } + fn storage_at(&self, address: &Address, position: &H256) -> H256 { + self.storage.read().unwrap().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new) + } + fn transaction(&self, _id: TransactionId) -> Option { unimplemented!(); } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index f5159f55f..a5736b76b 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -101,7 +101,7 @@ impl EthClient where C: BlockChainClient, S: SyncProvider, A: impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncProvider + 'static, A: AccountProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { - Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), + Params::None => Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())), _ => Err(Error::invalid_params()) } } @@ -175,6 +175,12 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).balance(&address))) } + fn storage_at(&self, params: Params) -> Result { + from_params::<(Address, U256, BlockNumber)>(params) + .and_then(|(address, position, _block_number)| + to_value(&U256::from(take_weak!(self.client).storage_at(&address, &H256::from(position))))) + } + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 8c61c2ed9..47dc1dd89 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -17,7 +17,7 @@ use std::collections::HashMap; use std::sync::Arc; use jsonrpc_core::IoHandler; -use util::hash::Address; +use util::hash::{Address, H256}; use util::numbers::U256; use ethcore::client::{TestBlockChainClient, EachBlockWith}; use v1::{Eth, EthClient}; @@ -27,6 +27,7 @@ fn blockchain_client() -> Arc { let mut client = TestBlockChainClient::new(); client.add_blocks(10, EachBlockWith::Nothing); client.set_balance(Address::from(1), U256::from(5)); + client.set_storage(Address::from(1), H256::from(4), H256::from(7)); Arc::new(client) } @@ -45,9 +46,9 @@ fn sync_provider() -> Arc { } struct EthTester { - client: Arc, - sync: Arc, - accounts_provider: Arc, + _client: Arc, + _sync: Arc, + _accounts_provider: Arc, pub io: IoHandler, } @@ -60,14 +61,54 @@ impl Default for EthTester { let io = IoHandler::new(); io.add_delegate(eth); EthTester { - client: client, - sync: sync, - accounts_provider: ap, + _client: client, + _sync: sync, + _accounts_provider: ap, io: io } } } +#[test] +fn rpc_eth_protocol_version() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_protocolVersion", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"65","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +#[ignore] +fn rpc_eth_syncing() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_hashrate() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_author() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_mining() { + unimplemented!() +} + +#[test] +fn rpc_eth_gas_price() { + 
let request = r#"{"jsonrpc": "2.0", "method": "eth_gasPrice", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0ba43b7400","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + #[test] fn rpc_eth_accounts() { let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; @@ -76,6 +117,14 @@ fn rpc_eth_accounts() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } +#[test] +fn rpc_eth_block_number() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0a","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + #[test] fn rpc_eth_balance() { let request = r#"{ @@ -88,3 +137,16 @@ fn rpc_eth_balance() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } + +#[test] +fn rpc_eth_storage_at() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getStorageAt", + "params": ["0x0000000000000000000000000000000000000001", "0x4", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x07","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} From 450ae4147fa9c6cebf7f10eba70960aa9e610ff2 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 13:03:02 +0100 Subject: [PATCH 172/222] memory and expiration mngmt --- util/src/keys/store.rs | 57 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 54 insertions(+), 3 deletions(-) diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 610dda9f5..3477130e9 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -135,6 +135,19 @@ impl AccountService { secret_store: secret_store } } + + #[cfg(test)] + fn new_test(temp: &::devtools::RandomTempPath) -> Self { + let secret_store = RwLock::new(SecretStore::new_test(temp)); + AccountService { + secret_store: secret_store + } + } + + /// Ticks the account service + pub fn tick(&self) { + self.secret_store.write().unwrap().collect_garbage(); + } } impl Default for SecretStore { @@ -256,6 +269,17 @@ impl SecretStore { let unlock = try!(read_lock.get(account).ok_or(SigningError::AccountNotUnlocked)); Ok(unlock.secret as crypto::Secret) } + + /// Makes account unlocks expire and removes unused key files from memory + pub fn collect_garbage(&mut self) { + self.directory.collect_garbage(); + let utc = UTC::now(); + let expired_addresses = self.unlocks.read().unwrap().iter() + .filter(|&(_, unlock)| unlock.expires < utc) + .map(|(address, _)| address.clone()).collect::>(); + + for expired in expired_addresses { self.unlocks.write().unwrap().remove(&expired); } + } } fn derive_key_iterations(password: &str, salt: &H256, c: u32) -> (Bytes, Bytes) { @@ -362,14 +386,12 @@ impl EncryptedHashMap for SecretStore { } -#[cfg(test)] +#[cfg(all(test, feature="heavy-tests"))] mod vector_tests { use super::{derive_mac,derive_key_iterations}; use common::*; - #[test] - #[cfg(feature="heavy-tests")] fn mac_vector() { let password = "testpassword"; let salt = H256::from_str("ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd").unwrap(); @@ -395,6 +417,7 @@ mod tests { use devtools::*; use common::*; use crypto::KeyPair; + use chrono::*; #[test] fn can_insert() { @@ -581,4 +604,32 @@ mod tests { let kp = KeyPair::from_secret(secret).unwrap(); assert_eq!(Address::from(kp.public().sha3()), 
addr); } + + #[test] + fn can_create_service() { + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + assert!(svc.accounts().unwrap().is_empty()); + } + + #[test] + fn accounts_expire() { + use std::collections::hash_map::*; + + let temp = RandomTempPath::create_dir(); + let svc = AccountService::new_test(&temp); + let address = svc.new_account("pass").unwrap(); + svc.unlock_account(&address, "pass").unwrap(); + assert!(svc.account_secret(&address).is_ok()); + { + let ss_rw = svc.secret_store.write().unwrap(); + let mut ua_rw = ss_rw.unlocks.write().unwrap(); + let entry = ua_rw.entry(address); + if let Entry::Occupied(mut occupied) = entry { occupied.get_mut().expires = UTC::now() - Duration::minutes(1); } + } + + svc.tick(); + + assert!(svc.account_secret(&address).is_err()); + } } From c2b3ba533b0b4dbfb64ae308a33f8f7849d3fb52 Mon Sep 17 00:00:00 2001 From: debris Date: Sun, 13 Mar 2016 14:37:33 +0100 Subject: [PATCH 173/222] fixed eth_getTransactionCount**, and eth_getUncleCount** rpc methods, added tests for them --- rpc/src/v1/impls/eth.rs | 36 ++++++++++++++-------- rpc/src/v1/tests/eth.rs | 66 ++++++++++++++++++++++++++++++++++++++++ rpc/src/v1/traits/eth.rs | 15 +++++---- 3 files changed, 98 insertions(+), 19 deletions(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index a5736b76b..6d28ccc75 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -181,30 +181,40 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: to_value(&U256::from(take_weak!(self.client).storage_at(&address, &H256::from(position))))) } + fn transaction_count(&self, params: Params) -> Result { + from_params::<(Address, BlockNumber)>(params) + .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).nonce(&address))) + } + fn block_transaction_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), - None => Ok(Value::Null) - }) + .and_then(|(hash,)| // match + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).transactions_count())))) } fn block_transaction_count_by_number(&self, params: Params) -> Result { from_params::<(BlockNumber,)>(params) .and_then(|(block_number,)| match block_number { - BlockNumber::Pending => to_value(&take_weak!(self.sync).status().transaction_queue_pending), - _ => match take_weak!(self.client).block(block_number.into()) { - Some(bytes) => to_value(&BlockView::new(&bytes).transactions_count()), - None => Ok(Value::Null) - } + BlockNumber::Pending => to_value(&U256::from(take_weak!(self.sync).status().transaction_queue_pending)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).transactions_count()))) }) } - fn block_uncles_count(&self, params: Params) -> Result { + fn block_uncles_count_by_hash(&self, params: Params) -> Result { from_params::<(H256,)>(params) - .and_then(|(hash,)| match take_weak!(self.client).block(BlockId::Hash(hash)) { - Some(bytes) => to_value(&BlockView::new(&bytes).uncles_count()), - None => Ok(Value::Null) + .and_then(|(hash,)| + to_value(&take_weak!(self.client).block(BlockId::Hash(hash)) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).uncles_count())))) + } + + fn block_uncles_count_by_number(&self, 
params: Params) -> Result { + from_params::<(BlockNumber,)>(params) + .and_then(|(block_number,)| match block_number { + BlockNumber::Pending => to_value(&U256::from(0)), + _ => to_value(&take_weak!(self.client).block(block_number.into()) + .map_or_else(U256::zero, |bytes| U256::from(BlockView::new(&bytes).uncles_count()))) }) } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 47dc1dd89..bde84491b 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -150,3 +150,69 @@ fn rpc_eth_storage_at() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } + +#[test] +fn rpc_eth_transaction_count() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getTransactionCount", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_block_transaction_count_by_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_transaction_count_by_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getBlockTransactionCountByNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_hash() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockHash", + "params": ["0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_uncle_count_by_block_number() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getUncleCountByBlockNumber", + "params": ["latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0x00","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 7d33cb63f..738f561b2 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -59,14 +59,17 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns the number of transactions sent from given address at given time (block number). fn transaction_count(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of transactions in a block given block hash. + /// Returns the number of transactions in a block with given hash. fn block_transaction_count_by_hash(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of transactions in a block given block number. + /// Returns the number of transactions in a block with given block number. fn block_transaction_count_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } - /// Returns the number of uncles in a given block. - fn block_uncles_count(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Returns the number of uncles in a block with given hash. 
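// A small sketch of the map_or_else convention adopted in the count handlers
// above: when the queried block is unknown, the count comes back as zero
// instead of JSON null, so callers always receive a number. `count_in` is an
// assumed helper standing in for BlockView::transactions_count.
fn block_transaction_count(block: Option<Vec<u8>>) -> u64 {
    fn count_in(bytes: &[u8]) -> u64 { bytes.len() as u64 }
    block.map_or_else(|| 0, |bytes| count_in(&bytes))
}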
+ fn block_uncles_count_by_hash(&self, _: Params) -> Result { rpc_unimplemented!() } + + /// Returns the number of uncles in a block with given block number. + fn block_uncles_count_by_number(&self, _: Params) -> Result { rpc_unimplemented!() } /// Returns the code at given address at given time (block number). fn code_at(&self, _: Params) -> Result { rpc_unimplemented!() } @@ -135,8 +138,8 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_getTransactionCount", Eth::transaction_count); delegate.add_method("eth_getBlockTransactionCountByHash", Eth::block_transaction_count_by_hash); delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); - delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count); - delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count); + delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); + delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); delegate.add_method("eth_code", Eth::code_at); delegate.add_method("eth_sendTransaction", Eth::send_transaction); delegate.add_method("eth_call", Eth::call); From 00820c342a7a04f02886c2f920d29a5c0e419c61 Mon Sep 17 00:00:00 2001 From: debris Date: Sun, 13 Mar 2016 14:45:39 +0100 Subject: [PATCH 174/222] fixed eth_getCode and added tests for it --- ethcore/src/client/test_client.rs | 14 ++++++++++++-- rpc/src/v1/impls/eth.rs | 3 ++- rpc/src/v1/tests/eth.rs | 14 ++++++++++++++ rpc/src/v1/traits/eth.rs | 2 +- 4 files changed, 29 insertions(+), 4 deletions(-) diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 0dd4359ce..d85540858 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -44,6 +44,8 @@ pub struct TestBlockChainClient { pub balances: RwLock>, /// Storage. pub storage: RwLock>, + /// Code. + pub code: RwLock>, } #[derive(Clone)] @@ -77,16 +79,24 @@ impl TestBlockChainClient { difficulty: RwLock::new(From::from(0)), balances: RwLock::new(HashMap::new()), storage: RwLock::new(HashMap::new()), + code: RwLock::new(HashMap::new()), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } + /// Set code at given address. + pub fn set_code(&mut self, address: Address, code: Bytes) { + self.code.write().unwrap().insert(address, code); + } + + /// Set balance at given address. pub fn set_balance(&mut self, address: Address, balance: U256) { self.balances.write().unwrap().insert(address, balance); } + /// Set storage at given address and position. 
pub fn set_storage(&mut self, address: Address, position: H256, value: H256) { self.storage.write().unwrap().insert((address, position), value); } @@ -181,8 +191,8 @@ impl BlockChainClient for TestBlockChainClient { U256::zero() } - fn code(&self, _address: &Address) -> Option { - unimplemented!(); + fn code(&self, address: &Address) -> Option { + self.code.read().unwrap().get(address).cloned() } fn balance(&self, address: &Address) -> U256 { diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 6d28ccc75..2c7e16af5 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -221,7 +221,8 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: // TODO: do not ignore block number param fn code_at(&self, params: Params) -> Result { from_params::<(Address, BlockNumber)>(params) - .and_then(|(address, _block_number)| to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) + .and_then(|(address, _block_number)| + to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new))) } fn block_by_hash(&self, params: Params) -> Result { diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index bde84491b..a636ba278 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -28,6 +28,7 @@ fn blockchain_client() -> Arc { client.add_blocks(10, EachBlockWith::Nothing); client.set_balance(Address::from(1), U256::from(5)); client.set_storage(Address::from(1), H256::from(4), H256::from(7)); + client.set_code(Address::from(1), vec![0xff, 0x21]); Arc::new(client) } @@ -216,3 +217,16 @@ fn rpc_eth_uncle_count_by_block_number() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } +#[test] +fn rpc_eth_code() { + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_getCode", + "params": ["0x0000000000000000000000000000000000000001", "latest"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 738f561b2..0b09012ab 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -140,7 +140,7 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_getBlockTransactionCountByNumber", Eth::block_transaction_count_by_number); delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); - delegate.add_method("eth_code", Eth::code_at); + delegate.add_method("eth_getCode", Eth::code_at); delegate.add_method("eth_sendTransaction", Eth::send_transaction); delegate.add_method("eth_call", Eth::call); delegate.add_method("eth_estimateGas", Eth::estimate_gas); From 89dc6fa9ccc57233dccd23402dbd29eae2a60a8e Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 14:46:45 +0100 Subject: [PATCH 175/222] io handlers --- parity/main.rs | 13 +++++++++++-- util/src/keys/store.rs | 1 + 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index b16801ad5..7a92e5570 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -445,6 +445,7 @@ impl Configuration { // Secret Store let account_service = Arc::new(AccountService::new()); + service.io().register_handler(account_service).expect("Error registering IO handler"); // Setup rpc if self.args.flag_jsonrpc || self.args.flag_rpc { @@ -468,6 +469,7 @@ 
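// A minimal illustrative sketch of the token-dispatch pattern the
// ClientIoHandler hunks below rely on: each periodic job gets a timer token,
// and timeout() matches on it. `IoHandlerLike` is an assumed stand-in trait,
// not the project's IoHandler signature.
type TimerToken = usize;

const INFO_TIMER: TimerToken = 0;
const ACCOUNT_TICK_TIMER: TimerToken = 10;

trait IoHandlerLike {
    fn timeout(&self, timer: TimerToken);
}

struct Ticker;

impl IoHandlerLike for Ticker {
    fn timeout(&self, timer: TimerToken) {
        match timer {
            INFO_TIMER => { /* print sync/client status */ }
            ACCOUNT_TICK_TIMER => { /* expire unlocked accounts */ }
            _ => {} // a catch-all arm keeps the match total, as the later "registering timer" fix adds
        }
    }
}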
impl Configuration { client: service.client(), info: Default::default(), sync: sync.clone(), + accounts: account_service.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); @@ -559,20 +561,27 @@ impl Informant { const INFO_TIMER: TimerToken = 0; +const ACCOUNT_TICK_TIMER: TimerToken = 10; +const ACCOUNT_TICK_MS: u64 = 60000; + struct ClientIoHandler { client: Arc, sync: Arc, + accounts: Arc, info: Informant, } impl IoHandler for ClientIoHandler { fn initialize(&self, io: &IoContext) { io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); + io.register_timer(ACCOUNT_TICK_TIMER, ACCOUNT_TICK_MS).expect("Error registering account timer"); + } fn timeout(&self, _io: &IoContext, timer: TimerToken) { - if INFO_TIMER == timer { - self.info.tick(&self.client, &self.sync); + match timer { + INFO_TIMER => { self.info.tick(&self.client, &self.sync); } + ACCOUNT_TICK_TIMER => { self.accounts.tick(); } } } } diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 610dda9f5..68121a82b 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -137,6 +137,7 @@ impl AccountService { } } + impl Default for SecretStore { fn default() -> Self { SecretStore::new() From 29c85e16cd57bae94e058883f4ee912787a24754 Mon Sep 17 00:00:00 2001 From: debris Date: Sun, 13 Mar 2016 14:57:26 +0100 Subject: [PATCH 176/222] added eth_sign and eth_sendRawTransaction to eth interface --- rpc/src/v1/tests/eth.rs | 33 +++++++++++++++++++++++++++++++++ rpc/src/v1/traits/eth.rs | 8 ++++++++ 2 files changed, 41 insertions(+) diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index a636ba278..6e408e9f7 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -230,3 +230,36 @@ fn rpc_eth_code() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } +#[test] +#[ignore] +fn rpc_eth_call() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_send_raw_transaction() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_sign() { + unimplemented!() +} + +#[test] +#[ignore] +fn rpc_eth_estimate_gas() { + unimplemented!() +} + + + + diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 0b09012ab..bcd7e7cfe 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -74,9 +74,15 @@ pub trait Eth: Sized + Send + Sync + 'static { /// Returns the code at given address at given time (block number). fn code_at(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Signs the data with given address signature. + fn sign(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Sends transaction. fn send_transaction(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Sends signed transaction. + fn send_raw_transaction(&self, _: Params) -> Result { rpc_unimplemented!() } + /// Call contract. 
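// A simplified sketch, not the jsonrpc_core API, of the name-to-method
// registration table that `to_delegate` below fills in: because dispatch is
// keyed by the method string, renames such as eth_balance -> eth_getBalance
// earlier in this series only touch this table, not the handlers themselves.
use std::collections::HashMap;

#[derive(Default)]
struct Delegate { methods: HashMap<&'static str, fn(&str) -> String> }

impl Delegate {
    fn add_method(&mut self, name: &'static str, method: fn(&str) -> String) {
        self.methods.insert(name, method);
    }
    fn handle(&self, name: &str, params: &str) -> Option<String> {
        self.methods.get(name).map(|m| m(params))
    }
}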
fn call(&self, _: Params) -> Result { rpc_unimplemented!() } @@ -141,7 +147,9 @@ pub trait Eth: Sized + Send + Sync + 'static { delegate.add_method("eth_getUncleCountByBlockHash", Eth::block_uncles_count_by_hash); delegate.add_method("eth_getUncleCountByBlockNumber", Eth::block_uncles_count_by_number); delegate.add_method("eth_getCode", Eth::code_at); + delegate.add_method("eth_sign", Eth::sign); delegate.add_method("eth_sendTransaction", Eth::send_transaction); + delegate.add_method("eth_sendRawTransaction", Eth::send_raw_transaction); delegate.add_method("eth_call", Eth::call); delegate.add_method("eth_estimateGas", Eth::estimate_gas); delegate.add_method("eth_getBlockByHash", Eth::block_by_hash); From 4a58e142bd2062ccc7491e2f04b4e907224f3283 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 15:02:08 +0100 Subject: [PATCH 177/222] Remove duplicate ciippys. --- Cargo.toml | 3 +-- rpc/Cargo.toml | 3 +-- sync/Cargo.toml | 1 - 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d3dcad0f8..351041119 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,15 +19,14 @@ ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" number_prefix = "0.2" -clippy = { version = "0.0.50", optional = true } rpassword = "0.1" +clippy = { version = "0.0.50", optional = true } ethcore = { path = "ethcore" } ethcore-util = { path = "util" } ethsync = { path = "sync" } ethminer = { path = "miner" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } -clippy = { version = "0.0.49", optional = true } [features] default = ["rpc"] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 70cc2ec42..fa89041d8 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -18,12 +18,11 @@ ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } -clippy = { version = "0.0.50", optional = true } ethminer = { path = "../miner" } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 2765b0680..8cd59333d 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -17,7 +17,6 @@ env_logger = "0.3" time = "0.1.34" rand = "0.3.13" heapsize = "0.3" -clippy = { version = "0.0.49", optional = true } [features] default = [] From 6ee13b0000dc9f4cd8bfc3505b95f3d522471f2a Mon Sep 17 00:00:00 2001 From: debris Date: Sun, 13 Mar 2016 15:02:46 +0100 Subject: [PATCH 178/222] implemented eth_getCompilers --- rpc/src/v1/impls/eth.rs | 7 +++++++ rpc/src/v1/tests/eth.rs | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 2c7e16af5..8ff1f30d0 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -250,6 +250,13 @@ impl Eth for EthClient where C: BlockChainClient + 'static, S: .and_then(|(number, index)| self.transaction(TransactionId::Location(number.into(), index.value()))) } + fn compilers(&self, params: Params) -> Result { + match params { + Params::None => to_value(&vec![] as &Vec), + _ => Err(Error::invalid_params()) + } + } + fn logs(&self, params: Params) -> Result { from_params::<(Filter,)>(params) .and_then(|(filter,)| { diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 
6e408e9f7..3d4fb0451 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -260,6 +260,13 @@ fn rpc_eth_estimate_gas() { unimplemented!() } +#[test] +fn rpc_eth_compilers() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_getCompilers", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} From a4f03100e9633d411d65a17fa568330c51be063f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 15:11:16 +0100 Subject: [PATCH 179/222] registering timer --- parity/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 7a92e5570..1bfd2ced1 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -445,7 +445,6 @@ impl Configuration { // Secret Store let account_service = Arc::new(AccountService::new()); - service.io().register_handler(account_service).expect("Error registering IO handler"); // Setup rpc if self.args.flag_jsonrpc || self.args.flag_rpc { @@ -581,7 +580,8 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { match timer { INFO_TIMER => { self.info.tick(&self.client, &self.sync); } - ACCOUNT_TICK_TIMER => { self.accounts.tick(); } + ACCOUNT_TICK_TIMER => { self.accounts.tick(); }, + _ => {} } } } From 08b9cc2c41d3ae7a761984acc5f2a2956b18a1c1 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 15:29:55 +0100 Subject: [PATCH 180/222] Merge changes from #674 into branch. --- Cargo.lock | 10 ++++++ ethcore/src/client/client.rs | 67 ++++++++++++++++++++++++++---------- ethcore/src/service.rs | 8 +++-- miner/src/lib.rs | 2 +- miner/src/miner.rs | 15 +++++--- parity/main.rs | 21 ++++++----- sync/src/chain.rs | 8 ++--- sync/src/lib.rs | 4 +-- sync/src/tests/helpers.rs | 2 +- 9 files changed, 93 insertions(+), 44 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8552f299f..d68c5c121 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,6 +93,16 @@ dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "clippy" +version = "0.0.49" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", + "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "clippy" version = "0.0.50" diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 520b069e1..8c3d73ebb 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -35,7 +35,7 @@ use extras::TransactionAddress; use filter::Filter; use log_entry::LocalizedLogEntry; use block_queue::{BlockQueue, BlockQueueInfo}; -use blockchain::{BlockChain, BlockProvider, TreeRoute}; +use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute}; use client::{BlockId, TransactionId, ClientConfig, BlockChainClient}; pub use blockchain::CacheSize as BlockChainCacheSize; @@ -222,12 +222,39 @@ impl Client where V: Verifier { Ok(closed_block) } + fn calculate_enacted_retracted(&self, import_results: Vec) -> (Vec, Vec) { + fn map_to_vec(map: Vec<(H256, bool)>) -> Vec { + map.into_iter().map(|(k, _v)| k).collect() + } + + // In ImportRoute we get all the blocks that have been enacted and retracted by single insert. 
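// A standalone sketch of the fold this comment describes, under simplified
// assumptions: u64 stands in for H256 and ImportRoute carries only the
// enacted/retracted lists. Later routes overwrite earlier entries, so the
// map ends up holding each hash's final state.
use std::collections::HashMap;

struct ImportRoute { enacted: Vec<u64>, retracted: Vec<u64> }

fn calculate_enacted_retracted(routes: Vec<ImportRoute>) -> (Vec<u64>, Vec<u64>) {
    let map = routes.into_iter().fold(HashMap::new(), |mut map, route| {
        for hash in route.enacted { map.insert(hash, true); }
        for hash in route.retracted { map.insert(hash, false); }
        map
    });
    let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_, v)| v);
    (enacted.into_iter().map(|(k, _)| k).collect(),
     retracted.into_iter().map(|(k, _)| k).collect())
}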
+ // Because we are doing multiple inserts some of the blocks that were enacted in import `k` + // could be retracted in import `k+1`. This is why to understand if after all inserts + // the block is enacted or retracted we iterate over all routes and at the end final state + // will be in the hashmap + let map = import_results.into_iter().fold(HashMap::new(), |mut map, route| { + for hash in route.enacted { + map.insert(hash, true); + } + for hash in route.retracted { + map.insert(hash, false); + } + map + }); + + // Split to enacted retracted (using hashmap value) + let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v); + // And convert tuples to keys + (map_to_vec(enacted), map_to_vec(retracted)) + } + /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, io: &IoChannel) -> usize { let max_blocks_to_import = 128; - let mut good_blocks = Vec::with_capacity(max_blocks_to_import); - let mut bad_blocks = HashSet::new(); + let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); + let mut invalid_blocks = HashSet::new(); + let mut import_results = Vec::with_capacity(max_blocks_to_import); let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.drain(max_blocks_to_import); @@ -237,16 +264,16 @@ impl Client where V: Verifier { for block in blocks { let header = &block.header; - if bad_blocks.contains(&header.parent_hash) { - bad_blocks.insert(header.hash()); + if invalid_blocks.contains(&header.parent_hash) { + invalid_blocks.insert(header.hash()); continue; } let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { - bad_blocks.insert(header.hash()); + invalid_blocks.insert(header.hash()); break; } - good_blocks.push(header.hash()); + imported_blocks.push(header.hash()); // Are we committing an era? let ancient = if header.number() >= HISTORY { @@ -265,31 +292,33 @@ impl Client where V: Verifier { // And update the chain after commit to prevent race conditions // (when something is in chain but you are not able to fetch details) - self.chain.insert_block(&block.bytes, receipts); + let route = self.chain.insert_block(&block.bytes, receipts); + import_results.push(route); self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } - let imported = good_blocks.len(); - let bad_blocks = bad_blocks.into_iter().collect::>(); + let imported = imported_blocks.len(); + let invalid_blocks = invalid_blocks.into_iter().collect::>(); { - if !bad_blocks.is_empty() { - self.block_queue.mark_as_bad(&bad_blocks); + if !invalid_blocks.is_empty() { + self.block_queue.mark_as_bad(&invalid_blocks); } - if !good_blocks.is_empty() { - self.block_queue.mark_as_good(&good_blocks); + if !imported_blocks.is_empty() { + self.block_queue.mark_as_good(&imported_blocks); } } { - if !good_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { + let (enacted, retracted) = self.calculate_enacted_retracted(import_results); io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - good: good_blocks, - bad: bad_blocks, - // TODO [todr] were to take those from? 
- retracted: vec![], + good: imported_blocks, + invalid: invalid_blocks, + enacted: enacted, + retracted: retracted, })).unwrap(); } } diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 27303bea7..bcfe7724f 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -28,11 +28,13 @@ pub enum SyncMessage { /// New block has been imported into the blockchain NewChainBlocks { /// Hashes of blocks imported to blockchain - good: Vec, - /// Hashes of blocks not imported to blockchain - bad: Vec, + imported: Vec, + /// Hashes of blocks not imported to blockchain (because were invalid) + invalid: Vec, /// Hashes of blocks that were removed from canonical chain retracted: Vec, + /// Hashes of blocks that are now included in cannonical chain + enacted: Vec, }, /// Best Block Hash in chain has been changed NewChainHead, diff --git a/miner/src/lib.rs b/miner/src/lib.rs index 9c2ad9ba5..a431bd44e 100644 --- a/miner/src/lib.rs +++ b/miner/src/lib.rs @@ -89,7 +89,7 @@ pub trait MinerService : Send + Sync { fn clear_and_reset(&self, chain: &BlockChainClient); /// Called when blocks are imported to chain, updates transactions queue. - fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], retracted: &[H256]); + fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]); /// New chain head event. Restart mining operation. fn prepare_sealing(&self, chain: &BlockChainClient); diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 00efb83d3..a07da7569 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -150,7 +150,7 @@ impl MinerService for Miner { } } - fn chain_new_blocks(&self, chain: &BlockChainClient, good: &[H256], bad: &[H256], _retracted: &[H256]) { + fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(*hash)) @@ -161,15 +161,20 @@ impl MinerService for Miner { } { - let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); + let in_chain = vec![imported, enacted, invalid]; + let in_chain = in_chain + .par_iter() + .flat_map(|h| h.par_iter().map(|h| fetch_transactions(chain, h))); + .map(|h| fetch_transactions(chain, h)); + let out_of_chain = retracted + .par_iter() - good.for_each(|txs| { + in_chain.for_each(|txs| { let mut transaction_queue = self.transaction_queue.lock().unwrap(); let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); }); - bad.for_each(|txs| { + out_of_chain.for_each(|txs| { // populate sender for tx in &txs { let _sender = tx.sender(); diff --git a/parity/main.rs b/parity/main.rs index eb1937757..56a7d48de 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -114,7 +114,7 @@ API and Console Options: --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). Sealing/Mining Options: - --gasprice GAS Minimal gas price a transaction must have to be accepted for mining [default: 20000000000]. + --gas-price WEI Minimum amount of Wei to be paid for a transaction to be accepted for mining [default: 20000000000]. --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. 
--extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. @@ -138,11 +138,12 @@ Geth-Compatibility Options --maxpeers COUNT Equivalent to --peers COUNT. --nodekey KEY Equivalent to --node-key KEY. --nodiscover Equivalent to --no-discovery. + --gasprice WEI Equivalent to --gas-price WEI. --etherbase ADDRESS Equivalent to --author ADDRESS. --extradata STRING Equivalent to --extra-data STRING. Miscellaneous Options: - -l --logging LOGGING Specify the logging level. + -l --logging LOGGING Specify the logging level. Must conform to the same format as RUST_LOG. -v --version Show information about version. -h --help Show this screen. "#; @@ -175,18 +176,19 @@ struct Args { flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, flag_jsonrpc_apis: String, + flag_author: String, + flag_gas_price: String, + flag_extra_data: Option, flag_logging: Option, flag_version: bool, // geth-compatibility... flag_nodekey: Option, flag_nodiscover: bool, flag_maxpeers: Option, - flag_gasprice: String, - flag_author: String, - flag_extra_data: Option, flag_datadir: Option, flag_extradata: Option, flag_etherbase: Option, + flag_gasprice: Option, flag_rpc: bool, flag_rpcaddr: Option, flag_rpcport: Option, @@ -301,9 +303,10 @@ impl Configuration { }) } - fn gasprice(&self) -> U256 { - U256::from_dec_str(self.args.flag_gasprice.as_str()).unwrap_or_else(|_| { - die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", self.args.flag_gasprice) + fn gas_price(&self) -> U256 { + let d = self.args.flag_gasprice.as_ref().unwrap_or(&self.args.flag_gas_price); + U256::from_dec_str(d).unwrap_or_else(|_| { + die!("{}: Invalid gas price given. Must be a decimal unsigned 256-bit number.", d) }) } @@ -483,7 +486,7 @@ impl Configuration { let miner = Miner::new(); miner.set_author(self.author()); miner.set_extra_data(self.extra_data()); - miner.set_minimal_gas_price(self.gasprice()); + miner.set_minimal_gas_price(self.gas_price()); // Sync let sync = EthSync::register(service.network(), sync_config, client.clone(), miner.clone()); diff --git a/sync/src/chain.rs b/sync/src/chain.rs index e622f0b86..d06a34764 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1263,9 +1263,9 @@ impl ChainSync { } /// called when block is imported to chain, updates transactions queue and propagates the blocks - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], retracted: &[H256]) { + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) { // Notify miner - self.miner.chain_new_blocks(io.chain(), good, bad, retracted); + self.miner.chain_new_blocks(io.chain(), imported, invalid, enacted, retracted); // Propagate latests blocks self.propagate_latest_blocks(io); // TODO [todr] propagate transactions? 
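// A simplified sketch of the queue bookkeeping chain_new_blocks drives, as
// the updated test below checks: transactions included by enacted blocks
// leave the pending queue, while transactions from retracted blocks are put
// back. Here u64 stands in for a transaction hash; the real code first
// fetches each block's transactions by block hash.
use std::collections::HashSet;

#[derive(Default)]
struct TxQueueSketch { pending: HashSet<u64> }

impl TxQueueSketch {
    fn chain_new_blocks(&mut self, enacted_txs: &[u64], retracted_txs: &[u64]) {
        for tx in enacted_txs { self.pending.remove(tx); }    // now on-chain
        for tx in retracted_txs { self.pending.insert(*tx); } // back to pending
    }
}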
@@ -1616,10 +1616,10 @@ mod tests { let mut io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]); + sync.chain_new_blocks(&mut io, &[], &good_blocks, &[], &[]); assert_eq!(sync.miner.status().transaction_queue_future, 0); assert_eq!(sync.miner.status().transaction_queue_pending, 1); - sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]); + sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks); // then let status = sync.miner.status(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 0c7abd1d0..c47b74b66 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -167,9 +167,9 @@ impl NetworkProtocolHandler for EthSync { #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { + SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); + self.sync.write().unwrap().chain_new_blocks(&mut sync_io, imported, invalid, enacted, retracted); }, SyncMessage::NewChainHead => { let mut sync_io = NetSyncIo::new(io, self.chain.deref()); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 00df35e77..b3e62ccc6 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -168,6 +168,6 @@ impl TestNet { pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]); + peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[], &[]); } } From 76696e3b49a2a8e84caa11a1d27c2f9ad5e78cb8 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 15:36:03 +0100 Subject: [PATCH 181/222] Minor build fixes. --- ethcore/src/client/client.rs | 2 +- ethcore/src/client/test_client.rs | 2 ++ miner/src/miner.rs | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 8c3d73ebb..d748cc4ee 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -315,7 +315,7 @@ impl Client where V: Verifier { if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() { let (enacted, retracted) = self.calculate_enacted_retracted(import_results); io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { - good: imported_blocks, + imported: imported_blocks, invalid: invalid_blocks, enacted: enacted, retracted: retracted, diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index d801c08e0..a97228b09 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -83,10 +83,12 @@ impl TestBlockChainClient { client } + /// Set the balance of account `address` to `balance`. pub fn set_balance(&mut self, address: Address, balance: U256) { self.balances.write().unwrap().insert(address, balance); } + /// Set storage `position` to `value` for account `address`. 
pub fn set_storage(&mut self, address: Address, position: H256, value: H256) { self.storage.write().unwrap().insert((address, position), value); } diff --git a/miner/src/miner.rs b/miner/src/miner.rs index a07da7569..ad403150d 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -150,7 +150,7 @@ impl MinerService for Miner { } } - fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], _retracted: &[H256]) { + fn chain_new_blocks(&self, chain: &BlockChainClient, imported: &[H256], invalid: &[H256], enacted: &[H256], retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(*hash)) @@ -165,9 +165,9 @@ impl MinerService for Miner { let in_chain = in_chain .par_iter() .flat_map(|h| h.par_iter().map(|h| fetch_transactions(chain, h))); - .map(|h| fetch_transactions(chain, h)); let out_of_chain = retracted .par_iter() + .map(|h| fetch_transactions(chain, h)); in_chain.for_each(|txs| { let mut transaction_queue = self.transaction_queue.lock().unwrap(); From 809c239ff80dd31467fcdef8f5be5da42fe95106 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 15:59:25 +0100 Subject: [PATCH 182/222] fix rev --- ethcore/src/client/client.rs | 6 +----- sync/src/chain.rs | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 95ee90f7d..81e1ef287 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -471,11 +471,7 @@ impl BlockChainClient for Client where V: Verifier { } fn block_receipts(&self, hash: &H256) -> Option { - self.chain.block_receipts(hash).and_then(|receipts| { - let mut rlp = RlpStream::new(); - rlp.append(&receipts); - Some(rlp.out()) - }) + self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec()) } fn import_block(&self, bytes: Bytes) -> ImportResult { diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ae4744902..bc3041d02 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -66,7 +66,7 @@ const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; const MAX_RECEIPTS_TO_SEND: usize = 1024; -const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 16; +const MAX_RECEIPTS_HEADERS_TO_SEND: usize = 256; const MAX_HEADERS_TO_REQUEST: usize = 512; const MAX_BODIES_TO_REQUEST: usize = 256; const MIN_PEERS_PROPAGATION: usize = 4; From 6cedb263aa5857ec480ab9ea86061fe90dac8c7d Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 17:01:50 +0100 Subject: [PATCH 183/222] Add missing file. --- rpc/src/v1/tests/helpers/miner_service.rs | 53 +++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 rpc/src/v1/tests/helpers/miner_service.rs diff --git a/rpc/src/v1/tests/helpers/miner_service.rs b/rpc/src/v1/tests/helpers/miner_service.rs new file mode 100644 index 000000000..0cddf2a1e --- /dev/null +++ b/rpc/src/v1/tests/helpers/miner_service.rs @@ -0,0 +1,53 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use util::{Address, H256, U256, Bytes}; +use util::standard::*; +use ethcore::error::Error; +use ethcore::client::BlockChainClient; +use ethcore::block::ClosedBlock; +use ethcore::transaction::SignedTransaction; +use ethminer::{MinerService, MinerStatus}; + +pub struct TestMinerService; + +impl MinerService for TestMinerService { + + /// Returns miner's status. + fn status(&self) -> MinerStatus { unimplemented!(); } + + /// Imports transactions to transaction queue. + fn import_transactions(&self, _transactions: Vec, _fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { unimplemented!(); } + + /// Returns hashes of transactions currently in pending + fn pending_transactions_hashes(&self) -> Vec { unimplemented!(); } + + /// Removes all transactions from the queue and restart mining operation. + fn clear_and_reset(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Called when blocks are imported to chain, updates transactions queue. + fn chain_new_blocks(&self, _chain: &BlockChainClient, _imported: &[H256], _invalid: &[H256], _enacted: &[H256], _retracted: &[H256]) { unimplemented!(); } + + /// New chain head event. Restart mining operation. + fn prepare_sealing(&self, _chain: &BlockChainClient) { unimplemented!(); } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self, _chain: &BlockChainClient) -> &Mutex> { unimplemented!(); } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, _chain: &BlockChainClient, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { unimplemented!(); } +} \ No newline at end of file From 9e912c7c0dd19054a9fa04cb8c488530667f8c06 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 18:07:10 +0100 Subject: [PATCH 184/222] Add new file. --- util/src/journaldb/refcounteddb.rs | 264 +++++++++++++++++++++++++++++ 1 file changed, 264 insertions(+) create mode 100644 util/src/journaldb/refcounteddb.rs diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs new file mode 100644 index 000000000..32471b36c --- /dev/null +++ b/util/src/journaldb/refcounteddb.rs @@ -0,0 +1,264 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Disk-backed, ref-counted JournalDB implementation. 
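// Aside: a usage-cycle sketch distilled from the tests at the bottom of this
// file (hedged; `new_temp()` is the test-only constructor, and a one-era
// history window is assumed):
//
//     let mut db = RefCountedDB::new_temp();
//     let key = db.insert(b"value");               // insert is journalled...
//     db.commit(0, &b"0".sha3(), None).unwrap();   // ...under era 0
//     db.remove(&key);                             // latent remove, era 1
//     db.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap();
//     assert!(db.exists(&key)); // the remove only lands when era 1 is closed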
+ +use common::*; +use rlp::*; +use hashdb::*; +use overlaydb::*; +use super::traits::JournalDB; +use kvdb::{Database, DBTransaction, DatabaseConfig}; +#[cfg(test)] +use std::env; + +/// Implementation of the HashDB trait for a disk-backed database with a memory overlay +/// and latent-removal semantics. +/// +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect +/// immediately. Rather some age (based on a linear but arbitrary metric) must pass before +/// the removals actually take effect. +pub struct RefCountedDB { + forward: OverlayDB, + backing: Arc, + inserts: Vec, + removes: Vec, +} + +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const DB_VERSION : u32 = 512; + +impl RefCountedDB { + /// Create a new instance given a `backing` database. + pub fn new(path: &str) -> RefCountedDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + if !backing.is_empty() { + match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { + Ok(Some(DB_VERSION)) => {}, + v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) + } + } else { + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + } + + let backing = Arc::new(backing); + RefCountedDB { + forward: OverlayDB::new_with_arc(backing.clone()), + backing: backing, + inserts: vec![], + removes: vec![], + } + } + + /// Create a new instance with an anonymous temporary database. + #[cfg(test)] + fn new_temp() -> RefCountedDB { + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + Self::new(dir.to_str().unwrap()) + } +} + +impl HashDB for RefCountedDB { + fn keys(&self) -> HashMap { self.forward.keys() } + fn lookup(&self, key: &H256) -> Option<&[u8]> { self.forward.lookup(key) } + fn exists(&self, key: &H256) -> bool { self.forward.exists(key) } + fn insert(&mut self, value: &[u8]) -> H256 { let r = self.forward.insert(value); self.inserts.push(r.clone()); r } + fn emplace(&mut self, key: H256, value: Bytes) { self.inserts.push(key.clone()); self.forward.emplace(key, value); } + fn kill(&mut self, key: &H256) { self.removes.push(key.clone()); } +} + +impl JournalDB for RefCountedDB { + fn spawn(&self) -> Box { + Box::new(RefCountedDB { + forward: self.forward.clone(), + backing: self.backing.clone(), + inserts: self.inserts.clone(), + removes: self.removes.clone(), + }) + } + + fn mem_used(&self) -> usize { + self.inserts.heap_size_of_children() + self.removes.heap_size_of_children() + } + + fn is_empty(&self) -> bool { + self.backing.get(&VERSION_KEY).expect("Low level database error").is_none() + } + + fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + // journal format: + // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] + // [era, n] => [ ... ] + + // TODO: store last_era, reclaim_period. + + // when we make a new commit, we journal the inserts and removes. + // for each end_era that we journaled that we are no passing by, + // we remove all of its removes assuming it is canonical and all + // of its inserts otherwise. + + // record new commit's details. 
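// Aside (illustrative sketch, not load-bearing code): each journal row is
// keyed by the RLP of the (era, index) pair, so the probe-for-a-free-slot
// loop below effectively builds
//
//     let mut r = RlpStream::new_list(2);
//     r.append(&now);     // era == block number being committed
//     r.append(&index);   // 0, 1, 2, ... one row per sibling block at this era
//     let key = r.drain();
//
// and stores the triple [id, inserts, removes] under that key.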
+ let batch = DBTransaction::new(); + { + let mut index = 0usize; + let mut last; + + while try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&now); + r.append(&index); + last = r.drain(); + &last + })).is_some() { + index += 1; + } + + let mut r = RlpStream::new_list(3); + r.append(id); + r.append(&self.inserts); + r.append(&self.removes); + try!(self.backing.put(&last, r.as_raw())); + self.inserts.clear(); + self.removes.clear(); + } + + // apply old commits' details + if let Some((end_era, canon_id)) = end { + let mut index = 0usize; + let mut last; + while let Some(rlp_data) = try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&end_era); + r.append(&index); + last = r.drain(); + &last + })) { + let rlp = Rlp::new(&rlp_data); + let to_remove: Vec = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); + for i in &to_remove { + self.forward.remove(i); + } + try!(self.backing.delete(&last)); + trace!("RefCountedDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); + index += 1; + } + } + + let r = try!(self.forward.commit_to_batch(&batch)); + try!(self.backing.write(batch)); + Ok(r) + } +} + +#[cfg(test)] +mod tests { + use common::*; + use super::*; + use super::super::traits::JournalDB; + use hashdb::*; + + #[test] + fn long_history() { + // history is 3 + let mut jdb = RefCountedDB::new_temp(); + let h = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.remove(&h); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(3, &b"3".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&h)); + jdb.commit(4, &b"4".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&h)); + } + + #[test] + fn complex() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + jdb.remove(&bar); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + let foo = jdb.insert(b"foo"); + jdb.remove(&baz); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.remove(&foo); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + assert!(!jdb.exists(&bar)); + assert!(!jdb.exists(&baz)); + } + + #[test] + fn fork() { + // history is 1 + let mut jdb = RefCountedDB::new_temp(); + + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + assert!(jdb.exists(&baz)); + + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + 
assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } +} From ac655af09138347d2d716ef42b2dfad038cd7549 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 18:07:36 +0100 Subject: [PATCH 185/222] Update overlaydb --- util/src/overlaydb.rs | 70 ++++++++++++++++++++++++------------------- 1 file changed, 39 insertions(+), 31 deletions(-) diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 7c9b6b04b..8166dd318 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -26,7 +26,7 @@ use std::ops::*; use std::sync::*; use std::env; use std::collections::HashMap; -use kvdb::{Database}; +use kvdb::{Database, DBTransaction}; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay. /// @@ -36,7 +36,7 @@ use kvdb::{Database}; /// /// `lookup()` and `contains()` maintain normal behaviour - all `insert()` and `remove()` /// queries have an immediate effect in terms of these functions. -//#[derive(Clone)] +#[derive(Clone)] pub struct OverlayDB { overlay: MemoryDB, backing: Arc, @@ -58,6 +58,36 @@ impl OverlayDB { Self::new(Database::open_default(dir.to_str().unwrap()).unwrap()) } + /// Commit all operations to given batch. + pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result { + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc != 0 { + match self.payload(&key) { + Some(x) => { + let (back_value, back_rc) = x; + let total_rc: i32 = back_rc as i32 + rc; + if total_rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash)); + } + deletes += if self.put_payload(batch, &key, (back_value, total_rc as u32)) {1} else {0}; + } + None => { + if rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash)); + } + self.put_payload(batch, &key, (value, rc as u32)); + } + }; + ret += 1; + } + } + trace!("OverlayDB::commit() deleted {} nodes", deletes); + Ok(ret) + } + /// Commit all memory operations to the backing database. /// /// Returns either an error or the number of items changed in the backing database. @@ -86,32 +116,10 @@ impl OverlayDB { /// } /// ``` pub fn commit(&mut self) -> Result { - let mut ret = 0u32; - let mut deletes = 0usize; - for i in self.overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc != 0 { - match self.payload(&key) { - Some(x) => { - let (back_value, back_rc) = x; - let total_rc: i32 = back_rc as i32 + rc; - if total_rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash)); - } - deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; - } - None => { - if rc < 0 { - return Err(From::from(BaseDataError::NegativelyReferencedHash)); - } - self.put_payload(&key, (value, rc as u32)); - } - }; - ret += 1; - } - } - trace!("OverlayDB::commit() deleted {} nodes", deletes); - Ok(ret) + let batch = DBTransaction::new(); + let r = try!(self.commit_to_batch(&batch)); + try!(self.backing.write(batch)); + Ok(r) } /// Revert all operations on this object (i.e. `insert()`s and `kill()`s) since the @@ -148,15 +156,15 @@ impl OverlayDB { } /// Put the refs and value of the given key, possibly deleting it from the db. 
- fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool { + fn put_payload(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { if payload.1 > 0 { let mut s = RlpStream::new_list(2); s.append(&payload.1); s.append(&payload.0); - self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); + batch.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); false } else { - self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + batch.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); true } } From 5107fc5897a92746a38a6f35af0b526827a3fbd2 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 18:09:44 +0100 Subject: [PATCH 186/222] Update options. --- parity/main.rs | 4 ++-- util/src/journaldb/mod.rs | 3 ++- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index b16801ad5..2bfa75e8a 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -81,7 +81,7 @@ Protocol Options: --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, - light (experimental), fast (experimental) [default: archive]. + basic (experimental), light (experimental), fast (experimental) [default: archive]. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] @@ -429,7 +429,7 @@ impl Configuration { "" | "archive" => journaldb::Algorithm::Archive, "pruned" => journaldb::Algorithm::EarlyMerge, "fast" => journaldb::Algorithm::OverlayRecent, -// "slow" => journaldb::Algorithm::RefCounted, // TODO: @gavofyork uncomment this once ref-count algo is merged. + "slow" => journaldb::Algorithm::RefCounted, _ => { die!("Invalid pruning method given."); } }; client_config.name = self.args.flag_identity.clone(); diff --git a/util/src/journaldb/mod.rs b/util/src/journaldb/mod.rs index cf5278368..e73c12969 100644 --- a/util/src/journaldb/mod.rs +++ b/util/src/journaldb/mod.rs @@ -23,6 +23,7 @@ pub mod traits; mod archivedb; mod earlymergedb; mod overlayrecentdb; +mod refcounteddb; /// Export the JournalDB trait. pub use self::traits::JournalDB; @@ -75,6 +76,6 @@ pub fn new(path: &str, algorithm: Algorithm) -> Box { Algorithm::Archive => Box::new(archivedb::ArchiveDB::new(path)), Algorithm::EarlyMerge => Box::new(earlymergedb::EarlyMergeDB::new(path)), Algorithm::OverlayRecent => Box::new(overlayrecentdb::OverlayRecentDB::new(path)), - _ => unimplemented!(), + Algorithm::RefCounted => Box::new(refcounteddb::RefCountedDB::new(path)), } } From 706c56f56a8758413d722fd97338cea37bf1a059 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 18:19:52 +0100 Subject: [PATCH 187/222] Usage of LATEST_ERA fixes for archive and ref-counted DBs. 
--- util/src/journaldb/archivedb.rs | 6 +++++- util/src/journaldb/refcounteddb.rs | 13 ++++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 2e4e966c1..19570b281 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -35,6 +35,7 @@ use std::env; pub struct ArchiveDB { overlay: MemoryDB, backing: Arc, + latest_era: Option, } // all keys must be at least 12 bytes @@ -60,9 +61,11 @@ impl ArchiveDB { backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } + let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); ArchiveDB { overlay: MemoryDB::new(), backing: Arc::new(backing), + latest_era: latest_era, } } @@ -129,6 +132,7 @@ impl JournalDB for ArchiveDB { Box::new(ArchiveDB { overlay: MemoryDB::new(), backing: self.backing.clone(), + latest_era: None, }) } @@ -137,7 +141,7 @@ impl JournalDB for ArchiveDB { } fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + self.latest_era.is_none() } fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result { diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 32471b36c..09362676b 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -35,10 +35,12 @@ use std::env; pub struct RefCountedDB { forward: OverlayDB, backing: Arc, + latest_era: Option, inserts: Vec, removes: Vec, } +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; const DB_VERSION : u32 = 512; @@ -61,11 +63,14 @@ impl RefCountedDB { } let backing = Arc::new(backing); + let latest_era = backing.get(&LATEST_ERA_KEY).expect("Low-level database error.").map(|val| decode::(&val)); + RefCountedDB { forward: OverlayDB::new_with_arc(backing.clone()), backing: backing, inserts: vec![], removes: vec![], + latest_era: latest_era, } } @@ -92,6 +97,7 @@ impl JournalDB for RefCountedDB { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), + latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), }) @@ -102,7 +108,7 @@ impl JournalDB for RefCountedDB { } fn is_empty(&self) -> bool { - self.backing.get(&VERSION_KEY).expect("Low level database error").is_none() + self.latest_era.is_none() } fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { @@ -141,6 +147,11 @@ impl JournalDB for RefCountedDB { try!(self.backing.put(&last, r.as_raw())); self.inserts.clear(); self.removes.clear(); + + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } } // apply old commits' details From 81291622eb9da914cfa1e6ba123245edfa48cd56 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 19:22:42 +0100 Subject: [PATCH 188/222] Avoid batches for now. 
--- parity/main.rs | 4 +-- util/src/journaldb/archivedb.rs | 8 +++-- util/src/journaldb/refcounteddb.rs | 2 +- util/src/overlaydb.rs | 50 +++++++++++++++++++++++++----- 4 files changed, 52 insertions(+), 12 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 2bfa75e8a..002655951 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -427,9 +427,9 @@ impl Configuration { } client_config.pruning = match self.args.flag_pruning.as_str() { "" | "archive" => journaldb::Algorithm::Archive, - "pruned" => journaldb::Algorithm::EarlyMerge, + "light" => journaldb::Algorithm::EarlyMerge, "fast" => journaldb::Algorithm::OverlayRecent, - "slow" => journaldb::Algorithm::RefCounted, + "basic" => journaldb::Algorithm::RefCounted, _ => { die!("Invalid pruning method given."); } }; client_config.name = self.args.flag_identity.clone(); diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 19570b281..83a80b7c2 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -132,7 +132,7 @@ impl JournalDB for ArchiveDB { Box::new(ArchiveDB { overlay: MemoryDB::new(), backing: self.backing.clone(), - latest_era: None, + latest_era: self.latest_era, }) } @@ -144,7 +144,7 @@ impl JournalDB for ArchiveDB { self.latest_era.is_none() } - fn commit(&mut self, _: u64, _: &H256, _: Option<(u64, H256)>) -> Result { + fn commit(&mut self, now: u64, _: &H256, _: Option<(u64, H256)>) -> Result { let batch = DBTransaction::new(); let mut inserts = 0usize; let mut deletes = 0usize; @@ -160,6 +160,10 @@ impl JournalDB for ArchiveDB { deletes += 1; } } + if self.latest_era.map_or(true, |e| now > e) { + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + self.latest_era = Some(now); + } try!(self.backing.write(batch)); Ok((inserts + deletes) as u32) } diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 09362676b..85f40e048 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -176,7 +176,7 @@ impl JournalDB for RefCountedDB { } } - let r = try!(self.forward.commit_to_batch(&batch)); + let r = try!(self.forward.commit()); try!(self.backing.write(batch)); Ok(r) } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 8166dd318..d176d38f6 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -72,13 +72,13 @@ impl OverlayDB { if total_rc < 0 { return Err(From::from(BaseDataError::NegativelyReferencedHash)); } - deletes += if self.put_payload(batch, &key, (back_value, total_rc as u32)) {1} else {0}; + deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { return Err(From::from(BaseDataError::NegativelyReferencedHash)); } - self.put_payload(batch, &key, (value, rc as u32)); + self.put_payload_in_batch(batch, &key, (value, rc as u32)); } }; ret += 1; @@ -116,10 +116,32 @@ impl OverlayDB { /// } /// ``` pub fn commit(&mut self) -> Result { - let batch = DBTransaction::new(); - let r = try!(self.commit_to_batch(&batch)); - try!(self.backing.write(batch)); - Ok(r) + let mut ret = 0u32; + let mut deletes = 0usize; + for i in self.overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc != 0 { + match self.payload(&key) { + Some(x) => { + let (back_value, back_rc) = x; + let total_rc: i32 = back_rc as i32 + rc; + if total_rc < 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash)); + } + deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; + } + None => { + if rc 
< 0 { + return Err(From::from(BaseDataError::NegativelyReferencedHash)); + } + self.put_payload(&key, (value, rc as u32)); + } + }; + ret += 1; + } + } + trace!("OverlayDB::commit() deleted {} nodes", deletes); + Ok(ret) } /// Revert all operations on this object (i.e. `insert()`s and `kill()`s) since the @@ -156,7 +178,7 @@ impl OverlayDB { } /// Put the refs and value of the given key, possibly deleting it from the db. - fn put_payload(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { + fn put_payload_in_batch(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { if payload.1 > 0 { let mut s = RlpStream::new_list(2); s.append(&payload.1); @@ -168,6 +190,20 @@ impl OverlayDB { true } } + + /// Put the refs and value of the given key, possibly deleting it from the db. + fn put_payload(&self, key: &H256, payload: (Bytes, u32)) -> bool { + if payload.1 > 0 { + let mut s = RlpStream::new_list(2); + s.append(&payload.1); + s.append(&payload.0); + self.backing.put(&key.bytes(), s.as_raw()).expect("Low-level database error. Some issue with your hard disk?"); + false + } else { + self.backing.delete(&key.bytes()).expect("Low-level database error. Some issue with your hard disk?"); + true + } + } } impl HashDB for OverlayDB { From c5edf237b2c17f56950016f3fcca1c77cfe416b3 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 13 Mar 2016 19:52:37 +0100 Subject: [PATCH 189/222] adding shrink-to-fit --- util/src/keys/directory.rs | 2 ++ util/src/keys/store.rs | 7 +++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/util/src/keys/directory.rs b/util/src/keys/directory.rs index d0d3393cd..a92bf4593 100644 --- a/util/src/keys/directory.rs +++ b/util/src/keys/directory.rs @@ -542,6 +542,8 @@ impl KeyDirectory { if removes.is_empty() { return; } let mut cache = self.cache.write().unwrap(); for key in removes { cache.remove(&key); } + + cache.shrink_to_fit(); } /// Reports how many keys are currently cached. 
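Both hunks of this patch follow the same recipe: bulk-remove entries from a
long-lived map (the key cache above, the unlock table below), then hand the
excess capacity back to the allocator with shrink_to_fit. A standalone sketch
of the pattern (hypothetical helper, std only):

    use std::collections::HashMap;
    use std::hash::Hash;

    // Evict a batch of keys, then compact the map's allocation.
    fn evict<K: Hash + Eq, V>(cache: &mut HashMap<K, V>, dead: Vec<K>) {
        for k in dead {
            cache.remove(&k);
        }
        cache.shrink_to_fit(); // capacity tracks the smaller population again
    }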
diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index e0a505f79..78540bdb0 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -273,13 +273,16 @@ impl SecretStore { /// Makes account unlocks expire and removes unused key files from memory pub fn collect_garbage(&mut self) { + let mut garbage_lock = self.unlocks.write().unwrap(); self.directory.collect_garbage(); let utc = UTC::now(); - let expired_addresses = self.unlocks.read().unwrap().iter() + let expired_addresses = garbage_lock.iter() .filter(|&(_, unlock)| unlock.expires < utc) .map(|(address, _)| address.clone()).collect::>(); - for expired in expired_addresses { self.unlocks.write().unwrap().remove(&expired); } + for expired in expired_addresses { garbage_lock.remove(&expired); } + + garbage_lock.shrink_to_fit(); } } From e2e067cdd07f5d2a5dd18d79d7d36d8b4a93e122 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 13 Mar 2016 20:44:25 +0100 Subject: [PATCH 190/222] Bumping clippy --- miner/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/Cargo.toml b/miner/Cargo.toml index 5d265e9a4..b450ece73 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -17,7 +17,7 @@ log = "0.3" env_logger = "0.3" rustc-serialize = "0.3" rayon = "0.3.1" -clippy = { version = "0.0.49", optional = true } +clippy = { version = "0.0.50", optional = true } [features] default = [] From 4cf18c728d972524089b1e6190a47cb22445c542 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 13 Mar 2016 20:53:47 +0100 Subject: [PATCH 191/222] Fixing sync invalid sync test --- Cargo.lock | 12 +----------- sync/src/chain.rs | 2 +- sync/src/lib.rs | 1 - 3 files changed, 2 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d68c5c121..8bf57cb6a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -93,16 +93,6 @@ dependencies = [ "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "clippy" -version = "0.0.49" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", - "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "clippy" version = "0.0.50" @@ -300,7 +290,7 @@ dependencies = [ name = "ethminer" version = "0.9.99" dependencies = [ - "clippy 0.0.49 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 0.9.99", "ethcore-util 0.9.99", diff --git a/sync/src/chain.rs b/sync/src/chain.rs index d06a34764..6045a23d9 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1616,7 +1616,7 @@ mod tests { let mut io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&mut io, &[], &good_blocks, &[], &[]); + sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks); assert_eq!(sync.miner.status().transaction_queue_future, 0); assert_eq!(sync.miner.status().transaction_queue_pending, 1); sync.chain_new_blocks(&mut io, &good_blocks, &[], &[], &retracted_blocks); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index c47b74b66..1c87da2de 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -164,7 +164,6 @@ impl NetworkProtocolHandler for EthSync { self.sync.write().unwrap().maintain_sync(&mut 
NetSyncIo::new(io, self.chain.deref())); } - #[allow(single_match)] fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { SyncMessage::NewChainBlocks { ref imported, ref invalid, ref enacted, ref retracted } => { From 13df958f4ae200a0bf2d8a9b913bdd8e6adcbc1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 13 Mar 2016 21:06:24 +0100 Subject: [PATCH 192/222] Fixing warnings --- util/src/journaldb/earlymergedb.rs | 16 +++++++++------- util/src/keys/store.rs | 2 +- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 7f0f50da2..7cb00b993 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -171,7 +171,7 @@ impl EarlyMergeDB { trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn kill_keys(deletes: &Vec, refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { + fn kill_keys(deletes: &[H256], refs: &mut HashMap, batch: &DBTransaction, from: RemoveFrom, trace: bool) { // with a kill on {queue_refs: 1, in_archive: true}, we have two options: // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) @@ -340,8 +340,10 @@ impl JournalDB for EarlyMergeDB { } } + + #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -473,7 +475,7 @@ impl JournalDB for EarlyMergeDB { if trace { trace!(target: "jdb.ops", " Finalising: {:?}", inserts); } - for k in inserts.iter() { + for k in &inserts { match refs.get(k).cloned() { None => { // [in archive] -> SHIFT remove -> SHIFT insert None->Some{queue_refs: 1, in_archive: true} -> TAKE remove Some{queue_refs: 1, in_archive: true}->None -> TAKE insert @@ -489,7 +491,7 @@ impl JournalDB for EarlyMergeDB { Self::set_already_in(&batch, k); refs.insert(k.clone(), RefInfo{ queue_refs: x - 1, in_archive: true }); } - Some( RefInfo{queue_refs: _, in_archive: true} ) => { + Some( RefInfo{in_archive: true, ..} ) => { // Invalid! Reinserted the same key twice. 
warn!("Key {} inserted twice into same fork.", k); } @@ -936,7 +938,7 @@ mod tests { assert!(jdb.can_reconstruct_refs()); assert!(!jdb.exists(&foo)); } - + #[test] fn reopen_test() { let mut dir = ::std::env::temp_dir(); @@ -971,7 +973,7 @@ mod tests { jdb.commit(7, &b"7".sha3(), Some((3, b"3".sha3()))).unwrap(); assert!(jdb.can_reconstruct_refs()); } - + #[test] fn reopen_remove_three() { init_log(); @@ -1025,7 +1027,7 @@ mod tests { assert!(!jdb.exists(&foo)); } } - + #[test] fn reopen_fork() { let mut dir = ::std::env::temp_dir(); diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 610dda9f5..8aba08519 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -363,13 +363,13 @@ impl EncryptedHashMap for SecretStore { } #[cfg(test)] +#[cfg(feature="heavy-tests")] mod vector_tests { use super::{derive_mac,derive_key_iterations}; use common::*; #[test] - #[cfg(feature="heavy-tests")] fn mac_vector() { let password = "testpassword"; let salt = H256::from_str("ae3cd4e7013836a3df6bd7241b12db061dbe2c6785853cce422d148a624ce0bd").unwrap(); From 1be92ea8efafd5e4751ba6f2404073a8b72ea0b0 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 21:21:30 +0100 Subject: [PATCH 193/222] Fixes and traces for refcountdb. --- util/src/journaldb/refcounteddb.rs | 34 ++++++++++++++++++------------ util/src/overlaydb.rs | 7 ++++++ 2 files changed, 28 insertions(+), 13 deletions(-) diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 85f40e048..71833533f 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -43,6 +43,7 @@ pub struct RefCountedDB { const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; const DB_VERSION : u32 = 512; +const PADDING : [u8; 10] = [ 0u8; 10 ]; impl RefCountedDB { /// Create a new instance given a `backing` database. @@ -125,15 +126,15 @@ impl JournalDB for RefCountedDB { // of its inserts otherwise. // record new commit's details. 
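// Aside on the PADDING constant introduced below — rationale inferred, not
// stated in the patch: the backing Database is opened with a 12-byte
// prefix_size, and archivedb.rs notes that all keys must be at least 12
// bytes, so widening the journal key from [era, index] to a three-element
// list keeps it at or above that prefix width:
//
//     let mut r = RlpStream::new_list(3);
//     r.append(&now);
//     r.append(&index);
//     r.append(&&PADDING[..]);   // pad the key up to the prefix size
//     last = r.drain();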
- let batch = DBTransaction::new(); { let mut index = 0usize; let mut last; while try!(self.backing.get({ - let mut r = RlpStream::new_list(2); + let mut r = RlpStream::new_list(3); r.append(&now); r.append(&index); + r.append(&&PADDING[..]); last = r.drain(); &last })).is_some() { @@ -145,11 +146,14 @@ impl JournalDB for RefCountedDB { r.append(&self.inserts); r.append(&self.removes); try!(self.backing.put(&last, r.as_raw())); + + trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes); + self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); + try!(self.backing.put(&LATEST_ERA_KEY, &encode(&now))); self.latest_era = Some(now); } } @@ -158,26 +162,30 @@ impl JournalDB for RefCountedDB { if let Some((end_era, canon_id)) = end { let mut index = 0usize; let mut last; - while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(2); - r.append(&end_era); - r.append(&index); - last = r.drain(); - &last - })) { + while let Some(rlp_data) = { +// trace!(target: "rcdb", "checking for journal #{}.{}", end_era, index); + try!(self.backing.get({ + let mut r = RlpStream::new_list(3); + r.append(&end_era); + r.append(&index); + r.append(&&PADDING[..]); + last = r.drain(); + &last + })) + } { let rlp = Rlp::new(&rlp_data); - let to_remove: Vec = rlp.val_at(if canon_id == rlp.val_at(0) {2} else {1}); + let our_id: H256 = rlp.val_at(0); + let to_remove: Vec = rlp.val_at(if canon_id == our_id {2} else {1}); + trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i); } try!(self.backing.delete(&last)); - trace!("RefCountedDB: delete journal for time #{}.{}, (canon was {}): {} entries", end_era, index, canon_id, to_remove.len()); index += 1; } } let r = try!(self.forward.commit()); - try!(self.backing.write(batch)); Ok(r) } } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index d176d38f6..5704950ed 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -70,12 +70,14 @@ impl OverlayDB { let (back_value, back_rc) = x; let total_rc: i32 = back_rc as i32 + rc; if total_rc < 0 { + warn!("NEGATIVELY REFERENCED HASH {:?}", key); return Err(From::from(BaseDataError::NegativelyReferencedHash)); } deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { + warn!("NEGATIVELY REFERENCED HASH {:?}", key); return Err(From::from(BaseDataError::NegativelyReferencedHash)); } self.put_payload_in_batch(batch, &key, (value, rc as u32)); @@ -126,12 +128,14 @@ impl OverlayDB { let (back_value, back_rc) = x; let total_rc: i32 = back_rc as i32 + rc; if total_rc < 0 { + warn!("NEGATIVELY REFERENCED HASH {:?}", key); return Err(From::from(BaseDataError::NegativelyReferencedHash)); } deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { + warn!("NEGATIVELY REFERENCED HASH {:?}", key); return Err(From::from(BaseDataError::NegativelyReferencedHash)); } self.put_payload(&key, (value, rc as u32)); @@ -167,6 +171,9 @@ impl OverlayDB { /// ``` pub fn revert(&mut self) { self.overlay.clear(); } + /// Get the number of references that would be committed. 
+ pub fn commit_refs(&self, key: &H256) -> i32 { self.overlay.raw(&key).map_or(0, |&(_, refs)| refs) } + /// Get the refs and value of the given key. fn payload(&self, key: &H256) -> Option<(Bytes, u32)> { self.backing.get(&key.bytes()) From 420f473f90d3908d7a3f3cd0c245b797ae64c9f1 Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 13 Mar 2016 21:28:57 +0100 Subject: [PATCH 194/222] Check for NULL_RLP in AccountDB --- ethcore/src/account_db.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index 026e813f5..f95ec53a1 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -97,6 +97,9 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return SHA3_NULL_RLP.clone(); + } let k = value.sha3(); let ak = combine_key(&self.address, &k); self.db.emplace(ak, value.to_vec()); @@ -104,11 +107,17 @@ impl<'db> HashDB for AccountDBMut<'db>{ } fn emplace(&mut self, key: H256, value: Bytes) { + if key == SHA3_NULL_RLP { + return; + } let key = combine_key(&self.address, &key); self.db.emplace(key, value.to_vec()) } fn kill(&mut self, key: &H256) { + if key == &SHA3_NULL_RLP { + return; + } let key = combine_key(&self.address, key); self.db.kill(&key) } From fd834084f9f80c2c447e7424e74dddf9ed7e415d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 13 Mar 2016 21:39:23 +0100 Subject: [PATCH 195/222] unknonw lint --- util/src/table.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/src/table.rs b/util/src/table.rs index 5ba572289..c3b2006cf 100644 --- a/util/src/table.rs +++ b/util/src/table.rs @@ -40,7 +40,7 @@ impl Default for Table } // There is default but clippy does not detect it? -#[allow(new_without_default)] +#[cfg_attr(feature="dev", allow(new_without_default))] impl Table where Row: Eq + Hash + Clone, Col: Eq + Hash { From 26f41b711c30dc4a0cd3264cf38dd47f7b6aefc7 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 21:54:06 +0100 Subject: [PATCH 196/222] Bring back batching. --- util/src/error.rs | 3 ++- util/src/journaldb/refcounteddb.rs | 10 ++++++---- util/src/overlaydb.rs | 12 ++++-------- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/util/src/error.rs b/util/src/error.rs index 68aa3e648..409cc0e5d 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -21,12 +21,13 @@ use network::NetworkError; use rlp::DecoderError; use io; use std::fmt; +use hash::H256; #[derive(Debug)] /// Error in database subsystem. pub enum BaseDataError { /// An entry was removed more times than inserted. - NegativelyReferencedHash, + NegativelyReferencedHash(H256), } #[derive(Debug)] diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 71833533f..590964247 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -126,6 +126,7 @@ impl JournalDB for RefCountedDB { // of its inserts otherwise. // record new commit's details. 
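// Aside — a hedged reading of this patch's effect, not text from it: with
// batching restored, the journal row, the era marker and the forward
// overlay's reference-count updates are staged in one DBTransaction and
// flushed by a single backing.write(), so a crash should not leave a
// half-written commit behind:
//
//     let batch = DBTransaction::new();
//     try!(batch.put(&last, r.as_raw()));              // journal row
//     try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); // era marker
//     let r = try!(self.forward.commit_to_batch(&batch));
//     try!(self.backing.write(batch));                 // one atomic write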
+ let batch = DBTransaction::new(); { let mut index = 0usize; let mut last; @@ -145,7 +146,7 @@ impl JournalDB for RefCountedDB { r.append(id); r.append(&self.inserts); r.append(&self.removes); - try!(self.backing.put(&last, r.as_raw())); + try!(batch.put(&last, r.as_raw())); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, index, id, self.inserts, self.removes); @@ -153,7 +154,7 @@ impl JournalDB for RefCountedDB { self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { - try!(self.backing.put(&LATEST_ERA_KEY, &encode(&now))); + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); self.latest_era = Some(now); } } @@ -180,12 +181,13 @@ impl JournalDB for RefCountedDB { for i in &to_remove { self.forward.remove(i); } - try!(self.backing.delete(&last)); + try!(batch.delete(&last)); index += 1; } } - let r = try!(self.forward.commit()); + let r = try!(self.forward.commit_to_batch(&batch)); + try!(self.backing.write(batch)); Ok(r) } } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index 5704950ed..b5dec75e2 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -70,15 +70,13 @@ impl OverlayDB { let (back_value, back_rc) = x; let total_rc: i32 = back_rc as i32 + rc; if total_rc < 0 { - warn!("NEGATIVELY REFERENCED HASH {:?}", key); - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } deletes += if self.put_payload_in_batch(batch, &key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { - warn!("NEGATIVELY REFERENCED HASH {:?}", key); - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } self.put_payload_in_batch(batch, &key, (value, rc as u32)); } @@ -128,15 +126,13 @@ impl OverlayDB { let (back_value, back_rc) = x; let total_rc: i32 = back_rc as i32 + rc; if total_rc < 0 { - warn!("NEGATIVELY REFERENCED HASH {:?}", key); - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } deletes += if self.put_payload(&key, (back_value, total_rc as u32)) {1} else {0}; } None => { if rc < 0 { - warn!("NEGATIVELY REFERENCED HASH {:?}", key); - return Err(From::from(BaseDataError::NegativelyReferencedHash)); + return Err(From::from(BaseDataError::NegativelyReferencedHash(key))); } self.put_payload(&key, (value, rc as u32)); } From 8fd8f687ee0414684217b815147be40dcc8bdb06 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 13 Mar 2016 23:12:47 +0100 Subject: [PATCH 197/222] Remove EarlyMerge from user docs. --- parity/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index f2c661958..db9070493 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -83,7 +83,7 @@ Protocol Options: --testnet Equivalent to --chain testnet (geth-compatible). --networkid INDEX Override the network identifier from the chain we are on. --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, - basic (experimental), light (experimental), fast (experimental) [default: archive]. + basic (experimental), fast (experimental) [default: archive]. 
-d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] From 45c3600d5ab2012f0f7c083ec774c77c76c5d57c Mon Sep 17 00:00:00 2001 From: arkpar Date: Sun, 13 Mar 2016 23:20:26 +0100 Subject: [PATCH 198/222] Fixed splitting Neighbours packet --- util/src/network/discovery.rs | 43 +++++++++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 7 deletions(-) diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index a381b49f8..f5c4592f0 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -407,25 +407,37 @@ impl Discovery { let target: NodeId = try!(rlp.val_at(0)); let timestamp: u64 = try!(rlp.val_at(1)); try!(self.check_timestamp(timestamp)); - let limit = (MAX_DATAGRAM_SIZE - 109) / 90; let nearest = Discovery::nearest_node_entries(&target, &self.node_buckets); if nearest.is_empty() { return Ok(None); } + let mut packets = Discovery::prepare_neighbours_packets(&nearest); + for p in packets.drain(..) { + self.send_packet(PACKET_NEIGHBOURS, &from, &p); + } + trace!(target: "discovery", "Sent {} Neighbours to {:?}", nearest.len(), &from); + Ok(None) + } + + fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec { + let mut packets = Vec::new(); let mut rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len())); + let limit = (MAX_DATAGRAM_SIZE - 109) / 90; + let mut count = cmp::min(limit, nearest.len()); + rlp.begin_list(count); for n in 0 .. nearest.len() { rlp.begin_list(4); nearest[n].endpoint.to_rlp(&mut rlp); rlp.append(&nearest[n].id); - if (n + 1) % limit == 0 || n == nearest.len() - 1 { - self.send_packet(PACKET_NEIGHBOURS, &from, &rlp.drain()); - trace!(target: "discovery", "Sent {} Neighbours to {:?}", n, &from); + count -= 1; + if count == 0 { + packets.push(rlp.out()); rlp = RlpStream::new_list(1); - rlp.begin_list(cmp::min(limit, nearest.len() - n)); + count = cmp::min(limit, nearest.len() - n); + rlp.begin_list(count); } } - Ok(None) + packets } fn on_neighbours(&mut self, rlp: &UntrustedRlp, _node: &NodeId, from: &SocketAddr) -> Result, NetworkError> { @@ -506,6 +518,23 @@ mod tests { use crypto::KeyPair; use std::str::FromStr; use rustc_serialize::hex::FromHex; + use rlp::*; + + #[test] + fn find_node() { + let mut nearest = Vec::new(); + let node = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); + for _ in 0..1000 { + nearest.push( NodeEntry { id: node.id.clone(), endpoint: node.endpoint.clone() }); + } + + let packets = Discovery::prepare_neighbours_packets(&nearest); + assert_eq!(packets.len(), 76); + for p in &packets { + assert!(p.len() > 1280/2); + assert!(p.len() <= 1280); + } + } #[test] fn discovery() { From 615e03542ebb6ff4f60262a3cf7426c1ce74f821 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 00:41:25 +0100 Subject: [PATCH 199/222] Use slice.chunks --- util/src/network/discovery.rs | 33 ++++++++++++++------------------- 1 file changed, 14 insertions(+), 19 deletions(-) diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index f5c4592f0..6fda99cdb 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -18,7 +18,6 @@ use bytes::Bytes; use std::net::SocketAddr; use 
std::collections::{HashSet, HashMap, BTreeMap, VecDeque}; use std::mem; -use std::cmp; use std::default::Default; use mio::*; use mio::udp::*; @@ -420,24 +419,19 @@ impl Discovery { } fn prepare_neighbours_packets(nearest: &[NodeEntry]) -> Vec { - let mut packets = Vec::new(); - let mut rlp = RlpStream::new_list(1); let limit = (MAX_DATAGRAM_SIZE - 109) / 90; - let mut count = cmp::min(limit, nearest.len()); - rlp.begin_list(count); - for n in 0 .. nearest.len() { - rlp.begin_list(4); - nearest[n].endpoint.to_rlp(&mut rlp); - rlp.append(&nearest[n].id); - count -= 1; - if count == 0 { - packets.push(rlp.out()); - rlp = RlpStream::new_list(1); - count = cmp::min(limit, nearest.len() - n); - rlp.begin_list(count); + let chunks = nearest.chunks(limit); + let packets = chunks.map(|c| { + let mut rlp = RlpStream::new_list(1); + rlp.begin_list(c.len()); + for n in 0 .. c.len() { + rlp.begin_list(4); + c[n].endpoint.to_rlp(&mut rlp); + rlp.append(&c[n].id); } - } - packets + rlp.out() + }); + packets.collect() } fn on_neighbours(&mut self, rlp: &UntrustedRlp, _node: &NodeId, from: &SocketAddr) -> Result, NetworkError> { @@ -529,11 +523,12 @@ mod tests { } let packets = Discovery::prepare_neighbours_packets(&nearest); - assert_eq!(packets.len(), 76); - for p in &packets { + assert_eq!(packets.len(), 77); + for p in &packets[0..76] { assert!(p.len() > 1280/2); assert!(p.len() <= 1280); } + assert!(packets.last().unwrap().len() > 0); } #[test] From 1957a1496163f0a33a8ff8d6aff9261a5ad4ee20 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 00:48:43 +0100 Subject: [PATCH 200/222] personal tests setup --- rpc/src/v1/tests/mod.rs | 1 + rpc/src/v1/tests/personal.rs | 47 ++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 rpc/src/v1/tests/personal.rs diff --git a/rpc/src/v1/tests/mod.rs b/rpc/src/v1/tests/mod.rs index 3374bad36..21085a0fd 100644 --- a/rpc/src/v1/tests/mod.rs +++ b/rpc/src/v1/tests/mod.rs @@ -20,3 +20,4 @@ mod eth; mod net; mod web3; mod helpers; +mod personal; diff --git a/rpc/src/v1/tests/personal.rs b/rpc/src/v1/tests/personal.rs new file mode 100644 index 000000000..440a95a6f --- /dev/null +++ b/rpc/src/v1/tests/personal.rs @@ -0,0 +1,47 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; +use jsonrpc_core::IoHandler; +use v1::tests::helpers::{TestAccount, TestAccountProvider}; +use v1::{PersonalClient, Personal}; +use util::numbers::*; +use std::collections::*; + +fn accounts_provider() -> Arc { + let mut accounts = HashMap::new(); + accounts.insert(Address::from(1), TestAccount::new("test")); + let ap = TestAccountProvider::new(accounts); + Arc::new(ap) +} + +fn setup() -> (Arc, IoHandler) { + let test_provider = accounts_provider(); + let personal = PersonalClient::new(&test_provider); + let io = IoHandler::new(); + io.add_delegate(personal.to_delegate()); + (test_provider, io) +} + +#[test] +fn accounts() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_listAccounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} From 2d1a837a8b4ff14b43ac090bc5f1a22f0cc12479 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 14 Mar 2016 00:52:31 +0100 Subject: [PATCH 201/222] docopts cleanups. one for @LefterisJP: 80-character line for docopts. --- parity/main.rs | 116 +++++++++++++++++++++++++++---------------------- 1 file changed, 65 insertions(+), 51 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index db9070493..3473f44b1 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -78,72 +78,86 @@ Usage: parity [options] Protocol Options: - --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file - or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. - --testnet Equivalent to --chain testnet (geth-compatible). - --networkid INDEX Override the network identifier from the chain we are on. - --pruning METHOD Configure pruning of the state/storage trie. METHOD may be one of: archive, - basic (experimental), fast (experimental) [default: archive]. - -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] - --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] - --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] + --chain CHAIN Specify the blockchain type. CHAIN may be either a + JSON chain specification file or olympic, frontier, + homestead, mainnet, morden, or testnet + [default: homestead]. + -d --db-path PATH Specify the database & configuration directory path + [default: $HOME/.parity]. + --keys-path PATH Specify the path for JSON key files to be found + [default: $HOME/.web3/keys]. --identity NAME Specify your node's name. Networking Options: - --port PORT Override the port on which the node should listen [default: 30303]. + --port PORT Override the port on which the node should listen + [default: 30303]. --peers NUM Try to maintain that many peers [default: 25]. - --nat METHOD Specify method to use for determining public address. Must be one of: any, none, - upnp, extip:(IP) [default: any]. - --bootnodes NODES Specify additional comma-separated bootnodes. - --no-bootstrap Don't bother trying to connect to standard bootnodes. + --nat METHOD Specify method to use for determining public + address. Must be one of: any, none, upnp, + extip: [default: any]. + --network-id INDEX Override the network identifier from the chain we + are on. + --bootnodes NODES Override the bootnodes from our chain. NODES should + be comma-delimited enodes. 
--no-discovery Disable new peer discovery. --node-key KEY Specify node secret key, either as 64-character hex + string or input to SHA3 operation. API and Console Options: -j --jsonrpc Enable the JSON-RPC API server. --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API + server [default: 127.0.0.1]. --jsonrpc-port PORT Specify the port portion of the JSONRPC API server + [default: 8545]. --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses + [default: null]. --jsonrpc-apis APIS Specify the APIs available through the JSONRPC + interface. APIS is a comma-delimited list of API + names. Possible names are web3, eth and net. + [default: web3,eth,net,personal]. Sealing/Mining Options: --gas-price WEI Minimum amount of Wei to be paid for a transaction + to be accepted for mining [default: 20000000000]. --author ADDRESS Specify the block author (aka "coinbase") address + for sending block rewards from sealed blocks + [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. --extra-data STRING Specify a custom extra-data for authored blocks, no + more than 32 characters. -Memory Footprint Options: - --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. - --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. - --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. - --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with - other cache options (geth-compatible). +Footprint Options: + --pruning METHOD Configure pruning of the state/storage trie. METHOD + may be one of: archive, basic (experimental), fast + (experimental) [default: archive]. + --cache-pref-size BYTES Specify the preferred size of the blockchain cache in + bytes [default: 16384]. + --cache-max-size BYTES Specify the maximum size of the blockchain cache in + bytes [default: 262144]. + --queue-max-size BYTES Specify the maximum size of memory to use for block + queue [default: 52428800]. + --cache MEGABYTES Set total amount of discretionary memory to use for + the entire system, overrides other cache and queue + options.
-Geth-Compatibility Options +Geth-compatibility Options: --datadir PATH Equivalent to --db-path PATH. --testnet Equivalent to --chain testnet. - --networkid INDEX Override the network identifier from the chain we are on. + --networkid INDEX Equivalent to --network-id INDEX. + --maxpeers COUNT Equivalent to --peers COUNT. + --nodekey KEY Equivalent to --node-key KEY. + --nodiscover Equivalent to --no-discovery. --rpc Equivalent to --jsonrpc. --rpcaddr HOST Equivalent to --jsonrpc-addr HOST. --rpcport PORT Equivalent to --jsonrpc-port PORT. --rpcapi APIS Equivalent to --jsonrpc-apis APIS. --rpccorsdomain URL Equivalent to --jsonrpc-cors URL. - --maxpeers COUNT Equivalent to --peers COUNT. - --nodekey KEY Equivalent to --node-key KEY. - --nodiscover Equivalent to --no-discovery. --gasprice WEI Equivalent to --gas-price WEI. --etherbase ADDRESS Equivalent to --author ADDRESS. --extradata STRING Equivalent to --extra-data STRING. Miscellaneous Options: - -l --logging LOGGING Specify the logging level. Must conform to the same format as RUST_LOG. + -l --logging LOGGING Specify the logging level. Must conform to the same + format as RUST_LOG. -v --version Show information about version. -h --help Show this screen. "#; @@ -161,8 +175,8 @@ struct Args { flag_cache: Option, flag_keys_path: String, flag_bootnodes: Option, + flag_network_id: Option, flag_pruning: String, - flag_no_bootstrap: bool, flag_port: u16, flag_peers: usize, flag_no_discovery: bool, @@ -345,15 +359,15 @@ impl Configuration { } fn init_nodes(&self, spec: &Spec) -> Vec { - let mut r = if self.args.flag_no_bootstrap { Vec::new() } else { spec.nodes().clone() }; - if let Some(ref x) = self.args.flag_bootnodes { - r.extend(x.split(',').map(|s| { + match self.args.flag_bootnodes { + Some(ref x) if x.len() > 0 => x.split(',').map(|s| { Self::normalize_enode(s).unwrap_or_else(|| { die!("{}: Invalid node address format given for a boot node.", s) }) - })); + }).collect(), + Some(_) => Vec::new(), + None => spec.nodes().clone(), } - r } #[cfg_attr(feature="dev", allow(useless_format))] @@ -390,7 +404,7 @@ impl Configuration { match self.args.flag_cache { Some(mb) => { client_config.blockchain.max_cache_size = mb * 1024 * 1024; - client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size * 3 / 4; } None => { client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; @@ -411,8 +425,8 @@ impl Configuration { fn sync_config(&self, spec: &Spec) -> SyncConfig { let mut sync_config = SyncConfig::default(); - sync_config.network_id = self.args.flag_networkid.as_ref().map_or(spec.network_id(), |id| { - U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id)) + sync_config.network_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()).map_or(spec.network_id(), |id| { + U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --network-id/--networkid", id)) }); sync_config } From 9e7ff2c00e0505b7ef6622935e2ee203bc3202f3 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 00:13:54 +0100 Subject: [PATCH 202/222] Version 1.1 --- Cargo.lock | 73 ++++++++++++++++++++++++--------------------- Cargo.toml | 2 +- devtools/Cargo.toml | 2 +- ethash/Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- evmjit/Cargo.toml | 2 +- miner/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- util/src/misc.rs | 4 +-- 11 files changed, 
50 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bf57cb6a..dfe37dbb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,18 +1,18 @@ [root] name = "parity" -version = "0.9.99" +version = "1.1.0" dependencies = [ "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/tomusdrw/rust-ctrlc.git)", "daemonize 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.78 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 0.9.99", - "ethcore-devtools 0.9.99", - "ethcore-rpc 0.9.99", - "ethcore-util 0.9.99", - "ethminer 0.9.99", - "ethsync 0.9.99", + "ethcore 1.1.0", + "ethcore-devtools 1.1.0", + "ethcore-rpc 1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", + "ethsync 1.1.0", "fdlimit 0.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -162,7 +162,7 @@ name = "docopt" version = "0.6.78" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "strsim 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -178,7 +178,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -197,7 +197,7 @@ dependencies = [ [[package]] name = "ethash" -version = "0.9.99" +version = "1.1.0" dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -206,14 +206,14 @@ dependencies = [ [[package]] name = "ethcore" -version = "0.9.99" +version = "1.1.0" dependencies = [ "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethash 0.9.99", - "ethcore-devtools 0.9.99", - "ethcore-util 0.9.99", + "ethash 1.1.0", + "ethcore-devtools 1.1.0", + "ethcore-util 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.1.15 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -225,21 +225,21 @@ dependencies = [ [[package]] name = "ethcore-devtools" -version = "0.9.99" +version = "1.1.0" dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "ethcore-rpc" -version = "0.9.99" +version = "1.1.0" dependencies = [ "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", - "ethash 0.9.99", - "ethcore 0.9.99", - "ethcore-util 0.9.99", - "ethminer 0.9.99", - "ethsync 0.9.99", + "ethash 1.1.0", + "ethcore 1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", + "ethsync 1.1.0", "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -253,7 +253,7 @@ dependencies = [ [[package]] name = "ethcore-util" -version = "0.9.99" +version = "1.1.0" dependencies = [ "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", @@ -263,7 +263,7 @@ dependencies = [ "elastic-array 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", - "ethcore-devtools 0.9.99", + "ethcore-devtools 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "itertools 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)", @@ -288,12 +288,12 @@ dependencies = [ [[package]] name = "ethminer" -version = "0.9.99" +version = "1.1.0" dependencies = [ "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 0.9.99", - "ethcore-util 0.9.99", + "ethcore 1.1.0", + "ethcore-util 1.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -302,13 +302,13 @@ dependencies = [ [[package]] name = "ethsync" -version = "0.9.99" +version = "1.1.0" dependencies = [ "clippy 0.0.50 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore 0.9.99", - "ethcore-util 0.9.99", - "ethminer 0.9.99", + "ethcore 1.1.0", + "ethcore-util 1.1.0", + "ethminer 1.1.0", "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -343,7 +343,7 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -406,7 +406,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.1.55 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 0.1.56 (registry+https://github.com/rust-lang/crates.io-index)", "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)", "xmltree 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -687,12 +687,12 @@ dependencies = [ [[package]] name = "regex" -version = "0.1.55" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "memchr 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", - "regex-syntax 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -701,6 +701,11 @@ name = "regex-syntax" version = "0.2.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "regex-syntax" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "rocksdb" version = "0.4.3" diff --git a/Cargo.toml b/Cargo.toml index 351041119..782dd1c79 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Ethcore client." name = "parity" -version = "0.9.99" +version = "1.1.0" license = "GPL-3.0" authors = ["Ethcore "] build = "build.rs" diff --git a/devtools/Cargo.toml b/devtools/Cargo.toml index ce0260936..19178fbfe 100644 --- a/devtools/Cargo.toml +++ b/devtools/Cargo.toml @@ -3,7 +3,7 @@ description = "Ethcore development/test/build tools" homepage = "http://ethcore.io" license = "GPL-3.0" name = "ethcore-devtools" -version = "0.9.99" +version = "1.1.0" authors = ["Ethcore "] [dependencies] diff --git a/ethash/Cargo.toml b/ethash/Cargo.toml index e2a2ec4d8..70d08249c 100644 --- a/ethash/Cargo.toml +++ b/ethash/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "ethash" -version = "0.9.99" +version = "1.1.0" authors = ["arkpar "] [dependencies] diff --git a/evmjit/Cargo.toml b/evmjit/Cargo.toml index 9449af82d..6586a360e 100644 --- a/evmjit/Cargo.toml +++ b/evmjit/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "evmjit" -version = "0.9.99" +version = "1.1.0" authors = ["debris "] [lib] diff --git a/miner/Cargo.toml b/miner/Cargo.toml index b450ece73..cd56aee9e 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -3,7 +3,7 @@ description = "Ethminer library" homepage = "http://ethcore.io" license = "GPL-3.0" name = "ethminer" -version = "0.9.99" +version = "1.1.0" authors = ["Ethcore "] build = "build.rs" diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index fa89041d8..88b69e82c 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Ethcore jsonrpc" name = "ethcore-rpc" -version = "0.9.99" +version = "1.1.0" license = "GPL-3.0" authors = ["Ethcore "] build = "build.rs" diff --git a/util/src/misc.rs b/util/src/misc.rs index 39ccbf2da..8dcd25988 100644 --- a/util/src/misc.rs +++ b/util/src/misc.rs @@ -70,7 +70,7 @@ pub fn contents(name: &str) -> Result { /// Get the standard version string for this software. pub fn version() -> String { - format!("Parity/v{}-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version()) + format!("Parity/v{}-unstable-{}-{}/{}-{}-{}/rustc{}", env!("CARGO_PKG_VERSION"), short_sha(), commit_date().replace("-", ""), Target::arch(), Target::os(), Target::env(), rustc_version::version()) } /// Get the standard version data for this software. 
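As a side note, the misc.rs tweak above inserts an "-unstable-" marker into every reported client version. A stand-alone sketch of the resulting shape, with dummy arguments standing in for the values the real function obtains at build time:

/// Assemble a client version string in the same shape as `version()`,
/// with the new "-unstable-" marker after the crate version.
fn version_string(ver: &str, sha: &str, date: &str, arch: &str, os: &str, env: &str, rustc: &str) -> String {
    // The commit date has its dashes stripped, as in the original format! call.
    format!("Parity/v{}-unstable-{}-{}/{}-{}-{}/rustc{}",
            ver, sha, date.replace("-", ""), arch, os, env, rustc)
}

fn main() {
    // Example output: Parity/v1.1.0-unstable-9e7ff2c-20160314/x86_64-linux-gnu/rustc1.7.0
    println!("{}", version_string("1.1.0", "9e7ff2c", "2016-03-14", "x86_64", "linux", "gnu", "1.7.0"));
}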
@@ -85,4 +85,4 @@ pub fn version_data() -> Bytes { s.append(&format!("{}", rustc_version::version())); s.append(&&Target::os()[0..2]); s.out() -} \ No newline at end of file +} From 2117d363e23b4d159de9d76b435fd6baacfed744 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 01:06:42 +0100 Subject: [PATCH 203/222] new account test & fix --- rpc/src/v1/impls/personal.rs | 2 +- rpc/src/v1/tests/helpers/account_provider.rs | 10 ++++++++-- rpc/src/v1/tests/personal.rs | 12 ++++++++++++ 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 0cd3f0040..2822059d6 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -49,7 +49,7 @@ impl Personal for PersonalClient where A: AccountProvider + 'static { |(pass, )| { let store = take_weak!(self.accounts); match store.new_account(&pass) { - Ok(address) => Ok(Value::String(format!("{:?}", address))), + Ok(address) => Ok(Value::String(format!("0x{:?}", address))), Err(_) => Err(Error::internal_error()) } } diff --git a/rpc/src/v1/tests/helpers/account_provider.rs b/rpc/src/v1/tests/helpers/account_provider.rs index 66f085f74..ce5b76b44 100644 --- a/rpc/src/v1/tests/helpers/account_provider.rs +++ b/rpc/src/v1/tests/helpers/account_provider.rs @@ -42,6 +42,7 @@ impl TestAccount { /// Test account provider. pub struct TestAccountProvider { accounts: RwLock>, + pub adds: RwLock>, } impl TestAccountProvider { @@ -49,6 +50,7 @@ impl TestAccountProvider { pub fn new(accounts: HashMap) -> Self { TestAccountProvider { accounts: RwLock::new(accounts), + adds: RwLock::new(vec![]), } } } @@ -69,9 +71,13 @@ impl AccountProvider for TestAccountProvider { } } - fn new_account(&self, _pass: &str) -> Result { - unimplemented!() + fn new_account(&self, pass: &str) -> Result { + let mut adds = self.adds.write().unwrap(); + let address = Address::from(adds.len() as u64 + 2); + adds.push(pass.to_owned()); + Ok(address) } + fn account_secret(&self, _account: &Address) -> Result { unimplemented!() } diff --git a/rpc/src/v1/tests/personal.rs b/rpc/src/v1/tests/personal.rs index 440a95a6f..261527c47 100644 --- a/rpc/src/v1/tests/personal.rs +++ b/rpc/src/v1/tests/personal.rs @@ -45,3 +45,15 @@ fn accounts() { assert_eq!(io.handle_request(request), Some(response.to_owned())); } + + +#[test] +fn new_account() { + let (_test_provider, io) = setup(); + + let request = r#"{"jsonrpc": "2.0", "method": "personal_newAccount", "params": ["pass"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0x0000000000000000000000000000000000000002","id":1}"#; + + assert_eq!(io.handle_request(request), Some(response.to_owned())); +} + From 7bc3c0b0269a007f0b0ff1d69ba318a97b280793 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 01:27:27 +0100 Subject: [PATCH 204/222] Removed rocksdb build dependency --- .travis.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.travis.yml b/.travis.yml index 0c614ca5d..6ae41379e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -33,10 +33,6 @@ addons: - libcurl4-openssl-dev - libelf-dev - libdw-dev -before_script: | - sudo add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && - sudo apt-get update && - sudo apt-get install -y --force-yes librocksdb script: - cargo build --release --verbose ${FEATURES} - cargo test --release --verbose ${FEATURES} ${TARGETS} From f6b7884a1dcdd6382f522ae38c4f6d29222f40c7 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 14 Mar 2016 02:00:22 +0100 Subject: [PATCH 
205/222] Allow configuration of target gas limit. --- ethcore/src/block.rs | 10 +++++----- ethcore/src/client/client.rs | 5 ++--- ethcore/src/client/mod.rs | 2 +- ethcore/src/client/test_client.rs | 2 +- ethcore/src/engine.rs | 2 +- ethcore/src/ethereum/ethash.rs | 7 +++---- miner/src/miner.rs | 19 +++++++++++++++++-- parity/main.rs | 10 ++++++++++ 8 files changed, 40 insertions(+), 17 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 7eb34670f..6f3986391 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -171,7 +171,7 @@ pub struct SealedBlock { impl<'x> OpenBlock<'x> { /// Create a new OpenBlock ready for transaction pushing. - pub fn new(engine: &'x Engine, db: Box, parent: &Header, last_hashes: LastHashes, author: Address, extra_data: Bytes) -> Self { + pub fn new(engine: &'x Engine, db: Box, parent: &Header, last_hashes: LastHashes, author: Address, gas_floor_target: U256, extra_data: Bytes) -> Self { let mut r = OpenBlock { block: ExecutedBlock::new(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce())), engine: engine, @@ -185,7 +185,7 @@ impl<'x> OpenBlock<'x> { r.block.base.header.extra_data = extra_data; r.block.base.header.note_dirty(); - engine.populate_from_parent(&mut r.block.base.header, parent); + engine.populate_from_parent(&mut r.block.base.header, parent, gas_floor_target); engine.on_new_block(&mut r.block); r } @@ -347,7 +347,7 @@ pub fn enact(header: &Header, transactions: &[SignedTransaction], uncles: &[Head } } - let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), header.extra_data().clone()); + let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author().clone(), x!(3141562), header.extra_data().clone()); b.set_difficulty(*header.difficulty()); b.set_gas_limit(*header.gas_limit()); b.set_timestamp(header.timestamp()); @@ -391,7 +391,7 @@ mod tests { let mut db = db_result.take(); engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; - let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]); let b = b.close(); let _ = b.seal(engine.deref(), vec![]); } @@ -405,7 +405,7 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); engine.spec().ensure_db_good(db.as_hashdb_mut()); - let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), vec![]).close().seal(engine.deref(), vec![]).unwrap(); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, vec![genesis_header.hash()], Address::zero(), x!(3141562), vec![]).close().seal(engine.deref(), vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index d748cc4ee..cbea020ba 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -380,15 +380,13 @@ impl Client where V: Verifier { } impl BlockChainClient for Client where V: Verifier { - - // TODO [todr] Should be moved to miner crate eventually. fn try_seal(&self, block: ClosedBlock, seal: Vec) -> Result { block.try_seal(self.engine.deref().deref(), seal) } // TODO [todr] Should be moved to miner crate eventually. 
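A note on the x!(3141562) literal used in enact above: ethcore-util provides a small conversion macro so integer literals can be lifted into U256 and similar types at the call site. A self-contained sketch of the idea, assuming a From-based definition (the real macro may differ in detail):

// A From-based conversion shorthand, as assumed here; parity's util
// crate provides a macro of roughly this shape for building U256 values.
macro_rules! x {
    ($e:expr) => { From::from($e) };
}

fn main() {
    // With a concrete target type, x!(...) just invokes From::from.
    let floor: u64 = x!(3141562u32);
    assert_eq!(floor, 3_141_562);
}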
- fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option { + fn prepare_sealing(&self, author: Address, gas_floor_target: U256, extra_data: Bytes, transactions: Vec) -> Option { let engine = self.engine.deref().deref(); let h = self.chain.best_block_hash(); @@ -398,6 +396,7 @@ impl BlockChainClient for Client where V: Verifier { match self.chain.block_header(&h) { Some(ref x) => x, None => {return None} }, self.build_last_hashes(h.clone()), author, + gas_floor_target, extra_data, ); diff --git a/ethcore/src/client/mod.rs b/ethcore/src/client/mod.rs index e46d0b570..88e07d0b1 100644 --- a/ethcore/src/client/mod.rs +++ b/ethcore/src/client/mod.rs @@ -110,7 +110,7 @@ pub trait BlockChainClient : Sync + Send { // TODO [todr] Should be moved to miner crate eventually. /// Returns ClosedBlock prepared for sealing. - fn prepare_sealing(&self, author: Address, extra_data: Bytes, transactions: Vec) -> Option; + fn prepare_sealing(&self, author: Address, gas_floor_target: U256, extra_data: Bytes, transactions: Vec) -> Option; // TODO [todr] Should be moved to miner crate eventually. /// Attempts to seal given block. Returns `SealedBlock` on success and the same block in case of error. diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 140b8d91f..d6a5707e5 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -215,7 +215,7 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn prepare_sealing(&self, _author: Address, _extra_data: Bytes, _transactions: Vec) -> Option { + fn prepare_sealing(&self, _author: Address, _gas_floor_target: U256, _extra_data: Bytes, _transactions: Vec) -> Option { unimplemented!() } diff --git a/ethcore/src/engine.rs b/ethcore/src/engine.rs index 83e1986fd..0b2ce8ae2 100644 --- a/ethcore/src/engine.rs +++ b/ethcore/src/engine.rs @@ -85,7 +85,7 @@ pub trait Engine : Sync + Send { /// Don't forget to call Super::populate_from_parent when subclassing & overriding. // TODO: consider including State in the params. 
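This is the heart of the change: instead of a built-in constant, the engine now nudges each new block's gas limit one bounded step toward the caller-supplied floor target. A worked sketch in plain u64 arithmetic follows; the raising branch matches the Ethash hunk below, while the lowering branch is written as the symmetric case and is an assumption here.

use std::cmp::{max, min};

/// Move `parent_gas_limit` one bounded step toward `gas_floor_target`.
/// Each step may change the limit by at most parent/bound_divisor.
fn next_gas_limit(parent_gas_limit: u64, gas_floor_target: u64, bound_divisor: u64) -> u64 {
    if parent_gas_limit < gas_floor_target {
        // Raise, but never overshoot the target.
        min(gas_floor_target, parent_gas_limit + parent_gas_limit / bound_divisor - 1)
    } else {
        // Lower, but never undershoot the target.
        max(gas_floor_target, parent_gas_limit - parent_gas_limit / bound_divisor + 1)
    }
}

fn main() {
    // With a bound divisor of 1024, a 3M-gas parent steps up by roughly
    // 2929 gas per block until it reaches the 4712388 floor.
    let next = next_gas_limit(3_000_000, 4_712_388, 1024);
    assert_eq!(next, 3_000_000 + 3_000_000 / 1024 - 1);
}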
- fn populate_from_parent(&self, header: &mut Header, parent: &Header) { + fn populate_from_parent(&self, header: &mut Header, parent: &Header, _gas_floor_target: U256) { header.difficulty = parent.difficulty; header.gas_limit = parent.gas_limit; header.note_dirty(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index a882f66ae..406777251 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -92,10 +92,9 @@ impl Engine for Ethash { } } - fn populate_from_parent(&self, header: &mut Header, parent: &Header) { + fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256) { header.difficulty = self.calculate_difficuty(header, parent); header.gas_limit = { - let gas_floor_target: U256 = x!(3141562); let gas_limit = parent.gas_limit; let bound_divisor = self.u256_param("gasLimitBoundDivisor"); if gas_limit < gas_floor_target { @@ -300,7 +299,7 @@ mod tests { let mut db = db_result.take(); engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; - let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); + let b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]); let b = b.close(); assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); } @@ -313,7 +312,7 @@ mod tests { let mut db = db_result.take(); engine.spec().ensure_db_good(db.as_hashdb_mut()); let last_hashes = vec![genesis_header.hash()]; - let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), vec![]); + let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, last_hashes, Address::zero(), x!(3141562), vec![]); let mut uncle = Header::new(); let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106"); uncle.author = uncle_author.clone(); diff --git a/miner/src/miner.rs b/miner/src/miner.rs index ad403150d..43f11f9c4 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -19,7 +19,7 @@ use std::sync::{Mutex, RwLock, Arc}; use std::sync::atomic; use std::sync::atomic::AtomicBool; -use util::{H256, U256, Address, Bytes}; +use util::{H256, U256, Address, Bytes, Uint}; use ethcore::views::{BlockView}; use ethcore::client::{BlockChainClient, BlockId}; use ethcore::block::{ClosedBlock}; @@ -34,8 +34,10 @@ pub struct Miner { // for sealing... sealing_enabled: AtomicBool, sealing_block: Mutex>, + gas_floor_target: RwLock, author: RwLock
, extra_data: RwLock, + } impl Default for Miner { @@ -44,6 +46,7 @@ impl Default for Miner { transaction_queue: Mutex::new(TransactionQueue::new()), sealing_enabled: AtomicBool::new(false), sealing_block: Mutex::new(None), + gas_floor_target: RwLock::new(U256::zero()), author: RwLock::new(Address::default()), extra_data: RwLock::new(Vec::new()), } @@ -66,6 +69,11 @@ impl Miner { self.extra_data.read().unwrap().clone() } + /// Get the gas floor target that we will seal blocks with. + fn gas_floor_target(&self) -> U256 { + self.gas_floor_target.read().unwrap().clone() + } + /// Set the author that we will seal blocks as. pub fn set_author(&self, author: Address) { *self.author.write().unwrap() = author; @@ -76,6 +84,11 @@ impl Miner { *self.extra_data.write().unwrap() = extra_data; } + /// Set the gas limit we wish to target when sealing a new block. + pub fn set_gas_floor_target(&self, target: U256) { + *self.gas_floor_target.write().unwrap() = target; + } + /// Set minimal gas price of transaction to be accepted for mining. pub fn set_minimal_gas_price(&self, min_gas_price: U256) { self.transaction_queue.lock().unwrap().set_minimal_gas_price(min_gas_price); @@ -110,12 +123,14 @@ impl MinerService for Miner { fn prepare_sealing(&self, chain: &BlockChainClient) { let no_of_transactions = 128; + // TODO: should select transactions from the queue according to the gas limit of the block. let transactions = self.transaction_queue.lock().unwrap().top_transactions(no_of_transactions); let b = chain.prepare_sealing( self.author(), + self.gas_floor_target(), self.extra_data(), - transactions, + transactions ); *self.sealing_block.lock().unwrap() = b; } diff --git a/parity/main.rs b/parity/main.rs index 8562b416d..0039d2ba6 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -115,6 +115,7 @@ API and Console Options: Sealing/Mining Options: --gas-price WEI Minimum amount of Wei to be paid for a transaction to be accepted for mining [default: 20000000000]. + --gas-floor-target GAS Amount of gas per block to target when sealing a new block [default: 4712388]. --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. @@ -178,6 +179,7 @@ struct Args { flag_jsonrpc_apis: String, flag_author: String, flag_gas_price: String, + flag_gas_floor_target: String, flag_extra_data: Option, flag_logging: Option, flag_version: bool, @@ -303,6 +305,13 @@ impl Configuration { }) } + fn gas_floor_target(&self) -> U256 { + let d = &self.args.flag_gas_floor_target; + U256::from_dec_str(d).unwrap_or_else(|_| { + die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d) + }) + } + fn gas_price(&self) -> U256 { let d = self.args.flag_gasprice.as_ref().unwrap_or(&self.args.flag_gas_price); U256::from_dec_str(d).unwrap_or_else(|_| { @@ -485,6 +494,7 @@ impl Configuration { // Miner let miner = Miner::new(); miner.set_author(self.author()); + miner.set_gas_floor_target(self.gas_floor_target()); miner.set_extra_data(self.extra_data()); miner.set_minimal_gas_price(self.gas_price()); From 7af0a1dc2c2e41b7af89f3e1b24e1e847dd78c27 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 14 Mar 2016 02:02:32 +0100 Subject: [PATCH 206/222] Missing comma.
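On the CLI side of the gas-floor plumbing above, the new flag is parsed with U256::from_dec_str and the process dies on bad input. A minimal stand-alone version of that parse-or-exit pattern, with u64 and a plain function standing in for U256 and the die! macro:

use std::process::exit;

/// Parse a decimal flag value or terminate with a helpful message,
/// mirroring the die!-on-parse-failure style used in parity/main.rs.
fn parse_or_die(flag: &str, raw: &str) -> u64 {
    raw.parse::<u64>().unwrap_or_else(|_| {
        eprintln!("{}: Invalid value given for {}. Must be a decimal number.", raw, flag);
        exit(1)
    })
}

fn main() {
    let target = parse_or_die("--gas-floor-target", "4712388");
    assert_eq!(target, 4_712_388);
}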
--- miner/src/miner.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/miner/src/miner.rs b/miner/src/miner.rs index 43f11f9c4..6d5b3086e 100644 --- a/miner/src/miner.rs +++ b/miner/src/miner.rs @@ -130,7 +130,7 @@ impl MinerService for Miner { self.author(), self.gas_floor_target(), self.extra_data(), - transactions + transactions, ); *self.sealing_block.lock().unwrap() = b; } From 6827ff9319893fe5283ab1e110951f375db79f13 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 09:37:32 +0100 Subject: [PATCH 207/222] [ci skip] fix tesh.sh --- test.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/test.sh b/test.sh index e1881a8ad..0bf08e67f 100755 --- a/test.sh +++ b/test.sh @@ -1,5 +1,4 @@ #!/bin/sh # Running Parity Full Test Sute -cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p -ethminer +cargo test --features ethcore/json-tests $1 -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity -p ethminer From 8532f2dc2e4e12ea100433486a983fcccf4145e0 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 09:44:02 +0100 Subject: [PATCH 208/222] removed tests that used fixedhash --- util/bigint/src/uint.rs | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index ea617d570..c3cc6b753 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -1948,8 +1948,6 @@ mod tests { #[test] fn u256_multi_muls() { - use hash::*; - let (result, _) = U256([0, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, 0])); assert_eq!(U256([0, 0, 0, 0]), result); @@ -1979,23 +1977,6 @@ mod tests { let (result, _) = U256([1, 0, 0, 0]).overflowing_mul(U256([0, 0, 0, ::std::u64::MAX])); assert_eq!(U256([0, 0, 0, ::std::u64::MAX]), result); - - let x1 = U256::from_str("0000000000000000000000000000000000000000000000000000012365124623").unwrap(); - let x2sqr_right = U256::from_str("000000000000000000000000000000000000000000014baeef72e0378e2328c9").unwrap(); - let x1sqr = x1 * x1; - assert_eq!(H256::from(x2sqr_right), H256::from(x1sqr)); - let x1cube = x1sqr * x1; - let x1cube_right = U256::from_str("0000000000000000000000000000000001798acde139361466f712813717897b").unwrap(); - assert_eq!(H256::from(x1cube_right), H256::from(x1cube)); - let x1quad = x1cube * x1; - let x1quad_right = U256::from_str("000000000000000000000001adbdd6bd6ff027485484b97f8a6a4c7129756dd1").unwrap(); - assert_eq!(H256::from(x1quad_right), H256::from(x1quad)); - let x1penta = x1quad * x1; - let x1penta_right = U256::from_str("00000000000001e92875ac24be246e1c57e0507e8c46cc8d233b77f6f4c72993").unwrap(); - assert_eq!(H256::from(x1penta_right), H256::from(x1penta)); - let x1septima = x1penta * x1; - let x1septima_right = U256::from_str("00022cca1da3f6e5722b7d3cc5bbfb486465ebc5a708dd293042f932d7eee119").unwrap(); - assert_eq!(H256::from(x1septima_right), H256::from(x1septima)); } #[test] From d3c1b5455b2fc046272f7f4ee040df9291397893 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 10:25:04 +0100 Subject: [PATCH 209/222] Silenced UDP warnings --- util/src/network/discovery.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index 6fda99cdb..d755c58e7 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -297,7 +297,7 @@ impl Discovery { return; } Err(e) => { - warn!("UDP send error: {:?}, address: {:?}", e, &data.address); 
+ debug!("UDP send error: {:?}, address: {:?}", e, &data.address); return; } } @@ -317,7 +317,7 @@ }), Ok(_) => None, Err(e) => { - warn!("Error reading UPD socket: {:?}", e); + debug!("Error reading UDP socket: {:?}", e); None } } From 829ed4d0a6b6976aeff26fabd6d9cd239ccb593b Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 10:47:22 +0100 Subject: [PATCH 210/222] commented empty slice/vec comparison --- ethcore/src/evm/tests.rs | 4 ++-- util/src/bytes.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index dc84a9a05..0853cd5f8 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -55,7 +55,7 @@ struct FakeExt { info: EnvInfo, schedule: Schedule, balances: HashMap, - calls: HashSet + calls: HashSet, } impl FakeExt { @@ -346,7 +346,7 @@ fn test_log_empty(factory: super::Factory) { assert_eq!(gas_left, U256::from(99_619)); assert_eq!(ext.logs.len(), 1); assert_eq!(ext.logs[0].topics.len(), 0); - assert_eq!(ext.logs[0].data, vec![]); + //assert_eq!(ext.logs[0].data, vec![]); } evm_test!{test_log_sender: test_log_sender_jit, test_log_sender_int} diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 5a4500ae6..13e6880cc 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -177,7 +177,7 @@ impl BytesConvertable for T where T: AsRef<[u8]> { #[test] fn bytes_convertable() { assert_eq!(vec![0x12u8, 0x34].bytes(), &[0x12u8, 0x34]); - assert_eq!([0u8; 0].bytes(), &[]); +// assert_eq!([0u8; 0].as_slice(), &[]); } /// Simple trait to allow for raw population of a Sized object from a byte slice. From dc8b9c3205132c9eb12852582990850625a6aa38 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 14 Mar 2016 10:48:32 +0100 Subject: [PATCH 211/222] Fix build. --- ethcore/src/client/client.rs | 5 +++++ ethcore/src/tests/client.rs | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index cbea020ba..0bd371c0f 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -109,6 +109,11 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1000; +// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING. +// Altering it will force a blanket DB update for *all* JournalDB-derived +// databases. +// Instead, add/upgrade the version string of the individual JournalDB-derived database +// of which you actually want to force an upgrade.
const CLIENT_DB_VER_STR: &'static str = "5.2"; impl Client { diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index ed0b02788..797684d1d 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -133,7 +133,7 @@ fn can_mine() { let client_result = get_test_client_with_blocks(vec![dummy_blocks[0].clone()]); let client = client_result.reference(); - let b = client.prepare_sealing(Address::default(), vec![], vec![]).unwrap(); + let b = client.prepare_sealing(Address::default(), x!(31415926), vec![], vec![]).unwrap(); assert_eq!(*b.block().header().parent_hash(), BlockView::new(&dummy_blocks[0]).header_view().sha3()); assert!(client.try_seal(b, vec![]).is_ok()); From 3eb08b0d611ba848359076a342e88523265b8671 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 14 Mar 2016 10:53:37 +0100 Subject: [PATCH 212/222] fix tests and deuncommented --- ethcore/src/evm/tests.rs | 2 +- util/src/bytes.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 0853cd5f8..445c0be41 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -346,7 +346,7 @@ fn test_log_empty(factory: super::Factory) { assert_eq!(gas_left, U256::from(99_619)); assert_eq!(ext.logs.len(), 1); assert_eq!(ext.logs[0].topics.len(), 0); - //assert_eq!(ext.logs[0].data, vec![]); + assert!(ext.logs[0].data.is_empty()); } evm_test!{test_log_sender: test_log_sender_jit, test_log_sender_int} diff --git a/util/src/bytes.rs b/util/src/bytes.rs index 13e6880cc..0683ea4df 100644 --- a/util/src/bytes.rs +++ b/util/src/bytes.rs @@ -177,7 +177,7 @@ impl BytesConvertable for T where T: AsRef<[u8]> { #[test] fn bytes_convertable() { assert_eq!(vec![0x12u8, 0x34].bytes(), &[0x12u8, 0x34]); -// assert_eq!([0u8; 0].as_slice(), &[]); + assert!([0u8; 0].as_slice().is_empty()); } /// Simple trait to allow for raw population of a Sized object from a byte slice. From 5503cd46464d2f410ebc58e497d4cd55b1a5600c Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 14 Mar 2016 12:41:11 +0100 Subject: [PATCH 213/222] Lock reports to avoid out of order badness. 
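The race being fixed: the old informant snapshotted client.report() before taking its own write lock, so two ticks could interleave between snapshot and publish and report deltas out of order. Taking the lock first makes snapshot-and-update a single critical section. A compact sketch of the corrected pattern, with illustrative types in place of the real ClientReport plumbing:

use std::sync::RwLock;

#[derive(Clone, Default)]
struct Report { blocks: u64 }

struct Informant { last_report: RwLock<Option<Report>> }

impl Informant {
    fn tick(&self, current: Report) {
        // Acquire the write lock *first*: the snapshot and the update now
        // happen under one critical section, so ticks cannot interleave.
        let mut last = self.last_report.write().unwrap();
        if let Some(ref prev) = *last {
            println!("{} blocks since last tick", current.blocks - prev.blocks);
        }
        *last = Some(current);
    }
}

fn main() {
    let inf = Informant { last_report: RwLock::new(None) };
    inf.tick(Report { blocks: 10 });
    inf.tick(Report { blocks: 25 }); // prints 15
}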
--- parity/main.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 8562b416d..d7e92d6ff 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -583,13 +583,15 @@ impl Informant { let chain_info = client.chain_info(); let queue_info = client.queue_info(); let cache_info = client.blockchain_cache_info(); - let report = client.report(); let sync_info = sync.status(); + let write_report = self.report.write().unwrap(); + let report = client.report(); + if let (_, _, &Some(ref last_report)) = ( self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), - self.report.read().unwrap().deref() + write_report.deref() ) { println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, @@ -613,7 +615,7 @@ impl Informant { *self.chain_info.write().unwrap().deref_mut() = Some(chain_info); *self.cache_info.write().unwrap().deref_mut() = Some(cache_info); - *self.report.write().unwrap().deref_mut() = Some(report); + *write_report.deref_mut() = Some(report); } } From c8b65c769b50545d73a307cbd9eace7ba2faf1d1 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 13:54:06 +0100 Subject: [PATCH 214/222] Fixed handshake leak --- util/src/network/host.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 57aae51d7..02c576424 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -687,6 +687,8 @@ impl Host where Message: Send + Sync + Clone { if h.expired { return; } + io.deregister_stream(token).expect("Error deleting handshake registration"); + h.set_expired(); let originated = h.originated; let mut session = match Session::new(&mut h, &self.info.read().unwrap()) { Ok(s) => s, @@ -705,8 +707,6 @@ impl Host where Message: Send + Sync + Clone { } let result = sessions.insert_with(move |session_token| { session.set_token(session_token); - io.deregister_stream(token).expect("Error deleting handshake registration"); - h.set_expired(); io.register_stream(session_token).expect("Error creating session registration"); self.stats.inc_sessions(); trace!(target: "network", "Creating session {} -> {}", token, session_token); From 0de73609d2c0fa94067712ef229e72d4dd2541ee Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 14 Mar 2016 14:18:29 +0100 Subject: [PATCH 215/222] eth_hashrate && eth_submitHashrate tests --- ethcore/src/client/test_client.rs | 8 +-- rpc/src/v1/helpers/external_miner.rs | 59 ++++++++++++++++++ rpc/src/v1/helpers/mod.rs | 2 + rpc/src/v1/impls/eth.rs | 48 ++++++++++----- rpc/src/v1/tests/eth.rs | 72 +++++++++++++++++----- rpc/src/v1/tests/helpers/external_miner.rs | 48 +++++++++++++++ rpc/src/v1/tests/helpers/mod.rs | 2 + 7 files changed, 203 insertions(+), 36 deletions(-) create mode 100644 rpc/src/v1/helpers/external_miner.rs create mode 100644 rpc/src/v1/tests/helpers/external_miner.rs diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 140b8d91f..55c473d1e 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -87,22 +87,22 @@ impl TestBlockChainClient { } /// Set the balance of account `address` to `balance`. - pub fn set_balance(&mut self, address: Address, balance: U256) { + pub fn set_balance(&self, address: Address, balance: U256) { self.balances.write().unwrap().insert(address, balance); } /// Set `code` at `address`. 
- pub fn set_code(&mut self, address: Address, code: Bytes) { + pub fn set_code(&self, address: Address, code: Bytes) { self.code.write().unwrap().insert(address, code); } /// Set storage `position` to `value` for account `address`. - pub fn set_storage(&mut self, address: Address, position: H256, value: H256) { + pub fn set_storage(&self, address: Address, position: H256, value: H256) { self.storage.write().unwrap().insert((address, position), value); } /// Add blocks to test client. - pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { + pub fn add_blocks(&self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); for n in len..(len + count) { let mut header = BlockHeader::new(); diff --git a/rpc/src/v1/helpers/external_miner.rs b/rpc/src/v1/helpers/external_miner.rs new file mode 100644 index 000000000..4cbda8928 --- /dev/null +++ b/rpc/src/v1/helpers/external_miner.rs @@ -0,0 +1,59 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::HashMap; +use std::sync::RwLock; +use util::numbers::U256; +use util::hash::H256; + +/// External miner interface. +pub trait ExternalMinerService: Send + Sync { + /// Submit hashrate for given miner. + fn submit_hashrate(&self, hashrate: U256, id: H256); + + /// Total hashrate. + fn hashrate(&self) -> U256; + + /// Returns true if external miner is mining. + fn is_mining(&self) -> bool; +} + +/// External Miner. +pub struct ExternalMiner { + hashrates: RwLock>, +} + +impl Default for ExternalMiner { + fn default() -> Self { + ExternalMiner { + hashrates: RwLock::new(HashMap::new()), + } + } +} + +impl ExternalMinerService for ExternalMiner { + fn submit_hashrate(&self, hashrate: U256, id: H256) { + self.hashrates.write().unwrap().insert(id, hashrate); + } + + fn hashrate(&self) -> U256 { + self.hashrates.read().unwrap().iter().fold(U256::from(0), |sum, (_, v)| sum + *v) + } + + fn is_mining(&self) -> bool { + !self.hashrates.read().unwrap().is_empty() + } +} diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index b1a5c05ba..8c574cff6 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -16,6 +16,8 @@ mod poll_manager; mod poll_filter; +pub mod external_miner; pub use self::poll_manager::PollManager; pub use self::poll_filter::PollFilter; +pub use self::external_miner::{ExternalMinerService, ExternalMiner}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 0e8b8d863..9ceea2e25 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -15,8 +15,8 @@ // along with Parity. If not, see . //! Eth rpc implementation. 
-use std::collections::{HashMap, HashSet}; -use std::sync::{Arc, Weak, Mutex, RwLock}; +use std::collections::HashSet; +use std::sync::{Arc, Weak, Mutex}; use std::ops::Deref; use ethsync::{SyncProvider, SyncState}; use ethminer::{MinerService}; @@ -25,42 +25,59 @@ use util::numbers::*; use util::sha3::*; use util::rlp::encode; use ethcore::client::*; -use ethcore::block::{IsBlock}; +use ethcore::block::IsBlock; use ethcore::views::*; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; use ethcore::transaction::Transaction as EthTransaction; use v1::traits::{Eth, EthFilter}; use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, TransactionRequest, OptionalValue, Index, Filter, Log}; -use v1::helpers::{PollFilter, PollManager}; +use v1::helpers::{PollFilter, PollManager, ExternalMinerService, ExternalMiner}; use util::keys::store::AccountProvider; /// Eth rpc implementation. -pub struct EthClient +pub struct EthClient where C: BlockChainClient, S: SyncProvider, A: AccountProvider, - M: MinerService { + M: MinerService, + EM: ExternalMinerService { client: Weak, sync: Weak, accounts: Weak, miner: Weak, - hashrates: RwLock>, + external_miner: EM, } -impl EthClient +impl EthClient where C: BlockChainClient, S: SyncProvider, A: AccountProvider, M: MinerService { + /// Creates new EthClient. pub fn new(client: &Arc, sync: &Arc, accounts: &Arc, miner: &Arc) -> Self { + EthClient::new_with_external_miner(client, sync, accounts, miner, ExternalMiner::default()) + } +} + + +impl EthClient + where C: BlockChainClient, + S: SyncProvider, + A: AccountProvider, + M: MinerService, + EM: ExternalMinerService { + + /// Creates new EthClient with custom external miner. + pub fn new_with_external_miner(client: &Arc, sync: &Arc, accounts: &Arc, miner: &Arc, em: EM) + -> EthClient { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), miner: Arc::downgrade(miner), accounts: Arc::downgrade(accounts), - hashrates: RwLock::new(HashMap::new()), + external_miner: em, } } @@ -110,11 +127,12 @@ impl EthClient } } -impl Eth for EthClient +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncProvider + 'static, A: AccountProvider + 'static, - M: MinerService + 'static { + M: MinerService + 'static, + EM: ExternalMinerService + 'static { fn protocol_version(&self, params: Params) -> Result { match params { @@ -152,7 +170,7 @@ impl Eth for EthClient // TODO: return real value of mining once it's implemented. fn is_mining(&self, params: Params) -> Result { match params { - Params::None => to_value(&!self.hashrates.read().unwrap().is_empty()), + Params::None => to_value(&self.external_miner.is_mining()), _ => Err(Error::invalid_params()) } } @@ -160,7 +178,7 @@ impl Eth for EthClient // TODO: return real hashrate once we have mining fn hashrate(&self, params: Params) -> Result { match params { - Params::None => to_value(&self.hashrates.read().unwrap().iter().fold(0u64, |sum, (_, v)| sum + v)), + Params::None => to_value(&self.external_miner.hashrate()), _ => Err(Error::invalid_params()) } } @@ -318,8 +336,8 @@ impl Eth for EthClient fn submit_hashrate(&self, params: Params) -> Result { // TODO: Index should be U256. 
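The external miner introduced above boils down to a map of per-miner hashrates: submit_hashrate overwrites by miner id, the total hashrate is a fold over the values, and is_mining is simply "the map is non-empty". A reduced sketch with u64 standing in for U256 and H256; the final assert matches the 0xfffa/0xfffb/0x1 arithmetic used by the rpc_eth_hashrate test further down:

use std::collections::HashMap;
use std::sync::RwLock;

struct ExternalMiner { hashrates: RwLock<HashMap<u64, u64>> }

impl ExternalMiner {
    fn submit_hashrate(&self, rate: u64, id: u64) {
        // Re-submitting for the same id overwrites the previous rate.
        self.hashrates.write().unwrap().insert(id, rate);
    }
    fn hashrate(&self) -> u64 {
        // Total hashrate is the sum over all reporting miners.
        self.hashrates.read().unwrap().values().fold(0, |sum, v| sum + v)
    }
    fn is_mining(&self) -> bool {
        !self.hashrates.read().unwrap().is_empty()
    }
}

fn main() {
    let em = ExternalMiner { hashrates: RwLock::new(HashMap::new()) };
    em.submit_hashrate(0xfffa, 0); // overwritten by the next call
    em.submit_hashrate(0xfffb, 0);
    em.submit_hashrate(0x1, 1);
    assert_eq!(em.hashrate(), 0xfffc);
    assert!(em.is_mining());
}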
- from_params::<(Index, H256)>(params).and_then(|(rate, id)| { - self.hashrates.write().unwrap().insert(id, rate.value() as u64); + from_params::<(U256, H256)>(params).and_then(|(rate, id)| { + self.external_miner.submit_hashrate(rate, id); to_value(&true) }) } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 35c227e40..e07390165 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -15,20 +15,16 @@ // along with Parity. If not, see . use std::collections::HashMap; -use std::sync::Arc; +use std::sync::{Arc, RwLock}; use jsonrpc_core::IoHandler; use util::hash::{Address, H256}; use util::numbers::U256; use ethcore::client::{TestBlockChainClient, EachBlockWith}; use v1::{Eth, EthClient}; -use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService}; +use v1::tests::helpers::{TestAccount, TestAccountProvider, TestSyncProvider, Config, TestMinerService, TestExternalMiner}; fn blockchain_client() -> Arc { - let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Nothing); - client.set_balance(Address::from(1), U256::from(5)); - client.set_storage(Address::from(1), H256::from(4), H256::from(7)); - client.set_code(Address::from(1), vec![0xff, 0x21]); + let client = TestBlockChainClient::new(); Arc::new(client) } @@ -51,10 +47,11 @@ fn miner_service() -> Arc { } struct EthTester { - _client: Arc, + client: Arc, _sync: Arc, _accounts_provider: Arc, _miner: Arc, + hashrates: Arc>>, pub io: IoHandler, } @@ -64,15 +61,18 @@ impl Default for EthTester { let sync = sync_provider(); let ap = accounts_provider(); let miner = miner_service(); - let eth = EthClient::new(&client, &sync, &ap, &miner).to_delegate(); + let hashrates = Arc::new(RwLock::new(HashMap::new())); + let external_miner = TestExternalMiner::new(hashrates.clone()); + let eth = EthClient::new_with_external_miner(&client, &sync, &ap, &miner, external_miner).to_delegate(); let io = IoHandler::new(); io.add_delegate(eth); EthTester { - _client: client, + client: client, _sync: sync, _accounts_provider: ap, _miner: miner, - io: io + io: io, + hashrates: hashrates, } } } @@ -92,9 +92,35 @@ fn rpc_eth_syncing() { } #[test] -#[ignore] fn rpc_eth_hashrate() { - unimplemented!() + let tester = EthTester::default(); + tester.hashrates.write().unwrap().insert(H256::from(0), U256::from(0xfffa)); + tester.hashrates.write().unwrap().insert(H256::from(0), U256::from(0xfffb)); + tester.hashrates.write().unwrap().insert(H256::from(1), U256::from(0x1)); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_hashrate", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":"0xfffc","id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_submit_hashrate() { + let tester = EthTester::default(); + + let request = r#"{ + "jsonrpc": "2.0", + "method": "eth_submitHashrate", + "params": [ + "0x0000000000000000000000000000000000000000000000000000000000500000", + "0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c"], + "id": 1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.hashrates.read().unwrap().get(&H256::from("0x59daa26581d0acd1fce254fb7e85952f4c09d0915afd33d3886cd914bc7d283c")).cloned(), + Some(U256::from(0x500_000))); } #[test] @@ -127,14 +153,20 @@ fn rpc_eth_accounts() { #[test] fn rpc_eth_block_number() { + let tester = EthTester::default(); + 
tester.client.add_blocks(10, EachBlockWith::Nothing); + let request = r#"{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":"0x0a","id":1}"#; - assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); } #[test] fn rpc_eth_balance() { + let tester = EthTester::default(); + tester.client.set_balance(Address::from(1), U256::from(5)); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getBalance", @@ -143,11 +175,14 @@ fn rpc_eth_balance() { }"#; let response = r#"{"jsonrpc":"2.0","result":"0x05","id":1}"#; - assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); } #[test] fn rpc_eth_storage_at() { + let tester = EthTester::default(); + tester.client.set_storage(Address::from(1), H256::from(4), H256::from(7)); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getStorageAt", @@ -156,7 +191,7 @@ fn rpc_eth_storage_at() { }"#; let response = r#"{"jsonrpc":"2.0","result":"0x07","id":1}"#; - assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); } #[test] @@ -226,6 +261,9 @@ fn rpc_eth_uncle_count_by_block_number() { #[test] fn rpc_eth_code() { + let tester = EthTester::default(); + tester.client.set_code(Address::from(1), vec![0xff, 0x21]); + let request = r#"{ "jsonrpc": "2.0", "method": "eth_getCode", @@ -234,7 +272,7 @@ fn rpc_eth_code() { }"#; let response = r#"{"jsonrpc":"2.0","result":"0xff21","id":1}"#; - assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); } #[test] diff --git a/rpc/src/v1/tests/helpers/external_miner.rs b/rpc/src/v1/tests/helpers/external_miner.rs new file mode 100644 index 000000000..a5111b302 --- /dev/null +++ b/rpc/src/v1/tests/helpers/external_miner.rs @@ -0,0 +1,48 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::collections::HashMap; +use std::sync::{Arc, RwLock}; +use util::numbers::U256; +use util::hash::H256; +use v1::helpers::ExternalMinerService; + +/// Test ExternalMinerService; +pub struct TestExternalMiner { + pub hashrates: Arc>> +} + +impl TestExternalMiner { + pub fn new(hashrates: Arc>>) -> Self { + TestExternalMiner { + hashrates: hashrates, + } + } +} + +impl ExternalMinerService for TestExternalMiner { + fn submit_hashrate(&self, hashrate: U256, id: H256) { + self.hashrates.write().unwrap().insert(id, hashrate); + } + + fn hashrate(&self) -> U256 { + self.hashrates.read().unwrap().iter().fold(U256::from(0), |sum, (_, v)| sum + *v) + } + + fn is_mining(&self) -> bool { + !self.hashrates.read().unwrap().is_empty() + } +} diff --git a/rpc/src/v1/tests/helpers/mod.rs b/rpc/src/v1/tests/helpers/mod.rs index fc429982e..fc652e7d6 100644 --- a/rpc/src/v1/tests/helpers/mod.rs +++ b/rpc/src/v1/tests/helpers/mod.rs @@ -17,7 +17,9 @@ mod account_provider; mod sync_provider; mod miner_service; +mod external_miner; pub use self::account_provider::{TestAccount, TestAccountProvider}; pub use self::sync_provider::{Config, TestSyncProvider}; pub use self::miner_service::{TestMinerService}; +pub use self::external_miner::TestExternalMiner; From 4e5ebc94579cadef5a1ca1c42f62ffbf6959d87b Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 14 Mar 2016 14:22:18 +0100 Subject: [PATCH 216/222] missing mut --- parity/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/main.rs b/parity/main.rs index 26e3e4b78..b8cc2a0f0 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -610,7 +610,7 @@ impl Informant { let cache_info = client.blockchain_cache_info(); let sync_info = sync.status(); - let write_report = self.report.write().unwrap(); + let mut write_report = self.report.write().unwrap(); let report = client.report(); if let (_, _, &Some(ref last_report)) = ( From 47ca84041b8c4b0ed346f4c10b75d015369f1266 Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 14 Mar 2016 14:59:09 +0100 Subject: [PATCH 217/222] tests for eth_mining, eth_compileLLL, eth_compileSolidity, eth_compileSerpent --- rpc/src/v1/tests/eth.rs | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index e07390165..6bc929709 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -130,9 +130,18 @@ fn rpc_eth_author() { } #[test] -#[ignore] fn rpc_eth_mining() { - unimplemented!() + let tester = EthTester::default(); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":false,"id":1}"#; + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); + + tester.hashrates.write().unwrap().insert(H256::from(1), U256::from(0x1)); + + let request = r#"{"jsonrpc": "2.0", "method": "eth_mining", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(tester.io.handle_request(request), Some(response.to_owned())); } #[test] @@ -313,5 +322,29 @@ fn rpc_eth_compilers() { assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); } +#[test] +fn rpc_eth_compile_lll() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileLLL", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), 
Some(response.to_owned())); +} + +#[test] +fn rpc_eth_compile_solidity() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSolidity", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + +#[test] +fn rpc_eth_compile_serpent() { + let request = r#"{"jsonrpc": "2.0", "method": "eth_compileSerpent", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal error","data":null},"id":1}"#; + + assert_eq!(EthTester::default().io.handle_request(request), Some(response.to_owned())); +} + From 9b241faf010303da6c2245ba2529e439bded5034 Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 14 Mar 2016 17:01:10 +0100 Subject: [PATCH 218/222] uncle method mock --- ethcore/src/views.rs | 7 ++++++- rpc/src/v1/impls/eth.rs | 15 +++++++++++++++ rpc/src/v1/traits/eth.rs | 9 ++++++--- 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/ethcore/src/views.rs b/ethcore/src/views.rs index 4a7ff054d..745cbff2c 100644 --- a/ethcore/src/views.rs +++ b/ethcore/src/views.rs @@ -223,6 +223,11 @@ impl<'a> BlockView<'a> { pub fn uncle_hashes(&self) -> Vec { self.rlp.at(2).iter().map(|rlp| rlp.as_raw().sha3()).collect() } + + /// Return nth uncle. + pub fn uncle_at(&self, index: usize) -> Option
From 9b241faf010303da6c2245ba2529e439bded5034 Mon Sep 17 00:00:00 2001
From: debris
Date: Mon, 14 Mar 2016 17:01:10 +0100
Subject: [PATCH 218/222] uncle method mock

---
 ethcore/src/views.rs     |  7 ++++++-
 rpc/src/v1/impls/eth.rs  | 15 +++++++++++++++
 rpc/src/v1/traits/eth.rs |  9 ++++++---
 3 files changed, 27 insertions(+), 4 deletions(-)

diff --git a/ethcore/src/views.rs b/ethcore/src/views.rs
index 4a7ff054d..745cbff2c 100644
--- a/ethcore/src/views.rs
+++ b/ethcore/src/views.rs
@@ -223,6 +223,11 @@ impl<'a> BlockView<'a> {
 	pub fn uncle_hashes(&self) -> Vec<H256> {
 		self.rlp.at(2).iter().map(|rlp| rlp.as_raw().sha3()).collect()
 	}
+
+	/// Returns the nth uncle.
+	pub fn uncle_at(&self, index: usize) -> Option<Header> {
+		self.rlp.at(2).iter().nth(index).map(|rlp| rlp.as_val())
+	}
 }
 
 impl<'a> Hashable for BlockView<'a> {
@@ -280,7 +285,7 @@ impl<'a> HeaderView<'a> {
 
 	/// Returns block number.
 	pub fn number(&self) -> BlockNumber { self.rlp.val_at(8) }
-	
+
 	/// Returns block gas limit.
 	pub fn gas_limit(&self) -> U256 { self.rlp.val_at(9) }
 
diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs
index 9ceea2e25..fda391304 100644
--- a/rpc/src/v1/impls/eth.rs
+++ b/rpc/src/v1/impls/eth.rs
@@ -125,6 +125,11 @@ impl EthClient
 			None => Ok(Value::Null)
 		}
 	}
+
+	fn uncle(&self, _block: BlockId, _index: usize) -> Result<Value, Error> {
+		// TODO: implement!
+		Ok(Value::Null)
+	}
 }
 
 impl Eth for EthClient
@@ -285,6 +290,16 @@
 			.and_then(|(number, index)| self.transaction(TransactionId::Location(number.into(), index.value())))
 	}
 
+	fn uncle_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(H256, Index)>(params)
+			.and_then(|(hash, index)| self.uncle(BlockId::Hash(hash), index.value()))
+	}
+
+	fn uncle_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
+		from_params::<(BlockNumber, Index)>(params)
+			.and_then(|(number, index)| self.uncle(number.into(), index.value()))
+	}
+
 	fn compilers(&self, params: Params) -> Result<Value, Error> {
 		match params {
 			Params::None => to_value(&vec![] as &Vec<String>),
diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs
index bcd7e7cfe..8a48e0dfe 100644
--- a/rpc/src/v1/traits/eth.rs
+++ b/rpc/src/v1/traits/eth.rs
@@ -102,7 +102,10 @@ pub trait Eth: Sized + Send + Sync + 'static {
 	fn transaction_receipt(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
 
 	/// Returns an uncle at the given block and index.
-	fn uncle_at(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
+	fn uncle_by_block_hash_and_index(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
+
+	/// Returns an uncle at the given block and index.
+	fn uncle_by_block_number_and_index(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
 
 	/// Returns available compilers.
 	fn compilers(&self, _: Params) -> Result<Value, Error> { rpc_unimplemented!() }
@@ -158,8 +161,8 @@
 		delegate.add_method("eth_getTransactionByBlockHashAndIndex", Eth::transaction_by_block_hash_and_index);
 		delegate.add_method("eth_getTransactionByBlockNumberAndIndex", Eth::transaction_by_block_number_and_index);
 		delegate.add_method("eth_getTransactionReceipt", Eth::transaction_receipt);
-		delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_at);
-		delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_at);
+		delegate.add_method("eth_getUncleByBlockHashAndIndex", Eth::uncle_by_block_hash_and_index);
+		delegate.add_method("eth_getUncleByBlockNumberAndIndex", Eth::uncle_by_block_number_and_index);
 		delegate.add_method("eth_getCompilers", Eth::compilers);
 		delegate.add_method("eth_compileLLL", Eth::compile_lll);
 		delegate.add_method("eth_compileSolidity", Eth::compile_solidity);
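
The `uncle_at` view added in patch 218 reads the block's uncle list (the third item of the block RLP) and yields `None` for an out-of-range index rather than panicking. The shape of that lookup in plain Rust, with a slice standing in for the RLP list iterator:

    // Mirrors `self.rlp.at(2).iter().nth(index).map(|rlp| rlp.as_val())`:
    // walk to the nth element and decode it only if it exists.
    fn uncle_at<T: Clone>(uncles: &[T], index: usize) -> Option<T> {
        uncles.iter().nth(index).cloned()
    }

    fn main() {
        let uncles = vec!["uncle-0", "uncle-1"];
        assert_eq!(uncle_at(&uncles, 1), Some("uncle-1"));
        assert_eq!(uncle_at(&uncles, 5), None); // out of range: None, no panic
    }
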
From 81beaf10948fa00fb51935ea6b516ca9b8bf2d4b Mon Sep 17 00:00:00 2001
From: debris
Date: Mon, 14 Mar 2016 17:53:08 +0100
Subject: [PATCH 219/222] update dockerfiles

---
 docker/ubuntu-dev/Dockerfile |  8 ++------
 docker/ubuntu-jit/Dockerfile | 11 +++--------
 docker/ubuntu/Dockerfile     | 10 +---------
 3 files changed, 6 insertions(+), 23 deletions(-)

diff --git a/docker/ubuntu-dev/Dockerfile b/docker/ubuntu-dev/Dockerfile
index 8b016e6fd..e9113afdf 100644
--- a/docker/ubuntu-dev/Dockerfile
+++ b/docker/ubuntu-dev/Dockerfile
@@ -9,7 +9,7 @@ RUN apt-get update && \
 	software-properties-common \
 	curl \
 	gcc \
-	wget \
+	wget \
 	git \
 	# evmjit dependencies
 	zlib1g-dev \
@@ -18,9 +18,8 @@
 # cmake, llvm and rocksdb ppas. then update ppas
 RUN add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \
 	add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \
-	add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \
 	apt-get update && \
-	apt-get install -y --force-yes cmake llvm-3.7-dev librocksdb
+	apt-get install -y --force-yes cmake llvm-3.7-dev
 
 # install evmjit
 RUN git clone https://github.com/debris/evmjit && \
@@ -31,9 +30,6 @@
 # install multirust
 RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
 
-# install nightly and make it default
-RUN multirust update nightly && multirust default nightly
-
 # export rust LIBRARY_PATH
 ENV LIBRARY_PATH /usr/local/lib
 
diff --git a/docker/ubuntu-jit/Dockerfile b/docker/ubuntu-jit/Dockerfile
index 90ce531be..c50aa83b5 100644
--- a/docker/ubuntu-jit/Dockerfile
+++ b/docker/ubuntu-jit/Dockerfile
@@ -8,7 +8,7 @@ RUN apt-get update && \
 	# add-apt-repository
 	software-properties-common \
 	curl \
-	wget \
+	wget \
 	git \
 	gcc \
 	# evmjit dependencies
@@ -18,9 +18,8 @@
 # cmake, llvm and rocksdb ppas. then update ppas
 RUN add-apt-repository -y "ppa:george-edison55/cmake-3.x" && \
 	add-apt-repository "deb http://llvm.org/apt/trusty/ llvm-toolchain-trusty-3.7 main" && \
-	add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \
 	apt-get update && \
-	apt-get install -y --force-yes cmake llvm-3.7-dev librocksdb
+	apt-get install -y --force-yes cmake llvm-3.7-dev
 
 # install evmjit
 RUN git clone https://github.com/debris/evmjit && \
@@ -31,9 +30,6 @@
 # install multirust
 RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
 
-# install nightly and make it default
-RUN multirust update nightly && multirust default nightly
-
 # export rust LIBRARY_PATH
 ENV LIBRARY_PATH /usr/local/lib
 
@@ -41,7 +37,6 @@ ENV LIBRARY_PATH /usr/local/lib
 ENV RUST_BACKTRACE 1
 
 # build parity
-# TODO: add jit feature
 RUN git clone https://github.com/ethcore/parity && \
 	cd parity && \
-	cargo install --features rpc
+	cargo install --features ethcore/jit
diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile
index 812e66e9e..3273b816a 100644
--- a/docker/ubuntu/Dockerfile
+++ b/docker/ubuntu/Dockerfile
@@ -9,17 +9,9 @@ RUN apt-get update && \
 	# add-apt-repository
 	software-properties-common
 
-# rocksdb ppas. then update ppas
-RUN add-apt-repository "deb http://ppa.launchpad.net/giskou/librocksdb/ubuntu trusty main" && \
-	apt-get update && \
-	apt-get install -y --force-yes librocksdb
-
 # install multirust
 RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes
 
-# install nightly and make it default
-RUN multirust update nightly && multirust default nightly
-
 # export rust LIBRARY_PATH
 ENV LIBRARY_PATH /usr/local/lib
 
@@ -29,4 +21,4 @@ ENV RUST_BACKTRACE 1
 # build parity
 RUN git clone https://github.com/ethcore/parity && \
 	cd parity && \
-	cargo install --features rpc
+	cargo install
From 97051cb949e38e18f6024012d52e42b6bb160161 Mon Sep 17 00:00:00 2001
From: Gav Wood
Date: Mon, 14 Mar 2016 18:20:24 +0100
Subject: [PATCH 220/222] Add RLP, not a data item.

---
 ethcore/src/block.rs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index 6f3986391..1ef28188b 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -265,7 +265,7 @@ impl<'x> OpenBlock<'x> {
 		let mut s = self;
 		s.engine.on_close_block(&mut s.block);
 		s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|ref e| e.rlp_bytes().to_vec()).collect());
-		let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append(&u.rlp(Seal::With)); s} ).out();
+		let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
 		s.block.base.header.uncles_hash = uncle_bytes.sha3();
 		s.block.base.header.state_root = s.block.state.root().clone();
 		s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|ref r| r.rlp_bytes().to_vec()).collect());
@@ -420,4 +420,9 @@ mod tests {
 		assert_eq!(orig_db.keys(), db.keys());
 		assert!(orig_db.keys().iter().filter(|k| orig_db.get(k.0) != db.get(k.0)).next() == None);
 	}
+
+	#[test]
+	fn enact_block_with_uncle() {
+		// TODO: test for when there's an uncle.
+	}
 }
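
Patch 220's one-line change is subtle: `RlpStream::append` encodes its argument as a fresh data item, so handing it bytes that are already a complete RLP encoding wraps them a second time, silently changing the uncles hash derived from `uncle_bytes`; `append_raw(bytes, 1)` splices a pre-encoded item in verbatim. A toy stand-in (not the real util RLP) that makes the difference visible:

    // Toy "stream": append wraps its payload in a one-byte length header,
    // the way an RLP item gets a header; append_raw splices bytes as-is.
    struct ToyStream(Vec<u8>);

    impl ToyStream {
        fn append(&mut self, payload: &[u8]) {
            self.0.push(payload.len() as u8); // new header around the payload
            self.0.extend_from_slice(payload);
        }
        fn append_raw(&mut self, encoded: &[u8]) {
            self.0.extend_from_slice(encoded); // already a complete item
        }
    }

    fn main() {
        let item = [1u8, 0xAA]; // pretend pre-encoded item: header + one byte
        let (mut a, mut b) = (ToyStream(Vec::new()), ToyStream(Vec::new()));
        a.append(&item);     // [2, 1, 170]: item wrapped again (the bug)
        b.append_raw(&item); // [1, 170]:    spliced unchanged (the fix)
        assert_ne!(a.0, b.0);
    }
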
From c476e7da3110768aaf6325b50fd001fa270681d4 Mon Sep 17 00:00:00 2001
From: debris
Date: Mon, 14 Mar 2016 18:25:05 +0100
Subject: [PATCH 221/222] update docker

---
 docker/ubuntu/Dockerfile | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile
index 3273b816a..141cbdfb0 100644
--- a/docker/ubuntu/Dockerfile
+++ b/docker/ubuntu/Dockerfile
@@ -3,11 +3,10 @@ FROM ubuntu:14.04
 # install tools and dependencies
 RUN apt-get update && \
 	apt-get install -y \
-	gcc \
+	g++ \
 	curl \
 	git \
-	# add-apt-repository
-	software-properties-common
+	make
 
 # install multirust
 RUN curl -sf https://raw.githubusercontent.com/brson/multirust/master/blastoff.sh | sh -s -- --yes

From dfef09161cd0453aeeeee8b181e502dc58ed2fc1 Mon Sep 17 00:00:00 2001
From: debris
Date: Mon, 14 Mar 2016 18:47:23 +0100
Subject: [PATCH 222/222] update dockerfiles

---
 docker/ubuntu-dev/Dockerfile | 2 +-
 docker/ubuntu-jit/Dockerfile | 4 ++--
 docker/ubuntu/Dockerfile     | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/docker/ubuntu-dev/Dockerfile b/docker/ubuntu-dev/Dockerfile
index e9113afdf..05e8dfe8f 100644
--- a/docker/ubuntu-dev/Dockerfile
+++ b/docker/ubuntu-dev/Dockerfile
@@ -8,7 +8,7 @@ RUN apt-get update && \
 	# add-apt-repository
 	software-properties-common \
 	curl \
-	gcc \
+	g++ \
 	wget \
 	git \
 	# evmjit dependencies
diff --git a/docker/ubuntu-jit/Dockerfile b/docker/ubuntu-jit/Dockerfile
index c50aa83b5..138882d2b 100644
--- a/docker/ubuntu-jit/Dockerfile
+++ b/docker/ubuntu-jit/Dockerfile
@@ -10,7 +10,7 @@ RUN apt-get update && \
 	curl \
 	wget \
 	git \
-	gcc \
+	g++ \
 	# evmjit dependencies
 	zlib1g-dev \
 	libedit-dev
@@ -39,4 +39,4 @@ ENV RUST_BACKTRACE 1
 # build parity
 RUN git clone https://github.com/ethcore/parity && \
 	cd parity && \
-	cargo install --features ethcore/jit
+	cargo build --release --features ethcore/jit
diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile
index 141cbdfb0..38c628d0e 100644
--- a/docker/ubuntu/Dockerfile
+++ b/docker/ubuntu/Dockerfile
@@ -20,4 +20,4 @@ ENV RUST_BACKTRACE 1
 # build parity
 RUN git clone https://github.com/ethcore/parity && \
 	cd parity && \
-	cargo install
+	cargo build --release