From 1743e480e314122ceaf462b6a20b9ab420929888 Mon Sep 17 00:00:00 2001 From: debris Date: Sat, 5 Mar 2016 11:35:44 +0100 Subject: [PATCH 01/61] rust_stable --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 8d2349dae..7213b8f09 100644 --- a/.travis.yml +++ b/.travis.yml @@ -13,6 +13,8 @@ matrix: allow_failures: - rust: nightly include: + - rust: stable + env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: beta env: FEATURES="--features travis-beta" KCOV_FEATURES="" TARGETS="-p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity" ARCHIVE_SUFFIX="-${TRAVIS_OS_NAME}-${TRAVIS_TAG}" - rust: nightly @@ -52,7 +54,7 @@ after_success: | ./kcov-master/tmp/usr/local/bin/kcov --coveralls-id=${TRAVIS_JOB_ID} --exclude-pattern /usr/,/.cargo,/root/.multirust target/kcov target/debug/parity-* && [ $TRAVIS_BRANCH = master ] && [ $TRAVIS_PULL_REQUEST = false ] && - [ $TRAVIS_RUST_VERSION = beta ] && + [ $TRAVIS_RUST_VERSION = stable ] && cargo doc --no-deps --verbose ${KCOV_FEATURES} ${TARGETS} && echo '' > target/doc/index.html && pip install --user ghp-import && From d330f0b7b7fa5db1b5891d7c1e4e61136603fed5 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sat, 5 Mar 2016 12:53:54 +0100 Subject: [PATCH 02/61] Revert "Transaction Queue integration" --- Cargo.lock | 19 ------ ethcore/src/client.rs | 21 ++----- ethcore/src/service.rs | 2 +- sync/Cargo.toml | 1 - sync/src/chain.rs | 107 ++++++---------------------------- sync/src/lib.rs | 14 ++--- sync/src/tests/chain.rs | 51 ++++++++-------- sync/src/tests/helpers.rs | 61 +++++-------------- sync/src/transaction_queue.rs | 23 +++++--- 9 files changed, 80 insertions(+), 219 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 510e69b59..55ed996ed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,14 +146,6 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "deque" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "docopt" version = "0.6.78" @@ -293,7 +285,6 @@ dependencies = [ "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -664,16 +655,6 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "rayon" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "regex" version = "0.1.54" diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 852ba6a36..858185873 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -138,9 +138,6 @@ pub trait BlockChainClient : Sync + Send { /// 
Get block total difficulty. fn block_total_difficulty(&self, id: BlockId) -> Option; - /// Get address nonce. - fn nonce(&self, address: &Address) -> U256; - /// Get block hash. fn block_hash(&self, id: BlockId) -> Option; @@ -368,14 +365,18 @@ impl Client where V: Verifier { bad_blocks.insert(header.hash()); continue; } + let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { bad_blocks.insert(header.hash()); break; } + + // Insert block + let closed_block = closed_block.unwrap(); + self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone()); good_blocks.push(header.hash()); - // Are we committing an era? let ancient = if header.number() >= HISTORY { let n = header.number() - HISTORY; let chain = self.chain.read().unwrap(); @@ -385,16 +386,10 @@ impl Client where V: Verifier { }; // Commit results - let closed_block = closed_block.unwrap(); - let receipts = closed_block.block().receipts().clone(); closed_block.drain() .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); - // And update the chain - self.chain.write().unwrap() - .insert_block(&block.bytes, receipts); - self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } @@ -413,7 +408,7 @@ impl Client where V: Verifier { if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { good: good_blocks, - retracted: bad_blocks, + bad: bad_blocks, })).unwrap(); } } @@ -586,10 +581,6 @@ impl BlockChainClient for Client where V: Verifier { Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } - fn nonce(&self, address: &Address) -> U256 { - self.state().nonce(address) - } - fn block_hash(&self, id: BlockId) -> Option { let chain = self.chain.read().unwrap(); Self::block_hash(&chain, id) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index a80adb0ba..756d02407 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -30,7 +30,7 @@ pub enum SyncMessage { /// Hashes of blocks imported to blockchain good: Vec, /// Hashes of blocks not imported to blockchain - retracted: Vec, + bad: Vec, }, /// A block is ready BlockVerified, diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 0097cd47e..f10a772e3 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -17,7 +17,6 @@ time = "0.1.34" rand = "0.3.13" heapsize = "0.3" rustc-serialize = "0.3" -rayon = "0.3.1" [features] default = [] diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ddf30854a..530cfa424 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -30,17 +30,14 @@ /// use util::*; -use rayon::prelude::*; use std::mem::{replace}; -use ethcore::views::{HeaderView, BlockView}; +use ethcore::views::{HeaderView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; use range_collection::{RangeCollection, ToUsize, FromUsize}; use ethcore::error::*; use ethcore::block::Block; -use ethcore::transaction::SignedTransaction; use io::SyncIo; -use transaction_queue::TransactionQueue; use time; use super::SyncConfig; @@ -212,8 +209,6 @@ pub struct ChainSync { max_download_ahead_blocks: usize, /// Network ID network_id: U256, - /// Transactions Queue - transaction_queue: Mutex, } type RlpResponseResult = Result, PacketDecodeError>; @@ -239,7 +234,6 @@ impl ChainSync { last_send_block_number: 0, 
max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, - transaction_queue: Mutex::new(TransactionQueue::new()), } } @@ -298,7 +292,6 @@ impl ChainSync { self.starting_block = 0; self.highest_block = None; self.have_common_block = false; - self.transaction_queue.lock().unwrap().clear(); self.starting_block = io.chain().chain_info().best_block_number; self.state = SyncState::NotSynced; } @@ -491,7 +484,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } @@ -928,16 +921,8 @@ impl ChainSync { } } /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - let chain = io.chain(); - let item_count = r.item_count(); - trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); - let fetch_latest_nonce = |a : &Address| chain.nonce(a); - for i in 0..item_count { - let tx: SignedTransaction = try!(r.val_at(i)); - self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce); - } - Ok(()) + fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + Ok(()) } /// Send Status message @@ -1263,37 +1248,6 @@ impl ChainSync { } self.last_send_block_number = chain.best_block_number; } - - /// called when block is imported to chain, updates transactions queue - pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) { - fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { - let block = chain - .block(BlockId::Hash(hash.clone())) - // Client should send message after commit to db and inserting to chain. 
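The chain_new_blocks hook being removed by this revert returns in PATCH 08, so its bookkeeping is worth spelling out once. Stripped of the rayon parallelism, it reduces to the sketch below; the types are simplified stand-ins for illustration, but remove_all and add_all are the real queue entry points used in the hunk:

    use std::collections::HashMap;

    // Simplified stand-ins; the real types live in ethcore and util.
    type H256 = u64;
    type Address = u64;

    #[derive(Clone)]
    struct SignedTransaction { hash: H256, sender: Address }

    #[derive(Default)]
    struct TransactionQueue; // stand-in for sync/src/transaction_queue.rs

    impl TransactionQueue {
        // Transactions confirmed by imported ("good") blocks leave the queue.
        fn remove_all(&mut self, _hashes: &[H256], _fetch_nonce: impl Fn(&Address) -> u64) {}
        // Transactions from retracted blocks are put back, re-validated
        // against the sender's latest on-chain nonce.
        fn add_all(&mut self, _txs: Vec<SignedTransaction>, _fetch_nonce: impl Fn(&Address) -> u64) {}
    }

    // The sequential core of chain_new_blocks: `blocks` plays the role of
    // fetch_transactions (block hash -> transactions in that block).
    fn chain_new_blocks(
        queue: &mut TransactionQueue,
        blocks: &HashMap<H256, Vec<SignedTransaction>>,
        nonce: impl Fn(&Address) -> u64 + Copy,
        good: &[H256],
        retracted: &[H256],
    ) {
        for h in good {
            let hashes: Vec<H256> = blocks[h].iter().map(|tx| tx.hash).collect();
            queue.remove_all(&hashes, nonce);
        }
        for h in retracted {
            queue.add_all(blocks[h].clone(), nonce);
        }
    }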
- .expect("Expected in-chain blocks."); - let block = BlockView::new(&block); - block.transactions() - } - - - let chain = io.chain(); - let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h)); - - good.for_each(|txs| { - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); - transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); - }); - retracted.for_each(|txs| { - // populate sender - for tx in &txs { - let _sender = tx.sender(); - } - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); - }); - } } #[cfg(test)] @@ -1434,7 +1388,7 @@ mod tests { #[test] fn finds_lagging_peers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10)); let chain_info = client.chain_info(); @@ -1448,7 +1402,7 @@ mod tests { #[test] fn calculates_tree_for_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(15, EachBlockWith::Uncle); + client.add_blocks(15, false); let start = client.block_hash_delta_minus(4); let end = client.block_hash_delta_minus(2); @@ -1465,7 +1419,7 @@ mod tests { #[test] fn sends_new_hashes_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1484,7 +1438,7 @@ mod tests { #[test] fn sends_latest_block_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1502,7 +1456,7 @@ mod tests { #[test] fn handles_peer_new_block_mallformed() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + client.add_blocks(10, false); let block_data = get_dummy_block(11, client.chain_info().best_block_hash); @@ -1520,7 +1474,7 @@ mod tests { #[test] fn handles_peer_new_block() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + client.add_blocks(10, false); let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); @@ -1538,7 +1492,7 @@ mod tests { #[test] fn handles_peer_new_block_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + client.add_blocks(10, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1554,7 +1508,7 @@ mod tests { #[test] fn handles_peer_new_hashes() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + client.add_blocks(10, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1570,7 +1524,7 @@ mod tests { #[test] fn handles_peer_new_hashes_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, EachBlockWith::Uncle); + 
client.add_blocks(10, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1588,7 +1542,7 @@ mod tests { #[test] fn hashes_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1606,7 +1560,7 @@ mod tests { #[test] fn block_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1619,37 +1573,10 @@ mod tests { assert!(result.is_ok()); } - #[test] - fn should_add_transactions_to_queue() { - // given - let mut client = TestBlockChainClient::new(); - client.add_blocks(98, EachBlockWith::Uncle); - client.add_blocks(1, EachBlockWith::UncleAndTransaction); - client.add_blocks(1, EachBlockWith::Transaction); - let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); - - let good_blocks = vec![client.block_hash_delta_minus(2)]; - let retracted_blocks = vec![client.block_hash_delta_minus(1)]; - - let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); - - // when - sync.chain_new_blocks(&io, &[], &good_blocks); - assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); - assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks); - - // then - let status = sync.transaction_queue.lock().unwrap().status(); - assert_eq!(status.pending, 1); - assert_eq!(status.future, 0); - } - #[test] fn returns_requested_block_headers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); @@ -1673,7 +1600,7 @@ mod tests { #[test] fn returns_requested_block_headers_reverse() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, EachBlockWith::Uncle); + client.add_blocks(100, false); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67a09f3b..74541660d 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -54,7 +54,6 @@ extern crate ethcore; extern crate env_logger; extern crate time; extern crate rand; -extern crate rayon; #[macro_use] extern crate heapsize; @@ -71,7 +70,8 @@ use io::NetSyncIo; mod chain; mod io; mod range_collection; -mod transaction_queue; +// TODO [todr] Made public to suppress dead code warnings +pub mod transaction_queue; #[cfg(test)] mod tests; @@ -153,14 +153,8 @@ impl NetworkProtocolHandler for EthSync { } fn message(&self, io: &NetworkContext, message: &SyncMessage) { - match *message { - SyncMessage::BlockVerified => { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); - }, - SyncMessage::NewChainBlocks { ref good, ref retracted } => { - let sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted); - } + if let SyncMessage::BlockVerified = *message { + 
self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); } } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index 58f50916e..b01c894a0 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -24,8 +24,8 @@ use super::helpers::*; fn two_peers() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1000, false); + net.peer_mut(2).chain.add_blocks(1000, false); net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); @@ -35,8 +35,8 @@ fn two_peers() { fn status_after_sync() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1000, false); + net.peer_mut(2).chain.add_blocks(1000, false); net.sync(); let status = net.peer(0).sync.status(); assert_eq!(status.state, SyncState::Idle); @@ -45,8 +45,8 @@ fn status_after_sync() { #[test] fn takes_few_steps() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(100, false); + net.peer_mut(2).chain.add_blocks(100, false); let total_steps = net.sync(); assert!(total_steps < 7); } @@ -56,9 +56,8 @@ fn empty_blocks() { ::env_logger::init().ok(); let mut net = TestNet::new(3); for n in 0..200 { - let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; - net.peer_mut(1).chain.add_blocks(5, with.clone()); - net.peer_mut(2).chain.add_blocks(5, with); + net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); + net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); } net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); @@ -69,14 +68,14 @@ fn empty_blocks() { fn forked() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(0).chain.add_blocks(300, EachBlockWith::Uncle); - net.peer_mut(1).chain.add_blocks(300, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(300, EachBlockWith::Uncle); - net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); //fork - net.peer_mut(1).chain.add_blocks(200, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle); - net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); //fork between 1 and 2 - net.peer_mut(2).chain.add_blocks(10, EachBlockWith::Nothing); + net.peer_mut(0).chain.add_blocks(300, false); + net.peer_mut(1).chain.add_blocks(300, false); + net.peer_mut(2).chain.add_blocks(300, false); + net.peer_mut(0).chain.add_blocks(100, true); //fork + net.peer_mut(1).chain.add_blocks(200, false); + net.peer_mut(2).chain.add_blocks(200, false); + net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 + net.peer_mut(2).chain.add_blocks(10, true); // peer 1 has the best chain of 601 blocks let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); @@ -88,8 +87,8 @@ fn forked() { #[test] fn restart() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); - net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1000, false); + 
net.peer_mut(2).chain.add_blocks(1000, false); net.sync_steps(8); @@ -110,8 +109,8 @@ fn status_empty() { #[test] fn status_packet() { let mut net = TestNet::new(2); - net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle); - net.peer_mut(1).chain.add_blocks(1, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(100, false); + net.peer_mut(1).chain.add_blocks(1, false); net.start(); @@ -124,10 +123,10 @@ fn status_packet() { #[test] fn propagate_hashes() { let mut net = TestNet::new(6); - net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(10, false); net.sync(); - net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(10, false); net.sync(); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -150,10 +149,10 @@ fn propagate_hashes() { #[test] fn propagate_blocks() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(10, false); net.sync(); - net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(10, false); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -165,7 +164,7 @@ fn propagate_blocks() { #[test] fn restart_on_malformed_block() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(10, false); net.peer_mut(1).chain.corrupt_block(6); net.sync_steps(10); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 5b53ad90b..e170a4a85 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -22,7 +22,7 @@ use io::SyncIo; use chain::ChainSync; use ::SyncConfig; use ethcore::receipt::Receipt; -use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; +use ethcore::transaction::LocalizedTransaction; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; @@ -34,14 +34,6 @@ pub struct TestBlockChainClient { pub difficulty: RwLock, } -#[derive(Clone)] -pub enum EachBlockWith { - Nothing, - Uncle, - Transaction, - UncleAndTransaction -} - impl TestBlockChainClient { pub fn new() -> TestBlockChainClient { @@ -52,53 +44,30 @@ impl TestBlockChainClient { last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), }; - client.add_blocks(1, EachBlockWith::Nothing); // add genesis block + client.add_blocks(1, true); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } - pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { + pub fn add_blocks(&mut self, count: usize, empty: bool) { let len = self.numbers.read().unwrap().len(); for n in len..(len + count) { let mut header = BlockHeader::new(); header.difficulty = From::from(n); header.parent_hash = self.last_hash.read().unwrap().clone(); header.number = n as BlockNumber; - let uncles = match with { - EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { - let mut uncles = RlpStream::new_list(1); - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - uncles - }, - _ => RlpStream::new_list(0) - }; - let txs = match with { - EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { - let mut txs = 
RlpStream::new_list(1); - let keypair = KeyPair::create().unwrap(); - let tx = Transaction { - action: Action::Create, - value: U256::from(100), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::one(), - nonce: U256::zero() - }; - let signed_tx = tx.sign(&keypair.secret()); - txs.append(&signed_tx); - txs.out() - }, - _ => rlp::NULL_RLP.to_vec() - }; - + let mut uncles = RlpStream::new_list(if empty {0} else {1}); + if !empty { + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + } let mut rlp = RlpStream::new_list(3); rlp.append(&header); - rlp.append_raw(&txs, 1); + rlp.append_raw(&rlp::NULL_RLP, 1); rlp.append_raw(uncles.as_raw(), 1); self.import_block(rlp.as_raw().to_vec()).unwrap(); } @@ -140,10 +109,6 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } - fn nonce(&self, _address: &Address) -> U256 { - U256::zero() - } - fn code(&self, _address: &Address) -> Option { unimplemented!(); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 83665dfda..4f5622a2f 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -219,19 +219,19 @@ impl TransactionQueue { /// Removes all transactions identified by hashes given in slice /// /// If gap is introduced marks subsequent transactions as future - pub fn remove_all(&mut self, transaction_hashes: &[H256], fetch_nonce: T) + pub fn remove_all(&mut self, txs: &[H256], fetch_nonce: T) where T: Fn(&Address) -> U256 { - for hash in transaction_hashes { - self.remove(&hash, &fetch_nonce); + for tx in txs { + self.remove(&tx, &fetch_nonce); } } /// Removes transaction identified by hashes from queue. 
/// /// If gap is introduced marks subsequent transactions as future - pub fn remove(&mut self, transaction_hash: &H256, fetch_nonce: &T) + pub fn remove(&mut self, hash: &H256, fetch_nonce: &T) where T: Fn(&Address) -> U256 { - let transaction = self.by_hash.remove(transaction_hash); + let transaction = self.by_hash.remove(hash); if transaction.is_none() { // We don't know this transaction return; @@ -240,6 +240,7 @@ impl TransactionQueue { let sender = transaction.sender(); let nonce = transaction.nonce(); + println!("Removing tx: {:?}", transaction.transaction); // Remove from future self.future.drop(&sender, &nonce); @@ -265,6 +266,7 @@ impl TransactionQueue { // Goes to future or is removed let order = self.current.drop(&sender, &k).unwrap(); if k >= current_nonce { + println!("Moving to future: {:?}", order); self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); } else { self.by_hash.remove(&order.hash); @@ -274,7 +276,7 @@ impl TransactionQueue { // And now lets check if there is some chain of transactions in future // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { + if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce - U256::one(), current_nonce) { self.last_nonces.insert(sender, new_current_top); } } @@ -297,7 +299,9 @@ impl TransactionQueue { self.last_nonces.clear(); } - fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) -> Option { + fn move_future_txs(&mut self, address: Address, current_nonce: U256, first_nonce: U256) -> Option { + println!("Moving from future for: {:?} base: {:?}", current_nonce, first_nonce); + let mut current_nonce = current_nonce + U256::one(); { let by_nonce = self.future.by_address.row_mut(&address); if let None = by_nonce { @@ -308,6 +312,7 @@ impl TransactionQueue { // remove also from priority and hash self.future.by_priority.remove(&order); // Put to current + println!("Moved: {:?}", order); let order = order.update_height(current_nonce.clone(), first_nonce); self.current.insert(address.clone(), current_nonce, order); current_nonce = current_nonce + U256::one(); @@ -328,6 +333,7 @@ impl TransactionQueue { .cloned() .map_or_else(|| fetch_nonce(&address), |n| n + U256::one()); + println!("Expected next: {:?}, got: {:?}", next_nonce, nonce); // Check height if nonce > next_nonce { let order = TransactionOrder::for_transaction(&tx, next_nonce); @@ -339,7 +345,6 @@ impl TransactionQueue { return; } else if next_nonce > nonce { // Droping transaction - trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce); return; } @@ -351,7 +356,7 @@ impl TransactionQueue { // Insert to current self.current.insert(address.clone(), nonce, order); // But maybe there are some more items waiting in future? 
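The promotion logic around move_future_txs is the heart of the queue: when a sender's nonce gap closes, the contiguous run of queued future transactions moves into current. A toy model with a single sender and plain u64 nonces (illustrative only; the real sets also maintain the by_priority and by_hash indexes):

    use std::collections::BTreeMap;

    // One sender's view: `current` holds an unbroken nonce run, `future`
    // everything parked behind a gap.
    fn move_future_txs(
        current: &mut BTreeMap<u64, &'static str>,
        future: &mut BTreeMap<u64, &'static str>,
        first_nonce: u64,
    ) -> Option<u64> {
        let mut next = first_nonce;
        // Promote while the next expected nonce is waiting in `future`.
        while let Some(tx) = future.remove(&next) {
            current.insert(next, tx);
            next += 1;
        }
        // Last promoted nonce, if any: this is what feeds `last_nonces`.
        if next == first_nonce { None } else { Some(next - 1) }
    }

    fn main() {
        let mut current = BTreeMap::new();
        let mut future = BTreeMap::from([(3, "a"), (4, "b"), (6, "c")]);
        // The chain nonce reached 3: nonces 3 and 4 are now contiguous and
        // move to `current`; 6 stays parked behind the new gap at 5.
        assert_eq!(move_future_txs(&mut current, &mut future, 3), Some(4));
        assert_eq!(current.len(), 2);
        assert_eq!(future.len(), 1);
    }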
- let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); + let new_last_nonce = self.move_future_txs(address.clone(), nonce, base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); // Enforce limit self.current.enforce_limit(&self.by_hash); From 0109e5e9d4fa1110b59d907b32a158cd3b3d5762 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 13:03:34 +0100 Subject: [PATCH 03/61] Removing memory leak when transactions are dropped from set --- sync/src/transaction_queue.rs | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 83665dfda..7f9f21638 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -113,22 +113,24 @@ impl TransactionSet { self.by_address.insert(sender, nonce, order); } - fn enforce_limit(&mut self, by_hash: &HashMap) { + fn enforce_limit(&mut self, by_hash: &mut HashMap) { let len = self.by_priority.len(); if len <= self.limit { return; } - let to_drop : Vec<&VerifiedTransaction> = { + let to_drop : Vec<(Address, U256)> = { self.by_priority .iter() .skip(self.limit) .map(|order| by_hash.get(&order.hash).expect("Inconsistency in queue detected.")) + .map(|tx| (tx.sender(), tx.nonce())) .collect() }; - for tx in to_drop { - self.drop(&tx.sender(), &tx.nonce()); + for (sender, nonce) in to_drop { + let order = self.drop(&sender, &nonce).expect("Droping transaction failed."); + by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -270,7 +272,7 @@ impl TransactionQueue { self.by_hash.remove(&order.hash); } } - self.future.enforce_limit(&self.by_hash); + self.future.enforce_limit(&mut self.by_hash); // And now lets check if there is some chain of transactions in future // that should be placed in current @@ -335,7 +337,7 @@ impl TransactionQueue { self.by_hash.insert(tx.hash(), tx); // We have a gap - put to future self.future.insert(address, nonce, order); - self.future.enforce_limit(&self.by_hash); + self.future.enforce_limit(&mut self.by_hash); return; } else if next_nonce > nonce { // Droping transaction @@ -354,7 +356,7 @@ impl TransactionQueue { let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); // Enforce limit - self.current.enforce_limit(&self.by_hash); + self.current.enforce_limit(&mut self.by_hash); } } @@ -413,7 +415,7 @@ mod test { let (tx1, tx2) = new_txs(U256::from(1)); let tx1 = VerifiedTransaction::new(tx1); let tx2 = VerifiedTransaction::new(tx2); - let by_hash = { + let mut by_hash = { let mut x = HashMap::new(); let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); @@ -430,9 +432,10 @@ mod test { assert_eq!(set.by_address.len(), 2); // when - set.enforce_limit(&by_hash); + set.enforce_limit(&mut by_hash); // then + assert_eq!(by_hash.len(), 1); assert_eq!(set.by_priority.len(), 1); assert_eq!(set.by_address.len(), 1); assert_eq!(set.by_priority.iter().next().unwrap().clone(), order1); From 78a39d3ac9b360d59c7acb430182e3fe35c0e096 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 14:34:15 +0100 Subject: [PATCH 04/61] Avoid importing same transaction twice (especially with different nonce_height) --- sync/src/transaction_queue.rs | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 
deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 7f9f21638..51ff211f6 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -129,7 +129,7 @@ impl TransactionSet { }; for (sender, nonce) in to_drop { - let order = self.drop(&sender, &nonce).expect("Droping transaction failed."); + let order = self.drop(&sender, &nonce).expect("Dropping transaction failed."); by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -322,6 +322,12 @@ impl TransactionQueue { fn import_tx(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) where T: Fn(&Address) -> U256 { + + if self.by_hash.get(&tx.hash()).is_some() { + // Transaction is already imported. + return; + } + let nonce = tx.nonce(); let address = tx.sender(); @@ -355,7 +361,6 @@ impl TransactionQueue { // But maybe there are some more items waiting in future? let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); - // Enforce limit self.current.enforce_limit(&mut self.by_hash); } } @@ -636,7 +641,26 @@ mod test { } #[test] - fn should_accept_same_transaction_twice() { + fn should_not_insert_same_transaction_twice() { + // given + let nonce = |a: &Address| default_nonce(a) + U256::one(); + let mut txq = TransactionQueue::new(); + let (_tx1, tx2) = new_txs(U256::from(1)); + txq.add(tx2.clone(), &default_nonce); + assert_eq!(txq.status().future, 1); + assert_eq!(txq.status().pending, 0); + + // when + txq.add(tx2.clone(), &nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 1); + assert_eq!(stats.pending, 0); + } + + #[test] + fn should_accept_same_transaction_twice_if_removed() { // given let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::from(1)); From 765d7179f583245a432ec1f6e7c684836c60edd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 15:43:04 +0100 Subject: [PATCH 05/61] Failing tests for transaction queue --- sync/src/transaction_queue.rs | 77 ++++++++++++++++++++++++++++++++++- 1 file changed, 76 insertions(+), 1 deletion(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 51ff211f6..503af7b16 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -129,7 +129,7 @@ impl TransactionSet { }; for (sender, nonce) in to_drop { - let order = self.drop(&sender, &nonce).expect("Dropping transaction failed."); + let order = self.drop(&sender, &nonce).expect("Dropping transaction found in priority queue failed."); by_hash.remove(&order.hash).expect("Inconsistency in queue."); } } @@ -325,6 +325,7 @@ impl TransactionQueue { if self.by_hash.get(&tx.hash()).is_some() { // Transaction is already imported. 
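The duplicate-import guard visible in the context lines here (introduced in PATCH 04 above and pinned down by should_not_insert_same_transaction_twice) deserves a reduction: by_hash doubles as the membership index, so bailing out early keeps a re-broadcast transaction from inserting a second TransactionOrder (possibly with a different nonce_height, hence a different ordering key) into by_priority. A toy model of the failure mode, with u64 standing in for H256:

    use std::collections::{BTreeSet, HashMap};

    #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
    struct Order { nonce_height: u64, hash: u64 } // ordering includes nonce_height

    #[derive(Default)]
    struct Queue {
        by_hash: HashMap<u64, &'static str>,
        by_priority: BTreeSet<Order>,
    }

    impl Queue {
        fn import(&mut self, hash: u64, tx: &'static str, nonce_height: u64) {
            // The fix: without this check, the same hash imported at two
            // different heights leaves two Order entries for one transaction.
            if self.by_hash.get(&hash).is_some() {
                return;
            }
            self.by_hash.insert(hash, tx);
            self.by_priority.insert(Order { nonce_height, hash });
        }
    }

    fn main() {
        let mut q = Queue::default();
        q.import(0xaa, "tx", 1); // first seen while a nonce gap exists
        q.import(0xaa, "tx", 0); // re-broadcast after the gap closed: ignored
        assert_eq!(q.by_priority.len(), 1);
        assert_eq!(q.by_hash.len(), 1);
    }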
+ trace!(target: "sync", "Dropping already imported transaction with hash: {:?}", tx.hash()); return; } @@ -370,6 +371,7 @@ impl TransactionQueue { mod test { extern crate rustc_serialize; use self::rustc_serialize::hex::FromHex; + use std::ops::Deref; use std::collections::{HashMap, BTreeSet}; use util::crypto::KeyPair; use util::numbers::{U256, Uint}; @@ -702,4 +704,77 @@ mod test { assert_eq!(stats.pending, 2); } + #[test] + fn should_replace_same_transaction_when_has_higher_fee() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx2 = { + let mut tx2 = tx.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx, &default_nonce); + txq.add(tx2, &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.pending, 1); + assert_eq!(stats.future, 0); + assert_eq!(txq.top_transactions(1)[0].gas_price, U256::from(200)); + } + + #[test] + fn should_replace_same_transaction_when_importing_to_futures() { + // given + let mut txq = TransactionQueue::new(); + let keypair = KeyPair::create().unwrap(); + let tx0 = new_unsigned_tx(U256::from(123)).sign(&keypair.secret()); + let tx1 = { + let mut tx1 = tx0.deref().clone(); + tx1.nonce = U256::from(124); + tx1.sign(&keypair.secret()) + }; + let tx2 = { + let mut tx2 = tx1.deref().clone(); + tx2.gas_price = U256::from(200); + tx2.sign(&keypair.secret()) + }; + + // when + txq.add(tx1, &default_nonce); + txq.add(tx2, &default_nonce); + assert_eq!(txq.status().future, 1); + txq.add(tx0, &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 2); + assert_eq!(txq.top_transactions(2)[1].gas_price, U256::from(200)); + } + + #[test] + fn should_recalculate_height_when_removing_from_future() { + // given + let previous_nonce = |a: &Address| default_nonce(a) - U256::one(); + let mut txq = TransactionQueue::new(); + let (tx1, tx2) = new_txs(U256::one()); + txq.add(tx1.clone(), &previous_nonce); + txq.add(tx2, &previous_nonce); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx1.hash(), &default_nonce); + + // then + let stats = txq.status(); + assert_eq!(stats.future, 0); + assert_eq!(stats.pending, 1); + } + + } From 6afa1c85b7862e504ee254ffb123ef14e1607213 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:20:41 +0100 Subject: [PATCH 06/61] Replacing transactions instead of just inserting --- sync/src/transaction_queue.rs | 42 ++++++++++++++++++++++++----------- 1 file changed, 29 insertions(+), 13 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 503af7b16..e05210af2 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -108,9 +108,9 @@ struct TransactionSet { } impl TransactionSet { - fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) { + fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option { self.by_priority.insert(order.clone()); - self.by_address.insert(sender, nonce, order); + self.by_address.insert(sender, nonce, order) } fn enforce_limit(&mut self, by_hash: &mut HashMap) { @@ -332,38 +332,54 @@ impl TransactionQueue { let nonce = tx.nonce(); let address = tx.sender(); + let state_nonce = fetch_nonce(&address); let next_nonce = self.last_nonces .get(&address) .cloned() - .map_or_else(|| fetch_nonce(&address), |n| n + 
U256::one()); + .map_or(state_nonce, |n| n + U256::one()); // Check height if nonce > next_nonce { - let order = TransactionOrder::for_transaction(&tx, next_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); // We have a gap - put to future - self.future.insert(address, nonce, order); + Self::replace_transaction(tx, next_nonce, &mut self.future, &mut self.by_hash); self.future.enforce_limit(&mut self.by_hash); return; - } else if next_nonce > nonce { + } else if nonce < state_nonce { // Droping transaction trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce); return; } let base_nonce = fetch_nonce(&address); - let order = TransactionOrder::for_transaction(&tx, base_nonce); - // Insert to by_hash - self.by_hash.insert(tx.hash(), tx); - // Insert to current - self.current.insert(address.clone(), nonce, order); + Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); // But maybe there are some more items waiting in future? let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); self.current.enforce_limit(&mut self.by_hash); } + + fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap) { + let order = TransactionOrder::for_transaction(&tx, base_nonce); + let hash = tx.hash(); + let address = tx.sender(); + let nonce = tx.nonce(); + + by_hash.insert(hash.clone(), tx); + if let Some(old) = set.insert(address, nonce, order.clone()) { + // There was already transaction in queue. Let's check which one should stay + if old.cmp(&order) == Ordering::Greater { + assert!(old.nonce_height == order.nonce_height, "Both transactions should have the same height."); + // Put back old transaction since it has greater priority (higher gas_price) + set.insert(address, nonce, old); + by_hash.remove(&hash); + } else { + // Make sure we remove old transaction entirely + set.by_priority.remove(&old); + by_hash.remove(&old.hash); + } + } + } } From 0a7fc4af738ed597239036e5c39fca0473c61512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:42:34 +0100 Subject: [PATCH 07/61] Recalculating heights in future when removing transaction --- sync/src/transaction_queue.rs | 68 ++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 24 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index e05210af2..24bb772d7 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -238,26 +238,50 @@ impl TransactionQueue { // We don't know this transaction return; } + let transaction = transaction.unwrap(); let sender = transaction.sender(); let nonce = transaction.nonce(); + let current_nonce = fetch_nonce(&sender); // Remove from future - self.future.drop(&sender, &nonce); - - // Remove from current - let order = self.current.drop(&sender, &nonce); - if order.is_none() { + let order = self.future.drop(&sender, &nonce); + if order.is_some() { + self.recalculate_future_for_sender(&sender, current_nonce); + // And now lets check if there is some chain of transactions in future + // that should be placed in current + self.move_future_txs(sender.clone(), current_nonce, current_nonce); return; } - // Let's remove transactions where tx.nonce < current_nonce - // and if there are any future transactions matching current_nonce+1 - move to current - let current_nonce = 
fetch_nonce(&sender); - // We will either move transaction to future or remove it completely - // so there will be no transactions from this sender in current - self.last_nonces.remove(&sender); + // Remove from current + let order = self.current.drop(&sender, &nonce); + if order.is_some() { + // We will either move transaction to future or remove it completely + // so there will be no transactions from this sender in current + self.last_nonces.remove(&sender); + // This should move all current transactions to future and remove old transactions + self.move_all_to_future(&sender, current_nonce); + // And now lets check if there is some chain of transactions in future + // that should be placed in current. It should also update last_nonces. + self.move_future_txs(sender.clone(), current_nonce, current_nonce); + return; + } + } + fn recalculate_future_for_sender(&mut self, sender: &Address, current_nonce: U256) { + // We need to drain all transactions for current sender from future and reinsert them with updated height + let all_nonces_from_sender = match self.future.by_address.row(&sender) { + Some(row_map) => row_map.keys().cloned().collect::>(), + None => vec![], + }; + for k in all_nonces_from_sender { + let order = self.future.drop(&sender, &k).unwrap(); + self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + } + } + + fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) { let all_nonces_from_sender = match self.current.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), None => vec![], @@ -273,14 +297,9 @@ impl TransactionQueue { } } self.future.enforce_limit(&mut self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } } + /// Returns top transactions from the queue pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority @@ -299,11 +318,11 @@ impl TransactionQueue { self.last_nonces.clear(); } - fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) -> Option { + fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); if let None = by_nonce { - return None; + return; } let mut by_nonce = by_nonce.unwrap(); while let Some(order) = by_nonce.remove(¤t_nonce) { @@ -316,8 +335,8 @@ impl TransactionQueue { } } self.future.by_address.clear_if_empty(&address); - // Returns last inserted nonce - Some(current_nonce - U256::one()) + // Update last inserted nonce + self.last_nonces.insert(address, current_nonce - U256::one()); } fn import_tx(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) @@ -353,9 +372,9 @@ impl TransactionQueue { let base_nonce = fetch_nonce(&address); Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); + self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? 
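recalculate_future_for_sender above works because nonce_height is a derived quantity: it is the transaction's nonce relative to the sender's current chain nonce, so every queued order must be re-derived whenever that base moves. A minimal illustration (the field name follows TransactionOrder, the struct itself is reduced; the arithmetic is the point):

    #[derive(Debug, PartialEq)]
    struct TransactionOrder { nonce_height: u64 } // real struct also carries gas_price and hash

    impl TransactionOrder {
        // Mirrors update_height as called in these hunks: priority is keyed
        // off distance from the sender's current on-chain nonce.
        fn update_height(mut self, nonce: u64, base_nonce: u64) -> TransactionOrder {
            self.nonce_height = nonce - base_nonce;
            self
        }
    }

    fn main() {
        // A future transaction with nonce 7 sits at height 2 while the chain
        // nonce is 5; once the chain nonce advances to 6 it must be re-ranked
        // at height 1, or by_priority keeps a stale ordering key.
        let order = TransactionOrder { nonce_height: 2 };
        assert_eq!(order.update_height(7, 6), TransactionOrder { nonce_height: 1 });
    }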
- let new_last_nonce = self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); - self.last_nonces.insert(address.clone(), new_last_nonce.unwrap_or(nonce)); + self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } @@ -777,6 +796,7 @@ mod test { fn should_recalculate_height_when_removing_from_future() { // given let previous_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next_nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::one()); txq.add(tx1.clone(), &previous_nonce); @@ -784,7 +804,7 @@ mod test { assert_eq!(txq.status().future, 2); // when - txq.remove(&tx1.hash(), &default_nonce); + txq.remove(&tx1.hash(), &next_nonce); // then let stats = txq.status(); From cc3839ae5744ab887c701c9007eda6162cddff2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:46:04 +0100 Subject: [PATCH 08/61] Revert "Revert "Transaction Queue integration"" This reverts commit d330f0b7b7fa5db1b5891d7c1e4e61136603fed5. Conflicts: sync/src/transaction_queue.rs --- Cargo.lock | 19 ++++++ ethcore/src/client.rs | 21 +++++-- ethcore/src/service.rs | 2 +- sync/Cargo.toml | 1 + sync/src/chain.rs | 107 ++++++++++++++++++++++++++++------ sync/src/lib.rs | 14 +++-- sync/src/tests/chain.rs | 51 ++++++++-------- sync/src/tests/helpers.rs | 61 ++++++++++++++----- sync/src/transaction_queue.rs | 17 +++--- 9 files changed, 217 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..510e69b59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -146,6 +146,14 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "deque" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "docopt" version = "0.6.78" @@ -285,6 +293,7 @@ dependencies = [ "heapsize 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rayon 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -655,6 +664,16 @@ dependencies = [ "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "rayon" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "regex" version = "0.1.54" diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..852ba6a36 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -138,6 +138,9 @@ pub trait BlockChainClient : Sync + Send { /// Get block total difficulty. fn block_total_difficulty(&self, id: BlockId) -> Option; + /// Get address nonce. + fn nonce(&self, address: &Address) -> U256; + /// Get block hash. 
fn block_hash(&self, id: BlockId) -> Option; @@ -365,18 +368,14 @@ impl Client where V: Verifier { bad_blocks.insert(header.hash()); continue; } - let closed_block = self.check_and_close_block(&block); if let Err(_) = closed_block { bad_blocks.insert(header.hash()); break; } - - // Insert block - let closed_block = closed_block.unwrap(); - self.chain.write().unwrap().insert_block(&block.bytes, closed_block.block().receipts().clone()); good_blocks.push(header.hash()); + // Are we committing an era? let ancient = if header.number() >= HISTORY { let n = header.number() - HISTORY; let chain = self.chain.read().unwrap(); @@ -386,10 +385,16 @@ impl Client where V: Verifier { }; // Commit results + let closed_block = closed_block.unwrap(); + let receipts = closed_block.block().receipts().clone(); closed_block.drain() .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); + // And update the chain + self.chain.write().unwrap() + .insert_block(&block.bytes, receipts); + self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } @@ -408,7 +413,7 @@ impl Client where V: Verifier { if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { good: good_blocks, - bad: bad_blocks, + retracted: bad_blocks, })).unwrap(); } } @@ -581,6 +586,10 @@ impl BlockChainClient for Client where V: Verifier { Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty) } + fn nonce(&self, address: &Address) -> U256 { + self.state().nonce(address) + } + fn block_hash(&self, id: BlockId) -> Option { let chain = self.chain.read().unwrap(); Self::block_hash(&chain, id) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 756d02407..a80adb0ba 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -30,7 +30,7 @@ pub enum SyncMessage { /// Hashes of blocks imported to blockchain good: Vec, /// Hashes of blocks not imported to blockchain - bad: Vec, + retracted: Vec, }, /// A block is ready BlockVerified, diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..0097cd47e 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -17,6 +17,7 @@ time = "0.1.34" rand = "0.3.13" heapsize = "0.3" rustc-serialize = "0.3" +rayon = "0.3.1" [features] default = [] diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 530cfa424..ddf30854a 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -30,14 +30,17 @@ /// use util::*; +use rayon::prelude::*; use std::mem::{replace}; -use ethcore::views::{HeaderView}; +use ethcore::views::{HeaderView, BlockView}; use ethcore::header::{BlockNumber, Header as BlockHeader}; use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo}; use range_collection::{RangeCollection, ToUsize, FromUsize}; use ethcore::error::*; use ethcore::block::Block; +use ethcore::transaction::SignedTransaction; use io::SyncIo; +use transaction_queue::TransactionQueue; use time; use super::SyncConfig; @@ -209,6 +212,8 @@ pub struct ChainSync { max_download_ahead_blocks: usize, /// Network ID network_id: U256, + /// Transactions Queue + transaction_queue: Mutex, } type RlpResponseResult = Result, PacketDecodeError>; @@ -234,6 +239,7 @@ impl ChainSync { last_send_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, + transaction_queue: Mutex::new(TransactionQueue::new()), } } @@ 
-292,6 +298,7 @@ impl ChainSync { self.starting_block = 0; self.highest_block = None; self.have_common_block = false; + self.transaction_queue.lock().unwrap().clear(); self.starting_block = io.chain().chain_info().best_block_number; self.state = SyncState::NotSynced; } @@ -484,7 +491,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } @@ -921,8 +928,16 @@ impl ChainSync { } } /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { - Ok(()) + fn on_peer_transactions(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + let chain = io.chain(); + let item_count = r.item_count(); + trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); + let fetch_latest_nonce = |a : &Address| chain.nonce(a); + for i in 0..item_count { + let tx: SignedTransaction = try!(r.val_at(i)); + self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce); + } + Ok(()) } /// Send Status message @@ -1248,6 +1263,37 @@ impl ChainSync { } self.last_send_block_number = chain.best_block_number; } + + /// called when block is imported to chain, updates transactions queue + pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) { + fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { + let block = chain + .block(BlockId::Hash(hash.clone())) + // Client should send message after commit to db and inserting to chain. + .expect("Expected in-chain blocks."); + let block = BlockView::new(&block); + block.transactions() + } + + + let chain = io.chain(); + let good = good.par_iter().map(|h| fetch_transactions(chain, h)); + let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h)); + + good.for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); + transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); + }); + retracted.for_each(|txs| { + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(txs, |a| chain.nonce(a)); + }); + } } #[cfg(test)] @@ -1388,7 +1434,7 @@ mod tests { #[test] fn finds_lagging_peers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10)); let chain_info = client.chain_info(); @@ -1402,7 +1448,7 @@ mod tests { #[test] fn calculates_tree_for_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(15, false); + client.add_blocks(15, EachBlockWith::Uncle); let start = client.block_hash_delta_minus(4); let end = client.block_hash_delta_minus(2); @@ -1419,7 +1465,7 @@ mod tests { #[test] fn sends_new_hashes_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1438,7 +1484,7 @@ mod tests { 
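Every test hunk in this re-apply makes the same mechanical substitution: add_blocks no longer takes an empty: bool flag but an EachBlockWith variant, so tests can ask for blocks carrying uncles, transactions, both, or neither. The helper API these hunks assume, restored in full in the helpers.rs diff later in this patch:

    // Restored in sync/src/tests/helpers.rs by this patch.
    #[derive(Clone)]
    pub enum EachBlockWith {
        Nothing,
        Uncle,
        Transaction,
        UncleAndTransaction,
    }

    // Before: client.add_blocks(100, false);                 // bool meant "empty?"
    // After:  client.add_blocks(100, EachBlockWith::Uncle);
    //         client.add_blocks(1, EachBlockWith::UncleAndTransaction);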
#[test] fn sends_latest_block_to_lagging_peer() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1456,7 +1502,7 @@ mod tests { #[test] fn handles_peer_new_block_mallformed() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_block(11, client.chain_info().best_block_hash); @@ -1474,7 +1520,7 @@ mod tests { #[test] fn handles_peer_new_block() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash); @@ -1492,7 +1538,7 @@ mod tests { #[test] fn handles_peer_new_block_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1508,7 +1554,7 @@ mod tests { #[test] fn handles_peer_new_hashes() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1524,7 +1570,7 @@ mod tests { #[test] fn handles_peer_new_hashes_empty() { let mut client = TestBlockChainClient::new(); - client.add_blocks(10, false); + client.add_blocks(10, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let mut io = TestIo::new(&mut client, &mut queue, None); @@ -1542,7 +1588,7 @@ mod tests { #[test] fn hashes_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1560,7 +1606,7 @@ mod tests { #[test] fn block_rlp_mutually_acceptable() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); let chain_info = client.chain_info(); @@ -1573,10 +1619,37 @@ mod tests { assert!(result.is_ok()); } + #[test] + fn should_add_transactions_to_queue() { + // given + let mut client = TestBlockChainClient::new(); + client.add_blocks(98, EachBlockWith::Uncle); + client.add_blocks(1, EachBlockWith::UncleAndTransaction); + client.add_blocks(1, EachBlockWith::Transaction); + let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5)); + + let good_blocks = vec![client.block_hash_delta_minus(2)]; + let retracted_blocks = vec![client.block_hash_delta_minus(1)]; + + let mut queue = VecDeque::new(); + let io = TestIo::new(&mut client, &mut queue, None); + + // when + sync.chain_new_blocks(&io, &[], &good_blocks); + assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); + assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); + sync.chain_new_blocks(&io, &good_blocks, 
&retracted_blocks); + + // then + let status = sync.transaction_queue.lock().unwrap().status(); + assert_eq!(status.pending, 1); + assert_eq!(status.future, 0); + } + #[test] fn returns_requested_block_headers() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); @@ -1600,7 +1673,7 @@ mod tests { #[test] fn returns_requested_block_headers_reverse() { let mut client = TestBlockChainClient::new(); - client.add_blocks(100, false); + client.add_blocks(100, EachBlockWith::Uncle); let mut queue = VecDeque::new(); let io = TestIo::new(&mut client, &mut queue, None); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..d67a09f3b 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -54,6 +54,7 @@ extern crate ethcore; extern crate env_logger; extern crate time; extern crate rand; +extern crate rayon; #[macro_use] extern crate heapsize; @@ -70,8 +71,7 @@ use io::NetSyncIo; mod chain; mod io; mod range_collection; -// TODO [todr] Made public to suppress dead code warnings -pub mod transaction_queue; +mod transaction_queue; #[cfg(test)] mod tests; @@ -153,8 +153,14 @@ impl NetworkProtocolHandler for EthSync { } fn message(&self, io: &NetworkContext, message: &SyncMessage) { - if let SyncMessage::BlockVerified = *message { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); + match *message { + SyncMessage::BlockVerified => { + self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); + }, + SyncMessage::NewChainBlocks { ref good, ref retracted } => { + let sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted); + } } } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index b01c894a0..58f50916e 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -24,8 +24,8 @@ use super::helpers::*; fn two_peers() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); @@ -35,8 +35,8 @@ fn two_peers() { fn status_after_sync() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync(); let status = net.peer(0).sync.status(); assert_eq!(status.state, SyncState::Idle); @@ -45,8 +45,8 @@ fn status_after_sync() { #[test] fn takes_few_steps() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(100, false); - net.peer_mut(2).chain.add_blocks(100, false); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(100, EachBlockWith::Uncle); let total_steps = net.sync(); assert!(total_steps < 7); } @@ -56,8 +56,9 @@ fn empty_blocks() { ::env_logger::init().ok(); let mut net = TestNet::new(3); for n in 0..200 { - net.peer_mut(1).chain.add_blocks(5, n % 2 == 0); - 
net.peer_mut(2).chain.add_blocks(5, n % 2 == 0); + let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; + net.peer_mut(1).chain.add_blocks(5, with.clone()); + net.peer_mut(2).chain.add_blocks(5, with); } net.sync(); assert!(net.peer(0).chain.block(BlockId::Number(1000)).is_some()); @@ -68,14 +69,14 @@ fn empty_blocks() { fn forked() { ::env_logger::init().ok(); let mut net = TestNet::new(3); - net.peer_mut(0).chain.add_blocks(300, false); - net.peer_mut(1).chain.add_blocks(300, false); - net.peer_mut(2).chain.add_blocks(300, false); - net.peer_mut(0).chain.add_blocks(100, true); //fork - net.peer_mut(1).chain.add_blocks(200, false); - net.peer_mut(2).chain.add_blocks(200, false); - net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 - net.peer_mut(2).chain.add_blocks(10, true); + net.peer_mut(0).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(300, EachBlockWith::Uncle); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Nothing); //fork + net.peer_mut(1).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(200, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(100, EachBlockWith::Uncle); //fork between 1 and 2 + net.peer_mut(2).chain.add_blocks(10, EachBlockWith::Nothing); // peer 1 has the best chain of 601 blocks let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); @@ -87,8 +88,8 @@ fn forked() { #[test] fn restart() { let mut net = TestNet::new(3); - net.peer_mut(1).chain.add_blocks(1000, false); - net.peer_mut(2).chain.add_blocks(1000, false); + net.peer_mut(1).chain.add_blocks(1000, EachBlockWith::Uncle); + net.peer_mut(2).chain.add_blocks(1000, EachBlockWith::Uncle); net.sync_steps(8); @@ -109,8 +110,8 @@ fn status_empty() { #[test] fn status_packet() { let mut net = TestNet::new(2); - net.peer_mut(0).chain.add_blocks(100, false); - net.peer_mut(1).chain.add_blocks(1, false); + net.peer_mut(0).chain.add_blocks(100, EachBlockWith::Uncle); + net.peer_mut(1).chain.add_blocks(1, EachBlockWith::Uncle); net.start(); @@ -123,10 +124,10 @@ fn status_packet() { #[test] fn propagate_hashes() { let mut net = TestNet::new(6); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -149,10 +150,10 @@ fn propagate_hashes() { #[test] fn propagate_blocks() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.peer_mut(0).chain.add_blocks(10, false); + net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.trigger_block_verified(0); //first event just sets the marker net.trigger_block_verified(0); @@ -164,7 +165,7 @@ fn propagate_blocks() { #[test] fn restart_on_malformed_block() { let mut net = TestNet::new(2); - net.peer_mut(1).chain.add_blocks(10, false); + net.peer_mut(1).chain.add_blocks(10, EachBlockWith::Uncle); net.peer_mut(1).chain.corrupt_block(6); net.sync_steps(10); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index e170a4a85..5b53ad90b 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -22,7 +22,7 @@ use io::SyncIo; use chain::ChainSync; use 
::SyncConfig; use ethcore::receipt::Receipt; -use ethcore::transaction::LocalizedTransaction; +use ethcore::transaction::{LocalizedTransaction, Transaction, Action}; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; @@ -34,6 +34,14 @@ pub struct TestBlockChainClient { pub difficulty: RwLock, } +#[derive(Clone)] +pub enum EachBlockWith { + Nothing, + Uncle, + Transaction, + UncleAndTransaction +} + impl TestBlockChainClient { pub fn new() -> TestBlockChainClient { @@ -44,30 +52,53 @@ impl TestBlockChainClient { last_hash: RwLock::new(H256::new()), difficulty: RwLock::new(From::from(0)), }; - client.add_blocks(1, true); // add genesis block + client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().unwrap().clone(); client } - pub fn add_blocks(&mut self, count: usize, empty: bool) { + pub fn add_blocks(&mut self, count: usize, with: EachBlockWith) { let len = self.numbers.read().unwrap().len(); for n in len..(len + count) { let mut header = BlockHeader::new(); header.difficulty = From::from(n); header.parent_hash = self.last_hash.read().unwrap().clone(); header.number = n as BlockNumber; - let mut uncles = RlpStream::new_list(if empty {0} else {1}); - if !empty { - let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); - uncle_header.number = n as BlockNumber; - uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); - } + let uncles = match with { + EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { + let mut uncles = RlpStream::new_list(1); + let mut uncle_header = BlockHeader::new(); + uncle_header.difficulty = From::from(n); + uncle_header.parent_hash = self.last_hash.read().unwrap().clone(); + uncle_header.number = n as BlockNumber; + uncles.append(&uncle_header); + header.uncles_hash = uncles.as_raw().sha3(); + uncles + }, + _ => RlpStream::new_list(0) + }; + let txs = match with { + EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { + let mut txs = RlpStream::new_list(1); + let keypair = KeyPair::create().unwrap(); + let tx = Transaction { + action: Action::Create, + value: U256::from(100), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::one(), + nonce: U256::zero() + }; + let signed_tx = tx.sign(&keypair.secret()); + txs.append(&signed_tx); + txs.out() + }, + _ => rlp::NULL_RLP.to_vec() + }; + let mut rlp = RlpStream::new_list(3); rlp.append(&header); - rlp.append_raw(&rlp::NULL_RLP, 1); + rlp.append_raw(&txs, 1); rlp.append_raw(uncles.as_raw(), 1); self.import_block(rlp.as_raw().to_vec()).unwrap(); } @@ -109,6 +140,10 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn nonce(&self, _address: &Address) -> U256 { + U256::zero() + } + fn code(&self, _address: &Address) -> Option { unimplemented!(); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 100435530..8b38c64ad 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -221,19 +221,19 @@ impl TransactionQueue { /// Removes all transactions identified by hashes given in slice /// /// If gap is introduced marks subsequent transactions as future - pub fn remove_all(&mut self, txs: &[H256], fetch_nonce: T) + pub fn remove_all(&mut self, transaction_hashes: &[H256], fetch_nonce: T) where T: Fn(&Address) -> U256 { - for tx in txs { - self.remove(&tx, &fetch_nonce); + for hash in 
transaction_hashes { + self.remove(&hash, &fetch_nonce); } } /// Removes transaction identified by hashes from queue. /// /// If gap is introduced marks subsequent transactions as future - pub fn remove(&mut self, hash: &H256, fetch_nonce: &T) + pub fn remove(&mut self, transaction_hash: &H256, fetch_nonce: &T) where T: Fn(&Address) -> U256 { - let transaction = self.by_hash.remove(hash); + let transaction = self.by_hash.remove(transaction_hash); if transaction.is_none() { // We don't know this transaction return; @@ -244,7 +244,6 @@ impl TransactionQueue { let nonce = transaction.nonce(); let current_nonce = fetch_nonce(&sender); - println!("Removing tx: {:?}", transaction.transaction); // Remove from future let order = self.future.drop(&sender, &nonce); if order.is_some() { @@ -292,7 +291,6 @@ impl TransactionQueue { // Goes to future or is removed let order = self.current.drop(&sender, &k).unwrap(); if k >= current_nonce { - println!("Moving to future: {:?}", order); self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); } else { self.by_hash.remove(&order.hash); @@ -302,7 +300,7 @@ impl TransactionQueue { // And now lets check if there is some chain of transactions in future // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce - U256::one(), current_nonce) { + if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { self.last_nonces.insert(sender, new_current_top); } } @@ -337,7 +335,6 @@ impl TransactionQueue { // remove also from priority and hash self.future.by_priority.remove(&order); // Put to current - println!("Moved: {:?}", order); let order = order.update_height(current_nonce.clone(), first_nonce); self.current.insert(address.clone(), current_nonce, order); current_nonce = current_nonce + U256::one(); @@ -366,7 +363,6 @@ impl TransactionQueue { .cloned() .map_or(state_nonce, |n| n + U256::one()); - println!("Expected next: {:?}, got: {:?}", next_nonce, nonce); // Check height if nonce > next_nonce { // We have a gap - put to future @@ -375,6 +371,7 @@ impl TransactionQueue { return; } else if nonce < state_nonce { // Droping transaction + trace!(target: "sync", "Dropping transaction with nonce: {} - expecting: {}", nonce, next_nonce); return; } From 8915974cf0aba8e26f27294cb3510edd600252fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 16:48:03 +0100 Subject: [PATCH 09/61] Fixing compilation --- sync/src/transaction_queue.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 8b38c64ad..24bb772d7 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -297,12 +297,6 @@ impl TransactionQueue { } } self.future.enforce_limit(&mut self.by_hash); - - // And now lets check if there is some chain of transactions in future - // that should be placed in current - if let Some(new_current_top) = self.move_future_txs(sender.clone(), current_nonce, current_nonce) { - self.last_nonces.insert(sender, new_current_top); - } } From c13afcf40485411788b6817cb324a23f9de32d59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:06:04 +0100 Subject: [PATCH 10/61] Removing assertion and just comparing fees --- sync/src/transaction_queue.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 24bb772d7..f14d94c8c 
100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -387,8 +387,9 @@ impl TransactionQueue { by_hash.insert(hash.clone(), tx); if let Some(old) = set.insert(address, nonce, order.clone()) { // There was already transaction in queue. Let's check which one should stay - if old.cmp(&order) == Ordering::Greater { - assert!(old.nonce_height == order.nonce_height, "Both transactions should have the same height."); + let old_fee = old.gas_price; + let new_fee = order.gas_price; + if old_fee.cmp(&new_fee) == Ordering::Greater { // Put back old transaction since it has greater priority (higher gas_price) set.insert(address, nonce, old); by_hash.remove(&hash); From 18cbea394d53abde1780efbb1cc3ea0c8e678ce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:14:48 +0100 Subject: [PATCH 11/61] Small renaming --- sync/src/transaction_queue.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index f14d94c8c..463607cae 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -247,10 +247,10 @@ impl TransactionQueue { // Remove from future let order = self.future.drop(&sender, &nonce); if order.is_some() { - self.recalculate_future_for_sender(&sender, current_nonce); + self.update_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current - self.move_future_txs(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); return; } @@ -264,12 +264,12 @@ impl TransactionQueue { self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future // that should be placed in current. It should also update last_nonces. - self.move_future_txs(sender.clone(), current_nonce, current_nonce); + self.move_matching_future_to_current(sender.clone(), current_nonce, current_nonce); return; } } - fn recalculate_future_for_sender(&mut self, sender: &Address, current_nonce: U256) { + fn update_future(&mut self, sender: &Address, current_nonce: U256) { // We need to drain all transactions for current sender from future and reinsert them with updated height let all_nonces_from_sender = match self.future.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), @@ -318,7 +318,7 @@ impl TransactionQueue { self.last_nonces.clear(); } - fn move_future_txs(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { + fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); if let None = by_nonce { @@ -374,7 +374,7 @@ impl TransactionQueue { Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? 
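// A minimal sketch of the promotion step performed by
// move_matching_future_to_current, reduced to one sender and plain u64
// nonces. The real queue orders by (sender, nonce) and moves whole verified
// transactions; promote_contiguous is a hypothetical helper, not this
// module's API.
use std::collections::BTreeMap;

fn promote_contiguous(
	current: &mut BTreeMap<u64, String>,
	future: &mut BTreeMap<u64, String>,
	mut next_nonce: u64,
) -> u64 {
	// Pull a gap-free run of nonces out of `future` into `current`,
	// stopping at the first missing nonce.
	while let Some(tx) = future.remove(&next_nonce) {
		current.insert(next_nonce, tx);
		next_nonce += 1;
	}
	// The caller records this as the sender's new last-known nonce bound.
	next_nonce
}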
- self.move_future_txs(address.clone(), nonce + U256::one(), base_nonce); + self.move_matching_future_to_current(address.clone(), nonce + U256::one(), base_nonce); self.current.enforce_limit(&mut self.by_hash); } From 4a53d62be436513d81209db0d4a88ccc0f3aef06 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sat, 5 Mar 2016 17:41:35 +0100 Subject: [PATCH 12/61] Fixing inconsistency when replacing transactions in queue --- sync/src/transaction_queue.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 463607cae..b98772199 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -348,8 +348,8 @@ impl TransactionQueue { return; } - let nonce = tx.nonce(); let address = tx.sender(); + let nonce = tx.nonce(); let state_nonce = fetch_nonce(&address); let next_nonce = self.last_nonces @@ -370,7 +370,6 @@ impl TransactionQueue { } let base_nonce = fetch_nonce(&address); - Self::replace_transaction(tx, base_nonce.clone(), &mut self.current, &mut self.by_hash); self.last_nonces.insert(address.clone(), nonce); // But maybe there are some more items waiting in future? @@ -391,7 +390,9 @@ impl TransactionQueue { let new_fee = order.gas_price; if old_fee.cmp(&new_fee) == Ordering::Greater { // Put back old transaction since it has greater priority (higher gas_price) - set.insert(address, nonce, old); + set.by_address.insert(address, nonce, old); + // and remove new one + set.by_priority.remove(&order); by_hash.remove(&hash); } else { // Make sure we remove old transaction entirely From 57e6e1e1b59188cdf8d378b81c33842d5c5feaf7 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 20:15:19 +0300 Subject: [PATCH 13/61] [ci ship] redundant lines --- sync/src/transaction_queue.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index b98772199..3e0d931b5 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -813,6 +813,4 @@ mod test { assert_eq!(stats.future, 0); assert_eq!(stats.pending, 1); } - - } From e100ecbeacf6bac923f4b8e416621f98668b4830 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sat, 5 Mar 2016 23:47:28 +0300 Subject: [PATCH 14/61] exposing in lib --- sync/src/chain.rs | 4 ++++ sync/src/lib.rs | 10 ++++++++++ 2 files changed, 14 insertions(+) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ddf30854a..fd1771045 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1294,6 +1294,10 @@ impl ChainSync { transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } + + pub fn transaction_queue(&self) -> &Mutex { + return &self.transaction_queue; + } } #[cfg(test)] diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67a09f3b..a6480b0ad 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -128,6 +128,16 @@ impl EthSync { pub fn restart(&mut self, io: &mut NetworkContext) { self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref())); } + + /// Insert transaction in transaction queue + pub fn insert_transaction(&self, transaction: SignedTransaction) { + use util::numbers::*; + + let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); + let sync = self.sync.write().unwrap(); + let mut queue = sync.transaction_queue().lock().unwrap(); + queue.add(transaction, &nonce_fn); + } } impl NetworkProtocolHandler for EthSync { From ad8135668392aa733dea014231a3a56469dae5fc Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Sun, 6 
Mar 2016 00:48:00 +0300 Subject: [PATCH 15/61] fix namespace --- sync/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index a6480b0ad..e352144bd 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -130,7 +130,7 @@ impl EthSync { } /// Insert transaction in transaction queue - pub fn insert_transaction(&self, transaction: SignedTransaction) { + pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction) { use util::numbers::*; let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); From 003d1fd0cc1c1f815c8f0b772baefd374dab67aa Mon Sep 17 00:00:00 2001 From: arkpar Date: Sat, 5 Mar 2016 23:09:51 +0100 Subject: [PATCH 16/61] Network tracing improvements --- sync/src/chain.rs | 2 +- util/src/network/connection.rs | 10 ++++---- util/src/network/handshake.rs | 14 +++++------ util/src/network/host.rs | 43 +++++++++++++++++++--------------- util/src/network/session.rs | 9 ++++--- 5 files changed, 43 insertions(+), 35 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 530cfa424..63640f87f 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -575,7 +575,7 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); if let Err(e) = self.send_status(io) { - warn!(target:"sync", "Error sending status request: {:?}", e); + trace!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } } diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index 55e688c91..fe65be6d1 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -190,25 +190,25 @@ impl Connection { /// Register this connection with the IO event loop. pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection register; token={:?}", reg); + trace!(target: "network", "connection register; token={:?}", reg); if let Err(e) = event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()) { - debug!("Failed to register {:?}, {:?}", reg, e); + trace!(target: "network", "Failed to register {:?}, {:?}", reg, e); } Ok(()) } /// Update connection registration. Should be called at the end of the IO handler. pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection reregister; token={:?}", reg); + trace!(target: "network", "connection reregister; token={:?}", reg); event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - debug!("Failed to reregister {:?}, {:?}", reg, e); + trace!(target: "network", "Failed to reregister {:?}, {:?}", reg, e); Ok(()) }) } /// Delete connection registration. Should be called at the end of the IO handler. 
pub fn deregister_socket(&self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection deregister; token={:?}", self.token); + trace!(target: "network", "connection deregister; token={:?}", self.token); event_loop.deregister(&self.socket).ok(); // ignore errors here Ok(()) } diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index cca133ba9..a72cc28ad 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -222,7 +222,7 @@ impl Handshake { /// Parse, validate and confirm auth message fn read_auth(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received handshake auth from {:?}", self.connection.socket.peer_addr()); if data.len() != V4_AUTH_PACKET_SIZE { debug!(target:"net", "Wrong auth packet size"); return Err(From::from(NetworkError::BadProtocol)); @@ -253,7 +253,7 @@ impl Handshake { } fn read_auth_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); self.auth_cipher.extend_from_slice(data); let auth = try!(ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])); let rlp = UntrustedRlp::new(&auth); @@ -268,7 +268,7 @@ impl Handshake { /// Parse and validate ack message fn read_ack(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received handshake auth to {:?}", self.connection.socket.peer_addr()); if data.len() != V4_ACK_PACKET_SIZE { debug!(target:"net", "Wrong ack packet size"); return Err(From::from(NetworkError::BadProtocol)); @@ -296,7 +296,7 @@ impl Handshake { } fn read_ack_eip8(&mut self, secret: &Secret, data: &[u8]) -> Result<(), UtilError> { - trace!(target:"net", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Received EIP8 handshake auth from {:?}", self.connection.socket.peer_addr()); self.ack_cipher.extend_from_slice(data); let ack = try!(ecies::decrypt(secret, &self.ack_cipher[0..2], &self.ack_cipher[2..])); let rlp = UntrustedRlp::new(&ack); @@ -309,7 +309,7 @@ impl Handshake { /// Sends auth message fn write_auth(&mut self, secret: &Secret, public: &Public) -> Result<(), UtilError> { - trace!(target:"net", "Sending handshake auth to {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Sending handshake auth to {:?}", self.connection.socket.peer_addr()); let mut data = [0u8; /*Signature::SIZE*/ 65 + /*H256::SIZE*/ 32 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32 + 1]; //TODO: use associated constants let len = data.len(); { @@ -336,7 +336,7 @@ impl Handshake { /// Sends ack message fn write_ack(&mut self) -> Result<(), UtilError> { - trace!(target:"net", "Sending handshake ack to {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Sending handshake ack to {:?}", self.connection.socket.peer_addr()); let mut data = [0u8; 1 + /*Public::SIZE*/ 64 + /*H256::SIZE*/ 32]; //TODO: use associated constants let len = data.len(); { @@ -355,7 +355,7 @@ impl Handshake { /// Sends EIP8 ack message fn write_ack_eip8(&mut self) -> Result<(), UtilError> { - trace!(target:"net", "Sending EIP8 
handshake ack to {:?}", self.connection.socket.peer_addr()); + trace!(target:"network", "Sending EIP8 handshake ack to {:?}", self.connection.socket.peer_addr()); let mut rlp = RlpStream::new_list(3); rlp.append(self.ecdhe.public()); rlp.append(&self.nonce); diff --git a/util/src/network/host.rs b/util/src/network/host.rs index f2cc9fe48..ece24a1d1 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -170,29 +170,37 @@ pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'sta io: &'s IoContext>, protocol: ProtocolId, sessions: Arc>>, - session: Option, + session: Option, + session_id: Option, } impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, { /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler. fn new(io: &'s IoContext>, protocol: ProtocolId, - session: Option, sessions: Arc>>) -> NetworkContext<'s, Message> { + session: Option, sessions: Arc>>) -> NetworkContext<'s, Message> { + let id = session.as_ref().map(|s| s.lock().unwrap().token()); NetworkContext { io: io, protocol: protocol, + session_id: id, session: session, sessions: sessions, } } + fn resolve_session(&self, peer: PeerId) -> Option { + match self.session_id { + Some(id) if id == peer => self.session.clone(), + _ => self.sessions.read().unwrap().get(peer).cloned(), + } + } + /// Send a packet over the network to another peer. pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { - let session = { self.sessions.read().unwrap().get(peer).cloned() }; + let session = self.resolve_session(peer); if let Some(session) = session { - session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "network", "Send error: {:?}", e); - }); //TODO: don't copy vector data + try!(session.lock().unwrap().deref_mut().send_packet(self.protocol, packet_id as u8, &data)); try!(self.io.update_registration(peer)); } else { trace!(target: "network", "Send: Peer no longer exist") @@ -200,14 +208,10 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone Ok(()) } - /// Respond to a current network message. Panics if no there is no packet in the context. + /// Respond to a current network message. Panics if no there is no packet in the context. If the session is expired returns nothing. pub fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { - match self.session { - Some(session) => self.send(session, packet_id, data), - None => { - panic!("Respond: Session does not exist") - } - } + assert!(self.session.is_some(), "Respond called without network context"); + self.send(self.session_id.unwrap(), packet_id, data) } /// Send an IO message @@ -215,7 +219,6 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone self.io.message(NetworkIoMessage::User(msg)); } - /// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected. 
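// A minimal sketch of the rationale for resolve_session above, with toy types
// standing in for the real shared-session machinery: packets addressed to the
// peer currently being serviced must reuse the session handle captured in the
// context, because the entry in the shared table may already be expired or
// replaced; only other peers go through the table.
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

type PeerId = usize;
struct Session; // placeholder

struct Ctx {
	session_id: Option<PeerId>,
	session: Option<Arc<Mutex<Session>>>,
	sessions: Arc<RwLock<HashMap<PeerId, Arc<Mutex<Session>>>>>,
}

impl Ctx {
	fn resolve_session(&self, peer: PeerId) -> Option<Arc<Mutex<Session>>> {
		match self.session_id {
			// Current session: use the handle captured at context creation.
			Some(id) if id == peer => self.session.clone(),
			// Anyone else: look up the shared table.
			_ => self.sessions.read().unwrap().get(&peer).cloned(),
		}
	}
}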
pub fn disable_peer(&self, peer: PeerId) { //TODO: remove capability, disconnect if no capabilities left @@ -239,7 +242,7 @@ impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone /// Returns peer identification string pub fn peer_info(&self, peer: PeerId) -> String { - let session = { self.sessions.read().unwrap().get(peer).cloned() }; + let session = self.resolve_session(peer); if let Some(session) = session { return session.lock().unwrap().info.client_version.clone() } @@ -624,7 +627,7 @@ impl Host where Message: Send + Sync + Clone { let mut packet_data: Option<(ProtocolId, PacketId, Vec)> = None; let mut kill = false; let session = { self.sessions.read().unwrap().get(token).cloned() }; - if let Some(session) = session { + if let Some(session) = session.clone() { let mut s = session.lock().unwrap(); match s.readable(io, &self.info.read().unwrap()) { Err(e) => { @@ -656,11 +659,11 @@ impl Host where Message: Send + Sync + Clone { } for p in ready_data { let h = self.handlers.read().unwrap().get(p).unwrap().clone(); - h.connected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token); + h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token); } if let Some((p, packet_id, data)) = packet_data { let h = self.handlers.read().unwrap().get(p).unwrap().clone(); - h.read(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token, packet_id, &data[1..]); + h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone()), &token, packet_id, &data[1..]); } io.update_registration(token).unwrap_or_else(|e| debug!(target: "network", "Token registration error: {:?}", e)); } @@ -718,6 +721,7 @@ impl Host where Message: Send + Sync + Clone { let mut to_disconnect: Vec = Vec::new(); let mut failure_id = None; let mut deregister = false; + let mut expired_session = None; match token { FIRST_HANDSHAKE ... LAST_HANDSHAKE => { let handshakes = self.handshakes.write().unwrap(); @@ -733,6 +737,7 @@ impl Host where Message: Send + Sync + Clone { FIRST_SESSION ... LAST_SESSION => { let sessions = self.sessions.write().unwrap(); if let Some(session) = sessions.get(token).cloned() { + expired_session = Some(session.clone()); let mut s = session.lock().unwrap(); if !s.expired() { if s.is_ready() { @@ -757,7 +762,7 @@ impl Host where Message: Send + Sync + Clone { } for p in to_disconnect { let h = self.handlers.read().unwrap().get(p).unwrap().clone(); - h.disconnected(&NetworkContext::new(io, p, Some(token), self.sessions.clone()), &token); + h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone()), &token); } if deregister { io.deregister_stream(token).expect("Error deregistering stream"); diff --git a/util/src/network/session.rs b/util/src/network/session.rs index edf929a9a..84c063c92 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -213,6 +213,9 @@ impl Session { /// Send a protocol packet to peer. 
pub fn send_packet(&mut self, protocol: &str, packet_id: u8, data: &[u8]) -> Result<(), UtilError> { + if self.expired() { + return Err(From::from(NetworkError::Expired)); + } let mut i = 0usize; while protocol != self.info.capabilities[i].protocol { i += 1; @@ -351,15 +354,15 @@ impl Session { offset += caps[i].packet_count; i += 1; } - trace!(target: "net", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); + trace!(target: "network", "Hello: {} v{} {} {:?}", client_version, protocol, id, caps); self.info.client_version = client_version; self.info.capabilities = caps; if self.info.capabilities.is_empty() { - trace!("No common capabilities with peer."); + trace!(target: "network", "No common capabilities with peer."); return Err(From::from(self.disconnect(DisconnectReason::UselessPeer))); } if protocol != host.protocol_version { - trace!("Peer protocol version mismatch: {}", protocol); + trace!(target: "network", "Peer protocol version mismatch: {}", protocol); return Err(From::from(self.disconnect(DisconnectReason::UselessPeer))); } self.had_hello = true; From aaf2e0c3fbdc0cd3e125988e96987486d73bf395 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 6 Mar 2016 11:04:13 +0100 Subject: [PATCH 17/61] Locking outside of loop --- sync/src/chain.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index ddf30854a..a8bcb653f 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -933,9 +933,11 @@ impl ChainSync { let item_count = r.item_count(); trace!(target: "sync", "{} -> Transactions ({} entries)", peer_id, item_count); let fetch_latest_nonce = |a : &Address| chain.nonce(a); + + let mut transaction_queue = self.transaction_queue.lock().unwrap(); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - self.transaction_queue.lock().unwrap().add(tx, &fetch_latest_nonce); + transaction_queue.add(tx, &fetch_latest_nonce); } Ok(()) } From e91de785281d59b591079c16c798d7252991b593 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 6 Mar 2016 11:11:59 +0100 Subject: [PATCH 18/61] Renaming back bad as retracted --- ethcore/src/client.rs | 4 +++- ethcore/src/service.rs | 2 ++ sync/src/chain.rs | 10 +++++----- sync/src/lib.rs | 4 ++-- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 852ba6a36..123847a7f 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -413,7 +413,9 @@ impl Client where V: Verifier { if !good_blocks.is_empty() && block_queue.queue_info().is_empty() { io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks { good: good_blocks, - retracted: bad_blocks, + bad: bad_blocks, + // TODO [todr] were to take those from? 
+ retracted: vec![], })).unwrap(); } } diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index a80adb0ba..443d09e3b 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -30,6 +30,8 @@ pub enum SyncMessage { /// Hashes of blocks imported to blockchain good: Vec, /// Hashes of blocks not imported to blockchain + bad: Vec, + /// Hashes of blocks that were removed from canonical chain retracted: Vec, }, /// A block is ready diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a8bcb653f..fcc9f49c8 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -1267,7 +1267,7 @@ impl ChainSync { } /// called when block is imported to chain, updates transactions queue - pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], retracted: &[H256]) { + pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(hash.clone())) @@ -1280,14 +1280,14 @@ impl ChainSync { let chain = io.chain(); let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let retracted = retracted.par_iter().map(|h| fetch_transactions(chain, h)); + let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); good.for_each(|txs| { let mut transaction_queue = self.transaction_queue.lock().unwrap(); let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); }); - retracted.for_each(|txs| { + bad.for_each(|txs| { // populate sender for tx in &txs { let _sender = tx.sender(); @@ -1637,10 +1637,10 @@ mod tests { let io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&io, &[], &good_blocks); + sync.chain_new_blocks(&io, &[], &good_blocks, &[]); assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks); + sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]); // then let status = sync.transaction_queue.lock().unwrap().status(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index d67a09f3b..8a30385a2 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -157,9 +157,9 @@ impl NetworkProtocolHandler for EthSync { SyncMessage::BlockVerified => { self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); }, - SyncMessage::NewChainBlocks { ref good, ref retracted } => { + SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { let sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&sync_io, good, retracted); + self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted); } } } From 51c95d4d67643f03fc0fd11cf241308acb01eb5a Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 6 Mar 2016 21:57:55 +0100 Subject: [PATCH 19/61] Implement option 1. 
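A compact way to see the new semantics before reading the diff: the sketch below models the insert_keys/kill_keys rules with std containers standing in for RocksDB and the write batch. Names here (Model, already_in) are illustrative only, and keys are plain byte vectors rather than H256.

use std::collections::{HashMap, HashSet};

struct Model {
	backing: HashSet<Vec<u8>>,       // stands in for the on-disk DB
	already_in: HashSet<Vec<u8>>,    // the morph_key(key, 0) marker entries
	counters: HashMap<Vec<u8>, u32>, // journal references per key
}

impl Model {
	// Journalled insert of `key` (mirrors insert_keys).
	fn insert(&mut self, key: Vec<u8>) {
		if let Some(c) = self.counters.get_mut(&key) {
			*c += 1; // already counting: just increment
			return;
		}
		if self.backing.contains(&key) {
			// First journal entry for a key the DB already holds:
			// start counting and remember it was already in.
			self.already_in.insert(key.clone());
			self.counters.insert(key, 1);
		} else {
			// Brand new key: write through; no counter needed.
			self.backing.insert(key);
		}
	}

	// Revert a non-canonical insert or enact a canonical delete (mirrors kill_keys).
	fn kill(&mut self, key: &[u8]) {
		match self.counters.get(key).copied() {
			Some(c) if c > 1 => { self.counters.insert(key.to_vec(), c - 1); }
			Some(_) => {
				// Last journal reference (count == 1): drop marker and
				// counter but keep the data, since it predates the journal.
				self.counters.remove(key);
				self.already_in.remove(key);
			}
			None => {
				// Never counted: the key exists only because of the journal,
				// so it can safely leave the backing DB.
				self.backing.remove(key);
			}
		}
	}
}

The effect is that only keys which are both journalled and already on disk pay any bookkeeping; a genuinely new key costs a single write, and a key whose journal entries all revert is deleted exactly once.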
--- util/src/journaldb.rs | 393 +++++++++++++++++++++--------------------- 1 file changed, 194 insertions(+), 199 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 01e53f819..5f94dcbeb 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -20,15 +20,12 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; -use kvdb::{Database, DBTransaction, DatabaseConfig}; +use rocksdb::{DB, Writable, WriteBatch, IteratorMode}; #[cfg(test)] use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay -/// and, possibly, latent-removal semantics. -/// -/// If `counters` is `None`, then it behaves exactly like OverlayDB. If not it behaves -/// differently: +/// and latent-removal semantics. /// /// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect @@ -36,8 +33,8 @@ use std::env; /// the removals actually take effect. pub struct JournalDB { overlay: MemoryDB, - backing: Arc, - counters: Option>>>, + backing: Arc, + counters: Arc>>, } impl Clone for JournalDB { @@ -50,51 +47,33 @@ impl Clone for JournalDB { } } -// all keys must be at least 12 bytes -const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const LAST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ]; +const VERSION_KEY : [u8; 4] = [ b'j', b'v', b'e', b'r' ]; -const DB_VERSION : u32 = 3; -const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; - -const PADDING : [u8; 10] = [ 0u8; 10 ]; +const DB_VERSION: u32 = 1; impl JournalDB { - - /// Create a new instance from file - pub fn new(path: &str) -> JournalDB { - Self::from_prefs(path, true) + /// Create a new instance given a `backing` database. + pub fn new(backing: DB) -> JournalDB { + let db = Arc::new(backing); + JournalDB::new_with_arc(db) } - /// Create a new instance from file - pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { - let opts = DatabaseConfig { - prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix - }; - let backing = Database::open(&opts, path).unwrap_or_else(|e| { - panic!("Error opening state db: {}", e); - }); - let with_journal; - if !backing.is_empty() { + /// Create a new instance given a shared `backing` database. 
+ pub fn new_with_arc(backing: Arc) -> JournalDB { + if backing.iterator(IteratorMode::Start).next().is_some() { match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => { with_journal = true; }, - Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; }, + Ok(Some(DB_VERSION)) => {}, v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) } } else { - backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); - with_journal = prefer_journal; + backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); } - - let counters = if with_journal { - Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) - } else { - None - }; + let counters = JournalDB::read_counters(&backing); JournalDB { overlay: MemoryDB::new(), - backing: Arc::new(backing), - counters: counters, + backing: backing, + counters: Arc::new(RwLock::new(counters)), } } @@ -103,55 +82,93 @@ impl JournalDB { pub fn new_temp() -> JournalDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(dir.to_str().unwrap()) + Self::new(DB::open_default(dir.to_str().unwrap()).unwrap()) } /// Check if this database has any commits pub fn is_empty(&self) -> bool { - self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + self.backing.get(&LAST_ERA_KEY).expect("Low level database error").is_none() } - /// Commit all recent insert operations. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let have_counters = self.counters.is_some(); - if have_counters { - self.commit_with_counters(now, id, end) - } else { - self.commit_without_counters() + fn morph_key(key: &H256, index: u8) -> Bytes { + let mut ret = key.bytes().to_owned(); + ret.push(index); + ret + } + + // The next three are valid only as long as there is an insert operation of `key` in the journal. + fn set_already_in(batch: &WriteBatch, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]); } + fn reset_already_in(batch: &WriteBatch, key: &H256) { batch.delete(&Self::morph_key(key, 0)); } + fn is_already_in(backing: &DB, key: &H256) -> bool { + backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() + } + + fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &DB, counters: &mut HashMap, batch: &WriteBatch) { + for &(ref h, ref d) in inserts { + if let Some(c) = counters.get_mut(h) { + // already counting. increment. + *c += 1; + continue; + } + + // this is the first entry for this node in the journal. + if backing.get(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?").is_some() { + // already in the backing DB. start counting, and remember it was already in. + Self::set_already_in(batch, &h); + counters.insert(h.clone(), 1); + continue; + } + + // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. + //Self::reset_already_in(&h); + assert!(!Self::is_already_in(backing, &h)); + batch.put(&h.bytes(), d); } } - /// Drain the overlay and place it into a batch for the DB. - fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { - let mut inserts = 0usize; - let mut deletes = 0usize; - for i in overlay.drain().into_iter() { - let (key, (value, rc)) = i; - if rc > 0 { - assert!(rc == 1); - batch.put(&key.bytes(), &value).expect("Low-level database error. 
Some issue with your hard disk?"); - inserts += 1; - } - if rc < 0 { - assert!(rc == -1); - deletes += 1; + fn replay_keys(inserts: &Vec, backing: &DB, counters: &mut HashMap) { + for h in inserts { + if let Some(c) = counters.get_mut(h) { + // already counting. increment. + *c += 1; + continue; } + + // this is the first entry for this node in the journal. + // it is initialised to 1 if it was already in. + counters.insert(h.clone(), if Self::is_already_in(backing, h) {1} else {0}); } - trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); - inserts + deletes } - /// Just commit the overlay into the backing DB. - fn commit_without_counters(&mut self) -> Result { - let batch = DBTransaction::new(); - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); - try!(self.backing.write(batch)); - Ok(ret as u32) + fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &WriteBatch) { + for h in deletes.into_iter() { + let mut n: Option = None; + if let Some(c) = counters.get_mut(&h) { + if *c > 1 { + *c -= 1; + continue; + } else { + n = Some(*c); + } + } + match &n { + &Some(i) if i == 1 => { + counters.remove(&h); + Self::reset_already_in(batch, &h); + } + &None => { + // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. + //assert!(!Self::is_already_in(db, &h)); + batch.delete(&h.bytes()); + } + _ => panic!("Invalid value in counters: {:?}", n), + } + } } /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -159,62 +176,84 @@ impl JournalDB { // TODO: store reclaim_period. - // when we make a new commit, we journal the inserts and removes. - // for each end_era that we journaled that we are no passing by, - // we remove all of its removes assuming it is canonical and all - // of its inserts otherwise. + // When we make a new commit, we make a journal of all blocks in the recent history and record + // all keys that were inserted and deleted. The journal is ordered by era; multiple commits can + // share the same era. This forms a data structure similar to a queue but whose items are tuples. + // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history + // into ancient history) then only one commit from the tuple is considered canonical. This commit + // is kept in the main backing database, whereas any others from the same era are reverted. + // + // It is possible that a key, properly available in the backing database be deleted and re-inserted + // in the recent history queue, yet have both operations in commits that are eventually non-canonical. + // To avoid the original, and still required, key from being deleted, we maintain a reference count + // which includes an original key, if any. 
+ // + // The semantics of the `counter` are: + // insert key k: + // counter already contains k: count += 1 + // counter doesn't contain k: + // backing db contains k: count = 1 + // backing db doesn't contain k: insert into backing db, count = 0 + // delete key k: + // counter contains k (count is asserted to be non-zero): + // count > 1: counter -= 1 + // count == 1: remove counter + // count == 0: remove key from backing db + // counter doesn't contain k: remove key from backing db // - // We also keep reference counters for each key inserted in the journal to handle - // the following cases where key K must not be deleted from the DB when processing removals : - // Given H is the journal size in eras, 0 <= C <= H. - // Key K is removed in era A(N) and re-inserted in canonical era B(N + C). - // Key K is removed in era A(N) and re-inserted in non-canonical era B`(N + C). - // Key K is added in non-canonical era A'(N) canonical B(N + C). + // Practically, this means that for each commit block turning from recent to ancient we do the + // following: + // is_canonical: + // inserts: Ignored (left alone in the backing database). + // deletes: Enacted; however, recent history queue is checked for ongoing references. This is + // reduced as a preference to deletion from the backing database. + // !is_canonical: + // inserts: Reverted; however, recent history queue is checked for ongoing references. This is + // reduced as a preference to deletion from the backing database. + // deletes: Ignored (they were never inserted). // - // The counter is encreased each time a key is inserted in the journal in the commit. The list of insertions - // is saved with the era record. When the era becomes end_era and goes out of journal the counter is decreased - // and the key is safe to delete. // record new commit's details. - trace!("commit: #{} ({}), end era: {:?}", now, id, end); - let mut counters = self.counters.as_ref().unwrap().write().unwrap(); - let batch = DBTransaction::new(); + let batch = WriteBatch::new(); + let mut counters = self.counters.write().unwrap(); { let mut index = 0usize; let mut last; - while { - let record = try!(self.backing.get({ - let mut r = RlpStream::new_list(3); - r.append(&now); - r.append(&index); - r.append(&&PADDING[..]); - last = r.drain(); - &last - })); - match record { - Some(r) => { - assert!(&Rlp::new(&r).val_at::(0) != id); - true - }, - None => false, - } - } { + while try!(self.backing.get({ + let mut r = RlpStream::new_list(2); + r.append(&now); + r.append(&index); + last = r.drain(); + &last + })).is_some() { index += 1; } + let drained = self.overlay.drain(); + let removes: Vec = drained + .iter() + .filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned() + .collect(); + let inserts: Vec<(H256, Bytes)> = drained + .into_iter() + .filter_map(|(k, (v, r))| if r > 0 { assert!(r == 1); Some((k, v)) } else { assert!(r >= -1); None }) + .collect(); + let mut r = RlpStream::new_list(3); - let inserts: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c > 0).map(|(key, _)| key.clone()).collect(); - // Increase counter for each inserted key no matter if the block is canonical or not. - for i in &inserts { - *counters.entry(i.clone()).or_insert(0) += 1; - } - let removes: Vec = self.overlay.keys().iter().filter(|&(_, &c)| c < 0).map(|(key, _)| key.clone()).collect(); r.append(id); - r.append(&inserts); + + // Process the new inserts. + // We use the inserts for three things. 
For each: + // - we place into the backing DB or increment the counter if already in; + // - we note in the backing db that it was already in; + // - we write the key into our journal for this block; + + r.begin_list(inserts.len()); + inserts.iter().foreach(|&(k, _)| {r.append(&k);}); r.append(&removes); + Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); try!(batch.put(&last, r.as_raw())); - try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); } // apply old commits' details @@ -222,105 +261,66 @@ impl JournalDB { let mut index = 0usize; let mut last; let mut to_remove: Vec = Vec::new(); - let mut canon_inserts: Vec = Vec::new(); while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(3); + let mut r = RlpStream::new_list(2); r.append(&end_era); r.append(&index); - r.append(&&PADDING[..]); last = r.drain(); &last })) { let rlp = Rlp::new(&rlp_data); - let mut inserts: Vec = rlp.val_at(1); - JournalDB::decrease_counters(&inserts, &mut counters); + let inserts: Vec = rlp.val_at(1); + let deletes: Vec = rlp.val_at(2); // Collect keys to be removed. These are removed keys for canonical block, inserted for non-canonical - if canon_id == rlp.val_at(0) { - let mut canon_deletes: Vec = rlp.val_at(2); - trace!("Purging nodes deleted from canon: {:?}", canon_deletes); - to_remove.append(&mut canon_deletes); - canon_inserts = inserts; - } - else { - trace!("Purging nodes inserted in non-canon: {:?}", inserts); - to_remove.append(&mut inserts); - } - trace!("commit: Delete journal for time #{}.{}: {}, (canon was {}): {} entries", end_era, index, rlp.val_at::(0), canon_id, to_remove.len()); + Self::kill_keys(if canon_id == rlp.val_at(0) {deletes} else {inserts}, &mut counters, &batch); try!(batch.delete(&last)); index += 1; } - - let canon_inserts = canon_inserts.drain(..).collect::>(); - // Purge removed keys if they are not referenced and not re-inserted in the canon commit - let mut deletes = 0; - trace!("Purging filtered nodes: {:?}", to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)).collect::>()); - for h in to_remove.iter().filter(|h| !counters.contains_key(h) && !canon_inserts.contains(h)) { - try!(batch.delete(&h)); - deletes += 1; - } - trace!("Total nodes purged: {}", deletes); + try!(batch.put(&LAST_ERA_KEY, &encode(&end_era))); + trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } - // Commit overlay insertions - let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); try!(self.backing.write(batch)); - Ok(ret as u32) - } - - - // Decrease counters for given keys. Deletes obsolete counters - fn decrease_counters(keys: &[H256], counters: &mut HashMap) { - for i in keys.iter() { - let delete_counter = { - let cnt = counters.get_mut(i).expect("Missing key counter"); - *cnt -= 1; - *cnt == 0 - }; - if delete_counter { - counters.remove(i); - } - } +// trace!("JournalDB::commit() deleted {} nodes", deletes); + Ok(0) } fn payload(&self, key: &H256) -> Option { self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &Database) -> HashMap { - let mut res = HashMap::new(); - if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val); + fn read_counters(db: &DB) -> HashMap { + let mut counters = HashMap::new(); + if let Some(val) = db.get(&LAST_ERA_KEY).expect("Low-level database error.") { + let mut era = decode::(&val) + 1; loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ - let mut r = RlpStream::new_list(3); + let mut r = RlpStream::new_list(2); r.append(&era); r.append(&index); - r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); - let to_add: Vec = rlp.val_at(1); - for h in to_add { - *res.entry(h).or_insert(0) += 1; - } + let inserts: Vec = rlp.val_at(1); + Self::replay_keys(&inserts, db, &mut counters); index += 1; }; - if index == 0 || era == 0 { + if index == 0 { break; } - era -= 1; + era += 1; } } - trace!("Recovered {} counters", res.len()); - res + trace!("Recovered {} counters", counters.len()); + counters } } impl HashDB for JournalDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); - for (key, _) in self.backing.iter() { + for (key, _) in self.backing.iterator(IteratorMode::Start) { let h = H256::from_slice(key.deref()); ret.insert(h, 1); } @@ -368,6 +368,28 @@ mod tests { use super::*; use hashdb::*; + #[test] + fn insert_same_in_fork() { + // history is 1 + let mut jdb = JournalDB::new_temp(); + + let x = jdb.insert(b"X"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), None).unwrap(); + jdb.commit(3, &b"1002a".sha3(), Some((1, b"1".sha3()))).unwrap(); + jdb.commit(4, &b"1003a".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.remove(&x); + jdb.commit(3, &b"1002b".sha3(), Some((1, b"1".sha3()))).unwrap(); + let x = jdb.insert(b"X"); + jdb.commit(4, &b"1003b".sha3(), Some((2, b"2".sha3()))).unwrap(); + + jdb.commit(5, &b"1004a".sha3(), Some((3, b"1002a".sha3()))).unwrap(); + jdb.commit(6, &b"1005a".sha3(), Some((4, b"1003a".sha3()))).unwrap(); + + assert!(jdb.exists(&x)); + } + #[test] fn long_history() { // history is 3 @@ -487,31 +509,4 @@ mod tests { jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.exists(&foo)); } - - #[test] - fn reopen() { - let mut dir = ::std::env::temp_dir(); - dir.push(H32::random().hex()); - - let foo = { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - // history is 1 - let foo = jdb.insert(b"foo"); - jdb.commit(0, &b"0".sha3(), None).unwrap(); - foo - }; - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - jdb.remove(&foo); - jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); - } - - { - let mut jdb = JournalDB::new(dir.to_str().unwrap()); - assert!(jdb.exists(&foo)); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(!jdb.exists(&foo)); - } - } } From bfd882c7e03dee68d39c8722e65c9b395ed8b438 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 6 Mar 2016 22:05:12 +0100 Subject: [PATCH 20/61] Fix warnings. --- util/src/journaldb.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 5f94dcbeb..b7e495503 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -97,8 +97,8 @@ impl JournalDB { } // The next three are valid only as long as there is an insert operation of `key` in the journal. 
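// Tracing the insert_same_in_fork test (above) through this counter scheme,
// history = 1; a walk-through sketch of the journal references held for X:
//   era 3, block 1002b: remove(X)  -> journalled as a delete; nothing applied.
//   era 4, block 1003b: insert(X)  -> X already in backing: marker set, count = 1.
//   end of era 3 (canon 1002a): 1002b is non-canonical, so only its inserts
//     would be reverted, and it has none; its delete of X is ignored.
//   end of era 4 (canon 1003a): 1003b's insert of X is reverted, but count == 1
//     records that the backing copy predates the journal entry, so only the
//     counter and marker are dropped; the data survives and exists(&x) holds.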
- fn set_already_in(batch: &WriteBatch, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]); } - fn reset_already_in(batch: &WriteBatch, key: &H256) { batch.delete(&Self::morph_key(key, 0)); } + fn set_already_in(batch: &WriteBatch, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } + fn reset_already_in(batch: &WriteBatch, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } fn is_already_in(backing: &DB, key: &H256) -> bool { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } @@ -122,7 +122,7 @@ impl JournalDB { // Gets removed when a key leaves the journal, so should never be set when we're placing a new key. //Self::reset_already_in(&h); assert!(!Self::is_already_in(backing, &h)); - batch.put(&h.bytes(), d); + batch.put(&h.bytes(), d).expect("Low-level database error. Some issue with your hard disk?"); } } @@ -159,7 +159,7 @@ impl JournalDB { &None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); - batch.delete(&h.bytes()); + batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); } _ => panic!("Invalid value in counters: {:?}", n), } @@ -260,7 +260,6 @@ impl JournalDB { if let Some((end_era, canon_id)) = end { let mut index = 0usize; let mut last; - let mut to_remove: Vec = Vec::new(); while let Some(rlp_data) = try!(self.backing.get({ let mut r = RlpStream::new_list(2); r.append(&end_era); From bc2fb14b5d6fbfac97a9e1d03e8e3d93d2da0283 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 6 Mar 2016 22:39:04 +0100 Subject: [PATCH 21/61] Add memory usage reports. Update to be similar to master. --- ethcore/src/client.rs | 8 +- parity/main.rs | 3 +- util/src/journaldb.rs | 213 +++++++++++++++++++++++++++++++++++------- util/src/memorydb.rs | 6 ++ 4 files changed, 194 insertions(+), 36 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 858185873..9688cc527 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -190,6 +190,8 @@ pub struct ClientReport { pub transactions_applied: usize, /// How much gas has been processed so far. pub gas_processed: U256, + /// Memory used by state DB + pub state_db_mem: usize, } impl ClientReport { @@ -222,7 +224,7 @@ pub struct Client where V: Verifier { } const HISTORY: u64 = 1000; -const CLIENT_DB_VER_STR: &'static str = "4.0"; +const CLIENT_DB_VER_STR: &'static str = "5.1"; impl Client { /// Create a new client with given spec and DB path. @@ -432,7 +434,9 @@ impl Client where V: Verifier { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.read().unwrap().clone() + let mut report = self.report.read().unwrap().clone(); + report.state_db_mem = self.state_db.lock().unwrap().mem_used(); + report } /// Tick the client. 
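The report() change above is the memory-accounting pattern in miniature: the accumulated counters are cloned under their own read lock, then the point-in-time state-DB heap figure is attached, so each lock is held only for a single call. A minimal standalone sketch of that shape (types simplified; StateDb and its constant figure are stand-ins, not the real API):

use std::sync::{Mutex, RwLock};

#[derive(Clone, Default)]
struct Report { state_db_mem: usize }

struct StateDb;
impl StateDb {
    fn mem_used(&self) -> usize { 42 } // stand-in for the real heap estimate
}

struct Client { report: RwLock<Report>, state_db: Mutex<StateDb> }

impl Client {
    // Clone the accumulated counters first, then fold in the live figure,
    // so neither lock is held for longer than one call.
    fn report(&self) -> Report {
        let mut r = self.report.read().unwrap().clone();
        r.state_db_mem = self.state_db.lock().unwrap().mem_used();
        r
    }
}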
diff --git a/parity/main.rs b/parity/main.rs index 3f4243a0a..605fb315d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -395,7 +395,7 @@ impl Informant { let sync_info = sync.status(); if let (_, _, &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} chain, {} queue, {} sync ]", + println!("[ #{} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, #{}, {}+{} queued ···// mem: {} db, {} chain, {} queue, {} sync ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, @@ -408,6 +408,7 @@ impl Informant { queue_info.unverified_queue_size, queue_info.verified_queue_size, + Informant::format_bytes(report.state_db_mem), Informant::format_bytes(cache_info.total()), Informant::format_bytes(queue_info.mem_used), Informant::format_bytes(sync_info.mem_used), diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index b7e495503..f04affb7b 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -20,7 +20,7 @@ use common::*; use rlp::*; use hashdb::*; use memorydb::*; -use rocksdb::{DB, Writable, WriteBatch, IteratorMode}; +use kvdb::{Database, DBTransaction, DatabaseConfig}; #[cfg(test)] use std::env; @@ -33,8 +33,8 @@ use std::env; /// the removals actually take effect. pub struct JournalDB { overlay: MemoryDB, - backing: Arc, - counters: Arc>>, + backing: Arc, + counters: Option>>>, } impl Clone for JournalDB { @@ -47,33 +47,50 @@ impl Clone for JournalDB { } } -const LAST_ERA_KEY : [u8; 4] = [ b'l', b'a', b's', b't' ]; -const VERSION_KEY : [u8; 4] = [ b'j', b'v', b'e', b'r' ]; +// all keys must be at least 12 bytes +const LATEST_ERA_KEY : [u8; 12] = [ b'l', b'a', b's', b't', 0, 0, 0, 0, 0, 0, 0, 0 ]; +const VERSION_KEY : [u8; 12] = [ b'j', b'v', b'e', b'r', 0, 0, 0, 0, 0, 0, 0, 0 ]; -const DB_VERSION: u32 = 1; +const DB_VERSION : u32 = 3; +const DB_VERSION_NO_JOURNAL : u32 = 3 + 256; + +const PADDING : [u8; 10] = [ 0u8; 10 ]; impl JournalDB { - /// Create a new instance given a `backing` database. - pub fn new(backing: DB) -> JournalDB { - let db = Arc::new(backing); - JournalDB::new_with_arc(db) + /// Create a new instance from file + pub fn new(path: &str) -> JournalDB { + Self::from_prefs(path, true) } - /// Create a new instance given a shared `backing` database. 
- pub fn new_with_arc(backing: Arc) -> JournalDB { - if backing.iterator(IteratorMode::Start).next().is_some() { + /// Create a new instance from file + pub fn from_prefs(path: &str, prefer_journal: bool) -> JournalDB { + let opts = DatabaseConfig { + prefix_size: Some(12) //use 12 bytes as prefix, this must match account_db prefix + }; + let backing = Database::open(&opts, path).unwrap_or_else(|e| { + panic!("Error opening state db: {}", e); + }); + let with_journal; + if !backing.is_empty() { match backing.get(&VERSION_KEY).map(|d| d.map(|v| decode::(&v))) { - Ok(Some(DB_VERSION)) => {}, + Ok(Some(DB_VERSION)) => { with_journal = true; }, + Ok(Some(DB_VERSION_NO_JOURNAL)) => { with_journal = false; }, v => panic!("Incompatible DB version, expected {}, got {:?}", DB_VERSION, v) } } else { - backing.put(&VERSION_KEY, &encode(&DB_VERSION)).expect("Error writing version to database"); + backing.put(&VERSION_KEY, &encode(&(if prefer_journal { DB_VERSION } else { DB_VERSION_NO_JOURNAL }))).expect("Error writing version to database"); + with_journal = prefer_journal; } - let counters = JournalDB::read_counters(&backing); + + let counters = if with_journal { + Some(Arc::new(RwLock::new(JournalDB::read_counters(&backing)))) + } else { + None + }; JournalDB { overlay: MemoryDB::new(), - backing: backing, - counters: Arc::new(RwLock::new(counters)), + backing: Arc::new(backing), + counters: counters, } } @@ -87,7 +104,45 @@ impl JournalDB { /// Check if this database has any commits pub fn is_empty(&self) -> bool { - self.backing.get(&LAST_ERA_KEY).expect("Low level database error").is_none() + self.backing.get(&LATEST_ERA_KEY).expect("Low level database error").is_none() + } + + /// Commit all recent insert operations. + pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + let have_counters = self.counters.is_some(); + if have_counters { + self.commit_with_counters(now, id, end) + } else { + self.commit_without_counters() + } + } + + /// Drain the overlay and place it into a batch for the DB. + fn batch_overlay_insertions(overlay: &mut MemoryDB, batch: &DBTransaction) -> usize { + let mut inserts = 0usize; + let mut deletes = 0usize; + for i in overlay.drain().into_iter() { + let (key, (value, rc)) = i; + if rc > 0 { + assert!(rc == 1); + batch.put(&key.bytes(), &value).expect("Low-level database error. Some issue with your hard disk?"); + inserts += 1; + } + if rc < 0 { + assert!(rc == -1); + deletes += 1; + } + } + trace!("commit: Inserted {}, Deleted {} nodes", inserts, deletes); + inserts + deletes + } + + /// Just commit the overlay into the backing DB. + fn commit_without_counters(&mut self) -> Result { + let batch = DBTransaction::new(); + let ret = Self::batch_overlay_insertions(&mut self.overlay, &batch); + try!(self.backing.write(batch)); + Ok(ret as u32) } fn morph_key(key: &H256, index: u8) -> Bytes { @@ -97,13 +152,13 @@ impl JournalDB { } // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &WriteBatch, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. Some issue with your hard disk?"); } - fn reset_already_in(batch: &WriteBatch, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } - fn is_already_in(backing: &DB, key: &H256) -> bool { + fn set_already_in(batch: &DBTransaction, key: &H256) { batch.put(&Self::morph_key(key, 0), &[1u8]).expect("Low-level database error. 
Some issue with your hard disk?"); } + fn reset_already_in(batch: &DBTransaction, key: &H256) { batch.delete(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?"); } + fn is_already_in(backing: &Database, key: &H256) -> bool { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &DB, counters: &mut HashMap, batch: &WriteBatch) { + fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { for &(ref h, ref d) in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -126,7 +181,7 @@ impl JournalDB { } } - fn replay_keys(inserts: &Vec, backing: &DB, counters: &mut HashMap) { + fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { for h in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -140,7 +195,7 @@ impl JournalDB { } } - fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &WriteBatch) { + fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { for h in deletes.into_iter() { let mut n: Option = None; if let Some(c) = counters.get_mut(&h) { @@ -168,7 +223,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. - pub fn commit(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -214,8 +269,8 @@ impl JournalDB { // // record new commit's details. - let batch = WriteBatch::new(); - let mut counters = self.counters.write().unwrap(); + let batch = DBTransaction::new(); + let mut counters = self.counters.as_ref().unwrap().write().unwrap(); { let mut index = 0usize; let mut last; @@ -224,6 +279,7 @@ impl JournalDB { let mut r = RlpStream::new_list(2); r.append(&now); r.append(&index); + r.append(&&PADDING[..]); last = r.drain(); &last })).is_some() { @@ -264,6 +320,7 @@ impl JournalDB { let mut r = RlpStream::new_list(2); r.append(&end_era); r.append(&index); + r.append(&&PADDING[..]); last = r.drain(); &last })) { @@ -275,7 +332,7 @@ impl JournalDB { try!(batch.delete(&last)); index += 1; } - try!(batch.put(&LAST_ERA_KEY, &encode(&end_era))); + try!(batch.put(&LATEST_ERA_KEY, &encode(&end_era))); trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } @@ -288,9 +345,9 @@ impl JournalDB { self.backing.get(&key.bytes()).expect("Low-level database error. 
Some issue with your hard disk?").map(|v| v.to_vec()) } - fn read_counters(db: &DB) -> HashMap { + fn read_counters(db: &Database) -> HashMap { let mut counters = HashMap::new(); - if let Some(val) = db.get(&LAST_ERA_KEY).expect("Low-level database error.") { + if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { let mut era = decode::(&val) + 1; loop { let mut index = 0usize; @@ -298,6 +355,7 @@ impl JournalDB { let mut r = RlpStream::new_list(2); r.append(&era); r.append(&index); + r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { let rlp = Rlp::new(&rlp_data); @@ -314,12 +372,17 @@ impl JournalDB { trace!("Recovered {} counters", counters.len()); counters } -} + + /// Returns heap memory size used + pub fn mem_used(&self) -> usize { + self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 } + } + } impl HashDB for JournalDB { fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); - for (key, _) in self.backing.iterator(IteratorMode::Start) { + for (key, _) in self.backing.iter() { let h = H256::from_slice(key.deref()); ret.insert(h, 1); } @@ -508,4 +571,88 @@ mod tests { jdb.commit(2, &b"2a".sha3(), Some((1, b"1a".sha3()))).unwrap(); assert!(jdb.exists(&foo)); } + + + #[test] + fn reopen() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.emplace(bar.clone(), b"bar".to_vec()); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + } + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + assert!(jdb.exists(&foo)); + assert!(jdb.exists(&bar)); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + + #[test] + fn reopen_remove() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let bar = H256::random(); + + let foo = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.insert(b"foo"); + jdb.commit(1, &b"1".sha3(), None).unwrap(); + foo + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.remove(&foo); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(!jdb.exists(&foo)); + } + } + #[test] + fn reopen_fork() { + let mut dir = ::std::env::temp_dir(); + dir.push(H32::random().hex()); + let (foo, bar, baz) = { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + // history is 1 + let foo = jdb.insert(b"foo"); + let bar = jdb.insert(b"bar"); + jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.remove(&foo); + let baz = jdb.insert(b"baz"); + jdb.commit(1, &b"1a".sha3(), Some((0, b"0".sha3()))).unwrap(); + + jdb.remove(&bar); + jdb.commit(1, &b"1b".sha3(), Some((0, b"0".sha3()))).unwrap(); + (foo, bar, baz) + }; + + { + let mut jdb = JournalDB::new(dir.to_str().unwrap()); + jdb.commit(2, &b"2b".sha3(), Some((1, b"1b".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + assert!(!jdb.exists(&baz)); + assert!(!jdb.exists(&bar)); + } + } } diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 680a6e1d0..9cd018935 100644 
--- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -21,6 +21,7 @@ use bytes::*; use rlp::*; use sha3::*; use hashdb::*; +use heapsize::*; use std::mem; use std::collections::HashMap; @@ -143,6 +144,11 @@ impl MemoryDB { } self.raw(key).unwrap() } + + /// Returns the size of allocated heap memory + pub fn mem_used(&self) -> usize { + self.data.heap_size_of_children() + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; From 4230fdfffea3c897402eaa095a46d959928e4692 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Sun, 6 Mar 2016 22:43:21 +0100 Subject: [PATCH 22/61] More versioning fixups. --- util/src/journaldb.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index f04affb7b..925c1c065 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -99,7 +99,7 @@ impl JournalDB { pub fn new_temp() -> JournalDB { let mut dir = env::temp_dir(); dir.push(H32::random().hex()); - Self::new(DB::open_default(dir.to_str().unwrap()).unwrap()) + Self::new(dir.to_str().unwrap()) } /// Check if this database has any commits @@ -269,14 +269,15 @@ impl JournalDB { // // record new commit's details. - let batch = DBTransaction::new(); + trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut counters = self.counters.as_ref().unwrap().write().unwrap(); + let batch = DBTransaction::new(); { let mut index = 0usize; let mut last; while try!(self.backing.get({ - let mut r = RlpStream::new_list(2); + let mut r = RlpStream::new_list(3); r.append(&now); r.append(&index); r.append(&&PADDING[..]); @@ -310,6 +311,7 @@ impl JournalDB { r.append(&removes); Self::insert_keys(&inserts, &self.backing, &mut counters, &batch); try!(batch.put(&last, r.as_raw())); + try!(batch.put(&LATEST_ERA_KEY, &encode(&now))); } // apply old commits' details @@ -317,7 +319,7 @@ impl JournalDB { let mut index = 0usize; let mut last; while let Some(rlp_data) = try!(self.backing.get({ - let mut r = RlpStream::new_list(2); + let mut r = RlpStream::new_list(3); r.append(&end_era); r.append(&index); r.append(&&PADDING[..]); From 0980c7130a5f299846876ec000b2fdbf8bc82c15 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 06:58:43 +0100 Subject: [PATCH 23/61] Fix replay_keys Counters should never have an entry with zero value. --- util/src/journaldb.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 925c1c065..a008fa47e 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -191,7 +191,9 @@ impl JournalDB { // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in. - counters.insert(h.clone(), if Self::is_already_in(backing, h) {1} else {0}); + if Self::is_already_in(backing, h) { + counters.insert(h.clone(), 1); + } } } From fd87633db662126ea95b29c1027d2d88d7565639 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 07:57:50 +0100 Subject: [PATCH 24/61] Remove superfluous LATEST_KEY write.
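The write was superfluous because the insert path added in PATCH 22 already pins LATEST_ERA_KEY to the era being committed, and the purge path only ever runs for a strictly older era, so rewriting the key there could only repeat or regress the value. A reduced sketch of the relationship (hypothetical stand-ins for the batch and key):

use std::collections::HashMap;

// Stand-in for the write batch: era markers keyed by a fixed row name.
fn commit_sketch(db: &mut HashMap<&'static str, u64>, now: u64, end: Option<u64>) {
    db.insert("latest_era", now); // the insert path (PATCH 22) writes it
    if let Some(end_era) = end {
        debug_assert!(end_era < now); // the purged era is always older,
        // so a second write of end_era here is redundant at best
    }
}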
--- util/src/journaldb.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index a008fa47e..bb79a447c 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -336,7 +336,6 @@ impl JournalDB { try!(batch.delete(&last)); index += 1; } - try!(batch.put(&LATEST_ERA_KEY, &encode(&end_era))); trace!("JournalDB: delete journal for time #{}.{}, (canon was {})", end_era, index, canon_id); } From 73207c23557e958761ea72b91ccdaee4ce5dd457 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 08:01:14 +0100 Subject: [PATCH 25/61] Revert accidental beta regressions. --- util/src/journaldb.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index bb79a447c..2e78f1cce 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -351,7 +351,7 @@ impl JournalDB { fn read_counters(db: &Database) -> HashMap { let mut counters = HashMap::new(); if let Some(val) = db.get(&LATEST_ERA_KEY).expect("Low-level database error.") { - let mut era = decode::(&val) + 1; + let mut era = decode::(&val); loop { let mut index = 0usize; while let Some(rlp_data) = db.get({ @@ -366,10 +366,10 @@ impl JournalDB { Self::replay_keys(&inserts, db, &mut counters); index += 1; }; - if index == 0 { + if index == 0 || era == 0 { break; } - era += 1; + era -= 1; } } trace!("Recovered {} counters", counters.len()); From 4d1effb008303ef0a4ce98134a7c8658031ed2f2 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 09:10:02 +0100 Subject: [PATCH 26/61] Fix tests. --- util/src/journaldb.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 2e78f1cce..57af857a9 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -182,6 +182,7 @@ impl JournalDB { } fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { + println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); for h in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -192,9 +193,11 @@ impl JournalDB { // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in. if Self::is_already_in(backing, h) { + println!("replace_keys: Key {} was already in!", h); counters.insert(h.clone(), 1); } } + println!("replay_keys: (end) counters={:?}", counters); } fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { @@ -361,6 +364,7 @@ impl JournalDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { + println!("read_counters: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); Self::replay_keys(&inserts, db, &mut counters); @@ -617,17 +621,23 @@ mod tests { // history is 1 let foo = jdb.insert(b"foo"); jdb.commit(0, &b"0".sha3(), None).unwrap(); + jdb.commit(1, &b"1".sha3(), Some((0, b"0".sha3()))).unwrap(); + + // foo is ancient history. 
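// (Context for the rewritten test: with a history of 1, the extra commit
// above lets era 0 be purged, so foo's original journal entry is gone and
// the value survives only in the backing DB — the re-insert below then has
// to take the is_already_in path, whose replay PATCH 23 fixed.)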
+ jdb.insert(b"foo"); - jdb.commit(1, &b"1".sha3(), None).unwrap(); + jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); foo }; { let mut jdb = JournalDB::new(dir.to_str().unwrap()); jdb.remove(&foo); - jdb.commit(2, &b"2".sha3(), Some((1, b"1".sha3()))).unwrap(); - assert!(jdb.exists(&foo)); jdb.commit(3, &b"3".sha3(), Some((2, b"2".sha3()))).unwrap(); + assert!(jdb.exists(&foo)); + jdb.remove(&foo); + jdb.commit(4, &b"4".sha3(), Some((3, b"3".sha3()))).unwrap(); + jdb.commit(5, &b"5".sha3(), Some((4, b"4".sha3()))).unwrap(); assert!(!jdb.exists(&foo)); } } From bcae4f6e7b5d7e0b9d2ac4a9df370d35f92d5773 Mon Sep 17 00:00:00 2001 From: debris Date: Mon, 7 Mar 2016 10:30:19 +0100 Subject: [PATCH 27/61] fixed jsonrpc reporting current block is one less than actual, fixed #612 --- rpc/src/v1/impls/eth.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 2313d5114..7113c55b1 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -312,7 +312,8 @@ impl EthFilter for EthFilterClient { None => Ok(Value::Array(vec![] as Vec)), Some(info) => match info.filter { PollFilter::Block => { - let current_number = client.chain_info().best_block_number; + // + 1, cause we want to return hashes including current block hash. + let current_number = client.chain_info().best_block_number + 1; let hashes = (info.block_number..current_number).into_iter() .map(BlockId::Number) .filter_map(|id| client.block_hash(id)) From 72016196cd654697dbfd58e7580807021fb0888a Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 10:56:39 +0100 Subject: [PATCH 28/61] Remove println!s. --- util/src/journaldb.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 57af857a9..d0d7c05ff 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -182,7 +182,7 @@ impl JournalDB { } fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { - println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); + trace!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); for h in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -193,11 +193,11 @@ impl JournalDB { // this is the first entry for this node in the journal. // it is initialised to 1 if it was already in.
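// (After PATCH 23 an entry is created only when the key is already resident
// in the backing DB, so `counters` never stores a zero — absence of a key
// now simply means "no extra journal references", which is the invariant
// the decrement logic in kill_keys depends on.)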
if Self::is_already_in(backing, h) { - println!("replace_keys: Key {} was already in!", h); + trace!("replace_keys: Key {} was already in!", h); counters.insert(h.clone(), 1); } } - println!("replay_keys: (end) counters={:?}", counters); + trace!("replay_keys: (end) counters={:?}", counters); } fn kill_keys(deletes: Vec, counters: &mut HashMap, batch: &DBTransaction) { @@ -364,7 +364,7 @@ impl JournalDB { r.append(&&PADDING[..]); &r.drain() }).expect("Low-level database error.") { - println!("read_counters: era={}, index={}", era, index); + trace!("read_counters: era={}, index={}", era, index); let rlp = Rlp::new(&rlp_data); let inserts: Vec = rlp.val_at(1); Self::replay_keys(&inserts, db, &mut counters); From 58721475ffa68a77c23007caa6db884948e4b992 Mon Sep 17 00:00:00 2001 From: arkpar Date: Mon, 7 Mar 2016 11:34:07 +0100 Subject: [PATCH 29/61] Do not remove the peer immediately on send error --- sync/src/chain.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 63640f87f..fe1b559cd 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -575,7 +575,7 @@ impl ChainSync { pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); if let Err(e) = self.send_status(io) { - trace!(target:"sync", "Error sending status request: {:?}", e); + warn!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer); } } @@ -900,9 +900,8 @@ impl ChainSync { } match sync.send(peer_id, packet_id, packet) { Err(e) => { - warn!(target:"sync", "Error sending request: {:?}", e); + debug!(target:"sync", "Error sending request: {:?}", e); sync.disable_peer(peer_id); - self.on_peer_aborting(sync, peer_id); } Ok(_) => { let mut peer = self.peers.get_mut(&peer_id).unwrap(); @@ -915,9 +914,8 @@ impl ChainSync { /// Generic packet sender fn send_packet(&mut self, sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) { if let Err(e) = sync.send(peer_id, packet_id, packet) { - warn!(target:"sync", "Error sending packet: {:?}", e); + debug!(target:"sync", "Error sending packet: {:?}", e); sync.disable_peer(peer_id); - self.on_peer_aborting(sync, peer_id); } } /// Called when peer sends us new transactions From 3153d12bd9997f223a8a04ec40e8db21fb161663 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 11:40:44 +0100 Subject: [PATCH 30/61] feature enabled when compiling without --release --- Cargo.toml | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..d1094a110 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -12,15 +12,22 @@ rustc-serialize = "0.3" docopt = "0.6" time = "0.1" ctrlc = { git = "https://github.com/tomusdrw/rust-ctrlc.git" } -clippy = { version = "0.0.44", optional = true } -ethcore-util = { path = "util" } -ethcore = { path = "ethcore" } -ethsync = { path = "sync" } -ethcore-rpc = { path = "rpc", optional = true } fdlimit = { path = "util/fdlimit" } daemonize = "0.2" -ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +clippy = { version = "0.0.44", optional = true } + +ethcore = { path = "ethcore" } +ethcore-util = { path = "util" } +ethsync = { path = "sync" } +ethcore-devtools = { path = "devtools" } +ethcore-rpc = { path = "rpc", optional = true } + +[dev-dependencies] +ethcore = { path = "ethcore", features = ["dev"]} +ethcore-util = { path = "util", features = ["dev"] } +ethsync = { path = "sync", features = ["dev"] }
+ethcore-rpc = { path = "rpc", features = ["dev"]} [features] default = ["rpc"] From e83f8561041216a56a47cba39abd9ed3a0385961 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 12:16:37 +0100 Subject: [PATCH 31/61] Merging chain_blocks_verified to chain_new_blocks --- sync/src/chain.rs | 78 +++++++++++++++++++++------------------ sync/src/lib.rs | 10 ++--- sync/src/tests/chain.rs | 8 ++-- sync/src/tests/helpers.rs | 4 +- 4 files changed, 52 insertions(+), 48 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index fc0a19aba..e598c4572 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -207,7 +207,7 @@ pub struct ChainSync { /// True if common block for our and remote chain has been found have_common_block: bool, /// Last propagated block number - last_send_block_number: BlockNumber, + last_sent_block_number: BlockNumber, /// Max blocks to download ahead max_download_ahead_blocks: usize, /// Network ID @@ -236,7 +236,7 @@ impl ChainSync { last_imported_hash: None, syncing_difficulty: U256::from(0u64), have_common_block: false, - last_send_block_number: 0, + last_sent_block_number: 0, max_download_ahead_blocks: max(MAX_HEADERS_TO_REQUEST, config.max_download_ahead_blocks), network_id: config.network_id, transaction_queue: Mutex::new(TransactionQueue::new()), @@ -1248,26 +1248,25 @@ impl ChainSync { sent } + fn propagate_latest_blocks(&mut self, io: &mut SyncIo) { + let chain_info = io.chain().chain_info(); + if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { + let blocks = self.propagate_blocks(&chain_info, io); + let hashes = self.propagate_new_hashes(&chain_info, io); + if blocks != 0 || hashes != 0 { + trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); + } + } + self.last_sent_block_number = chain_info.best_block_number; + } + /// Maintain other peers. 
Send out any new blocks and transactions pub fn maintain_sync(&mut self, io: &mut SyncIo) { self.check_resume(io); } - /// should be called once chain has new block, triggers the latest block propagation - pub fn chain_blocks_verified(&mut self, io: &mut SyncIo) { - let chain = io.chain().chain_info(); - if (((chain.best_block_number as i64) - (self.last_send_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let blocks = self.propagate_blocks(&chain, io); - let hashes = self.propagate_new_hashes(&chain, io); - if blocks != 0 || hashes != 0 { - trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); - } - } - self.last_send_block_number = chain.best_block_number; - } - - /// called when block is imported to chain, updates transactions queue - pub fn chain_new_blocks(&mut self, io: &SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { + /// called when block is imported to chain, updates transactions queue and propagates the blocks + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, good: &[H256], bad: &[H256], _retracted: &[H256]) { fn fetch_transactions(chain: &BlockChainClient, hash: &H256) -> Vec { let block = chain .block(BlockId::Hash(hash.clone())) @@ -1278,24 +1277,31 @@ impl ChainSync { } - let chain = io.chain(); - let good = good.par_iter().map(|h| fetch_transactions(chain, h)); - let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); + { + let chain = io.chain(); + let good = good.par_iter().map(|h| fetch_transactions(chain, h)); + let bad = bad.par_iter().map(|h| fetch_transactions(chain, h)); - good.for_each(|txs| { - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); - transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); - }); - bad.for_each(|txs| { - // populate sender - for tx in &txs { - let _sender = tx.sender(); - } - let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); - }); + good.for_each(|txs| { + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + let hashes = txs.iter().map(|tx| tx.hash()).collect::>(); + transaction_queue.remove_all(&hashes, |a| chain.nonce(a)); + }); + bad.for_each(|txs| { + // populate sender + for tx in &txs { + let _sender = tx.sender(); + } + let mut transaction_queue = self.transaction_queue.lock().unwrap(); + transaction_queue.add_all(txs, |a| chain.nonce(a)); + }); + } + + // Propagate latests blocks + self.propagate_latest_blocks(io); + // TODO [todr] propagate transactions? 
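// (The reorg rule above, in short: transactions mined in `good` blocks leave
// the queue via remove_all, keyed by hash, while transactions from retracted
// `bad` blocks re-enter it via add_all — and both paths re-query the sender's
// current nonce from the chain so queue ordering stays consistent after the
// switch.)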
} + } #[cfg(test)] @@ -1634,13 +1640,13 @@ mod tests { let retracted_blocks = vec![client.block_hash_delta_minus(1)]; let mut queue = VecDeque::new(); - let io = TestIo::new(&mut client, &mut queue, None); + let mut io = TestIo::new(&mut client, &mut queue, None); // when - sync.chain_new_blocks(&io, &[], &good_blocks, &[]); + sync.chain_new_blocks(&mut io, &[], &good_blocks, &[]); assert_eq!(sync.transaction_queue.lock().unwrap().status().future, 0); assert_eq!(sync.transaction_queue.lock().unwrap().status().pending, 1); - sync.chain_new_blocks(&io, &good_blocks, &retracted_blocks, &[]); + sync.chain_new_blocks(&mut io, &good_blocks, &retracted_blocks, &[]); // then let status = sync.transaction_queue.lock().unwrap().status(); diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 8a30385a2..b5869642c 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -154,13 +154,11 @@ impl NetworkProtocolHandler for EthSync { fn message(&self, io: &NetworkContext, message: &SyncMessage) { match *message { - SyncMessage::BlockVerified => { - self.sync.write().unwrap().chain_blocks_verified(&mut NetSyncIo::new(io, self.chain.deref())); - }, SyncMessage::NewChainBlocks { ref good, ref bad, ref retracted } => { - let sync_io = NetSyncIo::new(io, self.chain.deref()); - self.sync.write().unwrap().chain_new_blocks(&sync_io, good, bad, retracted); - } + let mut sync_io = NetSyncIo::new(io, self.chain.deref()); + self.sync.write().unwrap().chain_new_blocks(&mut sync_io, good, bad, retracted); + }, + _ => {/* Ignore other messages */}, } } } diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs index 58f50916e..855aa79a6 100644 --- a/sync/src/tests/chain.rs +++ b/sync/src/tests/chain.rs @@ -129,8 +129,8 @@ fn propagate_hashes() { net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); net.sync(); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); // 5 peers to sync assert_eq!(5, net.peer(0).queue.len()); @@ -154,8 +154,8 @@ fn propagate_blocks() { net.sync(); net.peer_mut(0).chain.add_blocks(10, EachBlockWith::Uncle); - net.trigger_block_verified(0); //first event just sets the marker - net.trigger_block_verified(0); + net.trigger_chain_new_blocks(0); //first event just sets the marker + net.trigger_chain_new_blocks(0); assert!(!net.peer(0).queue.is_empty()); // NEW_BLOCK_PACKET diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index 5b53ad90b..d01dba0b2 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -455,8 +455,8 @@ impl TestNet { self.peers.iter().all(|p| p.queue.is_empty()) } - pub fn trigger_block_verified(&mut self, peer_id: usize) { + pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) { let mut peer = self.peer_mut(peer_id); - peer.sync.chain_blocks_verified(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None)); + peer.sync.chain_new_blocks(&mut TestIo::new(&mut peer.chain, &mut peer.queue, None), &[], &[], &[]); } } From ec3698066b8dd3f5a061498a9203644511cf8e21 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Mon, 7 Mar 2016 12:21:11 +0100 Subject: [PATCH 32/61] Normal CLI options with geth. Support node identity. Support fine-grained JSONRPC API enabling. 
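The geth-compatible flags land as thin aliases: wherever both spellings exist, the geth form, when given, takes precedence. A hedged sketch of the precedence rule used for the RPC host and port below (standalone form, argument names invented):

fn rpc_url(rpcaddr: Option<&str>, jsonrpc_addr: &str,
           rpcport: Option<u16>, jsonrpc_port: u16) -> String {
    // geth-style --rpcaddr/--rpcport, when given, win over the parity forms
    format!("{}:{}",
            rpcaddr.unwrap_or(jsonrpc_addr),
            rpcport.unwrap_or(jsonrpc_port))
}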
--- ethcore/src/client.rs | 3 + parity/main.rs | 130 ++++++++++++++++++++++++++++++------------ 2 files changed, 96 insertions(+), 37 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 9688cc527..8471666aa 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -87,6 +87,8 @@ pub struct ClientConfig { pub blockchain: BlockChainConfig, /// Prefer journal rather than archive. pub prefer_journal: bool, + /// The name of the client instance. + pub name: String, } impl Default for ClientConfig { @@ -95,6 +97,7 @@ impl Default for ClientConfig { queue: Default::default(), blockchain: Default::default(), prefer_journal: false, + name: Default::default(), } } } diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..43b0504f1 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -53,6 +53,16 @@ use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; +fn die_with_message(msg: &str) -> ! { + println!("ERROR: {}", msg); + exit(1); +} + +#[macro_export] +macro_rules! die { + ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*)))); +} + const USAGE: &'static str = r#" Parity. Ethereum Client. By Wood/Paronyan/Kotewicz/DrwiÄ™ga/Volf. @@ -62,13 +72,16 @@ Usage: parity daemon [options] [ --no-bootstrap | ... ] parity [options] [ --no-bootstrap | ... ] -Options: +Protocol Options: --chain CHAIN Specify the blockchain type. CHAIN may be either a JSON chain specification file - or frontier, mainnet, morden, or testnet [default: frontier]. + or olympic, frontier, homestead, mainnet, morden, or testnet [default: homestead]. + --testnet Equivalent to --chain testnet (geth-compatible). + --networkid INDEX Override the network identifier from the chain we are on. --archive Client should not prune the state/storage trie. - -d --db-path PATH Specify the database & configuration directory path [default: $HOME/.parity] - --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] + -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --identity NAME Specify your node's name. +Networking Options: --no-bootstrap Don't bother trying to connect to any nodes initially. --listen-address URL Specify the IP/port on which to listen for peers [default: 0.0.0.0:30304]. --public-address URL Specify the IP/port on which peers may connect. @@ -78,18 +91,32 @@ Options: --no-upnp Disable trying to figure out the correct public adderss over UPnP. --node-key KEY Specify node secret key, either as 64-character hex string or input to SHA3 operation. +API and Console Options: + -j --jsonrpc Enable the JSON-RPC API sever. + --jsonrpc-addr HOST Specify the hostname portion of the JSONRPC API server [default: 127.0.0.1]. + --jsonrpc-port PORT Specify the port portion of the JSONRPC API server [default: 8545]. + --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. + --jsonrpc-apis APIS Specify the APIs available through the JSONRPC interface. APIS is a comma-delimited + list of API name. Possible name are web3, eth and net. [default: web3,eth,net]. + --rpc Equivalent to --jsonrpc (geth-compatible). + --rpcaddr HOST Equivalent to --jsonrpc-addr HOST (geth-compatible). + --rpcport PORT Equivalent to --jsonrpc-port PORT (geth-compatible). + --rpcapi APIS Equivalent to --jsonrpc-apis APIS (geth-compatible). + --rpccorsdomain URL Equivalent to --jsonrpc-cors URL (geth-compatible). 
+ +Sealing/Mining Options: + --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards + from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. + --extradata STRING Specify a custom extra-data for authored blocks, no more than 32 characters. + +Memory Footprint Options: --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. + --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with + other cache options (geth-compatible). - -j --jsonrpc Enable the JSON-RPC API sever. - --jsonrpc-url URL Specify URL for JSON-RPC API server [default: 127.0.0.1:8545]. - --jsonrpc-cors URL Specify CORS header for JSON-RPC API responses [default: null]. - - --author ADDRESS Specify the block author (aka "coinbase") address for sending block rewards - from sealed blocks [default: 0037a6b811ffeb6e072da21179d11b1406371c63]. - --extra-data STRING Specify a custom extra-data for authored blocks, no more than 32 characters. - +Miscellaneous Options: -l --logging LOGGING Specify the logging level. -v --version Show information about version. -h --help Show this screen. @@ -101,14 +128,18 @@ struct Args { arg_pid_file: String, arg_enode: Vec, flag_chain: String, + flag_testnet: bool, flag_db_path: String, + flag_networkid: Option, + flag_identity: String, + flag_cache: Option, flag_keys_path: String, flag_archive: bool, flag_no_bootstrap: bool, flag_listen_address: String, flag_public_address: Option, flag_address: Option, - flag_peers: u32, + flag_peers: usize, flag_no_discovery: bool, flag_no_upnp: bool, flag_node_key: Option, @@ -116,8 +147,15 @@ struct Args { flag_cache_max_size: usize, flag_queue_max_size: usize, flag_jsonrpc: bool, - flag_jsonrpc_url: String, + flag_jsonrpc_addr: String, + flag_jsonrpc_port: u16, flag_jsonrpc_cors: String, + flag_jsonrpc_apis: String, + flag_rpc: bool, + flag_rpcaddr: Option, + flag_rpcport: Option, + flag_rpccorsdomain: Option, + flag_rpcapi: Option, flag_logging: Option, flag_version: bool, flag_author: String, @@ -151,14 +189,23 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) { +fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); - server.add_delegate(Web3Client::new().to_delegate()); - server.add_delegate(EthClient::new(&client, &sync).to_delegate()); - server.add_delegate(EthFilterClient::new(&client).to_delegate()); - server.add_delegate(NetClient::new(&sync).to_delegate()); + for api in apis.into_iter() { + match api { + "web3" => server.add_delegate(Web3Client::new().to_delegate()), + "net" => server.add_delegate(NetClient::new(&sync).to_delegate()), + "eth" => { + server.add_delegate(EthClient::new(&client, &sync).to_delegate()); + server.add_delegate(EthFilterClient::new(&client).to_delegate()); + } + _ => { + die!("{}: Invalid API name to be enabled.", api); + } + } + } server.start_async(url, cors_domain); } @@ -179,16 +226,6 @@ By Wood/Paronyan/Kotewicz/DrwiÄ™ga/Volf.\ ", version()); } -fn die_with_message(msg: &str) -> ! { - println!("ERROR: {}", msg); - exit(1); -} - -#[macro_export] -macro_rules! 
die { - ($($arg:tt)*) => (die_with_message(&format!("{}", format_args!($($arg)*)))); -} - struct Configuration { args: Args } @@ -221,8 +258,11 @@ impl Configuration { } fn spec(&self) -> Spec { + if self.args.flag_testnet { + return ethereum::new_morden(); + } match self.args.flag_chain.as_ref() { - "frontier" | "mainnet" => ethereum::new_frontier(), + "frontier" | "homestead" | "mainnet" => ethereum::new_frontier(), "morden" | "testnet" => ethereum::new_morden(), "olympic" => ethereum::new_olympic(), f => Spec::from_json_utf8(contents(f).unwrap_or_else(|_| die!("{}: Couldn't read chain specification file. Sure it exists?", f)).as_ref()), @@ -276,7 +316,7 @@ impl Configuration { ret.public_address = public; ret.use_secret = self.args.flag_node_key.as_ref().map(|s| Secret::from_str(&s).unwrap_or_else(|_| s.sha3())); ret.discovery_enabled = !self.args.flag_no_discovery; - ret.ideal_peers = self.args.flag_peers; + ret.ideal_peers = self.args.flag_peers as u32; let mut net_path = PathBuf::from(&self.path()); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); @@ -307,13 +347,22 @@ impl Configuration { let spec = self.spec(); let net_settings = self.net_settings(&spec); let mut sync_config = SyncConfig::default(); - sync_config.network_id = spec.network_id(); + sync_config.network_id = self.args.flag_networkid.as_ref().map(|id| U256::from_str(id).unwrap_or_else(|_| die!("{}: Invalid index given with --networkid", id))).unwrap_or(spec.network_id()); // Build client let mut client_config = ClientConfig::default(); - client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; - client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + match self.args.flag_cache { + Some(mb) => { + client_config.blockchain.max_cache_size = mb * 1024 * 1024; + client_config.blockchain.pref_cache_size = client_config.blockchain.max_cache_size / 2; + } + None => { + client_config.blockchain.pref_cache_size = self.args.flag_cache_pref_size; + client_config.blockchain.max_cache_size = self.args.flag_cache_max_size; + } + } client_config.prefer_journal = !self.args.flag_archive; + client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); let client = service.client().clone(); @@ -324,9 +373,16 @@ impl Configuration { let sync = EthSync::register(service.network(), sync_config, client); // Setup rpc - if self.args.flag_jsonrpc { - setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors); - SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); + if self.args.flag_jsonrpc || self.args.flag_rpc { + let url = format!("{}:{}", + self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), + self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) + ); + SocketAddr::from_str(&url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen host/port given.", url)); + let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); + // TODO: use this as the API list. 
+ let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); + setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); } // Register IO handler From cbc2c0cf0c76bf12361641c6825e21e82bdec7ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:33:00 +0100 Subject: [PATCH 33/61] Fixing clippy warnings. When building on nightly it is required to enable clippy --- Cargo.lock | 4 +++ Cargo.toml | 13 ++++++--- build.rs | 25 ++++++++++++++++ cargo.sh | 2 ++ ethcore/Cargo.toml | 6 +++- ethcore/build.rs | 25 ++++++++++++++++ ethcore/src/basic_types.rs | 2 +- ethcore/src/block.rs | 4 +-- ethcore/src/block_queue.rs | 4 +-- ethcore/src/blockchain/blockchain.rs | 4 +-- ethcore/src/ethereum/ethash.rs | 2 +- ethcore/src/evm/interpreter.rs | 6 ++-- ethcore/src/evm/tests.rs | 11 ++++--- ethcore/src/externalities.rs | 6 ++-- ethcore/src/lib.rs | 10 +++---- ethcore/src/service.rs | 9 +++--- ethcore/src/spec.rs | 10 +++---- ethcore/src/state.rs | 2 +- ethcore/src/transaction.rs | 2 +- ethcore/src/verification/mod.rs | 2 ++ hook.sh | 2 +- parity/main.rs | 6 ++-- rpc/Cargo.toml | 3 +- rpc/build.rs | 23 +++++++++++++++ sync/Cargo.toml | 6 +++- sync/build.rs | 25 ++++++++++++++++ sync/src/chain.rs | 7 +++-- sync/src/lib.rs | 6 ++-- sync/src/range_collection.rs | 2 +- util/Cargo.toml | 2 +- util/bigint/src/uint.rs | 6 ++-- util/build.rs | 21 ++++++++++++++ util/src/hash.rs | 4 +-- util/src/journaldb.rs | 43 +++++++++++++++------------- util/src/kvdb.rs | 3 +- util/src/lib.rs | 12 ++++---- util/src/network/discovery.rs | 22 +++++++------- util/src/network/host.rs | 4 +-- util/src/panics.rs | 2 +- util/src/trie/triedb.rs | 18 ++++++------ util/src/trie/triedbmut.rs | 36 +++++++++++------------ 41 files changed, 272 insertions(+), 130 deletions(-) create mode 100644 build.rs create mode 100755 cargo.sh create mode 100644 ethcore/build.rs create mode 100644 sync/build.rs diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..61f152f69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,7 @@ dependencies = [ "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -209,6 +210,7 @@ dependencies = [ "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.34 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -232,6 +234,7 @@ dependencies = [ "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -286,6 +289,7 @@ dependencies = [ "log 0.3.5 
(registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/Cargo.toml b/Cargo.toml index d1094a110..d8e05bb20 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,6 +4,10 @@ name = "parity" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -24,17 +28,18 @@ ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } [dev-dependencies] -ethcore = { path = "ethcore", features = ["dev"]} +ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } -ethcore-rpc = { path = "rpc", features = ["dev"]} +ethcore-rpc = { path = "rpc", features = ["dev"] } [features] default = ["rpc"] rpc = ["ethcore-rpc"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] +dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "dev"] +travis-nightly = ["ethcore/json-tests", "clippy", "dev"] [[bin]] path = "parity/main.rs" diff --git a/build.rs b/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/cargo.sh b/cargo.sh new file mode 100755 index 000000000..6870ab385 --- /dev/null +++ b/cargo.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cargo "$@" --features dev-clippy diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index c3a3d32dc..fbfe175d7 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -5,6 +5,10 @@ license = "GPL-3.0" name = "ethcore" version = "0.9.99" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +rustc_version = "0.1" [dependencies] log = "0.3" @@ -27,5 +31,5 @@ jit = ["evmjit"] evm-debug = [] json-tests = [] test-heavy = [] -dev = ["clippy"] +dev = [] default = [] diff --git a/ethcore/build.rs b/ethcore/build.rs new file mode 100644 index 000000000..41b9a1b3e --- /dev/null +++ b/ethcore/build.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/ethcore/src/basic_types.rs b/ethcore/src/basic_types.rs index 5f6515c0d..9cba8b3a0 100644 --- a/ethcore/src/basic_types.rs +++ b/ethcore/src/basic_types.rs @@ -24,7 +24,7 @@ pub type LogBloom = H2048; /// Constant 2048-bit datum for 0. Often used as a default. pub static ZERO_LOGBLOOM: LogBloom = H2048([0x00; 256]); -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] /// Semantic boolean for when a seal/signature is included. pub enum Seal { /// The seal/signature is included. diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 68f647e37..b3894db94 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -16,7 +16,7 @@ //! Blockchain block. -#![cfg_attr(feature="dev", allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> +#![cfg_attr(all(nightly, feature="dev"), allow(ptr_arg))] // Because of &LastHashes -> &Vec<_> use common::*; use engine::*; @@ -274,7 +274,7 @@ impl<'x> OpenBlock<'x> { s.block.base.header.note_dirty(); ClosedBlock { - block: s.block, + block: s.block, uncle_bytes: uncle_bytes, } } diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..de6802a4f 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -121,7 +121,7 @@ struct QueueSignal { } impl QueueSignal { - #[cfg_attr(feature="dev", allow(bool_comparison))] + #[cfg_attr(all(nightly, feature="dev"), allow(bool_comparison))] fn set(&self) { if self.signalled.compare_and_swap(false, true, AtomicOrdering::Relaxed) == false { self.message_channel.send(UserMessage(SyncMessage::BlockVerified)).expect("Error sending BlockVerified message"); @@ -385,7 +385,7 @@ impl BlockQueue { } } - pub fn collect_garbage(&self) { + pub fn collect_garbage(&self) { { let mut verification = self.verification.lock().unwrap(); verification.unverified.shrink_to_fit(); diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..d7c9d7975 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -884,7 +884,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_find_uncles() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); @@ -922,7 +922,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_small_fork() { let mut canon_chain = ChainGenerator::default(); let mut finalizer = BlockFinalizer::default(); diff --git a/ethcore/src/ethereum/ethash.rs 
b/ethcore/src/ethereum/ethash.rs index f9810b964..b0c0e4a9f 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -202,7 +202,7 @@ impl Engine for Ethash { } } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // to_ethash should take self impl Ethash { fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; diff --git a/ethcore/src/evm/interpreter.rs b/ethcore/src/evm/interpreter.rs index 7491321cb..fb8d19357 100644 --- a/ethcore/src/evm/interpreter.rs +++ b/ethcore/src/evm/interpreter.rs @@ -243,7 +243,7 @@ struct CodeReader<'a> { code: &'a Bytes } -#[cfg_attr(feature="dev", allow(len_without_is_empty))] +#[cfg_attr(all(nightly, feature="dev"), allow(len_without_is_empty))] impl<'a> CodeReader<'a> { /// Get `no_of_bytes` from code and convert to U256. Move PC fn read(&mut self, no_of_bytes: usize) -> U256 { @@ -258,7 +258,7 @@ impl<'a> CodeReader<'a> { } } -#[cfg_attr(feature="dev", allow(enum_variant_names))] +#[cfg_attr(all(nightly, feature="dev"), allow(enum_variant_names))] enum InstructionCost { Gas(U256), GasMem(U256, U256), @@ -347,7 +347,7 @@ impl evm::Evm for Interpreter { } impl Interpreter { - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn get_gas_cost_mem(&self, ext: &evm::Ext, instruction: Instruction, diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index 9d4dd3bc4..dc84a9a05 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -25,9 +25,8 @@ struct FakeLogEntry { } #[derive(PartialEq, Eq, Hash, Debug)] -#[cfg_attr(feature="dev", allow(enum_variant_names))] // Common prefix is C ;) enum FakeCallType { - CALL, CREATE + Call, Create } #[derive(PartialEq, Eq, Hash, Debug)] @@ -94,7 +93,7 @@ impl Ext for FakeExt { fn create(&mut self, gas: &U256, value: &U256, code: &[u8]) -> ContractCreateResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CREATE, + call_type: FakeCallType::Create, gas: *gas, sender_address: None, receive_address: None, @@ -115,7 +114,7 @@ impl Ext for FakeExt { _output: &mut [u8]) -> MessageCallResult { self.calls.insert(FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: *gas, sender_address: Some(sender_address.clone()), receive_address: Some(receive_address.clone()), @@ -909,7 +908,7 @@ fn test_calls(factory: super::Factory) { }; assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(code_address.clone()), @@ -918,7 +917,7 @@ fn test_calls(factory: super::Factory) { code_address: Some(code_address.clone()) }); assert_set_contains(&ext.calls, &FakeCall { - call_type: FakeCallType::CALL, + call_type: FakeCallType::Call, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(address.clone()), diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index beb8d62a1..a1f5763ea 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -188,7 +188,7 @@ impl<'a> Ext for Externalities<'a> { self.state.code(address).unwrap_or_else(|| vec![]) } - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] fn ret(&mut self, gas: &U256, data: &[u8]) -> Result 
{ match &mut self.output { &mut OutputPolicy::Return(BytesRef::Fixed(ref mut slice)) => unsafe { @@ -226,9 +226,9 @@ impl<'a> Ext for Externalities<'a> { fn log(&mut self, topics: Vec, data: &[u8]) { let address = self.origin_info.address.clone(); - self.substate.logs.push(LogEntry { + self.substate.logs.push(LogEntry { address: address, - topics: topics, + topics: topics, data: data.to_vec() }); } diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 938da02a0..469364eb3 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -15,16 +15,16 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy config // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore library //! diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 756d02407..33dca8de7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -115,12 +115,11 @@ impl IoHandler for ClientIoHandler { } } - #[cfg_attr(feature="dev", allow(match_ref_pats))] - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { - if let &UserMessage(ref message) = net_message { - match message { - &SyncMessage::BlockVerified => { + if let UserMessage(ref message) = *net_message { + match *message { + SyncMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }, _ => {}, // ignore other messages diff --git a/ethcore/src/spec.rs b/ethcore/src/spec.rs index 38a0dda53..774024351 100644 --- a/ethcore/src/spec.rs +++ b/ethcore/src/spec.rs @@ -99,7 +99,7 @@ pub struct Spec { genesis_state: PodState, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] // because to_engine(self) should be to_engine(&self) impl Spec { /// Convert this object into a boxed Engine of the right underlying type. // TODO avoid this hard-coded nastiness - use dynamic-linked plugin framework instead. 
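The change repeated across these hunks swaps every bare `feature="dev"` guard for `all(nightly, feature="dev")`, so clippy's `plugin`/`allow` attributes only take effect when the compiler can actually load the clippy plugin; the `nightly` cfg itself is emitted by the `rustc_version`-based build scripts added further down in this patch (`rpc/build.rs`, `sync/build.rs`, `util/build.rs`). A minimal sketch of how crate code can observe that cfg — the function and messages here are illustrative, not part of the patch:

```rust
// `nightly` is set via `println!("cargo:rustc-cfg=nightly")` in build.rs,
// so this cfg! is true only when the crate is built by a nightly compiler.
fn lint_status() -> &'static str {
    if cfg!(all(nightly, feature = "dev")) {
        "clippy plugin active"
    } else {
        "clippy attributes expand to nothing"
    }
}
```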
@@ -136,7 +136,7 @@ impl Spec { uncles_hash: RlpStream::new_list(0).out().sha3(), extra_data: self.extra_data.clone(), state_root: self.state_root().clone(), - receipts_root: self.receipts_root.clone(), + receipts_root: self.receipts_root.clone(), log_bloom: H2048::new().clone(), gas_used: self.gas_used.clone(), gas_limit: self.gas_limit.clone(), @@ -182,7 +182,7 @@ impl Spec { ) } }; - + self.parent_hash = H256::from_json(&genesis["parentHash"]); self.transactions_root = genesis.find("transactionsTrie").and_then(|_| Some(H256::from_json(&genesis["transactionsTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); self.receipts_root = genesis.find("receiptTrie").and_then(|_| Some(H256::from_json(&genesis["receiptTrie"]))).unwrap_or(SHA3_NULL_RLP.clone()); @@ -249,7 +249,7 @@ impl FromJson for Spec { ) } }; - + Spec { name: json.find("name").map_or("unknown", |j| j.as_string().unwrap()).to_owned(), engine_name: json["engineName"].as_string().unwrap().to_owned(), @@ -278,7 +278,7 @@ impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good(&self, db: &mut HashDB) -> bool { if !db.contains(&self.state_root()) { - let mut root = H256::new(); + let mut root = H256::new(); { let mut t = SecTrieDBMut::new(db, &mut root); for (address, account) in self.genesis_state.get().iter() { diff --git a/ethcore/src/state.rs b/ethcore/src/state.rs index c13678c38..7c1064abf 100644 --- a/ethcore/src/state.rs +++ b/ethcore/src/state.rs @@ -224,7 +224,7 @@ impl State { /// Commit accounts to SecTrieDBMut. This is similar to cpp-ethereum's dev::eth::commit. /// `accounts` is mutable because we may need to commit the code or storage and record that. - #[cfg_attr(feature="dev", allow(match_ref_pats))] + #[cfg_attr(all(nightly, feature="dev"), allow(match_ref_pats))] pub fn commit_into(db: &mut HashDB, root: &mut H256, accounts: &mut HashMap>) { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? diff --git a/ethcore/src/transaction.rs b/ethcore/src/transaction.rs index a51824494..733e5ac6b 100644 --- a/ethcore/src/transaction.rs +++ b/ethcore/src/transaction.rs @@ -80,7 +80,7 @@ impl Transaction { } impl FromJson for SignedTransaction { - #[cfg_attr(feature="dev", allow(single_char_pattern))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_char_pattern))] fn from_json(json: &Json) -> SignedTransaction { let t = Transaction { nonce: xjson!(&json["nonce"]), diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 260121989..fe1f406cc 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -17,9 +17,11 @@ pub mod verification; pub mod verifier; mod canon_verifier; +#[cfg(test)] mod noop_verifier; pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; +#[cfg(test)] pub use self::noop_verifier::NoopVerifier; diff --git a/hook.sh b/hook.sh index 106ffe4f0..354fddd5d 100755 --- a/hook.sh +++ b/hook.sh @@ -1,3 +1,3 @@ #!/bin/sh -echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev" > ./.git/hooks/pre-push +echo "#!/bin/sh\ncargo test -p ethash -p ethcore-util -p ethcore -p ethsync -p ethcore-rpc -p parity --features dev-clippy" > ./.git/hooks/pre-push chmod +x ./.git/hooks/pre-push diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..4055fcf46 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -17,8 +17,8 @@ //! 
Ethcore client application. #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] extern crate docopt; extern crate rustc_serialize; extern crate ethcore_util as util; @@ -246,7 +246,7 @@ impl Configuration { } } - #[cfg_attr(feature="dev", allow(useless_format))] + #[cfg_attr(all(nightly, feature="dev"), allow(useless_format))] fn net_addresses(&self) -> (Option, Option) { let mut listen_address = None; let mut public_address = None; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..07c0eb85d 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -26,8 +26,9 @@ serde_macros = { version = "0.7.0", optional = true } [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } syntex = "0.29.0" +rustc_version = "0.1" [features] default = ["serde_codegen"] nightly = ["serde_macros"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"] +dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev"] diff --git a/rpc/build.rs b/rpc/build.rs index b5adeaba1..3806f6fe5 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -1,3 +1,23 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + #[cfg(not(feature = "serde_macros"))] mod inner { extern crate syntex; @@ -26,4 +46,7 @@ mod inner { fn main() { inner::main(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/sync/Cargo.toml b/sync/Cargo.toml index f10a772e3..fd4b9c46f 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -4,9 +4,13 @@ name = "ethsync" version = "0.9.99" license = "GPL-3.0" authors = ["Ethcore . + +extern crate rustc_version; + +use rustc_version::{version_meta, Channel}; + +fn main() { + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } +} diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 63640f87f..0feae01b0 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -268,7 +268,7 @@ impl ChainSync { } - #[cfg_attr(feature="dev", allow(for_kv_map))] // Because it's not possible to get `values_mut()` + #[cfg_attr(all(nightly, feature="dev"), allow(for_kv_map))] // Because it's not possible to get `values_mut()` /// Rest sync. 
Clear all downloaded data but keep the queue fn reset(&mut self) { self.downloading_headers.clear(); @@ -335,7 +335,7 @@ impl ChainSync { Ok(()) } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Called by peer once it has new block headers during sync fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); @@ -462,6 +462,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); @@ -484,7 +485,7 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if self.current_base_block() < header.number { + if self.current_base_block() < header.number { self.last_imported_block = Some(header.number); self.remove_downloaded_blocks(header.number); } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..3ce33e31f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -15,11 +15,11 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Blockchain sync module //! Implements ethereum protocol version 63 as specified here: diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..dad732fe8 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -207,7 +207,7 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } #[test] -#[cfg_attr(feature="dev", allow(cyclomatic_complexity))] +#[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn test_range() { use std::cmp::{Ordering}; diff --git a/util/Cargo.toml b/util/Cargo.toml index 9c5cb3fe3..0ce27ec2b 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -40,7 +40,7 @@ chrono = "0.2" [features] default = [] -dev = ["clippy"] +dev = [] [build-dependencies] vergen = "*" diff --git a/util/bigint/src/uint.rs b/util/bigint/src/uint.rs index bd57e9d6d..959df0944 100644 --- a/util/bigint/src/uint.rs +++ b/util/bigint/src/uint.rs @@ -1103,7 +1103,7 @@ macro_rules! construct_uint { } } - #[cfg_attr(feature="dev", allow(derive_hash_xor_eq))] // We are pretty sure it's ok. + #[cfg_attr(all(nightly, feature="dev"), allow(derive_hash_xor_eq))] // We are pretty sure it's ok. 
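The `derive_hash_xor_eq` lint fires when `Hash` and equality for a type do not come from the same mechanism (here: a hand-written `Hash` next to derived equality), because the two must stay consistent — `a == b` has to imply `hash(a) == hash(b)`. The manual impl below hashes exactly the limbs that equality compares, which is why the `allow` is justified ("We are pretty sure it's ok."). The property itself, as a standalone check (a sketch using today's `std` `DefaultHasher`; any `Hasher` works):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn hash_of<T: Hash>(t: &T) -> u64 {
    let mut s = DefaultHasher::new();
    t.hash(&mut s);
    s.finish()
}

// The invariant the allow() relies on: a == b must imply hash(a) == hash(b).
fn hash_eq_consistent<T: Hash + PartialEq>(a: &T, b: &T) -> bool {
    a != b || hash_of(a) == hash_of(b)
}
```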
impl Hash for $name { fn hash(&self, state: &mut H) where H: Hasher { unsafe { state.write(::std::slice::from_raw_parts(self.0.as_ptr() as *mut u8, self.0.len() * 8)); } @@ -1485,7 +1485,7 @@ mod tests { } #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] pub fn uint256_comp_test() { let small = U256([10u64, 0, 0, 0]); let big = U256([0x8C8C3EE70C644118u64, 0x0209E7378231E632, 0, 0]); @@ -2032,7 +2032,7 @@ mod tests { #[test] - #[cfg_attr(feature = "dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] fn u256_multi_full_mul() { let result = U256([0, 0, 0, 0]).full_mul(U256([0, 0, 0, 0])); assert_eq!(U512([0, 0, 0, 0, 0, 0, 0, 0]), result); diff --git a/util/build.rs b/util/build.rs index eed080e29..0b9b233e0 100644 --- a/util/build.rs +++ b/util/build.rs @@ -1,7 +1,28 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate rustc_version; extern crate vergen; use vergen::*; +use rustc_version::{version_meta, Channel}; fn main() { vergen(OutputFns::all()).unwrap(); + if let Channel::Nightly = version_meta().channel { + println!("cargo:rustc-cfg=nightly"); + } } diff --git a/util/src/hash.rs b/util/src/hash.rs index 73fa33b47..4eb96b53e 100644 --- a/util/src/hash.rs +++ b/util/src/hash.rs @@ -305,7 +305,7 @@ macro_rules! impl_hash { } impl Copy for $from {} - #[cfg_attr(feature="dev", allow(expl_impl_clone_on_copy))] + #[cfg_attr(all(nightly, feature="dev"), allow(expl_impl_clone_on_copy))] impl Clone for $from { fn clone(&self) -> $from { unsafe { @@ -637,7 +637,7 @@ mod tests { use std::str::FromStr; #[test] - #[cfg_attr(feature="dev", allow(eq_op))] + #[cfg_attr(all(nightly, feature="dev"), allow(eq_op))] fn hash() { let h = H64([0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef]); assert_eq!(H64::from_str("0123456789abcdef").unwrap(), h); diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 57af857a9..3228f2201 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -27,7 +27,7 @@ use std::env; /// Implementation of the HashDB trait for a disk-backed database with a memory overlay /// and latent-removal semantics. /// -/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to +/// Like OverlayDB, there is a memory overlay; `commit()` must be called in order to /// write operations out to disk. Unlike OverlayDB, `remove()` operations do not take effect /// immediately. Rather some age (based on a linear but arbitrary metric) must pass before /// the removals actually take effect. @@ -158,7 +158,7 @@ impl JournalDB { backing.get(&Self::morph_key(key, 0)).expect("Low-level database error. 
Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &Vec<(H256, Bytes)>, backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, counters: &mut HashMap, batch: &DBTransaction) { for &(ref h, ref d) in inserts { if let Some(c) = counters.get_mut(h) { // already counting. increment. @@ -181,7 +181,7 @@ impl JournalDB { } } - fn replay_keys(inserts: &Vec, backing: &Database, counters: &mut HashMap) { + fn replay_keys(inserts: &[H256], backing: &Database, counters: &mut HashMap) { println!("replay_keys: inserts={:?}, counters={:?}", inserts, counters); for h in inserts { if let Some(c) = counters.get_mut(h) { @@ -211,12 +211,12 @@ impl JournalDB { n = Some(*c); } } - match &n { - &Some(i) if i == 1 => { + match n { + Some(i) if i == 1 => { counters.remove(&h); Self::reset_already_in(batch, &h); } - &None => { + None => { // Gets removed when moving from 1 to 0 additional refs. Should never be here at 0 additional refs. //assert!(!Self::is_already_in(db, &h)); batch.delete(&h.bytes()).expect("Low-level database error. Some issue with your hard disk?"); @@ -229,7 +229,7 @@ impl JournalDB { /// Commit all recent insert operations and historical removals from the old era /// to the backing database. fn commit_with_counters(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - // journal format: + // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, n] => [ ... ] @@ -242,12 +242,12 @@ impl JournalDB { // By the time comes to remove a tuple from the queue (i.e. then the era passes from recent history // into ancient history) then only one commit from the tuple is considered canonical. This commit // is kept in the main backing database, whereas any others from the same era are reverted. - // + // // It is possible that a key, properly available in the backing database be deleted and re-inserted // in the recent history queue, yet have both operations in commits that are eventually non-canonical. // To avoid the original, and still required, key from being deleted, we maintain a reference count // which includes an original key, if any. - // + // // The semantics of the `counter` are: // insert key k: // counter already contains k: count += 1 @@ -255,7 +255,7 @@ impl JournalDB { // backing db contains k: count = 1 // backing db doesn't contain k: insert into backing db, count = 0 // delete key k: - // counter contains k (count is asserted to be non-zero): + // counter contains k (count is asserted to be non-zero): // count > 1: counter -= 1 // count == 1: remove counter // count == 0: remove key from backing db @@ -274,7 +274,7 @@ impl JournalDB { // // record new commit's details. 
- trace!("commit: #{} ({}), end era: {:?}", now, id, end); + trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut counters = self.counters.as_ref().unwrap().write().unwrap(); let batch = DBTransaction::new(); { @@ -295,7 +295,7 @@ impl JournalDB { let drained = self.overlay.drain(); let removes: Vec = drained .iter() - .filter_map(|(ref k, &(_, ref c))| if *c < 0 {Some(k.clone())} else {None}).cloned() + .filter_map(|(k, &(_, c))| if c < 0 {Some(k.clone())} else {None}) .collect(); let inserts: Vec<(H256, Bytes)> = drained .into_iter() @@ -382,12 +382,15 @@ impl JournalDB { /// Returns heap memory size used pub fn mem_used(&self) -> usize { - self.overlay.mem_used() + match &self.counters { &Some(ref c) => c.read().unwrap().heap_size_of_children(), &None => 0 } + self.overlay.mem_used() + match self.counters { + Some(ref c) => c.read().unwrap().heap_size_of_children(), + None => 0 + } } } impl HashDB for JournalDB { - fn keys(&self) -> HashMap { + fn keys(&self) -> HashMap { let mut ret: HashMap = HashMap::new(); for (key, _) in self.backing.iter() { let h = H256::from_slice(key.deref()); @@ -401,7 +404,7 @@ impl HashDB for JournalDB { ret } - fn lookup(&self, key: &H256) -> Option<&[u8]> { + fn lookup(&self, key: &H256) -> Option<&[u8]> { let k = self.overlay.raw(key); match k { Some(&(ref d, rc)) if rc > 0 => Some(d), @@ -416,18 +419,18 @@ impl HashDB for JournalDB { } } - fn exists(&self, key: &H256) -> bool { + fn exists(&self, key: &H256) -> bool { self.lookup(key).is_some() } - fn insert(&mut self, value: &[u8]) -> H256 { + fn insert(&mut self, value: &[u8]) -> H256 { self.overlay.insert(value) } fn emplace(&mut self, key: H256, value: Bytes) { - self.overlay.emplace(key, value); + self.overlay.emplace(key, value); } - fn kill(&mut self, key: &H256) { - self.overlay.kill(key); + fn kill(&mut self, key: &H256) { + self.overlay.kill(key); } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 43a9fc532..a2fa2215a 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -55,8 +55,7 @@ pub struct DatabaseIterator<'a> { impl<'a> Iterator for DatabaseIterator<'a> { type Item = (Box<[u8]>, Box<[u8]>); - #[cfg_attr(feature="dev", allow(type_complexity))] - fn next(&mut self) -> Option<(Box<[u8]>, Box<[u8]>)> { + fn next(&mut self) -> Option { self.iter.next() } } diff --git a/util/src/lib.rs b/util/src/lib.rs index a50ba8da4..59d66a325 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -15,18 +15,18 @@ // along with Parity. If not, see . #![warn(missing_docs)] -#![cfg_attr(feature="dev", feature(plugin))] -#![cfg_attr(feature="dev", plugin(clippy))] +#![cfg_attr(all(nightly, feature="dev"), feature(plugin))] +#![cfg_attr(all(nightly, feature="dev"), plugin(clippy))] // Clippy settings // TODO [todr] not really sure -#![cfg_attr(feature="dev", allow(needless_range_loop))] +#![cfg_attr(all(nightly, feature="dev"), allow(needless_range_loop))] // Shorter than if-else -#![cfg_attr(feature="dev", allow(match_bool))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_bool))] // We use that to be more explicit about handled cases -#![cfg_attr(feature="dev", allow(match_same_arms))] +#![cfg_attr(all(nightly, feature="dev"), allow(match_same_arms))] // Keeps consistency (all lines with `.clone()`) and helpful when changing ref to non-ref. -#![cfg_attr(feature="dev", allow(clone_on_copy))] +#![cfg_attr(all(nightly, feature="dev"), allow(clone_on_copy))] //! Ethcore-util library //! 
diff --git a/util/src/network/discovery.rs b/util/src/network/discovery.rs index e52d5d25f..644af22af 100644 --- a/util/src/network/discovery.rs +++ b/util/src/network/discovery.rs @@ -113,14 +113,14 @@ impl Discovery { } /// Add a new node to discovery table. Pings the node. - pub fn add_node(&mut self, e: NodeEntry) { + pub fn add_node(&mut self, e: NodeEntry) { let endpoint = e.endpoint.clone(); self.update_node(e); self.ping(&endpoint); } /// Add a list of known nodes to the table. - pub fn init_node_list(&mut self, mut nodes: Vec) { + pub fn init_node_list(&mut self, mut nodes: Vec) { for n in nodes.drain(..) { self.update_node(n); } @@ -243,7 +243,7 @@ impl Discovery { self.send_to(packet, address.clone()); } - #[cfg_attr(feature="dev", allow(map_clone))] + #[cfg_attr(all(nightly, feature="dev"), allow(map_clone))] fn nearest_node_entries(target: &NodeId, buckets: &[NodeBucket]) -> Vec { let mut found: BTreeMap> = BTreeMap::new(); let mut count = 0; @@ -251,7 +251,7 @@ impl Discovery { // Sort nodes by distance to target for bucket in buckets { for node in &bucket.nodes { - let distance = Discovery::distance(target, &node.address.id); + let distance = Discovery::distance(target, &node.address.id); found.entry(distance).or_insert_with(Vec::new).push(&node.address); if count == BUCKET_SIZE { // delete the most distant element @@ -310,7 +310,7 @@ impl Discovery { None }), Ok(_) => None, - Err(e) => { + Err(e) => { warn!("Error reading UPD socket: {:?}", e); None } @@ -339,7 +339,7 @@ impl Discovery { PACKET_PONG => self.on_pong(&rlp, &node_id, &from), PACKET_FIND_NODE => self.on_find_node(&rlp, &node_id, &from), PACKET_NEIGHBOURS => self.on_neighbours(&rlp, &node_id, &from), - _ => { + _ => { debug!("Unknown UDP packet: {}", packet_id); Ok(None) } @@ -367,14 +367,14 @@ impl Discovery { } else { self.update_node(entry.clone()); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); } let hash = rlp.as_raw().sha3(); let mut response = RlpStream::new_list(2); dest.to_rlp_list(&mut response); response.append(&hash); self.send_packet(PACKET_PONG, from, &response.drain()); - + Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() })) } @@ -391,7 +391,7 @@ impl Discovery { } self.clear_ping(node); let mut added_map = HashMap::new(); - added_map.insert(node.clone(), entry); + added_map.insert(node.clone(), entry); Ok(None) } @@ -466,8 +466,8 @@ impl Discovery { pub fn round(&mut self) -> Option { let removed = self.check_expired(false); self.discover(); - if !removed.is_empty() { - Some(TableUpdates { added: HashMap::new(), removed: removed }) + if !removed.is_empty() { + Some(TableUpdates { added: HashMap::new(), removed: removed }) } else { None } } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index ece24a1d1..2d1af55ba 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -507,7 +507,7 @@ impl Host where Message: Send + Sync + Clone { debug!(target: "network", "Connecting peers: {} sessions, {} pending", self.session_count(), self.handshake_count()); } - #[cfg_attr(feature="dev", allow(single_match))] + #[cfg_attr(all(nightly, feature="dev"), allow(single_match))] fn connect_peer(&self, id: &NodeId, io: &IoContext>) { if self.have_session(id) { @@ -542,7 +542,7 @@ impl Host where Message: Send + Sync + Clone { self.create_connection(socket, Some(id), io); } - #[cfg_attr(feature="dev", allow(block_in_if_condition_stmt))] + #[cfg_attr(all(nightly, feature="dev"), allow(block_in_if_condition_stmt))] fn 
create_connection(&self, socket: TcpStream, id: Option<&NodeId>, io: &IoContext>) { let nonce = self.info.write().unwrap().next_nonce(); let mut handshakes = self.handshakes.write().unwrap(); diff --git a/util/src/panics.rs b/util/src/panics.rs index 05d266b8b..70ce0bc33 100644 --- a/util/src/panics.rs +++ b/util/src/panics.rs @@ -71,7 +71,7 @@ impl PanicHandler { /// Invoke closure and catch any possible panics. /// In case of panic notifies all listeners about it. - #[cfg_attr(feature="dev", allow(deprecated))] + #[cfg_attr(all(nightly, feature="dev"), allow(deprecated))] pub fn catch_panic(&self, g: G) -> thread::Result where G: FnOnce() -> R + Send + 'static { let _guard = PanicGuard { handler: self }; let result = g(); diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index c4b5e120c..182b87063 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -22,7 +22,7 @@ use super::trietraits::*; use super::node::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -54,7 +54,7 @@ pub struct TrieDB<'db> { pub hash_count: usize, } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDB<'db> { /// Create a new trie with the backing database `db` and `root` /// Panics, if `root` does not exist @@ -63,16 +63,16 @@ impl<'db> TrieDB<'db> { flushln!("TrieDB::new({}): Trie root not found!", root); panic!("Trie root not found!"); } - TrieDB { - db: db, + TrieDB { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -142,7 +142,7 @@ impl<'db> TrieDB<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -358,7 +358,7 @@ impl<'db> fmt::Debug for TrieDB<'db> { fn iterator() { use memorydb::*; use super::triedbmut::*; - + let d = vec![ &b"A"[..], &b"AA"[..], &b"AB"[..], &b"B"[..] ]; let mut memdb = MemoryDB::new(); diff --git a/util/src/trie/triedbmut.rs b/util/src/trie/triedbmut.rs index 829c1e518..3d5c366e5 100644 --- a/util/src/trie/triedbmut.rs +++ b/util/src/trie/triedbmut.rs @@ -23,7 +23,7 @@ use super::journal::*; use super::trietraits::*; /// A `Trie` implementation using a generic `HashDB` backing database. -/// +/// /// Use it as a `Trie` trait object. You can use `db()` to get the backing database object, `keys` /// to get the keys belonging to the trie in the backing database, and `db_items_remaining()` to get /// which items in the backing database do not belong to this trie. If this is the only trie in the @@ -66,21 +66,21 @@ enum MaybeChanged<'a> { Changed(Bytes), } -#[cfg_attr(feature="dev", allow(wrong_self_convention))] +#[cfg_attr(all(nightly, feature="dev"), allow(wrong_self_convention))] impl<'db> TrieDBMut<'db> { /// Create a new trie with the backing database `db` and empty `root` /// Initialise to the state entailed by the genesis block. /// This guarantees the trie is built correctly. 
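For orientation, here is the constructor touched in the next hunk in use, mirroring the `iterator()` test setup shown earlier; the `insert`/`contains` calls assume the `TrieMut`/`Trie` trait methods of this vintage of the crate:

```rust
use memorydb::*;
use super::triedbmut::*;

let mut memdb = MemoryDB::new();
let mut root = H256::new();
let mut t = TrieDBMut::new(&mut memdb, &mut root);
t.insert(b"foo", b"bar");      // updates *root in place
assert!(t.contains(b"foo"));
```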
- pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { + pub fn new(db: &'db mut HashDB, root: &'db mut H256) -> Self { let mut r = TrieDBMut{ - db: db, + db: db, root: root, - hash_count: 0 - }; + hash_count: 0 + }; // set root rlp - *r.root = SHA3_NULL_RLP.clone(); - r + *r.root = SHA3_NULL_RLP.clone(); + r } /// Create a new trie with the backing database `db` and `root`. @@ -91,21 +91,21 @@ impl<'db> TrieDBMut<'db> { flushln!("Trie root not found {}", root); panic!("Trie root not found!"); } - TrieDBMut { - db: db, + TrieDBMut { + db: db, root: root, - hash_count: 0 + hash_count: 0 } } /// Get the backing database. - pub fn db(&'db self) -> &'db HashDB { - self.db + pub fn db(&'db self) -> &'db HashDB { + self.db } /// Get the backing database. - pub fn db_mut(&'db mut self) -> &'db mut HashDB { - self.db + pub fn db_mut(&'db mut self) -> &'db mut HashDB { + self.db } /// Determine all the keys in the backing database that belong to the trie. @@ -184,7 +184,7 @@ impl<'db> TrieDBMut<'db> { /// Indentation helper for `formal_all`. fn fmt_indent(&self, f: &mut fmt::Formatter, size: usize) -> fmt::Result { - for _ in 0..size { + for _ in 0..size { try!(write!(f, " ")); } Ok(()) @@ -350,7 +350,7 @@ impl<'db> TrieDBMut<'db> { } } - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] + #[cfg_attr(all(nightly, feature="dev"), allow(cyclomatic_complexity))] /// Determine the RLP of the node, assuming we're inserting `partial` into the /// node currently of data `old`. This will *not* delete any hash of `old` from the database; /// it will just return the new RLP that includes the new node. @@ -378,7 +378,7 @@ impl<'db> TrieDBMut<'db> { // original had empty slot - place a leaf there. true if old_rlp.at(i).is_empty() => journal.new_node(Self::compose_leaf(&partial.mid(1), value), &mut s), // original has something there already; augment. - true => { + true => { let new = self.augmented(self.take_node(&old_rlp.at(i), journal), &partial.mid(1), value, journal); journal.new_node(new, &mut s); } From b61c0397bc476ca2253f43724646fc703eb0fdb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:36:38 +0100 Subject: [PATCH 34/61] removing unused variable --- util/src/journaldb.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index 378bc2de5..35ad83fa0 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -617,7 +617,6 @@ mod tests { fn reopen_remove() { let mut dir = ::std::env::temp_dir(); dir.push(H32::random().hex()); - let bar = H256::random(); let foo = { let mut jdb = JournalDB::new(dir.to_str().unwrap()); From ab42ec8c81bc91f82b3b81d404e5da2e81382aff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 7 Mar 2016 14:40:39 +0100 Subject: [PATCH 35/61] Removing unneeded lifetime --- ethcore/src/blockchain/generator/generator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index 51e6294fc..88c9577e2 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -29,7 +29,7 @@ pub trait ChainIterator: Iterator + Sized { /// Blocks generated by fork will have lower difficulty than current chain. fn fork(&self, fork_number: usize) -> Fork where Self: Clone; /// Should be called to make every consecutive block have given bloom. 
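Patch 35's hunk below drops an explicit `'a` from `with_bloom`: with `&mut self` as the only reference input, Rust's elision rules already tie the returned `Bloom`'s lifetime to `self`, so the annotation adds nothing. An equivalence sketch with stand-in types (`u64` in place of `H2048`; not the patch's own code):

```rust
struct Chain;
struct Bloom<'a, I: 'a> { iter: &'a mut I, bloom: u64 }

impl Chain {
    // Explicit form (before patch 35):
    fn with_bloom_explicit<'a>(&'a mut self, bloom: u64) -> Bloom<'a, Chain> {
        Bloom { iter: self, bloom: bloom }
    }
    // Elided form (after): the output lifetime is inferred from &mut self.
    fn with_bloom(&mut self, bloom: u64) -> Bloom<Chain> {
        Bloom { iter: self, bloom: bloom }
    }
}
```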
- fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self>; + fn with_bloom(&mut self, bloom: H2048) -> Bloom; /// Should be called to complete block. Without complete, block may have incorrect hash. fn complete<'a>(&'a mut self, finalizer: &'a mut BlockFinalizer) -> Complete<'a, Self>; /// Completes and generates block. @@ -44,7 +44,7 @@ impl ChainIterator for I where I: Iterator + Sized { } } - fn with_bloom<'a>(&'a mut self, bloom: H2048) -> Bloom<'a, Self> { + fn with_bloom(&mut self, bloom: H2048) -> Bloom { Bloom { iter: self, bloom: bloom From 655bb0ed5db4eb014d959b03b3edc43cfb5ebf4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 12:36:06 +0100 Subject: [PATCH 36/61] Additional documentation for transaction queue --- sync/src/lib.rs | 1 + sync/src/transaction_queue.rs | 103 +++++++++++++++++++++++++++++++++- 2 files changed, 102 insertions(+), 2 deletions(-) diff --git a/sync/src/lib.rs b/sync/src/lib.rs index b5869642c..39a06af8f 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -72,6 +72,7 @@ mod chain; mod io; mod range_collection; mod transaction_queue; +pub use transaction_queue::TransactionQueue; #[cfg(test)] mod tests; diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..8270c6e27 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -17,6 +17,67 @@ // TODO [todr] - own transactions should have higher priority //! Transaction Queue +//! +//! TransactionQueue keeps track of all transactions seen by the node (received from other peers) and own transactions +//! and orders them by priority. Top priority transactions are those with low nonce height (difference between +//! transaction's nonce and next nonce expected from this sender). If nonces are equal transaction's gas price is used +//! for comparison (higher gas price = higher priority). +//! +//! # Usage Example +//! +//! ```rust +//! extern crate ethcore_util as util; +//! extern crate ethcore; +//! extern crate ethsync; +//! extern crate rustc_serialize; +//! +//! use util::crypto::KeyPair; +//! use util::hash::Address; +//! use util::numbers::{Uint, U256}; +//! use ethsync::TransactionQueue; +//! use ethcore::transaction::*; +//! use rustc_serialize::hex::FromHex; +//! +//! fn main() { +//! let key = KeyPair::create().unwrap(); +//! let t1 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), +//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(10) }; +//! let t2 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), +//! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(11) }; +//! +//! let st1 = t1.sign(&key.secret()); +//! let st2 = t2.sign(&key.secret()); +//! let default_nonce = |_a: &Address| U256::from(10); +//! +//! let mut txq = TransactionQueue::new(); +//! txq.add(st2.clone(), &default_nonce); +//! txq.add(st1.clone(), &default_nonce); +//! +//! // Check status +//! assert_eq!(txq.status().pending, 2); +//! // Check top transactions +//! let top = txq.top_transactions(3); +//! assert_eq!(top.len(), 2); +//! assert_eq!(top[0], st1); +//! assert_eq!(top[1], st2); +//! +//! // And when transaction is removed (but nonce haven't changed) +//! // it will move invalid transactions to future +//! txq.remove(&st1.hash(), &default_nonce); +//! assert_eq!(txq.status().pending, 0); +//! assert_eq!(txq.status().future, 1); +//! 
assert_eq!(txq.top_transactions(3).len(), 0); +//! } +//! +//! +//! # Maintaing valid state +//! +//! 1. Whenever transaction is imported to queue (to queue) all other transactions from this sender are revalidated in current. It means that they are moved to future and back again (height recalculation & gap filling). +//! 2. Whenever transaction is removed: +//! - When it's removed from `future` - all `future` transactions heights are recalculated and then +//! we check if the transactions should go to `current` (comparing state nonce) +//! - When it's removed from `current` - all transactions from this sender (`current` & `future`) are recalculated. +//! use std::cmp::{Ordering}; use std::collections::{HashMap, BTreeSet}; @@ -27,9 +88,16 @@ use ethcore::transaction::*; #[derive(Clone, Debug)] +/// Light structure used to identify transaction and it's order struct TransactionOrder { + /// Primary ordering factory. Difference between transaction nonce and expected nonce in state + /// (e.g. Tx(nonce:5), State(nonce:0) -> height: 5) + /// High nonce_height = Low priority (processed later) nonce_height: U256, + /// Gas Price of the transaction. + /// Low gas price = Low priority (processed later) gas_price: U256, + /// Hash to identify associated transaction hash: H256, } @@ -70,7 +138,7 @@ impl Ord for TransactionOrder { let a_gas = self.gas_price; let b_gas = b.gas_price; if a_gas != b_gas { - return a_gas.cmp(&b_gas); + return b_gas.cmp(&a_gas); } // Compare hashes @@ -78,6 +146,7 @@ impl Ord for TransactionOrder { } } +/// Verified transaction (with sender) struct VerifiedTransaction { transaction: SignedTransaction } @@ -101,6 +170,11 @@ impl VerifiedTransaction { } } +/// Holds transactions accessible by (address, nonce) and by priority +/// +/// TransactionSet keeps number of entries below limit, but it doesn't +/// automatically happen during `insert/remove` operations. +/// You have to call `enforce_limit` to remove lowest priority transactions from set. struct TransactionSet { by_priority: BTreeSet, by_address: Table, @@ -108,11 +182,15 @@ struct TransactionSet { } impl TransactionSet { + /// Inserts `TransactionOrder` to this set fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option { self.by_priority.insert(order.clone()); self.by_address.insert(sender, nonce, order) } + /// Remove low priority transactions if there is more then specified by given `limit`. + /// + /// It drops transactions from this set but also removes associated `VerifiedTransaction`. fn enforce_limit(&mut self, by_hash: &mut HashMap) { let len = self.by_priority.len(); if len <= self.limit { @@ -134,6 +212,7 @@ impl TransactionSet { } } + /// Drop transaction from this set (remove from `by_priority` and `by_address`) fn drop(&mut self, sender: &Address, nonce: &U256) -> Option { if let Some(tx_order) = self.by_address.remove(sender, nonce) { self.by_priority.remove(&tx_order); @@ -142,6 +221,7 @@ impl TransactionSet { None } + /// Drop all transactions. 
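Note the one behavioural fix buried in this hunk: the gas-price tie-break flips from `a_gas.cmp(&b_gas)` to `b_gas.cmp(&a_gas)`, so a *higher* gas price now sorts *earlier*, as the module docs promise ("higher gas price = higher priority"). Condensed into a standalone comparator with illustrative `u64` fields:

```rust
use std::cmp::Ordering;

// (nonce_height, gas_price) — lower height first, then higher gas price first.
fn order(a: (u64, u64), b: (u64, u64)) -> Ordering {
    if a.0 != b.0 {
        return a.0.cmp(&b.0); // ascending nonce height
    }
    b.1.cmp(&a.1)             // descending gas price (the fix above)
}
```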
fn clear(&mut self) { self.by_priority.clear(); self.by_address.clear(); @@ -260,6 +340,8 @@ impl TransactionQueue { // We will either move transaction to future or remove it completely // so there will be no transactions from this sender in current self.last_nonces.remove(&sender); + // First update height of transactions in future to avoid collisions + self.update_future(&sender, current_nonce); // This should move all current transactions to future and remove old transactions self.move_all_to_future(&sender, current_nonce); // And now lets check if there is some chain of transactions in future @@ -269,6 +351,7 @@ impl TransactionQueue { } } + /// Update height of all transactions in future transactions set. fn update_future(&mut self, sender: &Address, current_nonce: U256) { // We need to drain all transactions for current sender from future and reinsert them with updated height let all_nonces_from_sender = match self.future.by_address.row(&sender) { @@ -281,6 +364,8 @@ impl TransactionQueue { } } + /// Drop all transactions from given sender from `current`. + /// Either moves them to `future` or removes them from queue completely. fn move_all_to_future(&mut self, sender: &Address, current_nonce: U256) { let all_nonces_from_sender = match self.current.by_address.row(&sender) { Some(row_map) => row_map.keys().cloned().collect::>(), @@ -300,7 +385,7 @@ impl TransactionQueue { } - /// Returns top transactions from the queue + /// Returns top transactions from the queue ordered by priority. pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority .iter() @@ -318,6 +403,8 @@ impl TransactionQueue { self.last_nonces.clear(); } + /// Checks if there are any transactions in `future` that should actually be promoted to `current` + /// (because nonce matches). fn move_matching_future_to_current(&mut self, address: Address, mut current_nonce: U256, first_nonce: U256) { { let by_nonce = self.future.by_address.row_mut(&address); @@ -339,6 +426,14 @@ impl TransactionQueue { self.last_nonces.insert(address, current_nonce - U256::one()); } + /// Adds VerifiedTransaction to this queue. + /// + /// Determines if it should be placed in current or future. When transaction is + /// imported to `current` also checks if there are any `future` transactions that should be promoted because of + /// this. + /// + /// It ignores transactions that has already been imported (same `hash`) and replaces the transaction + /// iff `(address, nonce)` is the same but `gas_price` is higher. fn import_tx(&mut self, tx: VerifiedTransaction, fetch_nonce: &T) where T: Fn(&Address) -> U256 { @@ -377,6 +472,10 @@ impl TransactionQueue { self.current.enforce_limit(&mut self.by_hash); } + /// Replaces transaction in given set (could be `future` or `current`). + /// + /// If there is already transaction with same `(sender, nonce)` it will be replaced iff `gas_price` is higher. + /// One of the transactions is dropped from set and also removed from queue entirely (from `by_hash`). 
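Taken together, the methods documented above give `remove` a fixed pipeline; here is the module doc-test scenario traced through it (`st1`/`st2` as in that example, with the sender's state nonce unchanged at 10):

```rust
// state before: current = [st1 (nonce 10), st2 (nonce 11)], future = []
// remove(st1.hash()):
//   1. update_future                     – recompute heights already in `future` (none yet)
//   2. move_all_to_future                – st2 can no longer execute (gap at 10) => future
//   3. move_matching_future_to_current   – state nonce is still 10, nothing matches
// state after: pending = 0, future = [st2]   // matches the doc test's asserts
```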
fn replace_transaction(tx: VerifiedTransaction, base_nonce: U256, set: &mut TransactionSet, by_hash: &mut HashMap) { let order = TransactionOrder::for_transaction(&tx, base_nonce); let hash = tx.hash(); From 799d3bd2c8e15e71a027caf7e8c836b578163161 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 12:42:32 +0100 Subject: [PATCH 37/61] Fixing doc test for queue --- sync/src/transaction_queue.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 8270c6e27..4b4a6226b 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -68,7 +68,7 @@ //! assert_eq!(txq.status().future, 1); //! assert_eq!(txq.top_transactions(3).len(), 0); //! } -//! +//! ``` //! //! # Maintaing valid state //! From b2fc077f8c6a8a28ca0f14e975107b7431887f21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 8 Mar 2016 16:42:30 +0100 Subject: [PATCH 38/61] Fixing CLI parameters --- parity/main.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 43b0504f1..ceb58e31e 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -79,6 +79,7 @@ Protocol Options: --networkid INDEX Override the network identifier from the chain we are on. --archive Client should not prune the state/storage trie. -d --datadir PATH Specify the database & configuration directory path [default: $HOME/.parity] + --keys-path PATH Specify the path for JSON key files to be found [default: $HOME/.web3/keys] --identity NAME Specify your node's name. Networking Options: @@ -113,7 +114,7 @@ Memory Footprint Options: --cache-pref-size BYTES Specify the prefered size of the blockchain cache in bytes [default: 16384]. --cache-max-size BYTES Specify the maximum size of the blockchain cache in bytes [default: 262144]. --queue-max-size BYTES Specify the maximum size of memory to use for block queue [default: 52428800]. - --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with + --cache MEGABYTES Set total amount of cache to use for the entire system, mutually exclusive with other cache options (geth-compatible). 
Miscellaneous Options: @@ -129,7 +130,7 @@ struct Args { arg_enode: Vec, flag_chain: String, flag_testnet: bool, - flag_db_path: String, + flag_datadir: String, flag_networkid: Option, flag_identity: String, flag_cache: Option, @@ -238,7 +239,7 @@ impl Configuration { } fn path(&self) -> String { - self.args.flag_db_path.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) + self.args.flag_datadir.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()) } fn author(&self) -> Address { From f84d40734d372aa2338df55dc0ad44c4d97650b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 9 Mar 2016 10:26:51 +0100 Subject: [PATCH 39/61] Validating sender before importing to queuue --- sync/src/chain.rs | 4 +- sync/src/transaction_queue.rs | 140 +++++++++++++++++++++------------- 2 files changed, 87 insertions(+), 57 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index a41b06904..8cf1beea1 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -935,7 +935,7 @@ impl ChainSync { let mut transaction_queue = self.transaction_queue.lock().unwrap(); for i in 0..item_count { let tx: SignedTransaction = try!(r.val_at(i)); - transaction_queue.add(tx, &fetch_latest_nonce); + let _ = transaction_queue.add(tx, &fetch_latest_nonce); } Ok(()) } @@ -1291,7 +1291,7 @@ impl ChainSync { let _sender = tx.sender(); } let mut transaction_queue = self.transaction_queue.lock().unwrap(); - transaction_queue.add_all(txs, |a| chain.nonce(a)); + let _ = transaction_queue.add_all(txs, |a| chain.nonce(a)); }); } diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..39ad29894 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -24,6 +24,7 @@ use util::numbers::{Uint, U256}; use util::hash::{Address, H256}; use util::table::*; use ethcore::transaction::*; +use ethcore::error::Error; #[derive(Clone, Debug)] @@ -82,10 +83,11 @@ struct VerifiedTransaction { transaction: SignedTransaction } impl VerifiedTransaction { - fn new(transaction: SignedTransaction) -> Self { - VerifiedTransaction { + fn new(transaction: SignedTransaction) -> Result { + try!(transaction.sender()); + Ok(VerifiedTransaction { transaction: transaction - } + }) } fn hash(&self) -> H256 { @@ -148,6 +150,8 @@ impl TransactionSet { } } +// Will be used when rpc merged +#[allow(dead_code)] #[derive(Debug)] /// Current status of the queue pub struct TransactionQueueStatus { @@ -196,6 +200,8 @@ impl TransactionQueue { } } + // Will be used when rpc merged + #[allow(dead_code)] /// Returns current status for this queue pub fn status(&self) -> TransactionQueueStatus { TransactionQueueStatus { @@ -205,17 +211,19 @@ impl TransactionQueue { } /// Adds all signed transactions to queue to be verified and imported - pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) + pub fn add_all(&mut self, txs: Vec, fetch_nonce: T) -> Result<(), Error> where T: Fn(&Address) -> U256 { for tx in txs.into_iter() { - self.add(tx, &fetch_nonce); + try!(self.add(tx, &fetch_nonce)); } + Ok(()) } /// Add signed transaction to queue to be verified and imported - pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) + pub fn add(&mut self, tx: SignedTransaction, fetch_nonce: &T) -> Result<(), Error> where T: Fn(&Address) -> U256 { - self.import_tx(VerifiedTransaction::new(tx), fetch_nonce); + self.import_tx(try!(VerifiedTransaction::new(tx)), fetch_nonce); + Ok(()) } /// Removes all transactions identified by hashes given in slice @@ -299,7 +307,8 @@ impl 
TransactionQueue { self.future.enforce_limit(&mut self.by_hash); } - + // Will be used when mining merged + #[allow(dead_code)] /// Returns top transactions from the queue pub fn top_transactions(&self, size: usize) -> Vec { self.current.by_priority @@ -407,13 +416,8 @@ impl TransactionQueue { #[cfg(test)] mod test { extern crate rustc_serialize; - use self::rustc_serialize::hex::FromHex; - use std::ops::Deref; - use std::collections::{HashMap, BTreeSet}; - use util::crypto::KeyPair; - use util::numbers::{U256, Uint}; - use util::hash::{Address}; use util::table::*; + use util::*; use ethcore::transaction::*; use super::*; use super::{TransactionSet, TransactionOrder, VerifiedTransaction}; @@ -457,12 +461,12 @@ mod test { limit: 1 }; let (tx1, tx2) = new_txs(U256::from(1)); - let tx1 = VerifiedTransaction::new(tx1); - let tx2 = VerifiedTransaction::new(tx2); + let tx1 = VerifiedTransaction::new(tx1).unwrap(); + let tx2 = VerifiedTransaction::new(tx2).unwrap(); let mut by_hash = { let mut x = HashMap::new(); - let tx1 = VerifiedTransaction::new(tx1.transaction.clone()); - let tx2 = VerifiedTransaction::new(tx2.transaction.clone()); + let tx1 = VerifiedTransaction::new(tx1.transaction.clone()).unwrap(); + let tx2 = VerifiedTransaction::new(tx2.transaction.clone()).unwrap(); x.insert(tx1.hash(), tx1); x.insert(tx2.hash(), tx2); x @@ -496,13 +500,39 @@ mod test { let tx = new_tx(); // when - txq.add(tx, &default_nonce); + let res = txq.add(tx, &default_nonce); // then + assert!(res.is_ok()); let stats = txq.status(); assert_eq!(stats.pending, 1); } + #[test] + fn should_reject_incorectly_signed_transaction() { + // given + let mut txq = TransactionQueue::new(); + let tx = new_unsigned_tx(U256::from(123)); + let stx = { + let mut s = RlpStream::new_list(9); + s.append(&tx.nonce); + s.append(&tx.gas_price); + s.append(&tx.gas); + s.append_empty_data(); // action=create + s.append(&tx.value); + s.append(&tx.data); + s.append(&0u64); // v + s.append(&U256::zero()); // r + s.append(&U256::zero()); // s + decode(s.as_raw()) + }; + // when + let res = txq.add(stx, &default_nonce); + + // then + assert!(res.is_err()); + } + #[test] fn should_import_txs_from_same_sender() { // given @@ -511,8 +541,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(1)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let top = txq.top_transactions(5); @@ -529,8 +559,8 @@ mod test { let (tx, tx2) = new_txs(U256::from(2)); // when - txq.add(tx.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -551,13 +581,13 @@ mod test { let tx1 = new_unsigned_tx(U256::from(124)).sign(&secret); let tx2 = new_unsigned_tx(U256::from(125)).sign(&secret); - txq.add(tx, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); - txq.add(tx2, &default_nonce); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); // when - txq.add(tx1, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -570,8 +600,8 @@ mod test { // given let mut txq2 = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(3)); - txq2.add(tx.clone(), &default_nonce); - txq2.add(tx2.clone(), &default_nonce); + txq2.add(tx.clone(), &default_nonce).unwrap(); + txq2.add(tx2.clone(), 
&default_nonce).unwrap(); assert_eq!(txq2.status().pending, 1); assert_eq!(txq2.status().future, 1); @@ -592,10 +622,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx3.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -614,8 +644,8 @@ mod test { let (tx, tx2) = new_txs(U256::one()); // add - txq.add(tx2.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); + txq.add(tx.clone(), &default_nonce).unwrap(); let stats = txq.status(); assert_eq!(stats.pending, 2); @@ -632,11 +662,11 @@ mod test { // given let mut txq = TransactionQueue::with_limits(1, 1); let (tx, tx2) = new_txs(U256::one()); - txq.add(tx.clone(), &default_nonce); + txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 1); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); // then let t = txq.top_transactions(2); @@ -650,14 +680,14 @@ mod test { let mut txq = TransactionQueue::with_limits(10, 1); let (tx1, tx2) = new_txs(U256::from(4)); let (tx3, tx4) = new_txs(U256::from(4)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx3.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx3.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx4.clone(), &default_nonce); + txq.add(tx4.clone(), &default_nonce).unwrap(); // then assert_eq!(txq.status().future, 1); @@ -671,7 +701,7 @@ mod test { let fetch_last_nonce = |_a: &Address| last_nonce; // when - txq.add(tx, &fetch_last_nonce); + txq.add(tx, &fetch_last_nonce).unwrap(); // then let stats = txq.status(); @@ -685,12 +715,12 @@ mod test { let nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (_tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); assert_eq!(txq.status().pending, 0); // when - txq.add(tx2.clone(), &nonce); + txq.add(tx2.clone(), &nonce).unwrap(); // then let stats = txq.status(); @@ -703,15 +733,15 @@ mod test { // given let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx1.clone(), &default_nonce); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 2); // when txq.remove(&tx1.hash(), &default_nonce); assert_eq!(txq.status().pending, 0); assert_eq!(txq.status().future, 1); - txq.add(tx1.clone(), &default_nonce); + txq.add(tx1.clone(), &default_nonce).unwrap(); // then let stats = txq.status(); @@ -726,10 +756,10 @@ mod test { let mut txq = TransactionQueue::new(); let (tx, tx2) = new_txs(U256::from(1)); let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce); + txq.add(tx2.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce); - txq.add(tx.clone(), &default_nonce); + txq.add(tx3.clone(), &default_nonce).unwrap(); + 
txq.add(tx.clone(), &default_nonce).unwrap(); assert_eq!(txq.status().pending, 3); // when @@ -754,8 +784,8 @@ mod test { }; // when - txq.add(tx, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -782,10 +812,10 @@ mod test { }; // when - txq.add(tx1, &default_nonce); - txq.add(tx2, &default_nonce); + txq.add(tx1, &default_nonce).unwrap(); + txq.add(tx2, &default_nonce).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx0, &default_nonce); + txq.add(tx0, &default_nonce).unwrap(); // then let stats = txq.status(); @@ -801,8 +831,8 @@ mod test { let next_nonce = |a: &Address| default_nonce(a) + U256::one(); let mut txq = TransactionQueue::new(); let (tx1, tx2) = new_txs(U256::one()); - txq.add(tx1.clone(), &previous_nonce); - txq.add(tx2, &previous_nonce); + txq.add(tx1.clone(), &previous_nonce).unwrap(); + txq.add(tx2, &previous_nonce).unwrap(); assert_eq!(txq.status().future, 2); // when From a1640dcf7205c2d47b017e2398c6fda03da889d6 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 11:38:53 +0100 Subject: [PATCH 40/61] jsonrpc panic handle --- ethcore/src/block_queue.rs | 2 +- parity/main.rs | 34 +++++++++++++++++++++------------- rpc/src/lib.rs | 22 +++++++++++++++++----- 3 files changed, 39 insertions(+), 19 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 490a17995..8f1105b8b 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -385,7 +385,7 @@ impl BlockQueue { } } - pub fn collect_garbage(&self) { + pub fn collect_garbage(&self) { { let mut verification = self.verification.lock().unwrap(); verification.unverified.shrink_to_fit(); diff --git a/parity/main.rs b/parity/main.rs index 605fb315d..94db8e706 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -43,7 +43,7 @@ use std::path::PathBuf; use env_logger::LogBuilder; use ctrlc::CtrlC; use util::*; -use util::panics::MayPanic; +use util::panics::{MayPanic, PanicHandler}; use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -151,7 +151,7 @@ fn setup_log(init: &Option) { } #[cfg(feature = "rpc")] -fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) { +fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str) -> Option> { use rpc::v1::*; let mut server = rpc::HttpServer::new(1); @@ -159,11 +159,12 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom server.add_delegate(EthClient::new(&client, &sync).to_delegate()); server.add_delegate(EthFilterClient::new(&client).to_delegate()); server.add_delegate(NetClient::new(&sync).to_delegate()); - server.start_async(url, cors_domain); + Some(server.start_async(url, cors_domain)) } #[cfg(not(feature = "rpc"))] -fn setup_rpc_server(_client: Arc, _sync: Arc, _url: &str) { +fn setup_rpc_server(_client: Arc, _sync: Arc, _url: &str) -> Option> { + None } fn print_version() { @@ -323,26 +324,28 @@ impl Configuration { // Sync let sync = EthSync::register(service.network(), sync_config, client); - // Setup rpc - if self.args.flag_jsonrpc { - setup_rpc_server(service.client(), sync.clone(), &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors); - SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. 
Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); - } - // Register IO handler let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), - sync: sync + sync: sync.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); + // Setup rpc + let server_handler = if self.args.flag_jsonrpc { + SocketAddr::from_str(&self.args.flag_jsonrpc_url).unwrap_or_else(|_|die!("{}: Invalid JSONRPC listen address given with --jsonrpc-url. Should be of the form 'IP:port'.", self.args.flag_jsonrpc_url)); + setup_rpc_server(service.client(), sync, &self.args.flag_jsonrpc_url, &self.args.flag_jsonrpc_cors) + } else { + None + }; + // Handle exit - wait_for_exit(&service); + wait_for_exit(&service, server_handler); } } -fn wait_for_exit(client_service: &ClientService) { +fn wait_for_exit(client_service: &ClientService, server_handler: Option>) { let exit = Arc::new(Condvar::new()); // Handle possible exits @@ -351,6 +354,11 @@ fn wait_for_exit(client_service: &ClientService) { let e = exit.clone(); client_service.on_panic(move |_reason| { e.notify_all(); }); + if let Some(handler) = server_handler { + let e = exit.clone(); + handler.on_panic(move |_reason| { e.notify_all(); }); + } + // Wait for signal let mutex = Mutex::new(()); let _ = exit.wait(mutex.lock().unwrap()).unwrap(); diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 0653a0c33..97a3a5fe5 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -29,6 +29,9 @@ extern crate ethcore; extern crate ethsync; extern crate transient_hashmap; +use std::sync::Arc; +use std::thread; +use util::panics::PanicHandler; use self::jsonrpc_core::{IoHandler, IoDelegate}; pub mod v1; @@ -36,7 +39,7 @@ pub mod v1; /// Http server. pub struct HttpServer { handler: IoHandler, - threads: usize + threads: usize, } impl HttpServer { @@ -44,7 +47,7 @@ impl HttpServer { pub fn new(threads: usize) -> HttpServer { HttpServer { handler: IoHandler::new(), - threads: threads + threads: threads, } } @@ -53,9 +56,18 @@ impl HttpServer { self.handler.add_delegate(delegate); } - /// Start server asynchronously in new thread - pub fn start_async(self, addr: &str, cors_domain: &str) { + /// Start server asynchronously in new thread and returns panic handler. 
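///
/// A minimal usage sketch (assuming the `HttpServer` API in this module; the
/// delegate and address values are placeholders, mirroring `setup_rpc_server`
/// in parity/main.rs):
///
/// ```ignore
/// let mut server = HttpServer::new(1);
/// server.add_delegate(NetClient::new(&sync).to_delegate());
/// let panic_handler = server.start_async("127.0.0.1:8545", "*");
/// panic_handler.on_panic(move |_reason| { /* trigger shutdown */ });
/// ```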
+ pub fn start_async(self, addr: &str, cors_domain: &str) -> Arc { + let addr = addr.to_owned(); + let cors_domain = cors_domain.to_owned(); + let panic_handler = PanicHandler::new_in_arc(); + let ph = panic_handler.clone(); let server = jsonrpc_http_server::Server::new(self.handler, self.threads); - server.start_async(addr, jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain.to_owned())) + thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || { + ph.catch_panic(move || { + server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain)); + }).unwrap() + }).expect("Error while creating jsonrpc http thread"); + panic_handler } } From 6ad0ba8fe24bf35c9a74b48a25c6e84dc132429f Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 17:11:15 +0400 Subject: [PATCH 41/61] basic commands --- Cargo.lock | 20 ++++++++++++++++++++ Cargo.toml | 1 + parity/main.rs | 34 ++++++++++++++++++++++++++++++++++ util/src/keys/store.rs | 1 + 4 files changed, 56 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..65ca8f566 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,6 +15,7 @@ dependencies = [ "fdlimit 0.1.0", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "rpassword 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.34 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -680,6 +681,17 @@ dependencies = [ "librocksdb-sys 0.2.1 (git+https://github.com/arkpar/rust-rocksdb.git)", ] +[[package]] +name = "rpassword" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", + "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "rust-crypto" version = "0.2.34" @@ -813,6 +825,14 @@ dependencies = [ "winapi 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "termios" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "libc 0.2.7 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "time" version = "0.1.34" diff --git a/Cargo.toml b/Cargo.toml index 9b8ec6405..0852a16bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,6 +21,7 @@ fdlimit = { path = "util/fdlimit" } daemonize = "0.2" ethcore-devtools = { path = "devtools" } number_prefix = "0.2" +rpassword = "0.1" [features] default = ["rpc"] diff --git a/parity/main.rs b/parity/main.rs index 296e1df65..a442f4fdb 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -32,6 +32,7 @@ extern crate fdlimit; extern crate daemonize; extern crate time; extern crate number_prefix; +extern crate rpassword; #[cfg(feature = "rpc")] extern crate ethcore_rpc as rpc; @@ -70,6 +71,7 @@ Parity. Ethereum Client. Usage: parity daemon [options] [ --no-bootstrap | ... ] + parity account parity [options] [ --no-bootstrap | ... 
] Protocol Options: @@ -126,8 +128,10 @@ Miscellaneous Options: #[derive(Debug, RustcDecodable)] struct Args { cmd_daemon: bool, + cmd_account: bool, arg_pid_file: String, arg_enode: Vec, + arg_command: String, flag_chain: String, flag_testnet: bool, flag_datadir: String, @@ -337,9 +341,39 @@ impl Configuration { .start() .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); } + if self.args.cmd_account { + self.execute_account_cli(&self.args.arg_command); + return; + } self.execute_client(); } + fn execute_account_cli(&self, command: &str) { + use util::keys::store::SecretStore; + use rpassword::read_password; + let mut secret_store = SecretStore::new(); + if command == "new" { + println!("Please note that password is NOT RECOVERABLE."); + println!("Type password: "); + let password = read_password().unwrap(); + println!("Repeat password: "); + let password_repeat = read_password().unwrap(); + if password != password_repeat { + println!("Passwords do not match!"); + return; + } + println!("New account address:"); + let new_address = secret_store.new_account(&password).unwrap(); + println!("{:?}", new_address); + } + if command == "list" { + println!("Known addresses:"); + for &(addr, _) in secret_store.accounts().unwrap().iter() { + println!("{:?}", addr); + } + } + } + fn execute_client(&self) { // Setup logging setup_log(&self.args.flag_logging); diff --git a/util/src/keys/store.rs b/util/src/keys/store.rs index 625d6fd8f..dcc165259 100644 --- a/util/src/keys/store.rs +++ b/util/src/keys/store.rs @@ -84,6 +84,7 @@ impl SecretStore { let mut path = ::std::env::home_dir().expect("Failed to get home dir"); path.push(".parity"); path.push("keys"); + ::std::fs::create_dir_all(&path).expect("Should panic since it is critical to be able to access home dir"); Self::new_in(&path) } From bcb9b0e45723f6e35035505ad89f8f747909041b Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 15:32:27 +0100 Subject: [PATCH 42/61] wait_for_exit takes only one input param, which is PanicHandler --- parity/main.rs | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 296e1df65..adc3972e4 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -43,7 +43,7 @@ use std::path::PathBuf; use env_logger::LogBuilder; use ctrlc::CtrlC; use util::*; -use util::panics::{MayPanic, PanicHandler}; +use util::panics::{MayPanic, ForwardPanic, PanicHandler}; use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -341,6 +341,9 @@ impl Configuration { } fn execute_client(&self) { + // Setup panic handler + let panic_handler = PanicHandler::new_in_arc(); + // Setup logging setup_log(&self.args.flag_logging); // Raise fdlimit @@ -367,6 +370,7 @@ impl Configuration { client_config.name = self.args.flag_identity.clone(); client_config.queue.max_mem_use = self.args.flag_queue_max_size; let mut service = ClientService::start(client_config, spec, net_settings, &Path::new(&self.path())).unwrap(); + panic_handler.forward_from(&service); let client = service.client().clone(); client.set_author(self.author()); client.set_extra_data(self.extra_data()); @@ -375,7 +379,7 @@ impl Configuration { let sync = EthSync::register(service.network(), sync_config, client); // Setup rpc - let server_handler = if self.args.flag_jsonrpc || self.args.flag_rpc { + if self.args.flag_jsonrpc || self.args.flag_rpc { let url = format!("{}:{}", self.args.flag_rpcaddr.as_ref().unwrap_or(&self.args.flag_jsonrpc_addr), 
self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port) @@ -384,10 +388,12 @@ impl Configuration { let cors = self.args.flag_rpccorsdomain.as_ref().unwrap_or(&self.args.flag_jsonrpc_cors); // TODO: use this as the API list. let apis = self.args.flag_rpcapi.as_ref().unwrap_or(&self.args.flag_jsonrpc_apis); - setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()) - } else { - None - }; + let server_handler = setup_rpc_server(service.client(), sync.clone(), &url, cors, apis.split(",").collect()); + if let Some(handler) = server_handler { + panic_handler.forward_from(handler.deref()); + } + + } // Register IO handler let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: sync.clone(), }); service.io().register_handler(io_handler).expect("Error registering IO handler"); // Handle exit - wait_for_exit(&service, server_handler); + wait_for_exit(panic_handler); } } -fn wait_for_exit(client_service: &ClientService, server_handler: Option<Arc<PanicHandler>>) { +fn wait_for_exit(panic_handler: Arc<PanicHandler>) { let exit = Arc::new(Condvar::new()); // Handle possible exits let e = exit.clone(); CtrlC::set_handler(move || { e.notify_all(); }); - let e = exit.clone(); - client_service.on_panic(move |_reason| { e.notify_all(); }); - if let Some(handler) = server_handler { - let e = exit.clone(); - handler.on_panic(move |_reason| { e.notify_all(); }); - } + // Handle panics + let e = exit.clone(); + panic_handler.on_panic(move |_reason| { e.notify_all(); }); // Wait for signal let mutex = Mutex::new(()); let _ = exit.wait(mutex.lock().unwrap()).unwrap(); From 7ff4d145448487685be000f572f790c3bcef5ae9 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 19:27:44 +0400 Subject: [PATCH 43/61] adding return to if branch --- parity/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/parity/main.rs b/parity/main.rs index 1a2847439..9a45980ef 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -365,6 +365,7 @@ impl Configuration { println!("New account address:"); let new_address = secret_store.new_account(&password).unwrap(); println!("{:?}", new_address); + return; } if command == "list" { println!("Known addresses:"); From 082a4d9078cff6b90f4b01ca3614cc9c3825f265 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 17:31:43 +0100 Subject: [PATCH 44/61] jsonrpc uses client and sync interfaces as a preparation for jsonrpc tests --- ethcore/src/client.rs | 73 +++++++++++++++++++++------------------ parity/main.rs | 2 +- rpc/src/v1/impls/eth.rs | 25 +++++++------- rpc/src/v1/impls/net.rs | 12 +++---- sync/src/lib.rs | 18 +++++++--- sync/src/tests/helpers.rs | 9 +++++ 6 files changed, 81 insertions(+), 58 deletions(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 8471666aa..374011f71 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -182,6 +182,13 @@ pub trait BlockChainClient : Sync + Send { /// Returns logs matching given filter. fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>; + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self) -> &Mutex<Option<ClosedBlock>>; + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain.
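///
/// A minimal mining-loop sketch (assuming the `sealing_block`/`submit_seal`
/// pair declared here; `find_seal_somehow` is a hypothetical proof-of-work search):
///
/// ```ignore
/// let pow_hash = client.sealing_block().lock().unwrap().as_ref().unwrap().hash();
/// let seal = find_seal_somehow(&pow_hash); // hypothetical
/// client.submit_seal(pow_hash, seal).unwrap();
/// ```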
+ fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error>; } #[derive(Default, Clone, Debug, Eq, PartialEq)] @@ -511,39 +518,6 @@ impl Client where V: Verifier { trace!("Sealing: number={}, hash={}, diff={}", b.hash(), b.block().header().difficulty(), b.block().header().number()); *self.sealing_block.lock().unwrap() = Some(b); } - - /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. - pub fn sealing_block(&self) -> &Mutex> { - if self.sealing_block.lock().unwrap().is_none() { - self.sealing_enabled.store(true, atomic::Ordering::Relaxed); - // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. - self.prepare_sealing(); - } - &self.sealing_block - } - - /// Submit `seal` as a valid solution for the header of `pow_hash`. - /// Will check the seal, but not actually insert the block into the chain. - pub fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { - let mut maybe_b = self.sealing_block.lock().unwrap(); - match *maybe_b { - Some(ref b) if b.hash() == pow_hash => {} - _ => { return Err(Error::PowHashInvalid); } - } - - let b = maybe_b.take(); - match b.unwrap().try_seal(self.engine.deref().deref(), seal) { - Err(old) => { - *maybe_b = Some(old); - Err(Error::PowInvalid) - } - Ok(sealed) => { - // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. - try!(self.import_block(sealed.rlp_bytes())); - Ok(()) - } - } - } } // TODO: need MinerService MinerIoHandler @@ -702,6 +676,39 @@ impl BlockChainClient for Client where V: Verifier { }) .collect() } + + /// Grab the `ClosedBlock` that we want to be sealed. Comes as a mutex that you have to lock. + fn sealing_block(&self) -> &Mutex> { + if self.sealing_block.lock().unwrap().is_none() { + self.sealing_enabled.store(true, atomic::Ordering::Relaxed); + // TODO: Above should be on a timer that resets after two blocks have arrived without being asked for. + self.prepare_sealing(); + } + &self.sealing_block + } + + /// Submit `seal` as a valid solution for the header of `pow_hash`. + /// Will check the seal, but not actually insert the block into the chain. + fn submit_seal(&self, pow_hash: H256, seal: Vec) -> Result<(), Error> { + let mut maybe_b = self.sealing_block.lock().unwrap(); + match *maybe_b { + Some(ref b) if b.hash() == pow_hash => {} + _ => { return Err(Error::PowHashInvalid); } + } + + let b = maybe_b.take(); + match b.unwrap().try_seal(self.engine.deref().deref(), seal) { + Err(old) => { + *maybe_b = Some(old); + Err(Error::PowInvalid) + } + Ok(sealed) => { + // TODO: commit DB from `sealed.drain` and make a VerifiedBlock to skip running the transactions twice. + try!(self.import_block(sealed.rlp_bytes())); + Ok(()) + } + } + } } impl MayPanic for Client { diff --git a/parity/main.rs b/parity/main.rs index ceb58e31e..1cd2970b4 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -48,7 +48,7 @@ use ethcore::spec::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; -use ethsync::{EthSync, SyncConfig}; +use ethsync::{EthSync, SyncConfig, SyncStatusProvider}; use docopt::Docopt; use daemonize::Daemonize; use number_prefix::{binary_prefix, Standalone, Prefixed}; diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..abcb54ab7 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -17,7 +17,7 @@ //! Eth rpc implementation. 
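//!
//! The clients below hold `Weak` references to the blockchain client and the
//! sync status provider; a minimal wiring sketch (assuming the generic
//! constructors introduced in this change):
//!
//! ```ignore
//! let eth = EthClient::new(&client, &sync); // client: Arc<C>, sync: Arc<S>
//! server.add_delegate(eth.to_delegate());
//! ```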
use std::collections::HashMap; use std::sync::{Arc, Weak, Mutex, RwLock}; -use ethsync::{EthSync, SyncState}; +use ethsync::{SyncStatusProvider, SyncState}; use jsonrpc_core::*; use util::numbers::*; use util::sha3::*; @@ -25,7 +25,6 @@ use util::rlp::encode; use ethcore::client::*; use ethcore::block::{IsBlock}; use ethcore::views::*; -//#[macro_use] extern crate log; use ethcore::ethereum::Ethash; use ethcore::ethereum::denominations::shannon; use v1::traits::{Eth, EthFilter}; @@ -33,15 +32,15 @@ use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncIn use v1::helpers::{PollFilter, PollManager}; /// Eth rpc implementation. -pub struct EthClient { - client: Weak, - sync: Weak, +pub struct EthClient where C: BlockChainClient, S: SyncStatusProvider { + client: Weak, + sync: Weak, hashrates: RwLock>, } -impl EthClient { +impl EthClient where C: BlockChainClient, S: SyncStatusProvider { /// Creates new EthClient. - pub fn new(client: &Arc, sync: &Arc) -> Self { + pub fn new(client: &Arc, sync: &Arc) -> Self { EthClient { client: Arc::downgrade(client), sync: Arc::downgrade(sync), @@ -95,7 +94,7 @@ impl EthClient { } } -impl Eth for EthClient { +impl Eth for EthClient where C: BlockChainClient + 'static, S: SyncStatusProvider + 'static { fn protocol_version(&self, params: Params) -> Result { match params { Params::None => to_value(&U256::from(take_weak!(self.sync).status().protocol_version)), @@ -256,14 +255,14 @@ impl Eth for EthClient { } /// Eth filter rpc implementation. -pub struct EthFilterClient { - client: Weak, +pub struct EthFilterClient where C: BlockChainClient { + client: Weak, polls: Mutex>, } -impl EthFilterClient { +impl EthFilterClient where C: BlockChainClient { /// Creates new Eth filter client. - pub fn new(client: &Arc) -> Self { + pub fn new(client: &Arc) -> Self { EthFilterClient { client: Arc::downgrade(client), polls: Mutex::new(PollManager::new()) @@ -271,7 +270,7 @@ impl EthFilterClient { } } -impl EthFilter for EthFilterClient { +impl EthFilter for EthFilterClient where C: BlockChainClient + 'static { fn new_filter(&self, params: Params) -> Result { from_params::<(Filter,)>(params) .and_then(|(filter,)| { diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 9e24caad2..a686ed66f 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -17,24 +17,24 @@ //! Net rpc implementation. use std::sync::{Arc, Weak}; use jsonrpc_core::*; -use ethsync::EthSync; +use ethsync::SyncStatusProvider; use v1::traits::Net; /// Net rpc implementation. -pub struct NetClient { - sync: Weak +pub struct NetClient where S: SyncStatusProvider { + sync: Weak } -impl NetClient { +impl NetClient where S: SyncStatusProvider { /// Creates new NetClient. - pub fn new(sync: &Arc) -> Self { + pub fn new(sync: &Arc) -> Self { NetClient { sync: Arc::downgrade(sync) } } } -impl Net for NetClient { +impl Net for NetClient where S: SyncStatusProvider + 'static { fn version(&self, _: Params) -> Result { Ok(Value::U64(take_weak!(self.sync).status().protocol_version as u64)) } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index 74541660d..427a58e15 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -93,6 +93,12 @@ impl Default for SyncConfig { } } +/// Current sync status +pub trait SyncStatusProvider: Send + Sync { + /// Get sync status + fn status(&self) -> SyncStatus; +} + /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
TODO: this should evetually become an IPC endpoint @@ -114,11 +120,6 @@ impl EthSync { sync } - /// Get sync status - pub fn status(&self) -> SyncStatus { - self.sync.read().unwrap().status() - } - /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref())); @@ -130,6 +131,13 @@ impl EthSync { } } +impl SyncStatusProvider for EthSync { + /// Get sync status + fn status(&self) -> SyncStatus { + self.sync.read().unwrap().status() + } +} + impl NetworkProtocolHandler for EthSync { fn initialize(&self, io: &NetworkContext) { io.register_timer(0, 1000).expect("Error registering sync timer"); diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs index e170a4a85..e7d5cf57f 100644 --- a/sync/src/tests/helpers.rs +++ b/sync/src/tests/helpers.rs @@ -25,6 +25,7 @@ use ethcore::receipt::Receipt; use ethcore::transaction::LocalizedTransaction; use ethcore::filter::Filter; use ethcore::log_entry::LocalizedLogEntry; +use ethcore::block::ClosedBlock; pub struct TestBlockChainClient { pub blocks: RwLock>, @@ -125,6 +126,14 @@ impl BlockChainClient for TestBlockChainClient { unimplemented!(); } + fn sealing_block(&self) -> &Mutex> { + unimplemented!(); + } + + fn submit_seal(&self, _pow_hash: H256, _seal: Vec) -> Result<(), Error> { + unimplemented!(); + } + fn block_header(&self, id: BlockId) -> Option { self.block_hash(id).and_then(|hash| self.blocks.read().unwrap().get(&hash).map(|r| Rlp::new(r).at(0).as_raw().to_vec())) } From 423dd7e0a967a808b769304a245f4316e5a2aa5b Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 18:04:13 +0100 Subject: [PATCH 45/61] updated jsonrpc-core and http-server libs --- Cargo.lock | 10 +++++----- parity/main.rs | 4 ++-- rpc/Cargo.toml | 4 ++-- rpc/src/lib.rs | 22 ++++++++++------------ 4 files changed, 19 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 55ed996ed..15845c806 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -228,8 +228,8 @@ dependencies = [ "ethcore 0.9.99", "ethcore-util 0.9.99", "ethsync 0.9.99", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-http-server 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-http-server 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.18 (registry+https://github.com/rust-lang/crates.io-index)", "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -400,7 +400,7 @@ dependencies = [ [[package]] name = "jsonrpc-core" -version = "1.2.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "serde 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -411,11 +411,11 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" -version = "2.1.0" +version = "3.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "hyper 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/parity/main.rs b/parity/main.rs index adc3972e4..f28ef84c3 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -193,7 +193,7 @@ fn 
setup_log(init: &Option) { fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_domain: &str, apis: Vec<&str>) -> Option> { use rpc::v1::*; - let mut server = rpc::HttpServer::new(1); + let server = rpc::RpcServer::new(); for api in apis.into_iter() { match api { "web3" => server.add_delegate(Web3Client::new().to_delegate()), @@ -207,7 +207,7 @@ fn setup_rpc_server(client: Arc, sync: Arc, url: &str, cors_dom } } } - Some(server.start_async(url, cors_domain)) + Some(server.start_http(url, cors_domain, 1)) } #[cfg(not(feature = "rpc"))] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index bfdf8f2d3..f324aba10 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -12,8 +12,8 @@ build = "build.rs" log = "0.3" serde = "0.7.0" serde_json = "0.7.0" -jsonrpc-core = "1.2" -jsonrpc-http-server = "2.1" +jsonrpc-core = "2.0" +jsonrpc-http-server = "3.0" ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } ethash = { path = "../ethash" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 97a3a5fe5..731ded8c4 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -37,35 +37,33 @@ use self::jsonrpc_core::{IoHandler, IoDelegate}; pub mod v1; /// Http server. -pub struct HttpServer { - handler: IoHandler, - threads: usize, +pub struct RpcServer { + handler: Arc, } -impl HttpServer { +impl RpcServer { /// Construct new http server object with given number of threads. - pub fn new(threads: usize) -> HttpServer { - HttpServer { - handler: IoHandler::new(), - threads: threads, + pub fn new() -> RpcServer { + RpcServer { + handler: Arc::new(IoHandler::new()), } } /// Add io delegate. - pub fn add_delegate(&mut self, delegate: IoDelegate) where D: Send + Sync + 'static { + pub fn add_delegate(&self, delegate: IoDelegate) where D: Send + Sync + 'static { self.handler.add_delegate(delegate); } /// Start server asynchronously in new thread and returns panic handler. 
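///
/// A minimal usage sketch of the reworked server (assuming the `RpcServer`
/// API in this module; URL and CORS values are placeholders):
///
/// ```ignore
/// let server = RpcServer::new();
/// server.add_delegate(Web3Client::new().to_delegate());
/// let panic_handler = server.start_http("127.0.0.1:8545", "*", 1);
/// ```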
- pub fn start_async(self, addr: &str, cors_domain: &str) -> Arc<PanicHandler> { + pub fn start_http(&self, addr: &str, cors_domain: &str, threads: usize) -> Arc<PanicHandler> { let addr = addr.to_owned(); let cors_domain = cors_domain.to_owned(); let panic_handler = PanicHandler::new_in_arc(); let ph = panic_handler.clone(); - let server = jsonrpc_http_server::Server::new(self.handler, self.threads); + let server = jsonrpc_http_server::Server::new(self.handler.clone()); thread::Builder::new().name("jsonrpc_http".to_string()).spawn(move || { ph.catch_panic(move || { - server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain)); + server.start(addr.as_ref(), jsonrpc_http_server::AccessControlAllowOrigin::Value(cors_domain), threads); }).unwrap() }).expect("Error while creating jsonrpc http thread"); panic_handler } } From 8a83e27d6a8f2f298e6b0dc44a60f9190e8e6c2a Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 22:55:41 +0400 Subject: [PATCH 46/61] cfg-test for noop verifier --- ethcore/src/verification/mod.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethcore/src/verification/mod.rs b/ethcore/src/verification/mod.rs index 260121989..fe1f406cc 100644 --- a/ethcore/src/verification/mod.rs +++ b/ethcore/src/verification/mod.rs @@ -17,9 +17,11 @@ pub mod verification; pub mod verifier; mod canon_verifier; +#[cfg(test)] mod noop_verifier; pub use self::verification::*; pub use self::verifier::Verifier; pub use self::canon_verifier::CanonVerifier; +#[cfg(test)] pub use self::noop_verifier::NoopVerifier; From accc1db43fc46e3bd2ab425c778dcfaa843bdec8 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 9 Mar 2016 23:39:36 +0400 Subject: [PATCH 47/61] changing docopt config a bit --- parity/main.rs | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/parity/main.rs b/parity/main.rs index 9a45980ef..92400728d 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -71,7 +71,7 @@ Parity. Ethereum Client. Usage: parity daemon [options] [ --no-bootstrap | ... ] - parity account + parity account (new | list) parity [options] [ --no-bootstrap | ...
] Protocol Options: @@ -129,9 +129,10 @@ Miscellaneous Options: struct Args { cmd_daemon: bool, cmd_account: bool, + cmd_new: bool, + cmd_list: bool, arg_pid_file: String, arg_enode: Vec, - arg_command: String, flag_chain: String, flag_testnet: bool, flag_datadir: String, @@ -342,17 +343,17 @@ impl Configuration { .unwrap_or_else(|e| die!("Couldn't daemonize; {}", e)); } if self.args.cmd_account { - self.execute_account_cli(&self.args.arg_command); + self.execute_account_cli(); return; } self.execute_client(); } - fn execute_account_cli(&self, command: &str) { + fn execute_account_cli(&self) { use util::keys::store::SecretStore; use rpassword::read_password; let mut secret_store = SecretStore::new(); - if command == "new" { + if self.args.cmd_new { println!("Please note that password is NOT RECOVERABLE."); println!("Type password: "); let password = read_password().unwrap(); @@ -367,7 +368,7 @@ impl Configuration { println!("{:?}", new_address); return; } - if command == "list" { + if self.args.cmd_list { println!("Known addresses:"); for &(addr, _) in secret_store.accounts().unwrap().iter() { println!("{:?}", addr); From 8b042ac875f0abe9968f70d76b44773cd64c4350 Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 9 Mar 2016 21:55:23 +0100 Subject: [PATCH 48/61] blockchain import_route --- ethcore/src/blockchain/block_info.rs | 6 +- ethcore/src/blockchain/blockchain.rs | 55 +++++++++--- ethcore/src/blockchain/import_route.rs | 119 +++++++++++++++++++++++++ ethcore/src/blockchain/mod.rs | 2 + 4 files changed, 167 insertions(+), 15 deletions(-) create mode 100644 ethcore/src/blockchain/import_route.rs diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index ce639bfed..335bdbb4e 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -18,6 +18,7 @@ use util::numbers::{U256,H256}; use header::BlockNumber; /// Brief info about inserted block. +#[derive(Clone)] pub struct BlockInfo { /// Block hash. pub hash: H256, @@ -30,6 +31,7 @@ pub struct BlockInfo { } /// Describes location of newly inserted block. +#[derive(Clone)] pub enum BlockLocation { /// It's part of the canon chain. CanonChain, @@ -42,6 +44,8 @@ pub enum BlockLocation { /// Hash of the newest common ancestor with old canon chain. ancestor: H256, /// Hashes of the blocks between ancestor and this block. - route: Vec + route: Vec, + /// Hashes of the blocks which were invalidated. + old_route: Vec, } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e529f50af..e57f7208a 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -28,7 +28,7 @@ use blockchain::best_block::BestBlock; use blockchain::bloom_indexer::BloomIndexer; use blockchain::tree_route::TreeRoute; use blockchain::update::ExtrasUpdate; -use blockchain::CacheSize; +use blockchain::{CacheSize, ImportRoute}; const BLOOM_INDEX_SIZE: usize = 16; const BLOOM_LEVELS: u8 = 3; @@ -414,14 +414,14 @@ impl BlockChain { /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. 
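///
/// A minimal sketch of consuming the returned route (assuming the
/// `ImportRoute` type introduced in this change):
///
/// ```ignore
/// let route = bc.insert_block(&block_bytes, receipts);
/// for hash in &route.validated_blocks { /* now part of the canon chain */ }
/// for hash in &route.invalidated_blocks { /* retracted by this import */ }
/// ```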
- pub fn insert_block(&self, bytes: &[u8], receipts: Vec) { + pub fn insert_block(&self, bytes: &[u8], receipts: Vec) -> ImportRoute { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); let hash = header.sha3(); if self.is_known(&hash) { - return; + return ImportRoute::none(); } // store block in db @@ -435,8 +435,10 @@ impl BlockChain { block_receipts: self.prepare_block_receipts_update(receipts, &info), transactions_addresses: self.prepare_transaction_addresses_update(bytes, &info), blocks_blooms: self.prepare_block_blooms_update(bytes, &info), - info: info + info: info.clone(), }); + + ImportRoute::from(info) } /// Applies extras update. @@ -549,9 +551,14 @@ impl BlockChain { match route.blocks.len() { 0 => BlockLocation::CanonChain, - _ => BlockLocation::BranchBecomingCanonChain { - ancestor: route.ancestor, - route: route.blocks.into_iter().skip(route.index).collect() + _ => { + let old_route = route.blocks.iter().take(route.index).cloned().collect::>(); + + BlockLocation::BranchBecomingCanonChain { + ancestor: route.ancestor, + route: route.blocks.into_iter().skip(route.index).collect(), + old_route: old_route.into_iter().rev().collect(), + } } } } else { @@ -572,7 +579,7 @@ impl BlockChain { BlockLocation::CanonChain => { block_hashes.insert(number, info.hash.clone()); }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; @@ -661,7 +668,7 @@ impl BlockChain { ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels()) .add_bloom(&header.log_bloom(), header.number() as usize) }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. 
} => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; @@ -825,7 +832,7 @@ mod tests { use rustc_serialize::hex::FromHex; use util::hash::*; use util::sha3::Hashable; - use blockchain::{BlockProvider, BlockChain, BlockChainConfig}; + use blockchain::{BlockProvider, BlockChain, BlockChainConfig, ImportRoute}; use tests::helpers::*; use devtools::*; use blockchain::generator::{ChainGenerator, ChainIterator, BlockFinalizer}; @@ -943,10 +950,30 @@ mod tests { let temp = RandomTempPath::new(); let bc = BlockChain::new(BlockChainConfig::default(), &genesis, temp.as_path()); - bc.insert_block(&b1, vec![]); - bc.insert_block(&b2, vec![]); - bc.insert_block(&b3a, vec![]); - bc.insert_block(&b3b, vec![]); + let ir1 = bc.insert_block(&b1, vec![]); + let ir2 = bc.insert_block(&b2, vec![]); + let ir3b = bc.insert_block(&b3b, vec![]); + let ir3a = bc.insert_block(&b3a, vec![]); + + assert_eq!(ir1, ImportRoute { + validated_blocks: vec![b1_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir2, ImportRoute { + validated_blocks: vec![b2_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir3b, ImportRoute { + validated_blocks: vec![b3b_hash], + invalidated_blocks: vec![], + }); + + assert_eq!(ir3a, ImportRoute { + validated_blocks: vec![b3a_hash], + invalidated_blocks: vec![b3b_hash], + }); assert_eq!(bc.best_block_hash(), best_block_hash); assert_eq!(bc.block_number(&genesis_hash).unwrap(), 0); diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs new file mode 100644 index 000000000..10629f2cb --- /dev/null +++ b/ethcore/src/blockchain/import_route.rs @@ -0,0 +1,119 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Import route. + +use util::hash::H256; +use blockchain::block_info::{BlockInfo, BlockLocation}; + +/// Import route for newly inserted block. +#[derive(Debug, PartialEq)] +pub struct ImportRoute { + /// Blocks that were invalidated by new block. + pub invalidated_blocks: Vec, + /// Blocks that were validted by new block. + pub validated_blocks: Vec, +} + +impl ImportRoute { + pub fn none() -> Self { + ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![], + } + } +} + +impl From for ImportRoute { + fn from(info: BlockInfo) -> ImportRoute { + match info.location { + BlockLocation::CanonChain => ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![info.hash], + }, + BlockLocation::Branch => ImportRoute::none(), + BlockLocation::BranchBecomingCanonChain { mut route, old_route, .. 
} => { + route.push(info.hash); + ImportRoute { + invalidated_blocks: old_route, + validated_blocks: route, + } + } + } + } +} + +#[cfg(test)] +mod tests { + use util::hash::H256; + use util::numbers::U256; + use blockchain::block_info::{BlockInfo, BlockLocation}; + use blockchain::ImportRoute; + + #[test] + fn import_route_none() { + assert_eq!(ImportRoute::none(), ImportRoute { + validated_blocks: vec![], + invalidated_blocks: vec![], + }); + } + + #[test] + fn import_route_branch() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::Branch, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute::none()); + } + + #[test] + fn import_route_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(1)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::CanonChain, + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + invalidated_blocks: vec![], + validated_blocks: vec![H256::from(U256::from(1))], + }); + } + + #[test] + fn import_route_branch_becoming_canon_chain() { + let info = BlockInfo { + hash: H256::from(U256::from(2)), + number: 0, + total_difficulty: U256::from(0), + location: BlockLocation::BranchBecomingCanonChain { + ancestor: H256::from(U256::from(0)), + route: vec![H256::from(U256::from(1))], + old_route: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + } + }; + + assert_eq!(ImportRoute::from(info), ImportRoute { + invalidated_blocks: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + validated_blocks: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + }); + } +} diff --git a/ethcore/src/blockchain/mod.rs b/ethcore/src/blockchain/mod.rs index b0679b563..6559d8364 100644 --- a/ethcore/src/blockchain/mod.rs +++ b/ethcore/src/blockchain/mod.rs @@ -25,7 +25,9 @@ mod tree_route; mod update; #[cfg(test)] mod generator; +mod import_route; pub use self::blockchain::{BlockProvider, BlockChain, BlockChainConfig}; pub use self::cache::CacheSize; pub use self::tree_route::TreeRoute; +pub use self::import_route::ImportRoute; From d7e729a4eaee966d5ef4ea9b1ac57ac32a0714f9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 9 Mar 2016 23:55:56 +0100 Subject: [PATCH 49/61] Fixed sync handling large forks --- sync/src/chain.rs | 4 ++-- sync/src/range_collection.rs | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+), 2 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index fe1b559cd..14f6d6344 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -843,8 +843,8 @@ impl ChainSync { self.downloading_bodies.remove(&n); self.downloading_headers.remove(&n); } - self.headers.remove_tail(&start); - self.bodies.remove_tail(&start); + self.headers.remove_from(&start); + self.bodies.remove_from(&start); } /// Request headers from a peer by block hash diff --git a/sync/src/range_collection.rs b/sync/src/range_collection.rs index dc2f4e446..9bb5cc522 100644 --- a/sync/src/range_collection.rs +++ b/sync/src/range_collection.rs @@ -42,6 +42,8 @@ pub trait RangeCollection { fn remove_head(&mut self, start: &K); /// Remove all elements >= `start` in the range that contains `start` fn remove_tail(&mut self, start: &K); + /// Remove all elements >= `start` + fn remove_from(&mut self, start: &K); /// Remove all elements >= `tail` fn insert_item(&mut self, key: K, value: V); /// Get an iterator over ranges @@ -137,6 +139,28 @@ impl RangeCollection for Vec<(K, Vec)> where K: Ord + PartialEq + } } + /// Remove the 
element and all following it. + fn remove_from(&mut self, key: &K) { + match self.binary_search_by(|&(k, _)| k.cmp(key).reverse()) { + Ok(index) => { self.drain(.. index + 1); }, + Err(index) =>{ + let mut empty = false; + match self.get_mut(index) { + Some(&mut (ref k, ref mut v)) if k <= key && (*k + FromUsize::from_usize(v.len())) > *key => { + v.truncate((*key - *k).to_usize()); + empty = v.is_empty(); + } + _ => {} + } + if empty { + self.drain(.. index + 1); + } else { + self.drain(.. index); + } + }, + } + } + /// Remove range elements up to key fn remove_head(&mut self, key: &K) { if *key == FromUsize::from_usize(0) { @@ -272,5 +296,17 @@ fn test_range() { assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); r.remove_tail(&2); assert_eq!(r.range_iter().next(), None); + + let mut r = ranges.clone(); + r.remove_from(&20); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p', 'q', 'r'][..])]), Ordering::Equal); + r.remove_from(&17); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..]), (16, &['p'][..])]), Ordering::Equal); + r.remove_from(&15); + assert_eq!(r.range_iter().cmp(vec![(2, &['b', 'c', 'd'][..])]), Ordering::Equal); + r.remove_from(&3); + assert_eq!(r.range_iter().cmp(vec![(2, &['b'][..])]), Ordering::Equal); + r.remove_from(&2); + assert_eq!(r.range_iter().next(), None); } From f397fb210f3ff0695595a28d3dc5483f59b07848 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 00:11:35 +0100 Subject: [PATCH 50/61] fixed typo --- ethcore/src/blockchain/import_route.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs index 10629f2cb..b0d76ef6e 100644 --- a/ethcore/src/blockchain/import_route.rs +++ b/ethcore/src/blockchain/import_route.rs @@ -24,7 +24,7 @@ use blockchain::block_info::{BlockInfo, BlockLocation}; pub struct ImportRoute { /// Blocks that were invalidated by new block. pub invalidated_blocks: Vec, - /// Blocks that were validted by new block. + /// Blocks that were validated by new block. pub validated_blocks: Vec, } From 84a741d0f9ba09bff6a409c077ebacdc531fd561 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 00:21:07 +0100 Subject: [PATCH 51/61] Don't call mark_as_bad needlessly --- ethcore/src/block_queue.rs | 6 ++++++ ethcore/src/client.rs | 8 ++++++-- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 8f1105b8b..c83542f12 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -320,6 +320,9 @@ impl BlockQueue { /// Mark given block and all its children as bad. Stops verification. 
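///
/// A minimal caller sketch (mirroring how `Client` flushes import results
/// below; with this change, empty slices return early):
///
/// ```ignore
/// let mut block_queue = self.block_queue.write().unwrap();
/// block_queue.mark_as_bad(&bad_blocks);
/// block_queue.mark_as_good(&good_blocks);
/// ```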
pub fn mark_as_bad(&mut self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } let mut verification_lock = self.verification.lock().unwrap(); let mut processing = self.processing.write().unwrap(); @@ -345,6 +348,9 @@ impl BlockQueue { /// Mark given block as processed pub fn mark_as_good(&mut self, block_hashes: &[H256]) { + if block_hashes.is_empty() { + return; + } let mut processing = self.processing.write().unwrap(); for hash in block_hashes { processing.remove(&hash); diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 8471666aa..2d9b2e3c5 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -404,8 +404,12 @@ impl Client where V: Verifier { { let mut block_queue = self.block_queue.write().unwrap(); - block_queue.mark_as_bad(&bad_blocks); - block_queue.mark_as_good(&good_blocks); + if !bad_blocks.is_empty() { + block_queue.mark_as_bad(&bad_blocks); + } + if !good_blocks.is_empty() { + block_queue.mark_as_good(&good_blocks); + } } { From 3c1888c26abd94107f20a4a42cabaa3f01fbef53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:26:04 +0100 Subject: [PATCH 52/61] Fixing deps --- Cargo.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7df0c2541..1dbe54c8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,14 +25,14 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } +number_prefix = "0.2" +rpassword = "0.1" [dev-dependencies] ethcore = { path = "ethcore", features = ["dev"] } ethcore-util = { path = "util", features = ["dev"] } ethsync = { path = "sync", features = ["dev"] } ethcore-rpc = { path = "rpc", features = ["dev"] } -number_prefix = "0.2" -rpassword = "0.1" [features] default = ["rpc"] From 878e38c0cf922fbdd3b51b0f71892418181903ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:33:25 +0100 Subject: [PATCH 53/61] Fixing deps again --- Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1dbe54c8f..e797a3eac 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,7 +25,6 @@ ethcore-util = { path = "util" } ethsync = { path = "sync" } ethcore-devtools = { path = "devtools" } ethcore-rpc = { path = "rpc", optional = true } -number_prefix = "0.2" rpassword = "0.1" [dev-dependencies] From 9f77a85491b714ee03de491471c23b2714548cc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 09:35:46 +0100 Subject: [PATCH 54/61] Fixing compilation on nightly --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index e797a3eac..22d0f9288 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -39,7 +39,7 @@ rpc = ["ethcore-rpc"] dev = ["ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev"] dev-clippy = ["clippy", "ethcore/clippy", "ethcore-util/clippy", "ethsync/clippy", "ethcore-rpc/clippy"] travis-beta = ["ethcore/json-tests"] -travis-nightly = ["ethcore/json-tests", "clippy", "dev"] +travis-nightly = ["ethcore/json-tests", "dev-clippy", "dev"] [[bin]] path = "parity/main.rs" From 0a7cda09ffc9ec876599b23f2955763ab0ae6539 Mon Sep 17 00:00:00 2001 From: debris Date: Thu, 10 Mar 2016 10:17:17 +0100 Subject: [PATCH 55/61] changed route name to enacted and retracted --- ethcore/src/blockchain/block_info.rs | 4 +-- ethcore/src/blockchain/blockchain.rs | 30 ++++++++++----------- 
ethcore/src/blockchain/import_route.rs | 36 +++++++++++++------------- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/ethcore/src/blockchain/block_info.rs b/ethcore/src/blockchain/block_info.rs index 335bdbb4e..cf16a8834 100644 --- a/ethcore/src/blockchain/block_info.rs +++ b/ethcore/src/blockchain/block_info.rs @@ -44,8 +44,8 @@ pub enum BlockLocation { /// Hash of the newest common ancestor with old canon chain. ancestor: H256, /// Hashes of the blocks between ancestor and this block. - route: Vec, + enacted: Vec, /// Hashes of the blocks which were invalidated. - old_route: Vec, + retracted: Vec, } } diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e57f7208a..d67c1b7f1 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -552,12 +552,12 @@ impl BlockChain { match route.blocks.len() { 0 => BlockLocation::CanonChain, _ => { - let old_route = route.blocks.iter().take(route.index).cloned().collect::>(); + let retracted = route.blocks.iter().take(route.index).cloned().collect::>(); BlockLocation::BranchBecomingCanonChain { ancestor: route.ancestor, - route: route.blocks.into_iter().skip(route.index).collect(), - old_route: old_route.into_iter().rev().collect(), + enacted: route.blocks.into_iter().skip(route.index).collect(), + retracted: retracted.into_iter().rev().collect(), } } } @@ -579,11 +579,11 @@ impl BlockChain { BlockLocation::CanonChain => { block_hashes.insert(number, info.hash.clone()); }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. } => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - for (index, hash) in route.iter().cloned().enumerate() { + for (index, hash) in enacted.iter().cloned().enumerate() { block_hashes.insert(start_number + index as BlockNumber, hash); } @@ -668,11 +668,11 @@ impl BlockChain { ChainFilter::new(self, self.bloom_indexer.index_size(), self.bloom_indexer.levels()) .add_bloom(&header.log_bloom(), header.number() as usize) }, - BlockLocation::BranchBecomingCanonChain { ref ancestor, ref route, .. } => { + BlockLocation::BranchBecomingCanonChain { ref ancestor, ref enacted, .. 
} => { let ancestor_number = self.block_number(ancestor).unwrap(); let start_number = ancestor_number + 1; - let mut blooms: Vec = route.iter() + let mut blooms: Vec = enacted.iter() .map(|hash| self.block(hash).unwrap()) .map(|bytes| BlockView::new(&bytes).header_view().log_bloom()) .collect(); @@ -956,23 +956,23 @@ mod tests { let ir3a = bc.insert_block(&b3a, vec![]); assert_eq!(ir1, ImportRoute { - validated_blocks: vec![b1_hash], - invalidated_blocks: vec![], + enacted: vec![b1_hash], + retracted: vec![], }); assert_eq!(ir2, ImportRoute { - validated_blocks: vec![b2_hash], - invalidated_blocks: vec![], + enacted: vec![b2_hash], + retracted: vec![], }); assert_eq!(ir3b, ImportRoute { - validated_blocks: vec![b3b_hash], - invalidated_blocks: vec![], + enacted: vec![b3b_hash], + retracted: vec![], }); assert_eq!(ir3a, ImportRoute { - validated_blocks: vec![b3a_hash], - invalidated_blocks: vec![b3b_hash], + enacted: vec![b3a_hash], + retracted: vec![b3b_hash], }); assert_eq!(bc.best_block_hash(), best_block_hash); diff --git a/ethcore/src/blockchain/import_route.rs b/ethcore/src/blockchain/import_route.rs index b0d76ef6e..262b70899 100644 --- a/ethcore/src/blockchain/import_route.rs +++ b/ethcore/src/blockchain/import_route.rs @@ -23,16 +23,16 @@ use blockchain::block_info::{BlockInfo, BlockLocation}; #[derive(Debug, PartialEq)] pub struct ImportRoute { /// Blocks that were invalidated by new block. - pub invalidated_blocks: Vec, + pub retracted: Vec, /// Blocks that were validated by new block. - pub validated_blocks: Vec, + pub enacted: Vec, } impl ImportRoute { pub fn none() -> Self { ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![], + retracted: vec![], + enacted: vec![], } } } @@ -41,15 +41,15 @@ impl From for ImportRoute { fn from(info: BlockInfo) -> ImportRoute { match info.location { BlockLocation::CanonChain => ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![info.hash], + retracted: vec![], + enacted: vec![info.hash], }, BlockLocation::Branch => ImportRoute::none(), - BlockLocation::BranchBecomingCanonChain { mut route, old_route, .. } => { - route.push(info.hash); + BlockLocation::BranchBecomingCanonChain { mut enacted, retracted, .. 
} => { + enacted.push(info.hash); ImportRoute { - invalidated_blocks: old_route, - validated_blocks: route, + retracted: retracted, + enacted: enacted, } } } @@ -66,8 +66,8 @@ mod tests { #[test] fn import_route_none() { assert_eq!(ImportRoute::none(), ImportRoute { - validated_blocks: vec![], - invalidated_blocks: vec![], + enacted: vec![], + retracted: vec![], }); } @@ -93,8 +93,8 @@ mod tests { }; assert_eq!(ImportRoute::from(info), ImportRoute { - invalidated_blocks: vec![], - validated_blocks: vec![H256::from(U256::from(1))], + retracted: vec![], + enacted: vec![H256::from(U256::from(1))], }); } @@ -106,14 +106,14 @@ mod tests { total_difficulty: U256::from(0), location: BlockLocation::BranchBecomingCanonChain { ancestor: H256::from(U256::from(0)), - route: vec![H256::from(U256::from(1))], - old_route: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], } }; assert_eq!(ImportRoute::from(info), ImportRoute { - invalidated_blocks: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], - validated_blocks: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], + retracted: vec![H256::from(U256::from(3)), H256::from(U256::from(4))], + enacted: vec![H256::from(U256::from(1)), H256::from(U256::from(2))], }); } } From a2046b429f9120a0532dc1da3fc4467fa20c6469 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Thu, 10 Mar 2016 13:27:33 +0400 Subject: [PATCH 56/61] exposing only one func --- sync/src/chain.rs | 9 +++++++-- sync/src/lib.rs | 3 +-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 7294570fe..ea9a47da2 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -43,6 +43,7 @@ use io::SyncIo; use transaction_queue::TransactionQueue; use time; use super::SyncConfig; +use ethcore; known_heap_size!(0, PeerInfo, Header, HeaderId); @@ -1300,8 +1301,12 @@ impl ChainSync { // TODO [todr] propagate transactions? 
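// The `insert_transaction` method introduced below hides the queue mutex from
// callers; a minimal caller sketch (mirroring `EthSync::insert_transaction`
// in sync/src/lib.rs, where `chain` is the shared `BlockChainClient`):
//
//     let nonce_fn = |a: &Address| chain.state().nonce(a) + U256::one();
//     sync.insert_transaction(signed_tx, &nonce_fn);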
} - pub fn transaction_queue(&self) -> &Mutex { - return &self.transaction_queue; + /// Add transaction to the transaction queue + pub fn insert_transaction(&self, transaction: ethcore::transaction::SignedTransaction, fetch_nonce: &T) + where T: Fn(&Address) -> U256 + { + let mut queue = self.transaction_queue.lock().unwrap(); + queue.add(transaction, fetch_nonce); } } diff --git a/sync/src/lib.rs b/sync/src/lib.rs index fdcf79749..c9eb792a1 100644 --- a/sync/src/lib.rs +++ b/sync/src/lib.rs @@ -135,8 +135,7 @@ impl EthSync { let nonce_fn = |a: &Address| self.chain.state().nonce(a) + U256::one(); let sync = self.sync.write().unwrap(); - let mut queue = sync.transaction_queue().lock().unwrap(); - queue.add(transaction, &nonce_fn); + sync.insert_transaction(transaction, &nonce_fn); } } From ff12b53ba69899dd4a700ff58720bdf6d29ca22c Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 10 Mar 2016 10:40:16 +0100 Subject: [PATCH 57/61] Stop workers before stopping event loop --- util/src/io/service.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 83fa71b8a..8a34ee80a 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -153,7 +153,7 @@ struct UserTimer { pub struct IoManager where Message: Send + Sync { timers: Arc>>, handlers: Vec>>, - _workers: Vec, + workers: Vec, worker_channel: chase_lev::Worker>, work_ready: Arc, } @@ -180,7 +180,7 @@ impl IoManager where Message: Send + Sync + Clone + 'static { timers: Arc::new(RwLock::new(HashMap::new())), handlers: Vec::new(), worker_channel: worker, - _workers: workers, + workers: workers, work_ready: work_ready, }; try!(event_loop.run(&mut io)); @@ -230,7 +230,10 @@ impl Handler for IoManager where Message: Send + Clone + Sync fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { match msg { - IoMessage::Shutdown => event_loop.shutdown(), + IoMessage::Shutdown => { + self.workers.clear(); + event_loop.shutdown(); + }, IoMessage::AddHandler { handler } => { let handler_id = { self.handlers.push(handler.clone()); From 276768a82600045a2f95767d5d5c6dbb72873753 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:11:59 +0100 Subject: [PATCH 58/61] Failing test case for #656 --- sync/src/transaction_queue.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 3e0d931b5..38e70d1fc 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -541,6 +541,28 @@ mod test { assert_eq!(top[0], tx); } + #[test] + fn should_correctly_update_futures_when_removing() { + // given + let prev_nonce = |a: &Address| default_nonce(a) - U256::one(); + let next2_nonce = |a: &Address| default_nonce(a) + U256::from(2); + + let mut txq = TransactionQueue::new(); + + let (tx, tx2) = new_txs(U256::from(1)); + txq.add(tx.clone(), &prev_nonce); + txq.add(tx2.clone(), &prev_nonce); + assert_eq!(txq.status().future, 2); + + // when + txq.remove(&tx.hash(), &next2_nonce); + // should remove both transactions since they are not valid + + // then + assert_eq!(txq.status().pending, 0); + assert_eq!(txq.status().future, 0); + } + #[test] fn should_move_transactions_if_gap_filled() { // given From 0cf405527e80879a6e97fae68707b86ec67403be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:14:25 +0100 Subject: [PATCH 59/61] Fixing update height bug --- sync/src/transaction_queue.rs | 7 ++++++- 1 
file changed, 6 insertions(+), 1 deletion(-) diff --git a/sync/src/transaction_queue.rs b/sync/src/transaction_queue.rs index 38e70d1fc..ac8debfc9 100644 --- a/sync/src/transaction_queue.rs +++ b/sync/src/transaction_queue.rs @@ -277,7 +277,12 @@ impl TransactionQueue { }; for k in all_nonces_from_sender { let order = self.future.drop(&sender, &k).unwrap(); - self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + if k >= current_nonce { + self.future.insert(sender.clone(), k, order.update_height(k, current_nonce)); + } else { + // Remove the transaction completely + self.by_hash.remove(&order.hash); + } } } From 6d0578e19c5b5442ccdb42d695fb6a70238cf6ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 11:16:54 +0100 Subject: [PATCH 60/61] Additional explanation for ordering of commit/insert_block --- ethcore/src/client.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/src/client.rs b/ethcore/src/client.rs index 874fc9646..aaf5fd728 100644 --- a/ethcore/src/client.rs +++ b/ethcore/src/client.rs @@ -396,7 +396,8 @@ impl Client where V: Verifier { .commit(header.number(), &header.hash(), ancient) .expect("State DB commit failed."); - // And update the chain + // And update the chain after commit to prevent race conditions + // (when something is in chain but you are not able to fetch details) self.chain.write().unwrap() .insert_block(&block.bytes, receipts); From 027f122aea154b14c7563386251ff0407b4d2969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 10 Mar 2016 14:24:33 +0100 Subject: [PATCH 61/61] Removing get prefix from poll_info --- rpc/src/v1/helpers/poll_manager.rs | 18 +++++++++--------- rpc/src/v1/impls/eth.rs | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/rpc/src/v1/helpers/poll_manager.rs b/rpc/src/v1/helpers/poll_manager.rs index 36a6352c2..0297384d1 100644 --- a/rpc/src/v1/helpers/poll_manager.rs +++ b/rpc/src/v1/helpers/poll_manager.rs @@ -84,7 +84,7 @@ impl PollManager where T: Timer { } /// Returns number of block when last poll happend. 
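///
/// A minimal usage sketch (assuming the `PollManager` API in this module,
/// as exercised by the tests below):
///
/// ```ignore
/// indexer.update_poll(&poll_id, 21); // last polled at block 21
/// if let Some(info) = indexer.poll_info(&poll_id) {
///     assert_eq!(info.block_number, 21);
/// }
/// ```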
- pub fn get_poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { + pub fn poll_info(&mut self, id: &PollId) -> Option<&PollInfo> { self.polls.prune(); self.polls.get(id) } @@ -124,21 +124,21 @@ mod tests { *time.borrow_mut() = 10; indexer.update_poll(&0, 21); - assert_eq!(indexer.get_poll_info(&0).unwrap().filter, false); - assert_eq!(indexer.get_poll_info(&0).unwrap().block_number, 21); + assert_eq!(indexer.poll_info(&0).unwrap().filter, false); + assert_eq!(indexer.poll_info(&0).unwrap().block_number, 21); *time.borrow_mut() = 30; indexer.update_poll(&1, 23); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + assert_eq!(indexer.poll_info(&1).unwrap().filter, true); + assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); *time.borrow_mut() = 75; indexer.update_poll(&0, 30); - assert!(indexer.get_poll_info(&0).is_none()); - assert_eq!(indexer.get_poll_info(&1).unwrap().filter, true); - assert_eq!(indexer.get_poll_info(&1).unwrap().block_number, 23); + assert!(indexer.poll_info(&0).is_none()); + assert_eq!(indexer.poll_info(&1).unwrap().filter, true); + assert_eq!(indexer.poll_info(&1).unwrap().block_number, 23); indexer.remove_poll(&1); - assert!(indexer.get_poll_info(&1).is_none()); + assert!(indexer.poll_info(&1).is_none()); } } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 7113c55b1..479bae95b 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -307,7 +307,7 @@ impl EthFilter for EthFilterClient { let client = take_weak!(self.client); from_params::<(Index,)>(params) .and_then(|(index,)| { - let info = self.polls.lock().unwrap().get_poll_info(&index.value()).cloned(); + let info = self.polls.lock().unwrap().poll_info(&index.value()).cloned(); match info { None => Ok(Value::Array(vec![] as Vec)), Some(info) => match info.filter {