From 0de2a031d1df9c28d1676e8966dfc9cb18242c50 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 13 Jan 2016 15:10:48 +0100 Subject: [PATCH 01/11] New network IO API --- src/sync/io.rs | 17 +++++++++-------- src/sync/mod.rs | 31 ++++++++++++++++++++----------- 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/src/sync/io.rs b/src/sync/io.rs index 9806a3bf5..884babe3e 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -1,6 +1,7 @@ use client::BlockChainClient; -use util::network::{HandlerIo, PeerId, PacketId,}; +use util::{NetworkContext, PeerId, PacketId,}; use util::error::UtilError; +use sync::SyncMessage; /// IO interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -16,15 +17,15 @@ pub trait SyncIo { fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient; } -/// Wraps `HandlerIo` and the blockchain client -pub struct NetSyncIo<'s, 'h> where 'h:'s { - network: &'s mut HandlerIo<'h>, +/// Wraps `NetworkContext` and the blockchain client +pub struct NetSyncIo<'s, 'h, 'io> where 'h: 's, 'io: 'h { + network: &'s mut NetworkContext<'h, 'io, SyncMessage>, chain: &'s mut BlockChainClient } -impl<'s, 'h> NetSyncIo<'s, 'h> { - /// Creates a new instance from the `HandlerIo` and the blockchain client reference. - pub fn new(network: &'s mut HandlerIo<'h>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h> { +impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { + /// Creates a new instance from the `NetworkContext` and the blockchain client reference. + pub fn new(network: &'s mut NetworkContext<'h, 'io, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h,'io> { NetSyncIo { network: network, chain: chain, @@ -32,7 +33,7 @@ impl<'s, 'h> NetSyncIo<'s, 'h> { } } -impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { +impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { fn disable_peer(&mut self, peer_id: &PeerId) { self.network.disable_peer(*peer_id); } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 300465014..50e20c439 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -24,7 +24,9 @@ use std::sync::Arc; use client::BlockChainClient; -use util::network::{ProtocolHandler, NetworkService, HandlerIo, TimerToken, PeerId, Message}; +use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; +use util::TimerToken; +use util::Bytes; use sync::chain::ChainSync; use sync::io::NetSyncIo; @@ -35,6 +37,13 @@ mod range_collection; #[cfg(test)] mod tests; +/// Message type for external events +pub enum SyncMessage { + /// New block has been imported into the blockchain + NewBlock(Bytes) +} + + /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
TODO: this should evetually become an IPC endpoint @@ -47,7 +56,7 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc) { + pub fn register(service: &mut NetworkService, chain: Arc) { let sync = Box::new(EthSync { chain: chain, sync: ChainSync::new(), @@ -61,39 +70,39 @@ impl EthSync { } /// Stop sync - pub fn stop(&mut self, io: &mut HandlerIo) { + pub fn stop(&mut self, io: &mut NetworkContext) { self.sync.abort(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); } /// Restart sync - pub fn restart(&mut self, io: &mut HandlerIo) { + pub fn restart(&mut self, io: &mut NetworkContext) { self.sync.restart(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); } } -impl ProtocolHandler for EthSync { - fn initialize(&mut self, io: &mut HandlerIo) { +impl NetworkProtocolHandler for EthSync { + fn initialize(&mut self, io: &mut NetworkContext) { self.sync.restart(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); io.register_timer(1000).unwrap(); } - fn read(&mut self, io: &mut HandlerIo, peer: &PeerId, packet_id: u8, data: &[u8]) { + fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { self.sync.on_packet(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer, packet_id, data); } - fn connected(&mut self, io: &mut HandlerIo, peer: &PeerId) { + fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { self.sync.on_peer_connected(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer); } - fn disconnected(&mut self, io: &mut HandlerIo, peer: &PeerId) { + fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { self.sync.on_peer_aborting(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer); } - fn timeout(&mut self, io: &mut HandlerIo, _timer: TimerToken) { + fn timeout(&mut self, io: &mut NetworkContext, _timer: TimerToken) { self.sync.maintain_sync(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); } - fn message(&mut self, _io: &mut HandlerIo, _message: &Message) { + fn message(&mut self, _io: &mut NetworkContext, _message: &SyncMessage) { } } From e297e598ce6d9e7b13cfb529a3087203367c9bd9 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 13 Jan 2016 23:15:44 +0100 Subject: [PATCH 02/11] Client service --- src/bin/client.rs | 13 ++----------- src/client.rs | 10 ++++++++-- src/error.rs | 12 ++++++++++++ src/lib.rs | 1 + src/queue.rs | 7 +++++-- src/sync/mod.rs | 7 +++++-- 6 files changed, 33 insertions(+), 17 deletions(-) diff --git a/src/bin/client.rs b/src/bin/client.rs index a644ecd1b..5a3e907be 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -4,23 +4,14 @@ extern crate rustc_serialize; extern crate env_logger; use std::io::*; -use std::env; -use std::sync::Arc; use util::hash::*; -use util::network::{NetworkService}; -use ethcore::client::Client; -use ethcore::sync::EthSync; +use ethcore::service::ClientService; use ethcore::ethereum; fn main() { ::env_logger::init().ok(); - let mut service = NetworkService::start().unwrap(); - //TODO: replace with proper genesis and chain params. 
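
The SyncMessage type introduced in this first patch, and the way the following patches route it (the block queue verifies a block, then hands the bytes to the client over the network IO channel as UserMessage(SyncMessage::BlockVerified(..)), as the queue.rs and service.rs hunks further below show), is easy to illustrate in isolation. The sketch below is only an analogy: it uses std::sync::mpsc instead of the util crate's IoChannel, and notify_verified is an invented name.

    use std::sync::mpsc::{channel, Sender};

    type Bytes = Vec<u8>;

    enum SyncMessage {
        NewChainBlock(Bytes),
        BlockVerified(Bytes),
    }

    fn notify_verified(tx: &Sender<SyncMessage>, block: Bytes) {
        // The queue only verifies; insertion happens wherever the receiving
        // end (the client handler) drains the channel.
        tx.send(SyncMessage::BlockVerified(block)).expect("receiver still alive");
    }

    fn main() {
        let (tx, rx) = channel();
        notify_verified(&tx, vec![0u8; 4]);
        match rx.recv().expect("a message was sent") {
            SyncMessage::BlockVerified(bytes) => println!("client would import {} bytes", bytes.len()),
            SyncMessage::NewChainBlock(_) => {}
        }
    }
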
let spec = ethereum::new_frontier(); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - let client = Arc::new(Client::new(spec, &dir).unwrap()); - EthSync::register(&mut service, client); + let mut _service = ClientService::start(spec).unwrap(); loop { let mut cmd = String::new(); stdin().read_line(&mut cmd).unwrap(); diff --git a/src/client.rs b/src/client.rs index 100ac6bd8..f5a4b43a6 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,6 +6,7 @@ use header::BlockNumber; use spec::Spec; use engine::Engine; use queue::BlockQueue; +use sync::NetSyncMessage; /// General block status pub enum BlockStatus { @@ -99,15 +100,20 @@ pub struct Client { } impl Client { - pub fn new(spec: Spec, path: &Path) -> Result { + pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let engine = Arc::new(try!(spec.to_engine())); Ok(Client { chain: chain.clone(), _engine: engine.clone(), - queue: BlockQueue::new(chain.clone(), engine.clone()), + queue: BlockQueue::new(chain.clone(), engine.clone(), message_channel), }) } + + + pub fn import_verified_block(&mut self, bytes: Bytes) { + self.chain.write().unwrap().insert_block(&bytes); + } } impl BlockChainClient for Client { diff --git a/src/error.rs b/src/error.rs index 01618c66c..b78b3dbec 100644 --- a/src/error.rs +++ b/src/error.rs @@ -121,6 +121,18 @@ impl From for Error { } } +impl From for Error { + fn from(err: UtilError) -> Error { + Error::Util(err) + } +} + +impl From for Error { + fn from(err: IoError) -> Error { + Error::Util(From::from(err)) + } +} + // TODO: uncomment below once https://github.com/rust-lang/rust/issues/27336 sorted. /*#![feature(concat_idents)] macro_rules! assimilate { diff --git a/src/lib.rs b/src/lib.rs index 0b81f4fd3..574d442a4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -111,6 +111,7 @@ pub mod views; pub mod blockchain; pub mod extras; pub mod evm; +pub mod service; #[cfg(test)] mod tests; diff --git a/src/queue.rs b/src/queue.rs index 9cef88181..60026e818 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -4,20 +4,23 @@ use views::{BlockView}; use verification::*; use error::*; use engine::Engine; +use sync::*; /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. pub struct BlockQueue { bc: Arc>, engine: Arc>, + message_channel: IoChannel } impl BlockQueue { /// Creates a new queue instance. 
- pub fn new(bc: Arc>, engine: Arc>) -> BlockQueue { + pub fn new(bc: Arc>, engine: Arc>, message_channel: IoChannel) -> BlockQueue { BlockQueue { bc: bc, engine: engine, + message_channel: message_channel } } @@ -34,7 +37,7 @@ impl BlockQueue { try!(verify_block_basic(bytes, self.engine.deref().deref())); try!(verify_block_unordered(bytes, self.engine.deref().deref())); try!(verify_block_final(bytes, self.engine.deref().deref(), self.bc.read().unwrap().deref())); - self.bc.write().unwrap().insert_block(bytes); + try!(self.message_channel.send(UserMessage(SyncMessage::BlockVerified(bytes.to_vec()))).map_err(|e| Error::from(e))); Ok(()) } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 50e20c439..e2cf5529d 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use client::BlockChainClient; -use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; +use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; use util::TimerToken; use util::Bytes; use sync::chain::ChainSync; @@ -40,9 +40,12 @@ mod tests; /// Message type for external events pub enum SyncMessage { /// New block has been imported into the blockchain - NewBlock(Bytes) + NewChainBlock(Bytes), + /// A block is ready + BlockVerified(Bytes), } +pub type NetSyncMessage = NetworkIoMessage; /// Ethereum network protocol handler pub struct EthSync { From d3a16574d6b0b607f17f05fa56a9187d2892d486 Mon Sep 17 00:00:00 2001 From: arkpar Date: Wed, 13 Jan 2016 23:15:53 +0100 Subject: [PATCH 03/11] Client service --- src/service.rs | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 src/service.rs diff --git a/src/service.rs b/src/service.rs new file mode 100644 index 000000000..889de9723 --- /dev/null +++ b/src/service.rs @@ -0,0 +1,61 @@ +//! Client service. +//! +//! +//! +//! +//! 
+ +use util::*; +use sync::*; +use spec::Spec; +use error::*; +use std::env; +use client::Client; + +pub struct ClientService { + _net_service: NetworkService, +} + +impl ClientService { + pub fn start(spec: Spec) -> Result { + let mut net_service = try!(NetworkService::start()); + let mut dir = env::temp_dir(); + dir.push(H32::random().hex()); + let client = Arc::new(Client::new(spec, &dir, net_service.io().channel()).unwrap()); + EthSync::register(&mut net_service, client.clone()); + let client_io = Box::new(ClientIoHandler { + client: client + }); + try!(net_service.io().register_handler(client_io)); + + Ok(ClientService { + _net_service: net_service, + }) + } +} + +struct ClientIoHandler { + client: Arc +} + +impl IoHandler for ClientIoHandler { + fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { + } + + fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) { + match net_message { + &mut UserMessage(ref mut message) => { + match message { + &mut SyncMessage::BlockVerified(ref mut bytes) => { + Arc::get_mut(&mut self.client).unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); + }, + _ => {}, + } + + } + _ => {}, + } + + } +} + From 8f631a7c60c4a5c447238df8ddbe086de01a6695 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 14 Jan 2016 01:28:37 +0100 Subject: [PATCH 04/11] State db sync --- src/client.rs | 56 ++++++++++++++++++++++++++++++++++++++++++--- src/queue.rs | 11 +-------- src/verification.rs | 1 + 3 files changed, 55 insertions(+), 13 deletions(-) diff --git a/src/client.rs b/src/client.rs index f5a4b43a6..bb7c324d0 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,4 +1,5 @@ use util::*; +use rocksdb::{DB}; use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; @@ -7,6 +8,9 @@ use spec::Spec; use engine::Engine; use queue::BlockQueue; use sync::NetSyncMessage; +use env_info::LastHashes; +use verification::*; +use block::*; /// General block status pub enum BlockStatus { @@ -95,7 +99,8 @@ pub trait BlockChainClient : Sync { /// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue. 
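
One detail worth calling out in the service.rs handler above: ClientIoHandler::message only has a mutable reference to the incoming message, so it takes the block bytes out with mem::replace(bytes, Bytes::new()) instead of cloning them. A minimal, self-contained sketch of that idiom (take_payload is an invented name):

    use std::mem;

    fn take_payload(slot: &mut Vec<u8>) -> Vec<u8> {
        // Swap an empty Vec in and get the full payload out by value.
        mem::replace(slot, Vec::new())
    }

    fn main() {
        let mut message_bytes = vec![1u8, 2, 3];
        let owned = take_payload(&mut message_bytes);
        assert_eq!(owned, vec![1, 2, 3]);
        assert!(message_bytes.is_empty());
    }
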
pub struct Client { chain: Arc>, - _engine: Arc>, + engine: Arc>, + state_db: OverlayDB, queue: BlockQueue, } @@ -103,15 +108,56 @@ impl Client { pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let engine = Arc::new(try!(spec.to_engine())); + let mut state_path = path.to_path_buf(); + state_path.push("state"); + let db = DB::open_default(state_path.to_str().unwrap()).unwrap(); + Ok(Client { chain: chain.clone(), - _engine: engine.clone(), - queue: BlockQueue::new(chain.clone(), engine.clone(), message_channel), + engine: engine.clone(), + state_db: OverlayDB::new(db), + queue: BlockQueue::new(engine.clone(), message_channel), }) } pub fn import_verified_block(&mut self, bytes: Bytes) { + let block = BlockView::new(&bytes); + let header = block.header_view(); + if let Err(e) = verify_block_final(&bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { + warn!(target: "client", "Stage 3 block verification failed for {}\nError: {:?}", header.sha3(), e); + // TODO: mark as bad + return; + }; + let parent = match self.chain.read().unwrap().block_header(&header.parent_hash()) { + Some(p) => p, + None => { + warn!(target: "client", "Stage 3 import failed for {}: Parent not found ({}) ", header.sha3(), header.parent_hash()); + return; + }, + }; + // build last hashes + let mut last = self.chain.read().unwrap().best_block_hash(); + let mut last_hashes = LastHashes::new(); + last_hashes.resize(256, H256::new()); + for i in 0..255 { + match self.chain.read().unwrap().block_details(&last) { + Some(details) => { + last_hashes[i + 1] = details.parent.clone(); + last = details.parent.clone(); + }, + None => break, + } + } + + let mut b = OpenBlock::new(self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes, header.author(), header.extra_data()); + + for t in block.transactions().into_iter() { + if let Err(e) = b.push_transaction(t.clone(), None) { + warn!(target: "client", "Stage 3 transaction import failed for block {}\nTransaction:{:?}\nError: {:?}", header.sha3(), t, e); + return; + }; + } self.chain.write().unwrap().insert_block(&bytes); } } @@ -171,6 +217,10 @@ impl BlockChainClient for Client { } fn import_block(&mut self, bytes: &[u8]) -> ImportResult { + let header = BlockView::new(bytes).header(); + if self.chain.read().unwrap().is_known(&header.hash()) { + return Err(ImportError::AlreadyInChain); + } self.queue.import_block(bytes) } diff --git a/src/queue.rs b/src/queue.rs index 60026e818..f02a4ea72 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -1,6 +1,4 @@ use util::*; -use blockchain::*; -use views::{BlockView}; use verification::*; use error::*; use engine::Engine; @@ -9,16 +7,14 @@ use sync::*; /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. pub struct BlockQueue { - bc: Arc>, engine: Arc>, message_channel: IoChannel } impl BlockQueue { /// Creates a new queue instance. - pub fn new(bc: Arc>, engine: Arc>, message_channel: IoChannel) -> BlockQueue { + pub fn new(engine: Arc>, message_channel: IoChannel) -> BlockQueue { BlockQueue { - bc: bc, engine: engine, message_channel: message_channel } @@ -30,13 +26,8 @@ impl BlockQueue { /// Add a block to the queue. 
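
The last-hashes loop added to Client::import_verified_block above walks parent links backwards from the best block to fill a fixed 256-entry window of recent ancestor hashes, which is what the enactment environment (LastHashes) expects. A simplified stand-alone version of that walk, with u64 values standing in for H256 hashes and a HashMap standing in for the chain's block_details lookup:

    use std::collections::HashMap;

    fn collect_last_hashes(best: u64, parent_of: &HashMap<u64, u64>, limit: usize) -> Vec<u64> {
        let mut out = Vec::with_capacity(limit);
        let mut current = best;
        while out.len() < limit {
            match parent_of.get(&current) {
                Some(&parent) => {
                    out.push(parent);  // record the ancestor
                    current = parent;  // keep walking backwards
                }
                None => break,         // ran off the end of the known chain
            }
        }
        out
    }

    fn main() {
        // Tiny chain: 3 -> 2 -> 1 -> 0 (each key maps to its parent).
        let parent_of: HashMap<u64, u64> = [(3, 2), (2, 1), (1, 0)].iter().cloned().collect();
        assert_eq!(collect_last_hashes(3, &parent_of, 256), vec![2, 1, 0]);
    }
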
pub fn import_block(&mut self, bytes: &[u8]) -> ImportResult { - let header = BlockView::new(bytes).header(); - if self.bc.read().unwrap().is_known(&header.hash()) { - return Err(ImportError::AlreadyInChain); - } try!(verify_block_basic(bytes, self.engine.deref().deref())); try!(verify_block_unordered(bytes, self.engine.deref().deref())); - try!(verify_block_final(bytes, self.engine.deref().deref(), self.bc.read().unwrap().deref())); try!(self.message_channel.send(UserMessage(SyncMessage::BlockVerified(bytes.to_vec()))).map_err(|e| Error::from(e))); Ok(()) } diff --git a/src/verification.rs b/src/verification.rs index 5dabcf84f..2c3fe130d 100644 --- a/src/verification.rs +++ b/src/verification.rs @@ -38,6 +38,7 @@ pub fn verify_block_unordered(bytes: &[u8], engine: &Engine) -> Result<(), Error /// Phase 3 verification. Check block information against parent and uncles. pub fn verify_block_final(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { + // TODO: verify timestamp let block = BlockView::new(bytes); let header = block.header(); let parent = try!(bc.block_header(&header.parent_hash).ok_or::(From::from(BlockError::UnknownParent(header.parent_hash.clone())))); From 00868488cf1365f0604d1af246bb744f2f6139c6 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 14 Jan 2016 19:03:48 +0100 Subject: [PATCH 05/11] Post enactment block verification --- src/block.rs | 14 +++++++++--- src/client.rs | 41 ++++++++++++++++++++++------------ src/engine.rs | 2 +- src/error.rs | 11 +++++----- src/ethereum/ethash.rs | 2 +- src/queue.rs | 11 ++++++++-- src/service.rs | 12 +++++----- src/sync/chain.rs | 50 ++++++++++++++++++++++-------------------- src/sync/io.rs | 14 +++++++++--- src/sync/mod.rs | 23 +++++++++---------- src/verification.rs | 35 +++++++++++++++++++++-------- 11 files changed, 137 insertions(+), 78 deletions(-) diff --git a/src/block.rs b/src/block.rs index ce125b8df..016c72641 100644 --- a/src/block.rs +++ b/src/block.rs @@ -229,6 +229,9 @@ impl<'x, 'y> ClosedBlock<'x, 'y> { /// Turn this back into an `OpenBlock`. pub fn reopen(self) -> OpenBlock<'x, 'y> { self.open_block } + + /// Drop this object and return the underlieing database. + pub fn drain(self) -> OverlayDB { self.open_block.block.state.drop().1 } } impl SealedBlock { @@ -251,15 +254,20 @@ impl IsBlock for SealedBlock { } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header -/// -pub fn enact(block_bytes: &[u8], engine: &Engine, db: OverlayDB, parent: &Header, last_hashes: &LastHashes) -> Result { +pub fn enact<'x, 'y>(block_bytes: &[u8], engine: &'x Engine, db: OverlayDB, parent: &Header, last_hashes: &'y LastHashes) -> Result, Error> { let block = BlockView::new(block_bytes); let header = block.header_view(); let mut b = OpenBlock::new(engine, db, parent, last_hashes, header.author(), header.extra_data()); b.set_timestamp(header.timestamp()); for t in block.transactions().into_iter() { try!(b.push_transaction(t, None)); } for u in block.uncles().into_iter() { try!(b.push_uncle(u)); } - Ok(try!(b.close().seal(header.seal()))) + Ok(b.close()) +} + +/// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. 
Seal the block aferwards +pub fn enact_and_seal(block_bytes: &[u8], engine: &Engine, db: OverlayDB, parent: &Header, last_hashes: &LastHashes) -> Result { + let header = BlockView::new(block_bytes).header_view(); + Ok(try!(try!(enact(block_bytes, engine, db, parent, last_hashes)).seal(header.seal()))) } #[test] diff --git a/src/client.rs b/src/client.rs index bb7c324d0..c933e7817 100644 --- a/src/client.rs +++ b/src/client.rs @@ -46,7 +46,7 @@ pub struct BlockQueueStatus { pub type TreeRoute = ::blockchain::TreeRoute; /// Blockchain database client. Owns and manages a blockchain and a block queue. -pub trait BlockChainClient : Sync { +pub trait BlockChainClient : Sync + Send { /// Get raw block header data by block header hash. fn block_header(&self, hash: &H256) -> Option; @@ -111,11 +111,13 @@ impl Client { let mut state_path = path.to_path_buf(); state_path.push("state"); let db = DB::open_default(state_path.to_str().unwrap()).unwrap(); + let mut state_db = OverlayDB::new(db); + engine.spec().ensure_db_good(&mut state_db); Ok(Client { chain: chain.clone(), engine: engine.clone(), - state_db: OverlayDB::new(db), + state_db: state_db, queue: BlockQueue::new(engine.clone(), message_channel), }) } @@ -123,16 +125,16 @@ impl Client { pub fn import_verified_block(&mut self, bytes: Bytes) { let block = BlockView::new(&bytes); - let header = block.header_view(); - if let Err(e) = verify_block_final(&bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { - warn!(target: "client", "Stage 3 block verification failed for {}\nError: {:?}", header.sha3(), e); + let header = block.header(); + if let Err(e) = verify_block_family(&bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { + warn!(target: "client", "Stage 3 block verification failed for {}\nError: {:?}", header.hash(), e); // TODO: mark as bad return; }; - let parent = match self.chain.read().unwrap().block_header(&header.parent_hash()) { + let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, None => { - warn!(target: "client", "Stage 3 import failed for {}: Parent not found ({}) ", header.sha3(), header.parent_hash()); + warn!(target: "client", "Block import failed for {}: Parent not found ({}) ", header.hash(), header.parent_hash); return; }, }; @@ -150,15 +152,26 @@ impl Client { } } - let mut b = OpenBlock::new(self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes, header.author(), header.extra_data()); - - for t in block.transactions().into_iter() { - if let Err(e) = b.push_transaction(t.clone(), None) { - warn!(target: "client", "Stage 3 transaction import failed for block {}\nTransaction:{:?}\nError: {:?}", header.sha3(), t, e); + let result = match enact(&bytes, self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes) { + Ok(b) => b, + Err(e) => { + warn!(target: "client", "Block import failed for {}\nError: {:?}", header.hash(), e); return; - }; + } + }; + if let Err(e) = verify_block_final(&header, result.block().header()) { + warn!(target: "client", "Stage 4 block verification failed for {}\nError: {:?}", header.hash(), e); } - self.chain.write().unwrap().insert_block(&bytes); + + self.chain.write().unwrap().insert_block(&bytes); //TODO: err here? 
+ match result.drain().commit() { + Ok(_) => (), + Err(e) => { + warn!(target: "client", "State DB commit failed: {:?}", e); + return; + } + } + info!(target: "client", "Imported {}", header.hash()); } } diff --git a/src/engine.rs b/src/engine.rs index 59888d12a..e76b3b28f 100644 --- a/src/engine.rs +++ b/src/engine.rs @@ -45,7 +45,7 @@ pub trait Engine : Sync + Send { /// Phase 3 verification. Check block information against parent and uncles. `block` (the header's full block) /// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. - fn verify_block_final(&self, _header: &Header, _parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) } + fn verify_block_family(&self, _header: &Header, _parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { Ok(()) } /// Additional verification for transactions in blocks. // TODO: Add flags for which bits of the transaction to check. diff --git a/src/error.rs b/src/error.rs index b78b3dbec..9973cb91c 100644 --- a/src/error.rs +++ b/src/error.rs @@ -2,6 +2,7 @@ use util::*; use header::BlockNumber; +use basic_types::LogBloom; #[derive(Debug, PartialEq, Eq)] pub struct Mismatch { @@ -50,15 +51,15 @@ pub enum BlockError { UncleIsBrother(OutOfBounds), UncleInChain(H256), UncleParentNotInChain(H256), - InvalidStateRoot, - InvalidGasUsed, + InvalidStateRoot(Mismatch), + InvalidGasUsed(Mismatch), InvalidTransactionsRoot(Mismatch), InvalidDifficulty(Mismatch), InvalidGasLimit(OutOfBounds), - InvalidReceiptsStateRoot, + InvalidReceiptsStateRoot(Mismatch), InvalidTimestamp(OutOfBounds), - InvalidLogBloom, - InvalidBlockNonce, + InvalidLogBloom(Mismatch), + InvalidBlockNonce(Mismatch), InvalidParentHash(Mismatch), InvalidNumber(OutOfBounds), UnknownParent(H256), diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index f2f5eeafd..2d99bfa56 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -60,7 +60,7 @@ impl Engine for Ethash { Ok(()) } - fn verify_block_final(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { + fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // Check difficulty is correct given the two timestamps. let expected_difficulty = self.calculate_difficuty(header, parent); if header.difficulty != expected_difficulty { diff --git a/src/queue.rs b/src/queue.rs index f02a4ea72..6c398e6f5 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -3,6 +3,7 @@ use verification::*; use error::*; use engine::Engine; use sync::*; +use views::*; /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. @@ -26,8 +27,14 @@ impl BlockQueue { /// Add a block to the queue. 
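
The reworked BlockQueue::import_block above wraps each verification stage in map_err so a failure is logged with the block hash at the point it happens while the original error is still propagated to the caller. The same log-and-propagate shape with a plain Result (parse_port is an invented example, not part of the codebase):

    fn parse_port(input: &str) -> Result<u16, std::num::ParseIntError> {
        input.trim().parse::<u16>().map_err(|e| {
            eprintln!("stage 1 failed for {:?}: {:?}", input, e);
            e // hand the original error back unchanged
        })
    }

    fn main() {
        assert_eq!(parse_port("8545").unwrap(), 8545);
        assert!(parse_port("not-a-port").is_err()); // logs, then returns Err
    }
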
pub fn import_block(&mut self, bytes: &[u8]) -> ImportResult { - try!(verify_block_basic(bytes, self.engine.deref().deref())); - try!(verify_block_unordered(bytes, self.engine.deref().deref())); + try!(verify_block_basic(bytes, self.engine.deref().deref()).map_err(|e| { + warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), e); + e + })); + try!(verify_block_unordered(bytes, self.engine.deref().deref()).map_err(|e| { + warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), e); + e + })); try!(self.message_channel.send(UserMessage(SyncMessage::BlockVerified(bytes.to_vec()))).map_err(|e| Error::from(e))); Ok(()) } diff --git a/src/service.rs b/src/service.rs index 889de9723..6f3294ce6 100644 --- a/src/service.rs +++ b/src/service.rs @@ -19,9 +19,11 @@ pub struct ClientService { impl ClientService { pub fn start(spec: Spec) -> Result { let mut net_service = try!(NetworkService::start()); - let mut dir = env::temp_dir(); - dir.push(H32::random().hex()); - let client = Arc::new(Client::new(spec, &dir, net_service.io().channel()).unwrap()); + info!("Starting {}", net_service.host_info()); + let mut dir = env::home_dir().unwrap(); + dir.push(".parity"); + dir.push(H64::from(spec.genesis_header().hash()).hex()); + let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel())))); EthSync::register(&mut net_service, client.clone()); let client_io = Box::new(ClientIoHandler { client: client @@ -35,7 +37,7 @@ impl ClientService { } struct ClientIoHandler { - client: Arc + client: Arc> } impl IoHandler for ClientIoHandler { @@ -47,7 +49,7 @@ impl IoHandler for ClientIoHandler { &mut UserMessage(ref mut message) => { match message { &mut SyncMessage::BlockVerified(ref mut bytes) => { - Arc::get_mut(&mut self.client).unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); + self.client.write().unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); }, _ => {}, } diff --git a/src/sync/chain.rs b/src/sync/chain.rs index ffa5d8add..ce567556c 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -234,7 +234,7 @@ impl ChainSync { } /// Called by peer to report status - fn on_peer_status(&mut self, io: &mut SyncIo, peer_id: &PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn on_peer_status(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let peer = PeerInfo { protocol_version: try!(r.val_at(0)), network_id: try!(r.val_at(1)), @@ -263,12 +263,13 @@ impl ChainSync { if old.is_some() { panic!("ChainSync: new peer already exists"); } + info!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id)); self.sync_peer(io, peer_id, false); Ok(()) } /// Called by peer once it has new block headers during sync - fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: &PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn on_peer_block_headers(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { self.reset_peer_asking(peer_id, PeerAsking::BlockHeaders); let item_count = r.item_count(); trace!(target: "sync", "{} -> BlockHeaders ({} entries)", peer_id, item_count); @@ -351,7 +352,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies - fn on_peer_block_bodies(&mut self, io: &mut SyncIo, peer_id: &PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn 
on_peer_block_bodies(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { use util::triehash::ordered_trie_root; self.reset_peer_asking(peer_id, PeerAsking::BlockBodies); let item_count = r.item_count(); @@ -391,7 +392,7 @@ impl ChainSync { } /// Called by peer once it has new block bodies - fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: &PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn on_peer_new_block(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { let block_rlp = try!(r.at(0)); let header_rlp = try!(block_rlp.at(0)); let h = header_rlp.as_raw().sha3(); @@ -430,7 +431,7 @@ impl ChainSync { } /// Handles NewHashes packet. Initiates headers download for any unknown hashes. - fn on_peer_new_hashes(&mut self, io: &mut SyncIo, peer_id: &PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn on_peer_new_hashes(&mut self, io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { if self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").asking != PeerAsking::Nothing { trace!(target: "sync", "Ignoring new hashes since we're already downloading."); return Ok(()); @@ -467,16 +468,17 @@ impl ChainSync { } /// Called by peer when it is disconnecting - pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: &PeerId) { - trace!(target: "sync", "== Disconnected {}", peer); + pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) { + trace!(target: "sync", "== Disconnecting {}", peer); if self.peers.contains_key(&peer) { + info!(target: "sync", "Disconneced {}:{}", peer, io.peer_info(peer)); self.clear_peer_download(peer); self.continue_sync(io); } } /// Called when a new peer is connected - pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: &PeerId) { + pub fn on_peer_connected(&mut self, io: &mut SyncIo, peer: PeerId) { trace!(target: "sync", "== Connected {}", peer); self.send_status(io, peer); } @@ -486,7 +488,7 @@ impl ChainSync { let mut peers: Vec<(PeerId, U256)> = self.peers.iter().map(|(k, p)| (*k, p.difficulty)).collect(); peers.sort_by(|&(_, d1), &(_, d2)| d1.cmp(&d2).reverse()); //TODO: sort by rating for (p, _) in peers { - self.sync_peer(io, &p, false); + self.sync_peer(io, p, false); } } @@ -504,7 +506,7 @@ impl ChainSync { } /// Find something to do for a peer. Called for a new peer or when a peer is done with it's task. - fn sync_peer(&mut self, io: &mut SyncIo, peer_id: &PeerId, force: bool) { + fn sync_peer(&mut self, io: &mut SyncIo, peer_id: PeerId, force: bool) { let (peer_latest, peer_difficulty) = { let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); if peer.asking != PeerAsking::Nothing { @@ -534,7 +536,7 @@ impl ChainSync { } /// Find some headers or blocks to download for a peer. 
- fn request_blocks(&mut self, io: &mut SyncIo, peer_id: &PeerId) { + fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) { self.clear_peer_download(peer_id); if io.chain().queue_status().full { @@ -564,7 +566,7 @@ impl ChainSync { } } if !needed_bodies.is_empty() { - replace(&mut self.peers.get_mut(peer_id).unwrap().asking_blocks, needed_numbers); + replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, needed_numbers); self.request_bodies(io, peer_id, needed_bodies); } else { @@ -607,7 +609,7 @@ impl ChainSync { if !headers.is_empty() { start = headers[0] as usize; let count = headers.len(); - replace(&mut self.peers.get_mut(peer_id).unwrap().asking_blocks, headers); + replace(&mut self.peers.get_mut(&peer_id).unwrap().asking_blocks, headers); assert!(!self.headers.have_item(&(start as BlockNumber))); self.request_headers_by_number(io, peer_id, start as BlockNumber, count, 0, false); } @@ -619,7 +621,7 @@ impl ChainSync { } /// Clear all blocks/headers marked as being downloaded by a peer. - fn clear_peer_download(&mut self, peer_id: &PeerId) { + fn clear_peer_download(&mut self, peer_id: PeerId) { let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); for b in &peer.asking_blocks { self.downloading_headers.remove(&b); @@ -715,7 +717,7 @@ impl ChainSync { } /// Request headers from a peer by block hash - fn request_headers_by_hash(&mut self, sync: &mut SyncIo, peer_id: &PeerId, h: &H256, count: usize, skip: usize, reverse: bool) { + fn request_headers_by_hash(&mut self, sync: &mut SyncIo, peer_id: PeerId, h: &H256, count: usize, skip: usize, reverse: bool) { trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}", peer_id, count, h); let mut rlp = RlpStream::new_list(4); rlp.append(h); @@ -726,7 +728,7 @@ impl ChainSync { } /// Request headers from a peer by block number - fn request_headers_by_number(&mut self, sync: &mut SyncIo, peer_id: &PeerId, n: BlockNumber, count: usize, skip: usize, reverse: bool) { + fn request_headers_by_number(&mut self, sync: &mut SyncIo, peer_id: PeerId, n: BlockNumber, count: usize, skip: usize, reverse: bool) { let mut rlp = RlpStream::new_list(4); trace!(target: "sync", "{} <- GetBlockHeaders: {} entries starting from {}", peer_id, count, n); rlp.append(&n); @@ -737,7 +739,7 @@ impl ChainSync { } /// Request block bodies from a peer - fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: &PeerId, hashes: Vec) { + fn request_bodies(&mut self, sync: &mut SyncIo, peer_id: PeerId, hashes: Vec) { let mut rlp = RlpStream::new_list(hashes.len()); trace!(target: "sync", "{} <- GetBlockBodies: {} entries", peer_id, hashes.len()); for h in hashes { @@ -747,7 +749,7 @@ impl ChainSync { } /// Reset peer status after request is complete. 
- fn reset_peer_asking(&mut self, peer_id: &PeerId, asking: PeerAsking) { + fn reset_peer_asking(&mut self, peer_id: PeerId, asking: PeerAsking) { let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); if peer.asking != asking { warn!(target:"sync", "Asking {:?} while expected {:?}", peer.asking, asking); @@ -758,14 +760,14 @@ impl ChainSync { } /// Generic request sender - fn send_request(&mut self, sync: &mut SyncIo, peer_id: &PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) { + fn send_request(&mut self, sync: &mut SyncIo, peer_id: PeerId, asking: PeerAsking, packet_id: PacketId, packet: Bytes) { { let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); if peer.asking != PeerAsking::Nothing { warn!(target:"sync", "Asking {:?} while requesting {:?}", asking, peer.asking); } } - match sync.send(*peer_id, packet_id, packet) { + match sync.send(peer_id, packet_id, packet) { Err(e) => { warn!(target:"sync", "Error sending request: {:?}", e); sync.disable_peer(peer_id); @@ -779,12 +781,12 @@ impl ChainSync { } /// Called when peer sends us new transactions - fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: &PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + fn on_peer_transactions(&mut self, _io: &mut SyncIo, _peer_id: PeerId, _r: &UntrustedRlp) -> Result<(), PacketDecodeError> { Ok(()) } /// Send Status message - fn send_status(&mut self, io: &mut SyncIo, peer_id: &PeerId) { + fn send_status(&mut self, io: &mut SyncIo, peer_id: PeerId) { let mut packet = RlpStream::new_list(5); let chain = io.chain().chain_info(); packet.append(&(PROTOCOL_VERSION as u32)); @@ -793,7 +795,7 @@ impl ChainSync { packet.append(&chain.best_block_hash); packet.append(&chain.genesis_hash); //TODO: handle timeout for status request - match io.send(*peer_id, STATUS_PACKET, packet.out()) { + match io.send(peer_id, STATUS_PACKET, packet.out()) { Err(e) => { warn!(target:"sync", "Error sending status request: {:?}", e); io.disable_peer(peer_id); @@ -940,7 +942,7 @@ impl ChainSync { } /// Dispatch incoming requests and responses - pub fn on_packet(&mut self, io: &mut SyncIo, peer: &PeerId, packet_id: u8, data: &[u8]) { + pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) { let rlp = UntrustedRlp::new(data); let result = match packet_id { STATUS_PACKET => self.on_peer_status(io, peer, &rlp), diff --git a/src/sync/io.rs b/src/sync/io.rs index 884babe3e..affcbc0d7 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -8,13 +8,17 @@ use sync::SyncMessage; // TODO: ratings pub trait SyncIo { /// Disable a peer - fn disable_peer(&mut self, peer_id: &PeerId); + fn disable_peer(&mut self, peer_id: PeerId); /// Respond to current request with a packet. Can be called from an IO handler for incoming packet. fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), UtilError>; /// Send a packet to a peer. 
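
Just below, the SyncIo trait also gains a peer_info method with a default body, so implementations such as the test harness in sync/tests.rs can fall back to the plain numeric id while NetSyncIo overrides it with the richer string from the network layer. The same trait-default pattern in miniature (all names here are invented stand-ins, not the real trait):

    trait SyncIoLike {
        fn peer_id_label(&self, peer: usize) -> String {
            // Fallback: just show the numeric id.
            peer.to_string()
        }
    }

    struct TestIoStub;
    impl SyncIoLike for TestIoStub {} // relies on the default body

    struct NetIoStub;
    impl SyncIoLike for NetIoStub {
        fn peer_id_label(&self, peer: usize) -> String {
            format!("{}:mock-client/v1", peer) // richer info from the network layer
        }
    }

    fn main() {
        assert_eq!(TestIoStub.peer_id_label(3), "3");
        assert_eq!(NetIoStub.peer_id_label(3), "3:mock-client/v1");
    }
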
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError>; /// Get the blockchain fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient; + /// Returns peer client identifier string + fn peer_info(&self, peer_id: PeerId) -> String { + peer_id.to_string() + } } /// Wraps `NetworkContext` and the blockchain client @@ -34,8 +38,8 @@ impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { } impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { - fn disable_peer(&mut self, peer_id: &PeerId) { - self.network.disable_peer(*peer_id); + fn disable_peer(&mut self, peer_id: PeerId) { + self.network.disable_peer(peer_id); } fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), UtilError>{ @@ -49,6 +53,10 @@ impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { self.chain } + + fn peer_info(&self, peer_id: PeerId) -> String { + self.network.peer_info(peer_id) + } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index e2cf5529d..491fa8e40 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -22,8 +22,9 @@ /// } /// ``` -use std::sync::Arc; -use client::BlockChainClient; +use std::ops::*; +use std::sync::*; +use client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; use util::TimerToken; use util::Bytes; @@ -50,7 +51,7 @@ pub type NetSyncMessage = NetworkIoMessage; /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. TODO: this should evetually become an IPC endpoint - chain: Arc, + chain: Arc>, /// Sync strategy sync: ChainSync } @@ -59,7 +60,7 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc) { + pub fn register(service: &mut NetworkService, chain: Arc>) { let sync = Box::new(EthSync { chain: chain, sync: ChainSync::new(), @@ -74,35 +75,35 @@ impl EthSync { /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { - self.sync.abort(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); + self.sync.abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } /// Restart sync pub fn restart(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); + self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } } impl NetworkProtocolHandler for EthSync { fn initialize(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); + self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); io.register_timer(1000).unwrap(); } fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.on_packet(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer, packet_id, data); + self.sync.on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); } fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_connected(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer); + self.sync.on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_aborting(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap()), peer); + 
self.sync.on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } fn timeout(&mut self, io: &mut NetworkContext, _timer: TimerToken) { - self.sync.maintain_sync(&mut NetSyncIo::new(io, Arc::get_mut(&mut self.chain).unwrap())); + self.sync.maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } fn message(&mut self, _io: &mut NetworkContext, _message: &SyncMessage) { diff --git a/src/verification.rs b/src/verification.rs index 2c3fe130d..bfc97e1eb 100644 --- a/src/verification.rs +++ b/src/verification.rs @@ -37,13 +37,13 @@ pub fn verify_block_unordered(bytes: &[u8], engine: &Engine) -> Result<(), Error } /// Phase 3 verification. Check block information against parent and uncles. -pub fn verify_block_final(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { +pub fn verify_block_family(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { // TODO: verify timestamp let block = BlockView::new(bytes); let header = block.header(); let parent = try!(bc.block_header(&header.parent_hash).ok_or::(From::from(BlockError::UnknownParent(header.parent_hash.clone())))); try!(verify_parent(&header, &parent)); - try!(engine.verify_block_final(&header, &parent, Some(bytes))); + try!(engine.verify_block_family(&header, &parent, Some(bytes))); let num_uncles = Rlp::new(bytes).at(2).item_count(); if num_uncles != 0 { @@ -113,12 +113,29 @@ pub fn verify_block_final(bytes: &[u8], engine: &Engine, bc: &BC) -> Result< } try!(verify_parent(&uncle, &uncle_parent)); - try!(engine.verify_block_final(&uncle, &uncle_parent, Some(bytes))); + try!(engine.verify_block_family(&uncle, &uncle_parent, Some(bytes))); } } Ok(()) } +/// Phase 4 verification. Check block information against transaction enactment results, +pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> { + if expected.state_root != got.state_root { + return Err(From::from(BlockError::InvalidStateRoot(Mismatch { expected: expected.state_root.clone(), found: got.state_root.clone() }))) + } + if expected.receipts_root != got.receipts_root { + return Err(From::from(BlockError::InvalidReceiptsStateRoot(Mismatch { expected: expected.receipts_root.clone(), found: got.receipts_root.clone() }))) + } + if expected.log_bloom != got.log_bloom { + return Err(From::from(BlockError::InvalidLogBloom(Mismatch { expected: expected.log_bloom.clone(), found: got.log_bloom.clone() }))) + } + if expected.gas_used != got.gas_used { + return Err(From::from(BlockError::InvalidGasUsed(Mismatch { expected: expected.gas_used, found: got.gas_used }))) + } + Ok(()) +} + /// Check basic header parameters. 
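
The new stage-4 check, verify_block_final in verification.rs above, is a field-by-field comparison between the header the peer supplied and the header produced by re-enacting the block locally, returning a Mismatch-carrying error on the first difference. A compact sketch of that shape, with fixed-size arrays standing in for H256 and a plain enum standing in for the BlockError variants (the real check also compares the log bloom):

    #[derive(Debug)]
    enum CheckError {
        StateRoot,
        ReceiptsRoot,
        GasUsed,
    }

    struct HeaderFields {
        state_root: [u8; 32],
        receipts_root: [u8; 32],
        gas_used: u64,
    }

    fn verify_final(expected: &HeaderFields, got: &HeaderFields) -> Result<(), CheckError> {
        if expected.state_root != got.state_root { return Err(CheckError::StateRoot); }
        if expected.receipts_root != got.receipts_root { return Err(CheckError::ReceiptsRoot); }
        if expected.gas_used != got.gas_used { return Err(CheckError::GasUsed); }
        Ok(())
    }

    fn main() {
        let computed = HeaderFields { state_root: [1; 32], receipts_root: [2; 32], gas_used: 21_000 };
        let claimed_ok = HeaderFields { state_root: [1; 32], receipts_root: [2; 32], gas_used: 21_000 };
        let claimed_bad = HeaderFields { state_root: [9; 32], receipts_root: [2; 32], gas_used: 21_000 };
        assert!(verify_final(&claimed_ok, &computed).is_ok());
        assert!(verify_final(&claimed_bad, &computed).is_err());
    }
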
fn verify_header(header: &Header, engine: &Engine) -> Result<(), Error> { if header.number >= From::from(BlockNumber::max_value()) { @@ -372,28 +389,28 @@ mod tests { check_fail(verify_block_basic(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref()), InvalidUnclesHash(Mismatch { expected: good_uncles_hash.clone(), found: header.uncles_hash })); - check_ok(verify_block_final(&create_test_block(&good), engine.deref(), &bc)); - check_ok(verify_block_final(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine.deref(), &bc)); + check_ok(verify_block_family(&create_test_block(&good), engine.deref(), &bc)); + check_ok(verify_block_family(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine.deref(), &bc)); header = good.clone(); header.parent_hash = H256::random(); - check_fail(verify_block_final(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), UnknownParent(header.parent_hash)); header = good.clone(); header.timestamp = 10; - check_fail(verify_block_final(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp + 1), found: header.timestamp })); header = good.clone(); header.number = 9; - check_fail(verify_block_final(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), InvalidNumber(OutOfBounds { max: None, min: Some(parent.number + 1), found: header.number })); header = good.clone(); let mut bad_uncles = good_uncles.clone(); bad_uncles.push(good_uncle1.clone()); - check_fail(verify_block_final(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine.deref(), &bc), + check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine.deref(), &bc), TooManyUncles(OutOfBounds { max: Some(engine.maximum_uncle_count()), min: None, found: bad_uncles.len() })); // TODO: some additional uncle checks From 77c5b315df67c04dafe57efa1bbb64b1bebb8181 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 14 Jan 2016 22:38:49 +0100 Subject: [PATCH 06/11] Spec name; uncle reward calculation fixed --- res/ethereum/frontier.json | 1 + res/ethereum/frontier_test.json | 3 ++- res/ethereum/homestead_test.json | 1 + res/ethereum/morden.json | 3 ++- res/ethereum/olympic.json | 3 ++- res/null_morden.json | 3 ++- src/bin/client.rs | 21 ++++++++++++++++++++- src/block.rs | 2 +- src/client.rs | 2 ++ src/ethereum/ethash.rs | 22 +++++++++++++++++++++- src/service.rs | 1 + src/spec.rs | 16 ++++++++++++---- src/sync/tests.rs | 6 +++--- 13 files changed, 70 insertions(+), 14 deletions(-) diff --git a/res/ethereum/frontier.json b/res/ethereum/frontier.json index 0be9336aa..6394a9010 100644 --- a/res/ethereum/frontier.json +++ b/res/ethereum/frontier.json @@ -1,4 +1,5 @@ { + "name": "Frontier", "engineName": "Ethash", "params": { "accountStartNonce": "0x00", diff --git a/res/ethereum/frontier_test.json b/res/ethereum/frontier_test.json index 25f5ae16c..8ee1cafd9 100644 --- a/res/ethereum/frontier_test.json +++ 
b/res/ethereum/frontier_test.json @@ -1,4 +1,5 @@ { + "engineName": "Frontier (Test)", "engineName": "Ethash", "params": { "accountStartNonce": "0x00", @@ -30,4 +31,4 @@ "0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "linear": { "base": 600, "word": 120 } } }, "0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "linear": { "base": 15, "word": 3 } } } } -} \ No newline at end of file +} diff --git a/res/ethereum/homestead_test.json b/res/ethereum/homestead_test.json index 8d005be62..ee73d0ed3 100644 --- a/res/ethereum/homestead_test.json +++ b/res/ethereum/homestead_test.json @@ -1,4 +1,5 @@ { + "name": "Homestead (Test)", "engineName": "Ethash", "params": { "accountStartNonce": "0x00", diff --git a/res/ethereum/morden.json b/res/ethereum/morden.json index 033c0651c..79f9f3d99 100644 --- a/res/ethereum/morden.json +++ b/res/ethereum/morden.json @@ -1,4 +1,5 @@ { + "name": "Morden", "engineName": "Ethash", "params": { "accountStartNonce": "0x0100000", @@ -31,4 +32,4 @@ "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "linear": { "base": 15, "word": 3 } } }, "102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" } } -} \ No newline at end of file +} diff --git a/res/ethereum/olympic.json b/res/ethereum/olympic.json index 0b04e5499..4318d9230 100644 --- a/res/ethereum/olympic.json +++ b/res/ethereum/olympic.json @@ -1,4 +1,5 @@ { + "name": "Olympic", "engineName": "Ethash", "params": { "accountStartNonce": "0x00", @@ -38,4 +39,4 @@ "6c386a4b26f73c802f34673f7248bb118f97424a": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }, "e4157b34ea9615cfbde6b4fda419828124b70c78": { "balance": "1606938044258990275541962092341162602522202993782792835301376" } } -} \ No newline at end of file +} diff --git a/res/null_morden.json b/res/null_morden.json index 7f069cb4e..46507ff95 100644 --- a/res/null_morden.json +++ b/res/null_morden.json @@ -1,4 +1,5 @@ { + "name": "Morden", "engineName": "NullEngine", "params": { "accountStartNonce": "0x0100000", @@ -31,4 +32,4 @@ "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "linear": { "base": 15, "word": 3 } } }, "102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" } } -} \ No newline at end of file +} diff --git a/src/bin/client.rs b/src/bin/client.rs index 5a3e907be..5ef177c64 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -1,15 +1,34 @@ extern crate ethcore_util as util; extern crate ethcore; extern crate rustc_serialize; +extern crate log; extern crate env_logger; use std::io::*; +use std::env; +use log::{LogRecord, LogLevelFilter}; +use env_logger::LogBuilder; use util::hash::*; use ethcore::service::ClientService; use ethcore::ethereum; +fn setup_log() { + let format = |record: &LogRecord| { + format!("{} - {}", record.level(), record.args()) + }; + + let mut builder = LogBuilder::new(); + builder.format(format).filter(None, LogLevelFilter::Info); + + if env::var("RUST_LOG").is_ok() { + builder.parse(&env::var("RUST_LOG").unwrap()); + } + + builder.init().unwrap(); +} + fn main() { - ::env_logger::init().ok(); + setup_log(); let spec = ethereum::new_frontier(); let mut _service = ClientService::start(spec).unwrap(); loop { diff --git a/src/block.rs b/src/block.rs 
index 016c72641..b93d52a60 100644 --- a/src/block.rs +++ b/src/block.rs @@ -297,7 +297,7 @@ fn enact_block() { let mut db = OverlayDB::new_temp(); engine.spec().ensure_db_good(&mut db); - let e = enact(&orig_bytes, engine.deref(), db, &genesis_header, &vec![genesis_header.hash()]).unwrap(); + let e = enact_and_seal(&orig_bytes, engine.deref(), db, &genesis_header, &vec![genesis_header.hash()]).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); diff --git a/src/client.rs b/src/client.rs index c933e7817..4905fe094 100644 --- a/src/client.rs +++ b/src/client.rs @@ -113,6 +113,7 @@ impl Client { let db = DB::open_default(state_path.to_str().unwrap()).unwrap(); let mut state_db = OverlayDB::new(db); engine.spec().ensure_db_good(&mut state_db); + state_db.commit().expect("Error commiting genesis state to state DB"); Ok(Client { chain: chain.clone(), @@ -161,6 +162,7 @@ impl Client { }; if let Err(e) = verify_block_final(&header, result.block().header()) { warn!(target: "client", "Stage 4 block verification failed for {}\nError: {:?}", header.hash(), e); + return; } self.chain.write().unwrap().insert_block(&bytes); //TODO: err here? diff --git a/src/ethereum/ethash.rs b/src/ethereum/ethash.rs index 2d99bfa56..310b7b766 100644 --- a/src/ethereum/ethash.rs +++ b/src/ethereum/ethash.rs @@ -41,8 +41,9 @@ impl Engine for Ethash { // Bestow uncle rewards let current_number = fields.header.number(); for u in fields.uncles.iter() { - fields.state.add_balance(u.author(), &(reward * U256::from((8 + u.number() - current_number) / 8))); + fields.state.add_balance(u.author(), &(reward * U256::from(8 + u.number() - current_number) / U256::from(8))); } + fields.state.commit(); } @@ -128,4 +129,23 @@ fn on_close_block() { assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); } +#[test] +fn on_close_block_with_uncle() { + use super::*; + let engine = new_morden().to_engine().unwrap(); + let genesis_header = engine.spec().genesis_header(); + let mut db = OverlayDB::new_temp(); + engine.spec().ensure_db_good(&mut db); + let last_hashes = vec![genesis_header.hash()]; + let mut b = OpenBlock::new(engine.deref(), db, &genesis_header, &last_hashes, Address::zero(), vec![]); + let mut uncle = Header::new(); + let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106"); + uncle.author = uncle_author.clone(); + b.push_uncle(uncle).unwrap(); + + let b = b.close(); + assert_eq!(b.state().balance(&Address::zero()), U256::from_str("478eae0e571ba000").unwrap()); + assert_eq!(b.state().balance(&uncle_author), U256::from_str("3cb71f51fc558000").unwrap()); +} + // TODO: difficulty test diff --git a/src/service.rs b/src/service.rs index 6f3294ce6..da91451f9 100644 --- a/src/service.rs +++ b/src/service.rs @@ -20,6 +20,7 @@ impl ClientService { pub fn start(spec: Spec) -> Result { let mut net_service = try!(NetworkService::start()); info!("Starting {}", net_service.host_info()); + info!("Configured for {} using {} engine", spec.name, spec.engine_name); let mut dir = env::home_dir().unwrap(); dir.push(".parity"); dir.push(H64::from(spec.genesis_header().hash()).hex()); diff --git a/src/spec.rs b/src/spec.rs index c9e3383eb..80ea99f40 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -65,6 +65,8 @@ impl GenesisAccount { /// chain and those to be interpreted by the active chain engine. #[derive(Debug)] pub struct Spec { + // User friendly spec name + pub name: String, // What engine are we using for this? 
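
The ethash.rs change above is an arithmetic-ordering fix: the old expression computed (8 + u.number() - current_number) / 8 in integer arithmetic before multiplying by the reward, which truncates to zero for any uncle fewer than eight blocks back, so uncles received nothing. A small worked example, using u128 in place of U256 and the Frontier-era 5 ether block reward:

    fn uncle_reward(block_reward: u128, block_number: u64, uncle_number: u64) -> u128 {
        // Multiply first, then divide, so the 7/8, 6/8, ... factor is not
        // lost to integer truncation.
        let factor = (8 + uncle_number - block_number) as u128;
        block_reward * factor / 8
    }

    fn main() {
        let reward = 5_000_000_000_000_000_000u128; // 5 ether in wei
        // An uncle one block behind its nephew earns 7/8 of the block reward.
        assert_eq!(uncle_reward(reward, 10, 9), reward * 7 / 8);
        // The pre-fix ordering divided first, so the factor collapsed to zero:
        assert_eq!((8 + 9u64 - 10) / 8, 0);
    }
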
pub engine_name: String, @@ -196,6 +198,7 @@ impl FromJson for Spec { Spec { + name: json["name"].as_string().unwrap().to_string(), engine_name: json["engineName"].as_string().unwrap().to_string(), engine_params: json_to_rlp_map(&json["params"]), builtins: builtins, @@ -218,11 +221,16 @@ impl Spec { /// Ensure that the given state DB has the trie nodes in for the genesis state. pub fn ensure_db_good(&self, db: &mut HashDB) { if !db.contains(&self.state_root()) { - let mut root = H256::new(); - let mut t = SecTrieDBMut::new(db, &mut root); - for (address, account) in self.genesis_state.iter() { - t.insert(address.as_slice(), &account.rlp()); + info!("Populating genesis state..."); + let mut root = H256::new(); + { + let mut t = SecTrieDBMut::new(db, &mut root); + for (address, account) in self.genesis_state.iter() { + t.insert(address.as_slice(), &account.rlp()); + } } + assert!(db.contains(&self.state_root())); + info!("Genesis state is ready"); } } diff --git a/src/sync/tests.rs b/src/sync/tests.rs index bc0e171d2..84a8bf21f 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -187,7 +187,7 @@ impl<'p> TestIo<'p> { } impl<'p> SyncIo for TestIo<'p> { - fn disable_peer(&mut self, _peer_id: &PeerId) { + fn disable_peer(&mut self, _peer_id: PeerId) { } fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { @@ -257,7 +257,7 @@ impl TestNet { for client in 0..self.peers.len() { if peer != client { let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.on_peer_connected(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(client as PeerId)), &(client as PeerId)); + p.sync.on_peer_connected(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(client as PeerId)), client as PeerId); } } } @@ -269,7 +269,7 @@ impl TestNet { Some(packet) => { let mut p = self.peers.get_mut(packet.recipient).unwrap(); trace!("--- {} -> {} ---", peer, packet.recipient); - p.sync.on_packet(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), &(peer as PeerId), packet.packet_id, &packet.data); + p.sync.on_packet(&mut TestIo::new(&mut p.chain, &mut p.queue, Some(peer as PeerId)), peer as PeerId, packet.packet_id, &packet.data); trace!("----------------"); }, None => {} From e5a51707ec5beb2ebd22f52c8ecd45a92f8d06dd Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 14 Jan 2016 23:01:41 +0100 Subject: [PATCH 07/11] Fixed empty name handling --- src/spec.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spec.rs b/src/spec.rs index 80ea99f40..e1d4a3cad 100644 --- a/src/spec.rs +++ b/src/spec.rs @@ -198,7 +198,7 @@ impl FromJson for Spec { Spec { - name: json["name"].as_string().unwrap().to_string(), + name: json.find("name").map(|j| j.as_string().unwrap()).unwrap_or("unknown").to_string(), engine_name: json["engineName"].as_string().unwrap().to_string(), engine_params: json_to_rlp_map(&json["params"]), builtins: builtins, From f6dff483125dac05214f874b5f2214405b45f61b Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 15 Jan 2016 01:03:29 +0100 Subject: [PATCH 08/11] Documentation --- src/client.rs | 5 +++-- src/service.rs | 17 ++++++----------- 2 files changed, 9 insertions(+), 13 deletions(-) diff --git a/src/client.rs b/src/client.rs index 4905fe094..2c87af249 100644 --- a/src/client.rs +++ b/src/client.rs @@ -105,6 +105,7 @@ pub struct Client { } impl Client { + /// Create a new client with given spec and DB path. 
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let engine = Arc::new(try!(spec.to_engine())); @@ -123,7 +124,7 @@ impl Client { }) } - + /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_block(&mut self, bytes: Bytes) { let block = BlockView::new(&bytes); let header = block.header(); @@ -173,7 +174,7 @@ impl Client { return; } } - info!(target: "client", "Imported {}", header.hash()); + info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } diff --git a/src/service.rs b/src/service.rs index da91451f9..da1f65f88 100644 --- a/src/service.rs +++ b/src/service.rs @@ -1,10 +1,3 @@ -//! Client service. -//! -//! -//! -//! -//! - use util::*; use sync::*; use spec::Spec; @@ -12,11 +5,13 @@ use error::*; use std::env; use client::Client; +/// Client service setup. Creates and registers client and network services with the IO subsystem. pub struct ClientService { _net_service: NetworkService, } impl ClientService { + /// Start the service in a separate thread. pub fn start(spec: Spec) -> Result { let mut net_service = try!(NetworkService::start()); info!("Starting {}", net_service.host_info()); @@ -37,13 +32,13 @@ impl ClientService { } } +/// IO interface for the Client handler struct ClientIoHandler { client: Arc> } impl IoHandler for ClientIoHandler { - fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { - } + fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { } fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) { match net_message { @@ -52,11 +47,11 @@ impl IoHandler for ClientIoHandler { &mut SyncMessage::BlockVerified(ref mut bytes) => { self.client.write().unwrap().import_verified_block(mem::replace(bytes, Bytes::new())); }, - _ => {}, + _ => {}, // ignore other messages } } - _ => {}, + _ => {}, // ignore other messages } } From dcfafdd101d0b19ab39b2d85e656da92cd3044ce Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 15 Jan 2016 01:44:57 +0100 Subject: [PATCH 09/11] Removed log format string --- src/bin/client.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/bin/client.rs b/src/bin/client.rs index 5ef177c64..3dda6fc22 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -6,19 +6,15 @@ extern crate env_logger; use std::io::*; use std::env; -use log::{LogRecord, LogLevelFilter}; +use log::{LogLevelFilter}; use env_logger::LogBuilder; use util::hash::*; use ethcore::service::ClientService; use ethcore::ethereum; fn setup_log() { - let format = |record: &LogRecord| { - format!("{} - {}", record.level(), record.args()) - }; - let mut builder = LogBuilder::new(); - builder.format(format).filter(None, LogLevelFilter::Info); + builder.filter(None, LogLevelFilter::Info); if env::var("RUST_LOG").is_ok() { builder.parse(&env::var("RUST_LOG").unwrap()); From 76223d3d137a791d205c8d39a694c8d96a671fcb Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 15 Jan 2016 02:52:37 +0100 Subject: [PATCH 10/11] Receipt logging --- src/client.rs | 8 ++++---- src/receipt.rs | 1 + src/state.rs | 4 +++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/src/client.rs b/src/client.rs index 2c87af249..59734b387 100644 --- a/src/client.rs +++ b/src/client.rs @@ -129,14 +129,14 @@ impl Client { let block = BlockView::new(&bytes); let header = block.header(); if let Err(e) 
= verify_block_family(&bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { - warn!(target: "client", "Stage 3 block verification failed for {}\nError: {:?}", header.hash(), e); + warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); // TODO: mark as bad return; }; let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, None => { - warn!(target: "client", "Block import failed for {}: Parent not found ({}) ", header.hash(), header.parent_hash); + warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); return; }, }; @@ -157,12 +157,12 @@ impl Client { let result = match enact(&bytes, self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes) { Ok(b) => b, Err(e) => { - warn!(target: "client", "Block import failed for {}\nError: {:?}", header.hash(), e); + warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { - warn!(target: "client", "Stage 4 block verification failed for {}\nError: {:?}", header.hash(), e); + warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return; } diff --git a/src/receipt.rs b/src/receipt.rs index ef7e03cc8..21a66f5cf 100644 --- a/src/receipt.rs +++ b/src/receipt.rs @@ -3,6 +3,7 @@ use basic_types::LogBloom; use log_entry::LogEntry; /// Information describing execution of a transaction. +#[derive(Debug)] pub struct Receipt { pub state_root: H256, pub gas_used: U256, diff --git a/src/state.rs b/src/state.rs index fdf71a496..c6e5516e1 100644 --- a/src/state.rs +++ b/src/state.rs @@ -142,7 +142,9 @@ impl State { let e = try!(Executive::new(self, env_info, engine).transact(t)); //println!("Executed: {:?}", e); self.commit(); - Ok(Receipt::new(self.root().clone(), e.gas_used, e.logs)) + let receipt = Receipt::new(self.root().clone(), e.cumulative_gas_used, e.logs); + debug!("Transaction receipt: {:?}", receipt); + Ok(receipt) } pub fn revert(&mut self, backup: State) { From 0b511a180acfe509f6bbb44d68a68281d1fd21bf Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 15 Jan 2016 12:26:04 +0100 Subject: [PATCH 11/11] Bad block reporting --- src/client.rs | 7 ++++-- src/error.rs | 4 ++-- src/queue.rs | 25 +++++++++++++++++---- src/sync/chain.rs | 1 + src/verification.rs | 53 +++++++++++++++++++++++++-------------------- 5 files changed, 58 insertions(+), 32 deletions(-) diff --git a/src/client.rs b/src/client.rs index 59734b387..6a8acf6d3 100644 --- a/src/client.rs +++ b/src/client.rs @@ -128,15 +128,16 @@ impl Client { pub fn import_verified_block(&mut self, bytes: Bytes) { let block = BlockView::new(&bytes); let header = block.header(); - if let Err(e) = verify_block_family(&bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { + if let Err(e) = verify_block_family(&header, &bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - // TODO: mark as bad + self.queue.mark_as_bad(&header.hash()); return; }; let parent = match self.chain.read().unwrap().block_header(&header.parent_hash) { Some(p) => p, None => { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", 
header.number(), header.hash(), header.parent_hash); + self.queue.mark_as_bad(&header.hash()); return; }, }; @@ -158,11 +159,13 @@ impl Client { Ok(b) => b, Err(e) => { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + self.queue.mark_as_bad(&header.hash()); return; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); + self.queue.mark_as_bad(&header.hash()); return; } diff --git a/src/error.rs b/src/error.rs index 9973cb91c..dec13288d 100644 --- a/src/error.rs +++ b/src/error.rs @@ -68,14 +68,14 @@ pub enum BlockError { #[derive(Debug)] pub enum ImportError { - Bad(Error), + Bad(Option), AlreadyInChain, AlreadyQueued, } impl From for ImportError { fn from(err: Error) -> ImportError { - ImportError::Bad(err) + ImportError::Bad(Some(err)) } } diff --git a/src/queue.rs b/src/queue.rs index 6c398e6f5..5ca361834 100644 --- a/src/queue.rs +++ b/src/queue.rs @@ -9,7 +9,8 @@ use views::*; /// Sorts them ready for blockchain insertion. pub struct BlockQueue { engine: Arc>, - message_channel: IoChannel + message_channel: IoChannel, + bad: HashSet, } impl BlockQueue { @@ -17,7 +18,8 @@ impl BlockQueue { pub fn new(engine: Arc>, message_channel: IoChannel) -> BlockQueue { BlockQueue { engine: engine, - message_channel: message_channel + message_channel: message_channel, + bad: HashSet::new(), } } @@ -27,16 +29,31 @@ impl BlockQueue { /// Add a block to the queue. pub fn import_block(&mut self, bytes: &[u8]) -> ImportResult { - try!(verify_block_basic(bytes, self.engine.deref().deref()).map_err(|e| { + let header = BlockView::new(bytes).header(); + if self.bad.contains(&header.hash()) { + return Err(ImportError::Bad(None)); + } + + if self.bad.contains(&header.parent_hash) { + self.bad.insert(header.hash()); + return Err(ImportError::Bad(None)); + } + + try!(verify_block_basic(&header, bytes, self.engine.deref().deref()).map_err(|e| { warn!(target: "client", "Stage 1 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), e); e })); - try!(verify_block_unordered(bytes, self.engine.deref().deref()).map_err(|e| { + try!(verify_block_unordered(&header, bytes, self.engine.deref().deref()).map_err(|e| { warn!(target: "client", "Stage 2 block verification failed for {}\nError: {:?}", BlockView::new(&bytes).header_view().sha3(), e); e })); try!(self.message_channel.send(UserMessage(SyncMessage::BlockVerified(bytes.to_vec()))).map_err(|e| Error::from(e))); Ok(()) } + + pub fn mark_as_bad(&mut self, hash: &H256) { + self.bad.insert(hash.clone()); + //TODO: walk the queue + } } diff --git a/src/sync/chain.rs b/src/sync/chain.rs index ce567556c..43f5968f4 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -473,6 +473,7 @@ impl ChainSync { if self.peers.contains_key(&peer) { info!(target: "sync", "Disconneced {}:{}", peer, io.peer_info(peer)); self.clear_peer_download(peer); + self.peers.remove(&peer); self.continue_sync(io); } } diff --git a/src/verification.rs b/src/verification.rs index bfc97e1eb..4383f5a4a 100644 --- a/src/verification.rs +++ b/src/verification.rs @@ -10,9 +10,7 @@ use engine::Engine; use blockchain::*; /// Phase 1 quick block verification. Only does checks that are cheap. 
Operates on a single block -pub fn verify_block_basic(bytes: &[u8], engine: &Engine) -> Result<(), Error> { - let block = BlockView::new(bytes); - let header = block.header(); +pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { try!(verify_header(&header, engine)); try!(verify_block_integrity(bytes, &header.transactions_root, &header.uncles_hash)); try!(engine.verify_block_basic(&header, Some(bytes))); @@ -26,9 +24,7 @@ pub fn verify_block_basic(bytes: &[u8], engine: &Engine) -> Result<(), Error> { /// Phase 2 verification. Perform costly checks such as transaction signatures and block nonce for ethash. /// Still operates on a individual block /// TODO: return cached transactions, header hash. -pub fn verify_block_unordered(bytes: &[u8], engine: &Engine) -> Result<(), Error> { - let block = BlockView::new(bytes); - let header = block.header(); +pub fn verify_block_unordered(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { try!(engine.verify_block_unordered(&header, Some(bytes))); for u in Rlp::new(bytes).at(2).iter().map(|rlp| rlp.as_val::
()) { try!(engine.verify_block_unordered(&u, None)); @@ -37,10 +33,8 @@ pub fn verify_block_unordered(bytes: &[u8], engine: &Engine) -> Result<(), Error } /// Phase 3 verification. Check block information against parent and uncles. -pub fn verify_block_family(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { +pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { // TODO: verify timestamp - let block = BlockView::new(bytes); - let header = block.header(); let parent = try!(bc.block_header(&header.parent_hash).ok_or::(From::from(BlockError::UnknownParent(header.parent_hash.clone())))); try!(verify_parent(&header, &parent)); try!(engine.verify_block_family(&header, &parent, Some(bytes))); @@ -194,6 +188,7 @@ mod tests { use error::BlockError::*; use views::*; use blockchain::*; + use engine::*; use ethereum; fn create_test_block(header: &Header) -> Bytes { @@ -280,6 +275,16 @@ mod tests { } } + fn basic_test(bytes: &[u8], engine: &Engine) -> Result<(), Error> { + let header = BlockView::new(bytes).header(); + verify_block_basic(&header, bytes, engine) + } + + fn family_test(bytes: &[u8], engine: &Engine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { + let header = BlockView::new(bytes).header(); + verify_block_family(&header, bytes, engine, bc) + } + #[test] fn test_verify_block() { // Test against morden @@ -348,69 +353,69 @@ mod tests { bc.insert(create_test_block(&parent7)); bc.insert(create_test_block(&parent8)); - check_ok(verify_block_basic(&create_test_block(&good), engine.deref())); + check_ok(basic_test(&create_test_block(&good), engine.deref())); let mut header = good.clone(); header.transactions_root = good_transactions_root.clone(); header.uncles_hash = good_uncles_hash.clone(); - check_ok(verify_block_basic(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref())); + check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref())); header.gas_limit = min_gas_limit - From::from(1); - check_fail(verify_block_basic(&create_test_block(&header), engine.deref()), + check_fail(basic_test(&create_test_block(&header), engine.deref()), InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit })); header = good.clone(); header.number = BlockNumber::max_value(); - check_fail(verify_block_basic(&create_test_block(&header), engine.deref()), + check_fail(basic_test(&create_test_block(&header), engine.deref()), InvalidNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number })); header = good.clone(); header.gas_used = header.gas_limit + From::from(1); - check_fail(verify_block_basic(&create_test_block(&header), engine.deref()), + check_fail(basic_test(&create_test_block(&header), engine.deref()), TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit), min: None, found: header.gas_used })); header = good.clone(); header.extra_data.resize(engine.maximum_extra_data_size() + 1, 0u8); - check_fail(verify_block_basic(&create_test_block(&header), engine.deref()), + check_fail(basic_test(&create_test_block(&header), engine.deref()), ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data.len() })); header = good.clone(); header.extra_data.resize(engine.maximum_extra_data_size() + 1, 0u8); - check_fail(verify_block_basic(&create_test_block(&header), engine.deref()), + 
check_fail(basic_test(&create_test_block(&header), engine.deref()), ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data.len() })); header = good.clone(); header.uncles_hash = good_uncles_hash.clone(); - check_fail(verify_block_basic(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref()), + check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref()), InvalidTransactionsRoot(Mismatch { expected: good_transactions_root.clone(), found: header.transactions_root })); header = good.clone(); header.transactions_root = good_transactions_root.clone(); - check_fail(verify_block_basic(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref()), + check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref()), InvalidUnclesHash(Mismatch { expected: good_uncles_hash.clone(), found: header.uncles_hash })); - check_ok(verify_block_family(&create_test_block(&good), engine.deref(), &bc)); - check_ok(verify_block_family(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine.deref(), &bc)); + check_ok(family_test(&create_test_block(&good), engine.deref(), &bc)); + check_ok(family_test(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine.deref(), &bc)); header = good.clone(); header.parent_hash = H256::random(); - check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), UnknownParent(header.parent_hash)); header = good.clone(); header.timestamp = 10; - check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp + 1), found: header.timestamp })); header = good.clone(); header.number = 9; - check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), + check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine.deref(), &bc), InvalidNumber(OutOfBounds { max: None, min: Some(parent.number + 1), found: header.number })); header = good.clone(); let mut bad_uncles = good_uncles.clone(); bad_uncles.push(good_uncle1.clone()); - check_fail(verify_block_family(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine.deref(), &bc), + check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &bad_uncles), engine.deref(), &bc), TooManyUncles(OutOfBounds { max: Some(engine.maximum_uncle_count()), min: None, found: bad_uncles.len() })); // TODO: some additional uncle checks
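
For orientation, the sketch below shows how the reworked verification entry points compose after these patches: the caller decodes the header once and passes it to each phase (matching the new `verify_block_basic`/`verify_block_unordered` signatures), and keeps a bad-block set so descendants of a rejected block are refused early, as `BlockQueue` now does. This is illustrative only — `import_sketch`, its `bad` parameter, the exact `use` paths and the error mapping are assumptions inferred from the diffs above, not code from the patch series.

    use std::collections::HashSet;
    use util::hash::H256;
    use engine::Engine;
    use error::ImportError;
    use verification::{verify_block_basic, verify_block_unordered};
    use views::BlockView;

    // Sketch only: this wrapper and its `bad` set are illustrative; the
    // verification functions and `ImportError` are the ones changed above.
    fn import_sketch(bytes: &[u8], engine: &Engine, bad: &mut HashSet<H256>) -> Result<(), ImportError> {
        // Decode the header once and reuse it across the verification phases.
        let header = BlockView::new(bytes).header();
        // Refuse blocks that are already known bad, or that extend a bad
        // block, and remember such descendants as bad as well.
        if bad.contains(&header.hash()) || bad.contains(&header.parent_hash) {
            bad.insert(header.hash());
            return Err(ImportError::Bad(None));
        }
        // Phase 1: cheap structural checks on the decoded header and body.
        try!(verify_block_basic(&header, bytes, engine));
        // Phase 2: costly per-block checks (e.g. the ethash nonce).
        try!(verify_block_unordered(&header, bytes, engine));
        Ok(())
    }

The `try!` calls rely on the `From<Error> for ImportError` conversion added in src/error.rs; phase 3 (`verify_block_family`) is not shown here because it additionally needs a `BlockProvider`, as in `Client::import_verified_block`.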