From e514d3d80fc196851cf77cc6e82ea48300a304ab Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 16:48:37 +0100 Subject: [PATCH 01/10] Multithreaded event loop --- ethash/src/lib.rs | 12 +- src/bin/client.rs | 11 +- src/client.rs | 6 +- src/service.rs | 10 +- src/sync/chain.rs | 4 + src/sync/io.rs | 10 +- src/sync/mod.rs | 48 +-- util/Cargo.toml | 1 + util/src/io/mod.rs | 32 +- util/src/io/service.rs | 255 +++++++++----- util/src/lib.rs | 1 + util/src/network/connection.rs | 55 ++- util/src/network/handshake.rs | 36 +- util/src/network/host.rs | 626 ++++++++++++++++++--------------- util/src/network/mod.rs | 28 +- util/src/network/service.rs | 17 +- util/src/network/session.rs | 16 +- 17 files changed, 661 insertions(+), 507 deletions(-) diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index f7b6d2308..e87ee1a03 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -30,11 +30,13 @@ impl EthashManager { /// `nonce` - The nonce to pack into the mix pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork { let epoch = block_number / ETHASH_EPOCH_LENGTH; - if !self.lights.read().unwrap().contains_key(&epoch) { - let mut lights = self.lights.write().unwrap(); // obtain write lock - if !lights.contains_key(&epoch) { - let light = Light::new(block_number); - lights.insert(epoch, light); + while !self.lights.read().unwrap().contains_key(&epoch) { + if let Ok(mut lights) = self.lights.try_write() + { + if !lights.contains_key(&epoch) { + let light = Light::new(block_number); + lights.insert(epoch, light); + } } } self.lights.read().unwrap().get(&epoch).unwrap().compute(header_hash, nonce) diff --git a/src/bin/client.rs b/src/bin/client.rs index a862737be..92106aad4 100644 --- a/src/bin/client.rs +++ b/src/bin/client.rs @@ -29,7 +29,7 @@ fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); - let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0 }); + let io_handler = Arc::new(ClientIoHandler { client: service.client() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); loop { let mut cmd = String::new(); @@ -43,16 +43,15 @@ fn main() { struct ClientIoHandler { client: Arc>, - timer: TimerToken, } impl IoHandler for ClientIoHandler { - fn initialize<'s>(&'s mut self, io: &mut IoContext<'s, NetSyncMessage>) { - self.timer = io.register_timer(5000).expect("Error registering timer"); + fn initialize(&self, io: &IoContext) { + io.register_timer(0, 5000).expect("Error registering timer"); } - fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) { - if self.timer == timer { + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer == 0 { println!("Chain info: {:?}", self.client.read().unwrap().deref().chain_info()); } } diff --git a/src/client.rs b/src/client.rs index e02ab37d8..87bdc4416 100644 --- a/src/client.rs +++ b/src/client.rs @@ -1,6 +1,5 @@ use util::*; use rocksdb::{Options, DB}; -use rocksdb::DBCompactionStyle::DBUniversalCompaction; use blockchain::{BlockChain, BlockProvider}; use views::BlockView; use error::*; @@ -113,7 +112,9 @@ impl Client { pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let mut opts = Options::new(); + opts.set_max_open_files(256); opts.create_if_missing(true); + /* opts.set_max_open_files(256); opts.set_use_fsync(false); 
opts.set_bytes_per_sync(8388608); @@ -131,6 +132,7 @@ impl Client { opts.set_max_background_flushes(4); opts.set_filter_deletes(false); opts.set_disable_auto_compactions(true); + */ let mut state_path = path.to_path_buf(); state_path.push("state"); @@ -219,7 +221,7 @@ impl Client { return; } } - info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); + //info!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } } diff --git a/src/service.rs b/src/service.rs index 036c99bc4..02f813c2f 100644 --- a/src/service.rs +++ b/src/service.rs @@ -22,7 +22,7 @@ impl ClientService { dir.push(H64::from(spec.genesis_header().hash()).hex()); let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel())))); EthSync::register(&mut net_service, client.clone()); - let client_io = Box::new(ClientIoHandler { + let client_io = Arc::new(ClientIoHandler { client: client.clone() }); try!(net_service.io().register_handler(client_io)); @@ -48,14 +48,14 @@ struct ClientIoHandler { } impl IoHandler for ClientIoHandler { - fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) { + fn initialize(&self, _io: &IoContext) { } - fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) { + fn message(&self, _io: &IoContext, net_message: &NetSyncMessage) { match net_message { - &mut UserMessage(ref mut message) => { + &UserMessage(ref message) => { match message { - &mut SyncMessage::BlockVerified => { + &SyncMessage::BlockVerified => { self.client.write().unwrap().import_verified_blocks(); }, _ => {}, // ignore other messages diff --git a/src/sync/chain.rs b/src/sync/chain.rs index 15fe6d1f9..af954e4ad 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -424,6 +424,10 @@ impl ChainSync { let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty; if difficulty > peer_difficulty { trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h); + { + let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer"); + peer.latest = header_view.sha3(); + } self.sync_peer(io, peer_id, true); } } diff --git a/src/sync/io.rs b/src/sync/io.rs index affcbc0d7..f49591a9f 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -22,14 +22,14 @@ pub trait SyncIo { } /// Wraps `NetworkContext` and the blockchain client -pub struct NetSyncIo<'s, 'h, 'io> where 'h: 's, 'io: 'h { - network: &'s mut NetworkContext<'h, 'io, SyncMessage>, +pub struct NetSyncIo<'s, 'h> where 'h: 's { + network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient } -impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { +impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. 
- pub fn new(network: &'s mut NetworkContext<'h, 'io, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h,'io> { + pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, @@ -37,7 +37,7 @@ impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> { } } -impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> { +impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { fn disable_peer(&mut self, peer_id: PeerId) { self.network.disable_peer(peer_id); } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index da91a6889..5bcaf656e 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -26,9 +26,8 @@ use std::ops::*; use std::sync::*; use client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; -use util::TimerToken; -use util::Bytes; use sync::chain::ChainSync; +use util::{Bytes, TimerToken}; use sync::io::NetSyncIo; mod chain; @@ -38,10 +37,13 @@ mod range_collection; #[cfg(test)] mod tests; +const SYNC_TIMER: usize = 0; + /// Message type for external events +#[derive(Clone)] pub enum SyncMessage { /// New block has been imported into the blockchain - NewChainBlock(Bytes), + NewChainBlock(Bytes), //TODO: use Cow /// A block is ready BlockVerified, } @@ -53,7 +55,7 @@ pub struct EthSync { /// Shared blockchain client. TODO: this should evetually become an IPC endpoint chain: Arc>, /// Sync strategy - sync: ChainSync + sync: RwLock } pub use self::chain::SyncStatus; @@ -61,52 +63,50 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service pub fn register(service: &mut NetworkService, chain: Arc>) { - let sync = Box::new(EthSync { + let sync = Arc::new(EthSync { chain: chain, - sync: ChainSync::new(), + sync: RwLock::new(ChainSync::new()), }); - service.register_protocol(sync, "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); + service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); } /// Get sync status pub fn status(&self) -> SyncStatus { - self.sync.status() + self.sync.read().unwrap().status() } /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { - self.sync.abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } /// Restart sync pub fn restart(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); } } impl NetworkProtocolHandler for EthSync { - fn initialize(&mut self, io: &mut NetworkContext) { - self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); - io.register_timer(1000).unwrap(); + fn initialize(&self, io: &NetworkContext) { + io.register_timer(SYNC_TIMER, 1000).unwrap(); } - fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); } - fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_connected(&mut 
NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + fn connected(&self, io: &NetworkContext, peer: &PeerId) { + self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } - fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { - self.sync.on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { + self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); } - fn timeout(&mut self, io: &mut NetworkContext, _timer: TimerToken) { - self.sync.maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); - } - - fn message(&mut self, _io: &mut NetworkContext, _message: &SyncMessage) { + fn timeout(&self, io: &NetworkContext, timer: TimerToken) { + if timer == SYNC_TIMER { + self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + } } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 02fdad17f..15dc9523a 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -22,6 +22,7 @@ rust-crypto = "0.2.34" elastic-array = "0.4" heapsize = "0.2" itertools = "0.4" +crossbeam = "0.2" slab = { git = "https://github.com/arkpar/slab.git" } sha3 = { path = "sha3" } diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 23a8509cc..71b882fb8 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -36,13 +36,16 @@ /// } /// ``` mod service; +mod worker; + +use mio::{EventLoop, Token}; #[derive(Debug)] pub enum IoError { Mio(::std::io::Error), } -impl From<::mio::NotifyError>> for IoError where Message: Send { +impl From<::mio::NotifyError>> for IoError where Message: Send + Clone { fn from(_err: ::mio::NotifyError>) -> IoError { IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error")) } @@ -51,27 +54,32 @@ impl From<::mio::NotifyError>> for IoError /// Generic IO handler. /// All the handler function are called from within IO event loop. /// `Message` type is used as notification data -pub trait IoHandler: Send where Message: Send + 'static { +pub trait IoHandler: Send + Sync where Message: Send + Sync + Clone + 'static { /// Initialize the handler - fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, Message>) {} + fn initialize(&self, _io: &IoContext) {} /// Timer function called after a timeout created with `HandlerIo::timeout`. - fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _timer: TimerToken) {} + fn timeout(&self, _io: &IoContext, _timer: TimerToken) {} /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. 
- fn message<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _message: &'s mut Message) {} // TODO: make message immutable and provide internal channel for adding network handler + fn message(&self, _io: &IoContext, _message: &Message) {} /// Called when an IO stream gets closed - fn stream_hup<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_hup(&self, _io: &IoContext, _stream: StreamToken) {} /// Called when an IO stream can be read from - fn stream_readable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_readable(&self, _io: &IoContext, _stream: StreamToken) {} /// Called when an IO stream can be written to - fn stream_writable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {} + fn stream_writable(&self, _io: &IoContext, _stream: StreamToken) {} + /// Register a new stream with the event loop + fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} + /// Re-register a stream with the event loop + fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop>) {} } pub type TimerToken = service::TimerToken; pub type StreamToken = service::StreamToken; -pub type IoContext<'s, M> = service::IoContext<'s, M>; -pub type IoService = service::IoService; -pub type IoChannel = service::IoChannel; -//pub const USER_TOKEN_START: usize = service::USER_TOKEN; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12) +pub use io::service::IoContext; +pub type IoService = service::IoService; +pub type IoChannel = service::IoChannel; +pub type IoManager = service::IoManager; +pub const TOKENS_PER_HANDLER: usize = service::TOKENS_PER_HANDLER; #[cfg(test)] mod tests { diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 4a96d19a7..a229e4022 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -1,148 +1,221 @@ +use std::sync::*; use std::thread::{self, JoinHandle}; +use std::collections::HashMap; use mio::*; -use mio::util::{Slab}; use hash::*; use rlp::*; use error::*; use io::{IoError, IoHandler}; +use arrayvec::*; +use crossbeam::sync::chase_lev; +use io::worker::{Worker, Work, WorkType}; pub type TimerToken = usize; pub type StreamToken = usize; +pub type HandlerId = usize; // Tokens -const MAX_USER_TIMERS: usize = 32; -const USER_TIMER: usize = 0; -const LAST_USER_TIMER: usize = USER_TIMER + MAX_USER_TIMERS - 1; -//const USER_TOKEN: usize = LAST_USER_TIMER + 1; +pub const TOKENS_PER_HANDLER: usize = 16384; /// Messages used to communicate with the event loop from other threads. -pub enum IoMessage where Message: Send + Sized { +#[derive(Clone)] +pub enum IoMessage where Message: Send + Clone + Sized { /// Shutdown the event loop Shutdown, /// Register a new protocol handler. AddHandler { - handler: Box+Send>, + handler: Arc+Send>, + }, + AddTimer { + handler_id: HandlerId, + token: TimerToken, + delay: u64, + }, + RemoveTimer { + handler_id: HandlerId, + token: TimerToken, + }, + RegisterStream { + handler_id: HandlerId, + token: StreamToken, + }, + UpdateStreamRegistration { + handler_id: HandlerId, + token: StreamToken, }, /// Broadcast a message across all protocol handlers. UserMessage(Message) } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. -pub struct IoContext<'s, Message> where Message: Send + 'static { - timers: &'s mut Slab, - /// Low leve MIO Event loop for custom handler registration. 
- pub event_loop: &'s mut EventLoop>, +pub struct IoContext where Message: Send + Clone + 'static { + pub channel: IoChannel, + pub handler: HandlerId, } -impl<'s, Message> IoContext<'s, Message> where Message: Send + 'static { +impl IoContext where Message: Send + Clone + 'static { /// Create a new IO access point. Takes references to all the data that can be updated within the IO handler. - fn new(event_loop: &'s mut EventLoop>, timers: &'s mut Slab) -> IoContext<'s, Message> { + pub fn new(channel: IoChannel, handler: HandlerId) -> IoContext { IoContext { - event_loop: event_loop, - timers: timers, + handler: handler, + channel: channel, } } - /// Register a new IO timer. Returns a new timer token. 'IoHandler::timeout' will be called with the token. - pub fn register_timer(&mut self, ms: u64) -> Result { - match self.timers.insert(UserTimer { + /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. + pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::AddTimer { + token: token, delay: ms, - }) { - Ok(token) => { - self.event_loop.timeout_ms(token, ms).expect("Error registering user timer"); - Ok(token.as_usize()) - }, - _ => { panic!("Max timers reached") } - } + handler_id: self.handler, + })); + Ok(()) + } + + /// Delete a timer. + pub fn clear_timer(&self, token: TimerToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::RemoveTimer { + token: token, + handler_id: self.handler, + })); + Ok(()) + } + /// Register a new IO stream. + pub fn register_stream(&self, token: StreamToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::RegisterStream { + token: token, + handler_id: self.handler, + })); + Ok(()) + } + + /// Reregister an IO stream. + pub fn update_registration(&self, token: StreamToken) -> Result<(), UtilError> { + try!(self.channel.send_io(IoMessage::UpdateStreamRegistration { + token: token, + handler_id: self.handler, + })); + Ok(()) } /// Broadcast a message to other IO clients - pub fn message(&mut self, message: Message) { - match self.event_loop.channel().send(IoMessage::UserMessage(message)) { - Ok(_) => {} - Err(e) => { panic!("Error sending io message {:?}", e); } - } + pub fn message(&self, message: Message) { + self.channel.send(message).expect("Error sending message"); } } +#[derive(Clone)] struct UserTimer { delay: u64, + timeout: Timeout, } /// Root IO handler. Manages user handlers, messages and IO timers. -pub struct IoManager where Message: Send { - timers: Slab, - handlers: Vec>>, +pub struct IoManager where Message: Send + Sync { + timers: Arc>>, + handlers: Vec>>, + _workers: Vec, + worker_channel: chase_lev::Worker>, + work_ready: Arc, } -impl IoManager where Message: Send + 'static { +impl IoManager where Message: Send + Sync + Clone + 'static { /// Creates a new instance and registers it with the event loop.
pub fn start(event_loop: &mut EventLoop>) -> Result<(), UtilError> { + let (worker, stealer) = chase_lev::deque(); + let num_workers = 4; + let work_ready_mutex = Arc::new(Mutex::new(false)); + let work_ready = Arc::new(Condvar::new()); + let workers = (0..num_workers).map(|i| + Worker::new(i, stealer.clone(), IoChannel::new(event_loop.channel()), work_ready.clone(), work_ready_mutex.clone())).collect(); + let mut io = IoManager { - timers: Slab::new_starting_at(Token(USER_TIMER), MAX_USER_TIMERS), + timers: Arc::new(RwLock::new(HashMap::new())), handlers: Vec::new(), + worker_channel: worker, + _workers: workers, + work_ready: work_ready, }; try!(event_loop.run(&mut io)); Ok(()) } } -impl Handler for IoManager where Message: Send + 'static { +impl Handler for IoManager where Message: Send + Clone + Sync + 'static { type Timeout = Token; type Message = IoMessage; - fn ready(&mut self, event_loop: &mut EventLoop, token: Token, events: EventSet) { + fn ready(&mut self, _event_loop: &mut EventLoop, token: Token, events: EventSet) { + let handler_index = token.as_usize() / TOKENS_PER_HANDLER; + let token_id = token.as_usize() % TOKENS_PER_HANDLER; + if handler_index >= self.handlers.len() { + panic!("Unexpected stream token: {}", token.as_usize()); + } + let handler = self.handlers[handler_index].clone(); + if events.is_hup() { - for h in self.handlers.iter_mut() { - h.stream_hup(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } - else if events.is_readable() { - for h in self.handlers.iter_mut() { - h.stream_readable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } - else if events.is_writable() { - for h in self.handlers.iter_mut() { - h.stream_writable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); + self.worker_channel.push(Work { work_type: WorkType::Hup, token: token_id, handler: handler.clone(), handler_id: handler_index }); + } + else { + if events.is_readable() { + self.worker_channel.push(Work { work_type: WorkType::Readable, token: token_id, handler: handler.clone(), handler_id: handler_index }); + } + if events.is_writable() { + self.worker_channel.push(Work { work_type: WorkType::Writable, token: token_id, handler: handler.clone(), handler_id: handler_index }); } } + self.work_ready.notify_all(); } fn timeout(&mut self, event_loop: &mut EventLoop, token: Token) { - match token.as_usize() { - USER_TIMER ... LAST_USER_TIMER => { - let delay = { - let timer = self.timers.get_mut(token).expect("Unknown user timer token"); - timer.delay - }; - for h in self.handlers.iter_mut() { - h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - event_loop.timeout_ms(token, delay).expect("Error re-registering user timer"); - } - _ => { // Just pass the event down. IoHandler is supposed to re-register it if required. 
- for h in self.handlers.iter_mut() { - h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize()); - } - } + let handler_index = token.as_usize() / TOKENS_PER_HANDLER; + let token_id = token.as_usize() % TOKENS_PER_HANDLER; + if handler_index >= self.handlers.len() { + panic!("Unexpected timer token: {}", token.as_usize()); + } + if let Some(timer) = self.timers.read().unwrap().get(&token.as_usize()) { + event_loop.timeout_ms(token, timer.delay).expect("Error re-registering user timer"); + let handler = self.handlers[handler_index].clone(); + self.worker_channel.push(Work { work_type: WorkType::Timeout, token: token_id, handler: handler, handler_id: handler_index }); + self.work_ready.notify_all(); } } fn notify(&mut self, event_loop: &mut EventLoop, msg: Self::Message) { - let mut m = msg; - match m { + match msg { IoMessage::Shutdown => event_loop.shutdown(), - IoMessage::AddHandler { - handler, - } => { - self.handlers.push(handler); - self.handlers.last_mut().unwrap().initialize(&mut IoContext::new(event_loop, &mut self.timers)); + IoMessage::AddHandler { handler } => { + let handler_id = { + self.handlers.push(handler.clone()); + self.handlers.len() - 1 + }; + handler.initialize(&IoContext::new(IoChannel::new(event_loop.channel()), handler_id)); }, - IoMessage::UserMessage(ref mut data) => { - for h in self.handlers.iter_mut() { - h.message(&mut IoContext::new(event_loop, &mut self.timers), data); + IoMessage::AddTimer { handler_id, token, delay } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + let timeout = event_loop.timeout_ms(Token(timer_id), delay).expect("Error registering user timer"); + self.timers.write().unwrap().insert(timer_id, UserTimer { delay: delay, timeout: timeout }); + }, + IoMessage::RemoveTimer { handler_id, token } => { + let timer_id = token + handler_id * TOKENS_PER_HANDLER; + if let Some(timer) = self.timers.write().unwrap().remove(&timer_id) { + event_loop.clear_timeout(timer.timeout); } + }, + IoMessage::RegisterStream { handler_id, token } => { + let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); + handler.register_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); + }, + IoMessage::UpdateStreamRegistration { handler_id, token } => { + let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone(); + handler.update_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop); + }, + IoMessage::UserMessage(data) => { + for n in 0 .. self.handlers.len() { + let handler = self.handlers[n].clone(); + self.worker_channel.push(Work { work_type: WorkType::Message(data.clone()), token: 0, handler: handler, handler_id: n }); + } + self.work_ready.notify_all(); } } } @@ -150,11 +223,19 @@ impl Handler for IoManager where Message: Send + 'static { /// Allows sending messages into the event loop. All the IO handlers will get the message /// in the `message` callback. 
-pub struct IoChannel where Message: Send { +pub struct IoChannel where Message: Send + Clone{ channel: Option>> } -impl IoChannel where Message: Send { +impl Clone for IoChannel where Message: Send + Clone { + fn clone(&self) -> IoChannel { + IoChannel { + channel: self.channel.clone() + } + } +} + +impl IoChannel where Message: Send + Clone { /// Send a msessage through the channel pub fn send(&self, message: Message) -> Result<(), IoError> { if let Some(ref channel) = self.channel { @@ -163,20 +244,30 @@ impl IoChannel where Message: Send { Ok(()) } + pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { + if let Some(ref channel) = self.channel { + try!(channel.send(message)) + } + Ok(()) + } /// Create a new channel to connected to event loop. pub fn disconnected() -> IoChannel { IoChannel { channel: None } } + + fn new(channel: Sender>) -> IoChannel { + IoChannel { channel: Some(channel) } + } } /// General IO Service. Starts an event loop and dispatches IO requests. /// 'Message' is a notification message type -pub struct IoService where Message: Send + 'static { +pub struct IoService where Message: Send + Sync + Clone + 'static { thread: Option>, - host_channel: Sender> + host_channel: Sender>, } -impl IoService where Message: Send + 'static { +impl IoService where Message: Send + Sync + Clone + 'static { /// Starts IO event loop pub fn start() -> Result, UtilError> { let mut event_loop = EventLoop::new().unwrap(); @@ -191,7 +282,7 @@ impl IoService where Message: Send + 'static { } /// Regiter a IO hadnler with the event loop. - pub fn register_handler(&mut self, handler: Box+Send>) -> Result<(), IoError> { + pub fn register_handler(&mut self, handler: Arc+Send>) -> Result<(), IoError> { try!(self.host_channel.send(IoMessage::AddHandler { handler: handler, })); @@ -210,7 +301,7 @@ impl IoService where Message: Send + 'static { } } -impl Drop for IoService where Message: Send { +impl Drop for IoService where Message: Send + Sync + Clone { fn drop(&mut self) { self.host_channel.send(IoMessage::Shutdown).unwrap(); self.thread.take().unwrap().join().unwrap(); diff --git a/util/src/lib.rs b/util/src/lib.rs index 4bc47e61c..2161af34f 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -51,6 +51,7 @@ extern crate crypto as rcrypto; extern crate secp256k1; extern crate arrayvec; extern crate elastic_array; +extern crate crossbeam; pub mod standard; #[macro_use] diff --git a/util/src/network/connection.rs b/util/src/network/connection.rs index f11c10384..a2c77e8cf 100644 --- a/util/src/network/connection.rs +++ b/util/src/network/connection.rs @@ -1,5 +1,5 @@ use std::collections::VecDeque; -use mio::{Handler, Token, EventSet, EventLoop, Timeout, PollOpt, TryRead, TryWrite}; +use mio::{Handler, Token, EventSet, EventLoop, PollOpt, TryRead, TryWrite}; use mio::tcp::*; use hash::*; use sha3::*; @@ -7,6 +7,7 @@ use bytes::*; use rlp::*; use std::io::{self, Cursor, Read}; use error::*; +use io::{IoContext, StreamToken}; use network::error::NetworkError; use network::handshake::Handshake; use crypto; @@ -17,11 +18,12 @@ use rcrypto::buffer::*; use tiny_keccak::Keccak; const ENCRYPTED_HEADER_LEN: usize = 32; +const RECIEVE_PAYLOAD_TIMEOUT: u64 = 30000; /// Low level tcp connection pub struct Connection { /// Connection id (token) - pub token: Token, + pub token: StreamToken, /// Network socket pub socket: TcpStream, /// Receive buffer @@ -45,14 +47,14 @@ pub enum WriteStatus { impl Connection { /// Create a new connection with given id and socket. 
- pub fn new(token: Token, socket: TcpStream) -> Connection { + pub fn new(token: StreamToken, socket: TcpStream) -> Connection { Connection { token: token, socket: socket, send_queue: VecDeque::new(), rec_buf: Bytes::new(), rec_size: 0, - interest: EventSet::hup(), + interest: EventSet::hup() | EventSet::readable(), } } @@ -132,20 +134,19 @@ impl Connection { } /// Register this connection with the IO event loop. - pub fn register(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection register; token={:?}", self.token); - self.interest.insert(EventSet::readable()); - event_loop.register(&self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to register {:?}, {:?}", self.token, e); + pub fn register_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { + trace!(target: "net", "connection register; token={:?}", reg); + event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { + error!("Failed to register {:?}, {:?}", reg, e); Err(e) }) } /// Update connection registration. Should be called at the end of the IO handler. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> io::Result<()> { - trace!(target: "net", "connection reregister; token={:?}", self.token); - event_loop.reregister( &self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { - error!("Failed to reregister {:?}, {:?}", self.token, e); + pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> io::Result<()> { + trace!(target: "net", "connection reregister; token={:?}", reg); + event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| { + error!("Failed to reregister {:?}, {:?}", reg, e); Err(e) }) } @@ -182,8 +183,6 @@ pub struct EncryptedConnection { ingress_mac: Keccak, /// Read state read_state: EncryptedConnectionState, - /// Disconnect timeout - idle_timeout: Option, /// Protocol id for the last received packet protocol_id: u16, /// Payload expected to be received for the last header. @@ -192,7 +191,7 @@ pub struct EncryptedConnection { impl EncryptedConnection { /// Create an encrypted connection out of the handshake. Consumes a handshake object. - pub fn new(handshake: Handshake) -> Result { + pub fn new(mut handshake: Handshake) -> Result { let shared = try!(crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_public)); let mut nonce_material = H512::new(); if handshake.originated { @@ -227,6 +226,7 @@ impl EncryptedConnection { ingress_mac.update(&mac_material); ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher }); + handshake.connection.expect(ENCRYPTED_HEADER_LEN); Ok(EncryptedConnection { connection: handshake.connection, encoder: encoder, @@ -235,7 +235,6 @@ impl EncryptedConnection { egress_mac: egress_mac, ingress_mac: ingress_mac, read_state: EncryptedConnectionState::Header, - idle_timeout: None, protocol_id: 0, payload_len: 0 }) @@ -337,13 +336,14 @@ impl EncryptedConnection { } /// Readable IO handler. Tracker receive status and returns decoded packet if avaialable. 
- pub fn readable(&mut self, event_loop: &mut EventLoop) -> Result, UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn readable(&mut self, io: &IoContext) -> Result, UtilError> where Message: Send + Clone{ + io.clear_timer(self.connection.token).unwrap(); match self.read_state { EncryptedConnectionState::Header => { match try!(self.connection.readable()) { Some(data) => { try!(self.read_header(&data)); + try!(io.register_timer(self.connection.token, RECIEVE_PAYLOAD_TIMEOUT)); }, None => {} }; @@ -363,24 +363,15 @@ impl EncryptedConnection { } /// Writable IO handler. Processes send queeue. - pub fn writable(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn writable(&mut self, io: &IoContext) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); try!(self.connection.writable()); Ok(()) } - /// Register this connection with the event handler. - pub fn register>(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.connection.expect(ENCRYPTED_HEADER_LEN); - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); - self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok(); - try!(self.connection.reregister(event_loop)); - Ok(()) - } - /// Update connection registration. This should be called at the end of the event loop. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - try!(self.connection.reregister(event_loop)); + pub fn update_socket(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.update_socket(reg, event_loop)); Ok(()) } } diff --git a/util/src/network/handshake.rs b/util/src/network/handshake.rs index ca95808b4..ea04a5462 100644 --- a/util/src/network/handshake.rs +++ b/util/src/network/handshake.rs @@ -10,6 +10,7 @@ use network::host::{HostInfo}; use network::node::NodeId; use error::*; use network::error::NetworkError; +use io::{IoContext, StreamToken}; #[derive(PartialEq, Eq, Debug)] enum HandshakeState { @@ -33,8 +34,6 @@ pub struct Handshake { state: HandshakeState, /// Outgoing or incoming connection pub originated: bool, - /// Disconnect timeout - idle_timeout: Option, /// ECDH ephemeral pub ecdhe: KeyPair, /// Connection nonce @@ -51,16 +50,16 @@ pub struct Handshake { const AUTH_PACKET_SIZE: usize = 307; const ACK_PACKET_SIZE: usize = 210; +const HANDSHAKE_TIMEOUT: u64 = 30000; impl Handshake { /// Create a new handshake object - pub fn new(token: Token, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result { + pub fn new(token: StreamToken, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result { Ok(Handshake { id: id.clone(), connection: Connection::new(token, socket), originated: false, state: HandshakeState::New, - idle_timeout: None, ecdhe: try!(KeyPair::create()), nonce: nonce.clone(), remote_public: Public::new(), @@ -71,8 +70,9 @@ impl Handshake { } /// Start a handhsake - pub fn start(&mut self, host: &HostInfo, originated: bool) -> Result<(), UtilError> { + pub fn start(&mut self, io: &IoContext, host: &HostInfo, originated: bool) -> Result<(), UtilError> where Message: Send + Clone{ self.originated = originated; + io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT).ok(); if originated { try!(self.write_auth(host)); } @@ -89,8 +89,8 @@ impl Handshake { } /// Readable IO handler. Drives the state change. 
- pub fn readable(&mut self, event_loop: &mut EventLoop, host: &HostInfo) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); match self.state { HandshakeState::ReadingAuth => { match try!(self.connection.readable()) { @@ -110,29 +110,33 @@ impl Handshake { None => {} }; }, + HandshakeState::StartSession => {}, _ => { panic!("Unexpected state"); } } if self.state != HandshakeState::StartSession { - try!(self.connection.reregister(event_loop)); + try!(io.update_registration(self.connection.token)); } Ok(()) } /// Writabe IO handler. - pub fn writable(&mut self, event_loop: &mut EventLoop, _host: &HostInfo) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); + pub fn writable(&mut self, io: &IoContext, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone { + io.clear_timer(self.connection.token).unwrap(); try!(self.connection.writable()); if self.state != HandshakeState::StartSession { - try!(self.connection.reregister(event_loop)); + io.update_registration(self.connection.token).unwrap(); } Ok(()) } - /// Register the IO handler with the event loop - pub fn register>(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.idle_timeout.map(|t| event_loop.clear_timeout(t)); - self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok(); - try!(self.connection.register(event_loop)); + /// Register the socket with the event loop + pub fn register_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.register_socket(reg, event_loop)); + Ok(()) + } + + pub fn update_socket>(&self, reg: Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + try!(self.connection.update_socket(reg, event_loop)); Ok(()) } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 37b58f1f0..f83cb1908 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -1,8 +1,9 @@ -use std::mem; use std::net::{SocketAddr}; use std::collections::{HashMap}; use std::hash::{Hasher}; use std::str::{FromStr}; +use std::sync::*; +use std::ops::*; use mio::*; use mio::tcp::*; use mio::udp::*; @@ -64,14 +65,20 @@ pub type PacketId = u8; pub type ProtocolId = &'static str; /// Messages used to communitate with the event loop from other threads. -pub enum NetworkIoMessage where Message: Send { +#[derive(Clone)] +pub enum NetworkIoMessage where Message: Send + Sync + Clone { /// Register a new protocol handler. AddHandler { - handler: Option+Send>>, + handler: Arc + Sync>, protocol: ProtocolId, versions: Vec, }, - /// Send data over the network. + AddTimer { + protocol: ProtocolId, + token: TimerToken, + delay: u64, + }, + /// Send data over the network. // TODO: remove this Send { peer: PeerId, packet_id: PacketId, @@ -104,46 +111,45 @@ impl Encodable for CapabilityInfo { } /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. 
-pub struct NetworkContext<'s, 'io, Message> where Message: Send + 'static, 'io: 's { - io: &'s mut IoContext<'io, NetworkIoMessage>, +pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, 's { + io: &'s IoContext>, protocol: ProtocolId, - connections: &'s mut Slab, - timers: &'s mut HashMap, + connections: Arc>>, session: Option, } -impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 'static, { +impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, { /// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler. - fn new(io: &'s mut IoContext<'io, NetworkIoMessage>, + fn new(io: &'s IoContext>, protocol: ProtocolId, - session: Option, connections: &'s mut Slab, - timers: &'s mut HashMap) -> NetworkContext<'s, 'io, Message> { + session: Option, connections: Arc>>) -> NetworkContext<'s, Message> { NetworkContext { io: io, protocol: protocol, session: session, connections: connections, - timers: timers, } } /// Send a packet over the network to another peer. - pub fn send(&mut self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { - match self.connections.get_mut(peer) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { - warn!(target: "net", "Send: Peer does not exist"); + pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { + if let Some(connection) = self.connections.read().unwrap().get(peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Session(ref mut s) => { + s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| { + warn!(target: "net", "Send error: {:?}", e); + }); //TODO: don't copy vector data + }, + _ => warn!(target: "net", "Send: Peer is not connected yet") } + } else { + warn!(target: "net", "Send: Peer does not exist") } Ok(()) } /// Respond to a current network message. Panics if no there is no packet in the context. - pub fn respond(&mut self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { + pub fn respond(&self, packet_id: PacketId, data: Vec) -> Result<(), UtilError> { match self.session { Some(session) => self.send(session, packet_id, data), None => { @@ -153,31 +159,31 @@ impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 's } /// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected. - pub fn disable_peer(&mut self, _peer: PeerId) { + pub fn disable_peer(&self, _peer: PeerId) { //TODO: remove capability, disconnect if no capabilities left } - /// Register a new IO timer. Returns a new timer token. 'NetworkProtocolHandler::timeout' will be called with the token. - pub fn register_timer(&mut self, ms: u64) -> Result{ - match self.io.register_timer(ms) { - Ok(token) => { - self.timers.insert(token, self.protocol); - Ok(token) - }, - e => e, - } + /// Register a new IO timer. 'IoHandler::timeout' will be called with the token. 
+ pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> { + self.io.message(NetworkIoMessage::AddTimer { + token: token, + delay: ms, + protocol: self.protocol, + }); + Ok(()) } /// Returns peer identification string pub fn peer_info(&self, peer: PeerId) -> String { - match self.connections.get(peer) { - Some(&ConnectionEntry::Session(ref s)) => { - s.info.client_version.clone() - }, - _ => { - "unknown".to_string() + if let Some(connection) = self.connections.read().unwrap().get(peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Session(ref s) => { + return s.info.client_version.clone() + }, + _ => {} } } + "unknown".to_string() } } @@ -222,26 +228,35 @@ enum ConnectionEntry { Session(Session) } -/// Root IO handler. Manages protocol handlers, IO timers and network connections. -pub struct Host where Message: Send { - pub info: HostInfo, - udp_socket: UdpSocket, - listener: TcpListener, - connections: Slab, - timers: HashMap, - nodes: HashMap, - handlers: HashMap>>, +type SharedConnectionEntry = Arc>; + +#[derive(Copy, Clone)] +struct ProtocolTimer { + pub protocol: ProtocolId, + pub token: TimerToken, // Handler level token } -impl Host where Message: Send { +/// Root IO handler. Manages protocol handlers, IO timers and network connections. +pub struct Host where Message: Send + Sync + Clone { + pub info: RwLock, + udp_socket: Mutex, + tcp_listener: Mutex, + connections: Arc>>, + nodes: RwLock>, + handlers: RwLock>>>, + timers: RwLock>, + timer_counter: RwLock, +} + +impl Host where Message: Send + Sync + Clone { pub fn new() -> Host { let config = NetworkConfiguration::new(); let addr = config.listen_address; // Setup the server socket - let listener = TcpListener::bind(&addr).unwrap(); + let tcp_listener = TcpListener::bind(&addr).unwrap(); let udp_socket = UdpSocket::bound(&addr).unwrap(); - Host:: { - info: HostInfo { + let mut host = Host:: { + info: RwLock::new(HostInfo { keys: KeyPair::create().unwrap(), config: config, nonce: H256::random(), @@ -249,39 +264,64 @@ impl Host where Message: Send { client_version: "parity".to_string(), listen_port: 0, capabilities: Vec::new(), - }, - udp_socket: udp_socket, - listener: listener, - connections: Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS), - timers: HashMap::new(), - nodes: HashMap::new(), - handlers: HashMap::new(), - } + }), + udp_socket: Mutex::new(udp_socket), + tcp_listener: Mutex::new(tcp_listener), + connections: Arc::new(RwLock::new(Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS))), + nodes: RwLock::new(HashMap::new()), + handlers: RwLock::new(HashMap::new()), + timers: RwLock::new(HashMap::new()), + timer_counter: RwLock::new(LAST_CONNECTION + 1), + }; + let port = host.info.read().unwrap().config.listen_address.port(); + host.info.write().unwrap().deref_mut().listen_port = port; + + /* + match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() { + Some(iface) => config.public_address = iface.addr.unwrap(), + None => warn!("No public network interface"), + */ + + // self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); + // GO bootnodes + host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE + 
host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR + host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG + // ETH/DEV cpp-ethereum (poc-9.ethdev.com) + host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303"); + host } - fn add_node(&mut self, id: &str) { + pub fn add_node(&mut self, id: &str) { match Node::from_str(id) { Err(e) => { warn!("Could not add node: {:?}", e); }, Ok(n) => { - self.nodes.insert(n.id.clone(), n); + self.nodes.write().unwrap().insert(n.id.clone(), n); } } } - fn maintain_network(&mut self, io: &mut IoContext>) { + pub fn client_version(&self) -> String { + self.info.read().unwrap().client_version.clone() + } + + pub fn client_id(&self) -> NodeId { + self.info.read().unwrap().id().clone() + } + + fn maintain_network(&self, io: &IoContext>) { self.connect_peers(io); - io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap(); } fn have_session(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) + self.connections.read().unwrap().iter().any(|e| match e.lock().unwrap().deref() { &ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false }) } fn connecting_to(&self, id: &NodeId) -> bool { - self.connections.iter().any(|e| match e { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) + self.connections.read().unwrap().iter().any(|e| match e.lock().unwrap().deref() { &ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false }) } - fn connect_peers(&mut self, io: &mut IoContext>) { + fn connect_peers(&self, io: &IoContext>) { struct NodeInfo { id: NodeId, peer_type: PeerType @@ -292,13 +332,14 @@ impl Host where Message: Send { let mut req_conn = 0; //TODO: use nodes from discovery here //for n in self.node_buckets.iter().flat_map(|n| &n.nodes).map(|id| NodeInfo { id: id.clone(), peer_type: self.nodes.get(id).unwrap().peer_type}) { - for n in self.nodes.values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) { + let pin = self.info.read().unwrap().deref().config.pin; + for n in self.nodes.read().unwrap().values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) { let connected = self.have_session(&n.id) || self.connecting_to(&n.id); let required = n.peer_type == PeerType::Required; if connected && required { req_conn += 1; } - else if !connected && (!self.info.config.pin || required) { + else if !connected && (!pin || required) { to_connect.push(n); } } @@ -312,8 +353,7 @@ impl Host where Message: Send { } } - if !self.info.config.pin - { + if !pin { let pending_count = 0; //TODO: let peer_count = 0; let mut open_slots = IDEAL_PEERS - peer_count - pending_count + req_conn; @@ -328,23 +368,24 @@ impl Host where Message: Send { } } - fn connect_peer(&mut self, id: &NodeId, io: &mut IoContext>) { - if self.have_session(id) - { + fn connect_peer(&self, id: &NodeId, io: &IoContext>) { + if self.have_session(id) { warn!("Aborted connect. Node already connected."); return; } - if self.connecting_to(id) - { + if self.connecting_to(id) { warn!("Aborted connect. 
Node already connecting."); return; } let socket = { - let node = self.nodes.get_mut(id).unwrap(); - node.last_attempted = Some(::time::now()); - - match TcpStream::connect(&node.endpoint.address) { + let address = { + let mut nodes = self.nodes.write().unwrap(); + let node = nodes.get_mut(id).unwrap(); + node.last_attempted = Some(::time::now()); + node.endpoint.address + }; + match TcpStream::connect(&address) { Ok(socket) => socket, Err(_) => { warn!("Cannot connect to node"); @@ -353,224 +394,186 @@ impl Host where Message: Send { } }; - let nonce = self.info.next_nonce(); - match self.connections.insert_with(|token| ConnectionEntry::Handshake(Handshake::new(Token(token), id, socket, &nonce).expect("Can't create handshake"))) { - Some(token) => { - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.start(&self.info, true) - .and_then(|_| h.register(io.event_loop)) - .unwrap_or_else (|e| { - debug!(target: "net", "Handshake create error: {:?}", e); - }); - }, - _ => {} - } - }, - None => { warn!("Max connections reached") } + let nonce = self.info.write().unwrap().next_nonce(); + if self.connections.write().unwrap().insert_with(|token| { + let mut handshake = Handshake::new(token, id, socket, &nonce).expect("Can't create handshake"); + handshake.start(io, &self.info.read().unwrap(), true).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| { + debug!(target: "net", "Handshake create error: {:?}", e); + }); + Arc::new(Mutex::new(ConnectionEntry::Handshake(handshake))) + }).is_none() { + warn!("Max connections reached"); } } - - fn accept(&mut self, _io: &mut IoContext>) { + fn accept(&self, _io: &IoContext>) { trace!(target: "net", "accept"); } - fn connection_writable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { - let mut kill = false; + fn connection_writable(&self, token: StreamToken, io: &IoContext>) { let mut create_session = false; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.writable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Handshake write error: {:?}", e); - kill = true; - }); - create_session = h.done(); - }, - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.writable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Session write error: {:?}", e); - kill = true; - }); + let mut kill = false; + if let Some(connection) = self.connections.read().unwrap().get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Handshake(ref mut h) => { + match h.writable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake write error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + if h.done() { + create_session = true; + } + }, + &mut ConnectionEntry::Session(ref mut s) => { + match s.writable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Session write error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); + } } - _ => { - warn!(target: "net", "Received event for unknown connection"); - } - } + } else { warn!(target: "net", "Received event for unknown connection") } if kill { - self.kill_connection(token, io); + self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; } else if create_session { self.start_session(token, io); 
- } - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); - }, - _ => (), + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); } } - fn connection_closed<'s>(&'s mut self, token: TimerToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn connection_closed(&self, token: TimerToken, io: &IoContext>) { self.kill_connection(token, io); } - fn connection_readable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { - let mut kill = false; - let mut create_session = false; + fn connection_readable(&self, token: StreamToken, io: &IoContext>) { let mut ready_data: Vec = Vec::new(); let mut packet_data: Option<(ProtocolId, PacketId, Vec)> = None; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(ref mut h)) => { - h.readable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Handshake read error: {:?}", e); - kill = true; - }); - create_session = h.done(); - }, - Some(&mut ConnectionEntry::Session(ref mut s)) => { - let sd = { s.readable(io.event_loop, &self.info).unwrap_or_else(|e| { - debug!(target: "net", "Session read error: {:?}", e); - kill = true; - SessionData::None - }) }; - match sd { - SessionData::Ready => { - for (p, _) in self.handlers.iter_mut() { - if s.have_capability(p) { - ready_data.push(p); + let mut create_session = false; + let mut kill = false; + if let Some(connection) = self.connections.read().unwrap().get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Handshake(ref mut h) => { + match h.readable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake read error: {:?}", e); + kill = true; + }, + Ok(_) => () + } + if h.done() { + create_session = true; + } + }, + &mut ConnectionEntry::Session(ref mut s) => { + match s.readable(io, &self.info.read().unwrap()) { + Err(e) => { + debug!(target: "net", "Handshake read error: {:?}", e); + kill = true; + }, + Ok(SessionData::Ready) => { + for (p, _) in self.handlers.read().unwrap().iter() { + if s.have_capability(p) { + ready_data.push(p); + } } - } - }, - SessionData::Packet { - data, - protocol, - packet_id, - } => { - match self.handlers.get_mut(protocol) { - None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, - Some(_) => packet_data = Some((protocol, packet_id, data)), - } - }, - SessionData::None => {}, - } - } - _ => { - warn!(target: "net", "Received event for unknown connection"); - } - } - if kill { - self.kill_connection(token, io); - return; - } - if create_session { - self.start_session(token, io); - } - for p in ready_data { - let mut h = self.handlers.get_mut(p).unwrap(); - h.connected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token); - } - if let Some((p, packet_id, data)) = packet_data { - let mut h = self.handlers.get_mut(p).unwrap(); - h.read(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token, packet_id, &data[1..]); - } - - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); - }, - _ => (), - } - } - - fn start_session(&mut self, token: StreamToken, io: &mut IoContext>) 
{ - let info = &self.info; - // TODO: use slab::replace_with (currently broken) - /* - match self.connections.remove(token) { - Some(ConnectionEntry::Handshake(h)) => { - match Session::new(h, io.event_loop, info) { - Ok(session) => { - assert!(token == self.connections.insert(ConnectionEntry::Session(session)).ok().unwrap()); - }, - Err(e) => { - debug!(target: "net", "Session construction error: {:?}", e); + }, + Ok(SessionData::Packet { + data, + protocol, + packet_id, + }) => { + match self.handlers.read().unwrap().get(protocol) { + None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, + Some(_) => packet_data = Some((protocol, packet_id, data)), + } + }, + Ok(SessionData::None) => {}, } } - }, - _ => panic!("Error updating slab with session") - }*/ - self.connections.replace_with(token, |c| { - match c { - ConnectionEntry::Handshake(h) => Session::new(h, io.event_loop, info) - .map(|s| Some(ConnectionEntry::Session(s))) - .unwrap_or_else(|e| { - debug!(target: "net", "Session construction error: {:?}", e); - None - }), - _ => { panic!("No handshake to create a session from"); } } - }).expect("Error updating slab with session"); + } else { + warn!(target: "net", "Received event for unknown connection"); + } + if kill { + self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection + return; + } else if create_session { + self.start_session(token, io); + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e)); + } + for p in ready_data { + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.connected(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token); + } + if let Some((p, packet_id, data)) = packet_data { + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.read(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token, packet_id, &data[1..]); + } + io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Token registration error: {:?}", e)); } - fn connection_timeout<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn start_session(&self, token: StreamToken, io: &IoContext>) { + self.connections.write().unwrap().replace_with(token, |c| { + match Arc::try_unwrap(c).ok().unwrap().into_inner().unwrap() { + ConnectionEntry::Handshake(h) => { + let session = Session::new(h, io, &self.info.read().unwrap()).expect("Session creation error"); + io.update_registration(token).expect("Error updating session registration"); + Some(Arc::new(Mutex::new(ConnectionEntry::Session(session)))) + }, + _ => { None } // handshake expired + } + }).ok(); + } + + fn connection_timeout(&self, token: StreamToken, io: &IoContext>) { self.kill_connection(token, io) } - fn kill_connection<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage>) { + fn kill_connection(&self, token: StreamToken, io: &IoContext>) { let mut to_disconnect: Vec = Vec::new(); - let mut remove = true; - match self.connections.get_mut(token) { - Some(&mut ConnectionEntry::Handshake(_)) => (), // just abandon handshake - Some(&mut ConnectionEntry::Session(ref mut s)) if s.is_ready() => { - for (p, _) in self.handlers.iter_mut() { - if s.have_capability(p) { - to_disconnect.push(p); - } + { + let mut connections = self.connections.write().unwrap(); + if let Some(connection) = connections.get(token).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut 
ConnectionEntry::Handshake(_) => { + connections.remove(token); + }, + &mut ConnectionEntry::Session(ref mut s) if s.is_ready() => { + for (p, _) in self.handlers.read().unwrap().iter() { + if s.have_capability(p) { + to_disconnect.push(p); + } + } + connections.remove(token); + }, + _ => {}, } - }, - _ => { - remove = false; - }, + } } for p in to_disconnect { - let mut h = self.handlers.get_mut(p).unwrap(); - h.disconnected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token); - } - if remove { - self.connections.remove(token); + let h = self.handlers.read().unwrap().get(p).unwrap().clone(); + h.disconnected(&mut NetworkContext::new(io, p, Some(token), self.connections.clone()), &token); } } } -impl IoHandler> for Host where Message: Send + 'static { +impl IoHandler> for Host where Message: Send + Sync + Clone + 'static { /// Initialize networking - fn initialize(&mut self, io: &mut IoContext>) { - /* - match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() { - Some(iface) => config.public_address = iface.addr.unwrap(), - None => warn!("No public network interface"), - */ - - // Start listening for incoming connections - io.event_loop.register(&self.listener, Token(TCP_ACCEPT), EventSet::readable(), PollOpt::edge()).unwrap(); - io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap(); - // open the udp socket - io.event_loop.register(&self.udp_socket, Token(NODETABLE_RECEIVE), EventSet::readable(), PollOpt::edge()).unwrap(); - io.event_loop.timeout_ms(Token(NODETABLE_MAINTAIN), 7200).unwrap(); - let port = self.info.config.listen_address.port(); - self.info.listen_port = port; - - self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303"); -/* // GO bootnodes - self.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE - self.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR - self.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG - // ETH/DEV cpp-ethereum (poc-9.ethdev.com) - self.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");*/ + fn initialize(&self, io: &IoContext>) { + io.register_stream(TCP_ACCEPT).expect("Error registering TCP listener"); + io.register_stream(NODETABLE_RECEIVE).expect("Error registering UDP listener"); + io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer"); + //io.register_timer(NODETABLE_MAINTAIN, 7200); } - fn stream_hup<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_hup(&self, io: &IoContext>, stream: StreamToken) { trace!(target: "net", "Hup: {}", stream); match stream { FIRST_CONNECTION ... 
LAST_CONNECTION => self.connection_closed(stream, io), @@ -578,7 +581,7 @@ impl IoHandler> for Host where Messa }; } - fn stream_readable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_readable(&self, io: &IoContext>, stream: StreamToken) { match stream { FIRST_CONNECTION ... LAST_CONNECTION => self.connection_readable(stream, io), NODETABLE_RECEIVE => {}, @@ -587,65 +590,114 @@ impl IoHandler> for Host where Messa } } - fn stream_writable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, stream: StreamToken) { + fn stream_writable(&self, io: &IoContext>, stream: StreamToken) { match stream { FIRST_CONNECTION ... LAST_CONNECTION => self.connection_writable(stream, io), + NODETABLE_RECEIVE => {}, _ => panic!("Received unknown writable token"), } } - fn timeout<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, token: TimerToken) { + fn timeout(&self, io: &IoContext>, token: TimerToken) { match token { IDLE => self.maintain_network(io), FIRST_CONNECTION ... LAST_CONNECTION => self.connection_timeout(token, io), NODETABLE_DISCOVERY => {}, NODETABLE_MAINTAIN => {}, - _ => match self.timers.get_mut(&token).map(|p| *p) { - Some(protocol) => match self.handlers.get_mut(protocol) { - None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) }, - Some(h) => { h.timeout(&mut NetworkContext::new(io, protocol, Some(token), &mut self.connections, &mut self.timers), token); } - }, - None => {} // time not registerd through us + _ => match self.timers.read().unwrap().get(&token).map(|p| *p) { + Some(timer) => match self.handlers.read().unwrap().get(timer.protocol).map(|h| h.clone()) { + None => { warn!(target: "net", "No handler found for protocol: {:?}", timer.protocol) }, + Some(h) => { h.timeout(&NetworkContext::new(io, timer.protocol, None, self.connections.clone()), timer.token); } + }, + None => { warn!("Unknown timer token: {}", token); } // timer is not registerd through us } } } - fn message<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage>, message: &'s mut NetworkIoMessage) { + fn message(&self, io: &IoContext>, message: &NetworkIoMessage) { match message { - &mut NetworkIoMessage::AddHandler { - ref mut handler, + &NetworkIoMessage::AddHandler { + ref handler, ref protocol, ref versions } => { - let mut h = mem::replace(handler, None).unwrap(); - h.initialize(&mut NetworkContext::new(io, protocol, None, &mut self.connections, &mut self.timers)); - self.handlers.insert(protocol, h); + let h = handler.clone(); + h.initialize(&NetworkContext::new(io, protocol, None, self.connections.clone())); + self.handlers.write().unwrap().insert(protocol, h); + let mut info = self.info.write().unwrap(); for v in versions { - self.info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); + info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 }); } }, - &mut NetworkIoMessage::Send { + &NetworkIoMessage::AddTimer { + ref protocol, + ref delay, + ref token, + } => { + let handler_token = { + let mut timer_counter = self.timer_counter.write().unwrap(); + let counter = timer_counter.deref_mut(); + let handler_token = *counter; + *counter += 1; + handler_token + }; + self.timers.write().unwrap().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token }); + io.register_timer(handler_token, *delay).expect("Error registering timer"); + }, + &NetworkIoMessage::Send { ref peer, ref packet_id, ref protocol, ref data, } => { - match 
self.connections.get_mut(*peer as usize) { - Some(&mut ConnectionEntry::Session(ref mut s)) => { - s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { - warn!(target: "net", "Send: Peer does not exist"); + if let Some(connection) = self.connections.read().unwrap().get(*peer).map(|c| c.clone()) { + match connection.lock().unwrap().deref_mut() { + &mut ConnectionEntry::Session(ref mut s) => { + s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { + warn!(target: "net", "Send error: {:?}", e); + }); //TODO: don't copy vector data + }, + _ => { warn!(target: "net", "Send: Peer session not exist"); } } - } + } else { warn!(target: "net", "Send: Peer does not exist"); } }, - &mut NetworkIoMessage::User(ref message) => { - for (p, h) in self.handlers.iter_mut() { - h.message(&mut NetworkContext::new(io, p, None, &mut self.connections, &mut self.timers), &message); + &NetworkIoMessage::User(ref message) => { + for (p, h) in self.handlers.read().unwrap().iter() { + h.message(&mut NetworkContext::new(io, p, None, self.connections.clone()), &message); } } } } + + fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>>) { + match stream { + FIRST_CONNECTION ... LAST_CONNECTION => { + if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Handshake(ref h) => h.register_socket(reg, event_loop).expect("Error registering socket"), + _ => warn!("Unexpected session stream registration") + } + } else { warn!("Unexpected stream registration")} + } + NODETABLE_RECEIVE => event_loop.register(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), + TCP_ACCEPT => event_loop.register(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), + _ => warn!("Unexpected stream regitration") + } + } + + fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop>>) { + match stream { + FIRST_CONNECTION ... 
LAST_CONNECTION => { + if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { + match connection.lock().unwrap().deref() { + &ConnectionEntry::Handshake(ref h) => h.update_socket(reg, event_loop).expect("Error updating socket"), + &ConnectionEntry::Session(ref s) => s.update_socket(reg, event_loop).expect("Error updating socket"), + } + } else { warn!("Unexpected stream update")} + } + NODETABLE_RECEIVE => event_loop.reregister(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), + TCP_ACCEPT => event_loop.reregister(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), + _ => warn!("Unexpected stream update") + } + } } diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index a47e88927..6facaf704 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -56,31 +56,31 @@ mod service; mod error; mod node; -pub type PeerId = host::PeerId; -pub type PacketId = host::PacketId; -pub type NetworkContext<'s,'io, Message> = host::NetworkContext<'s, 'io, Message>; -pub type NetworkService = service::NetworkService; -pub type NetworkIoMessage = host::NetworkIoMessage; +pub use network::host::PeerId; +pub use network::host::PacketId; +pub use network::host::NetworkContext; +pub use network::service::NetworkService; +pub use network::host::NetworkIoMessage; pub use network::host::NetworkIoMessage::User as UserMessage; -pub type NetworkError = error::NetworkError; +pub use network::error::NetworkError; -use io::*; +use io::TimerToken; /// Network IO protocol handler. This needs to be implemented for each new subprotocol. /// All the handler function are called from within IO event loop. /// `Message` is the type for message data. -pub trait NetworkProtocolHandler: Send where Message: Send { +pub trait NetworkProtocolHandler: Sync + Send where Message: Send + Sync + Clone { /// Initialize the handler - fn initialize(&mut self, _io: &mut NetworkContext) {} + fn initialize(&self, _io: &NetworkContext) {} /// Called when new network packet received. - fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); + fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]); /// Called when new peer is connected. Only called when peer supports the same protocol. - fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId); + fn connected(&self, io: &NetworkContext, peer: &PeerId); /// Called when a previously connected peer disconnects. - fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId); + fn disconnected(&self, io: &NetworkContext, peer: &PeerId); /// Timer function called after a timeout created with `NetworkContext::timeout`. - fn timeout(&mut self, _io: &mut NetworkContext, _timer: TimerToken) {} + fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) {} /// Called when a broadcasted message is received. The message can only be sent from a different IO handler. 
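With the `NetworkProtocolHandler` methods switched from `&mut self` to `&self` and the trait now requiring `Sync + Send`, a single handler instance can be driven concurrently by all worker threads of the multithreaded event loop, so any mutable handler state has to move behind a lock. A minimal standalone sketch of that pattern follows; `ProtocolHandler`, `CountingHandler` and the `usize` `PeerId` are simplified stand-ins for illustration, not the crate's actual types.

```rust
use std::sync::{Arc, RwLock};
use std::thread;

// Simplified stand-in for a peer identifier.
type PeerId = usize;

// With `&self` methods and a `Sync + Send` bound, one handler instance can be
// driven by every worker thread of the IO service at the same time.
trait ProtocolHandler: Sync + Send {
    fn connected(&self, peer: PeerId);
    fn read(&self, peer: PeerId, data: &[u8]);
}

// Mutable handler state moves behind a lock instead of relying on `&mut self`.
struct CountingHandler {
    peers: RwLock<Vec<PeerId>>,
}

impl ProtocolHandler for CountingHandler {
    fn connected(&self, peer: PeerId) {
        self.peers.write().unwrap().push(peer);
    }
    fn read(&self, peer: PeerId, data: &[u8]) {
        println!("peer {} sent {} bytes", peer, data.len());
    }
}

fn main() {
    let concrete = Arc::new(CountingHandler { peers: RwLock::new(Vec::new()) });
    let handler: Arc<dyn ProtocolHandler> = concrete.clone();
    let workers: Vec<_> = (0..4).map(|id| {
        let h = handler.clone();
        thread::spawn(move || {
            h.connected(id);
            h.read(id, b"ping");
        })
    }).collect();
    for w in workers {
        w.join().unwrap();
    }
    assert_eq!(concrete.peers.read().unwrap().len(), 4);
}
```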
- fn message(&mut self, _io: &mut NetworkContext, _message: &Message) {} + fn message(&self, _io: &NetworkContext, _message: &Message) {} } diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 4c333b8af..1318737a3 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -1,3 +1,4 @@ +use std::sync::*; use error::*; use network::{NetworkProtocolHandler}; use network::error::{NetworkError}; @@ -6,18 +7,18 @@ use io::*; /// IO Service with networking /// `Message` defines a notification data type. -pub struct NetworkService where Message: Send + 'static { +pub struct NetworkService where Message: Send + Sync + Clone + 'static { io_service: IoService>, host_info: String, } -impl NetworkService where Message: Send + 'static { +impl NetworkService where Message: Send + Sync + Clone + 'static { /// Starts IO event loop pub fn start() -> Result, UtilError> { let mut io_service = try!(IoService::>::start()); - let host = Box::new(Host::new()); - let host_info = host.info.client_version.clone(); - info!("NetworkService::start(): id={:?}", host.info.id()); + let host = Arc::new(Host::new()); + let host_info = host.client_version(); + info!("NetworkService::start(): id={:?}", host.client_id()); try!(io_service.register_handler(host)); Ok(NetworkService { io_service: io_service, @@ -37,9 +38,9 @@ impl NetworkService where Message: Send + 'static { } /// Regiter a new protocol handler with the event loop. - pub fn register_protocol(&mut self, handler: Box+Send>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { + pub fn register_protocol(&mut self, handler: Arc+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { try!(self.io_service.send_message(NetworkIoMessage::AddHandler { - handler: Some(handler), + handler: handler, protocol: protocol, versions: versions.to_vec(), })); @@ -55,7 +56,5 @@ impl NetworkService where Message: Send + 'static { pub fn io(&mut self) -> &mut IoService> { &mut self.io_service } - - } diff --git a/util/src/network/session.rs b/util/src/network/session.rs index 828e4b062..5722ffde4 100644 --- a/util/src/network/session.rs +++ b/util/src/network/session.rs @@ -4,6 +4,7 @@ use rlp::*; use network::connection::{EncryptedConnection, Packet}; use network::handshake::Handshake; use error::*; +use io::{IoContext}; use network::error::{NetworkError, DisconnectReason}; use network::host::*; use network::node::NodeId; @@ -84,7 +85,7 @@ const PACKET_LAST: u8 = 0x7f; impl Session { /// Create a new session out of comepleted handshake. Consumes handshake object. - pub fn new>(h: Handshake, event_loop: &mut EventLoop, host: &HostInfo) -> Result { + pub fn new(h: Handshake, _io: &IoContext, host: &HostInfo) -> Result where Message: Send + Sync + Clone { let id = h.id.clone(); let connection = try!(EncryptedConnection::new(h)); let mut session = Session { @@ -99,7 +100,6 @@ impl Session { }; try!(session.write_hello(host)); try!(session.write_ping()); - try!(session.connection.register(event_loop)); Ok(session) } @@ -109,16 +109,16 @@ impl Session { } /// Readable IO handler. Returns packet data if available. 
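Because `register_protocol` above now accepts an `Arc` rather than a `Box`, the handler can be handed to the event-loop thread through a message while the caller keeps its own reference, and the old `Option::take`/`mem::replace` dance disappears. A rough sketch of that hand-off, assuming only a plain `mpsc` channel and using simplified `Handler`/`IoMessage` stand-ins rather than the crate's real message types:

```rust
use std::collections::HashMap;
use std::sync::{mpsc, Arc};
use std::thread;

trait Handler: Send + Sync {
    fn initialize(&self);
}

struct Eth;
impl Handler for Eth {
    fn initialize(&self) { println!("eth handler initialised"); }
}

// Registration message carried to the event-loop thread; because the handler
// is an Arc rather than a Box, the caller can keep its own reference too.
enum IoMessage {
    AddHandler { protocol: &'static str, handler: Arc<dyn Handler> },
    Shutdown,
}

fn main() {
    let (tx, rx) = mpsc::channel::<IoMessage>();

    // Stand-in for the event loop: it owns the handler table outright.
    let event_loop = thread::spawn(move || {
        let mut handlers: HashMap<&'static str, Arc<dyn Handler>> = HashMap::new();
        while let Ok(msg) = rx.recv() {
            match msg {
                IoMessage::AddHandler { protocol, handler } => {
                    handler.initialize();
                    handlers.insert(protocol, handler);
                }
                IoMessage::Shutdown => break,
            }
        }
    });

    tx.send(IoMessage::AddHandler { protocol: "eth", handler: Arc::new(Eth) }).unwrap();
    tx.send(IoMessage::Shutdown).unwrap();
    event_loop.join().unwrap();
}
```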
- pub fn readable(&mut self, event_loop: &mut EventLoop, host: &HostInfo) -> Result { - match try!(self.connection.readable(event_loop)) { + pub fn readable(&mut self, io: &IoContext, host: &HostInfo) -> Result where Message: Send + Sync + Clone { + match try!(self.connection.readable(io)) { Some(data) => Ok(try!(self.read_packet(data, host))), None => Ok(SessionData::None) } } /// Writable IO handler. Sends pending packets. - pub fn writable(&mut self, event_loop: &mut EventLoop, _host: &HostInfo) -> Result<(), UtilError> { - self.connection.writable(event_loop) + pub fn writable(&mut self, io: &IoContext, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Sync + Clone { + self.connection.writable(io) } /// Checks if peer supports given capability @@ -127,8 +127,8 @@ impl Session { } /// Update registration with the event loop. Should be called at the end of the IO handler. - pub fn reregister(&mut self, event_loop: &mut EventLoop) -> Result<(), UtilError> { - self.connection.reregister(event_loop) + pub fn update_socket(&self, reg:Token, event_loop: &mut EventLoop) -> Result<(), UtilError> { + self.connection.update_socket(reg, event_loop) } /// Send a protocol packet to peer. From 4bf1c205b41b4471b018c511f3b093b43ea14476 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 23:33:52 +0100 Subject: [PATCH 02/10] DB commit queue --- src/bin/client/main.rs | 8 +-- src/{queue.rs => block_queue.rs} | 4 +- src/client.rs | 95 ++++++++++++++++++++++---------- src/lib.rs | 3 +- src/service.rs | 26 ++++++--- src/sync/io.rs | 10 ++-- src/sync/mod.rs | 33 ++++------- util/src/io/service.rs | 5 ++ util/src/journaldb.rs | 10 ++++ util/src/network/host.rs | 8 +-- 10 files changed, 127 insertions(+), 75 deletions(-) rename src/{queue.rs => block_queue.rs} (98%) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index e49dc2dbc..3ebf4e080 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -10,10 +10,9 @@ use log::{LogLevelFilter}; use env_logger::LogBuilder; use util::*; use ethcore::client::*; -use ethcore::service::ClientService; +use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethcore::blockchain::CacheSize; -use ethcore::sync::*; fn setup_log() { let mut builder = LogBuilder::new(); @@ -90,7 +89,7 @@ impl Informant { const INFO_TIMER: TimerToken = 0; struct ClientIoHandler { - client: Arc>, + client: Arc, info: Informant, } @@ -101,8 +100,7 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if INFO_TIMER == timer { - let client = self.client.read().unwrap(); - self.info.tick(client.deref()); + self.info.tick(&self.client); } } } diff --git a/src/queue.rs b/src/block_queue.rs similarity index 98% rename from src/queue.rs rename to src/block_queue.rs index 7c74b56d7..0bb184a1b 100644 --- a/src/queue.rs +++ b/src/block_queue.rs @@ -1,12 +1,14 @@ +//! A queue of blocks. Sits between network or other I/O and the BlockChain. +//! Sorts them ready for blockchain insertion. use std::thread::{JoinHandle, self}; use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; use util::*; use verification::*; use error::*; use engine::Engine; -use sync::*; use views::*; use header::*; +use service::*; /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. 
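The renamed block queue sits between I/O and the chain: producers push raw blocks, a pool of verifier threads waits on a condition variable, and the client drains whatever has been verified. The sketch below mirrors that idea, not the actual `BlockQueue` code; `WorkQueue` and the `u64` items are illustrative only.

```rust
use std::collections::VecDeque;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// Raw items go in, worker threads "verify" them in the background, and
// verified items accumulate until someone drains them.
struct WorkQueue {
    unverified: Mutex<VecDeque<u64>>,
    verified: Mutex<Vec<u64>>,
    more_work: Condvar,
    closing: AtomicBool,
}

fn verifier_loop(q: Arc<WorkQueue>) {
    loop {
        let item = {
            let mut unverified = q.unverified.lock().unwrap();
            while unverified.is_empty() && !q.closing.load(Ordering::Relaxed) {
                unverified = q.more_work.wait(unverified).unwrap();
            }
            if q.closing.load(Ordering::Relaxed) {
                return;
            }
            unverified.pop_front().unwrap()
        };
        q.verified.lock().unwrap().push(item); // real code would verify here
    }
}

fn main() {
    let q = Arc::new(WorkQueue {
        unverified: Mutex::new(VecDeque::new()),
        verified: Mutex::new(Vec::new()),
        more_work: Condvar::new(),
        closing: AtomicBool::new(false),
    });
    let workers: Vec<_> = (0..2).map(|_| {
        let q = q.clone();
        thread::spawn(move || verifier_loop(q))
    }).collect();

    for n in 0..10u64 {
        q.unverified.lock().unwrap().push_back(n);
        q.more_work.notify_one();
    }
    // Wait for the workers to finish, then shut them down cleanly.
    while q.verified.lock().unwrap().len() < 10 {
        thread::yield_now();
    }
    {
        // Set the flag while holding the queue lock so no wake-up is lost.
        let _guard = q.unverified.lock().unwrap();
        q.closing.store(true, Ordering::Relaxed);
    }
    q.more_work.notify_all();
    for w in workers {
        w.join().unwrap();
    }
    assert_eq!(q.verified.lock().unwrap().len(), 10);
}
```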
diff --git a/src/client.rs b/src/client.rs index 226a022ca..cf8b0fd7c 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,8 +6,9 @@ use error::*; use header::BlockNumber; use spec::Spec; use engine::Engine; -use queue::BlockQueue; -use sync::NetSyncMessage; +use block_queue::BlockQueue; +use db_queue::{DbQueue, StateDBCommit}; +use service::NetSyncMessage; use env_info::LastHashes; use verification::*; use block::*; @@ -95,13 +96,13 @@ pub trait BlockChainClient : Sync + Send { fn block_receipts(&self, hash: &H256) -> Option; /// Import a block into the blockchain. - fn import_block(&mut self, bytes: Bytes) -> ImportResult; + fn import_block(&self, bytes: Bytes) -> ImportResult; /// Get block queue information. fn queue_status(&self) -> BlockQueueStatus; /// Clear block queue and abort all import activity. - fn clear_queue(&mut self); + fn clear_queue(&self); /// Get blockchain information. fn chain_info(&self) -> BlockChainInfo; @@ -132,19 +133,24 @@ pub struct Client { chain: Arc>, engine: Arc>, state_db: JournalDB, - queue: BlockQueue, - report: ClientReport, + block_queue: RwLock, + db_queue: RwLock, + report: RwLock, + uncommited_states: RwLock>, + import_lock: Mutex<()> } const HISTORY: u64 = 1000; impl Client { /// Create a new client with given spec and DB path. - pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result { + pub fn new(spec: Spec, path: &Path, message_channel: IoChannel ) -> Result, Error> { let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path))); let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); + opts.set_disable_data_sync(true); + opts.set_disable_auto_compactions(true); /*opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); @@ -164,37 +170,46 @@ impl Client { let mut state_path = path.to_path_buf(); state_path.push("state"); - let db = DB::open(&opts, state_path.to_str().unwrap()).unwrap(); - let mut state_db = JournalDB::new(db); + let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap()); let engine = Arc::new(try!(spec.to_engine())); - if engine.spec().ensure_db_good(&mut state_db) { - state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + { + let mut state_db = JournalDB::new_with_arc(db.clone()); + if engine.spec().ensure_db_good(&mut state_db) { + state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB"); + } } + let state_db = JournalDB::new_with_arc(db); -// chain.write().unwrap().ensure_good(&state_db); - - Ok(Client { + let client = Arc::new(Client { chain: chain, engine: engine.clone(), state_db: state_db, - queue: BlockQueue::new(engine, message_channel), - report: Default::default(), - }) + block_queue: RwLock::new(BlockQueue::new(engine, message_channel)), + db_queue: RwLock::new(DbQueue::new()), + report: RwLock::new(Default::default()), + uncommited_states: RwLock::new(HashMap::new()), + import_lock: Mutex::new(()), + }); + + let weak = Arc::downgrade(&client); + client.db_queue.read().unwrap().start(weak); + Ok(client) } /// This is triggered by a message coming from a block queue when the block is ready for insertion - pub fn import_verified_blocks(&mut self) { + pub fn import_verified_blocks(&self, _io: &IoChannel) { let mut bad = HashSet::new(); - let blocks = self.queue.drain(128); + let _import_lock = self.import_lock.lock(); + let blocks = 
self.block_queue.write().unwrap().drain(128); if blocks.is_empty() { return; } for block in blocks { if bad.contains(&block.header.parent_hash) { - self.queue.mark_as_bad(&block.header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&block.header.hash()); bad.insert(block.header.hash()); continue; } @@ -202,7 +217,7 @@ impl Client { let header = &block.header; if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) { warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); return; }; @@ -210,7 +225,7 @@ impl Client { Some(p) => p, None => { warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); bad.insert(block.header.hash()); return; }, @@ -228,18 +243,23 @@ impl Client { } } - let result = match enact_verified(&block, self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes) { + let db = match self.uncommited_states.read().unwrap().get(&header.parent_hash) { + Some(db) => db.clone(), + None => self.state_db.clone(), + }; + + let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); bad.insert(block.header.hash()); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; } }; if let Err(e) = verify_block_final(&header, result.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); - self.queue.mark_as_bad(&header.hash()); + self.block_queue.write().unwrap().mark_as_bad(&header.hash()); return; } @@ -252,11 +272,25 @@ impl Client { return; } } - self.report.accrue_block(&block); + /* + let db = result.drain(); + self.uncommited_states.write().unwrap().insert(header.hash(), db.clone()); + self.db_queue.write().unwrap().queue(StateDBCommit { + now: header.number(), + hash: header.hash().clone(), + end: ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap())), + db: db, + });*/ + self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } } + /// Clear cached state overlay + pub fn clear_state(&self, hash: &H256) { + self.uncommited_states.write().unwrap().remove(hash); + } + /// Get info on the cache. pub fn cache_info(&self) -> CacheSize { self.chain.read().unwrap().cache_size() @@ -264,7 +298,7 @@ impl Client { /// Get the report. pub fn report(&self) -> ClientReport { - self.report.clone() + self.report.read().unwrap().clone() } /// Tick the client. 
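Since `import_verified_blocks` now takes `&self`, the exclusivity that `&mut self` used to give the client has to be recreated explicitly: the queue and report live behind `RwLock`s and a dedicated `Mutex<()>` serialises the import pass itself. A compressed sketch of that shape, with a toy `Client` standing in for the real one:

```rust
use std::sync::{Arc, Mutex, RwLock};
use std::thread;

// Stand-in for the client: shared via Arc, mutability pushed into the fields.
struct Client {
    queue: RwLock<Vec<u64>>,    // pending block numbers
    imported: RwLock<Vec<u64>>, // the "chain"
    import_lock: Mutex<()>,     // serialises the import pass itself
}

impl Client {
    fn import_verified_blocks(&self) {
        // Only one import pass at a time, even though &self is shared.
        let _guard = self.import_lock.lock().unwrap();
        let drained: Vec<u64> = self.queue.write().unwrap().drain(..).collect();
        self.imported.write().unwrap().extend(drained);
    }
}

fn main() {
    let client = Arc::new(Client {
        queue: RwLock::new((0..100).collect()),
        imported: RwLock::new(Vec::new()),
        import_lock: Mutex::new(()),
    });
    let handles: Vec<_> = (0..4).map(|_| {
        let c = client.clone();
        thread::spawn(move || c.import_verified_blocks())
    }).collect();
    for h in handles {
        h.join().unwrap();
    }
    assert_eq!(client.imported.read().unwrap().len(), 100);
}
```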
@@ -327,12 +361,12 @@ impl BlockChainClient for Client { unimplemented!(); } - fn import_block(&mut self, bytes: Bytes) -> ImportResult { + fn import_block(&self, bytes: Bytes) -> ImportResult { let header = BlockView::new(&bytes).header(); if self.chain.read().unwrap().is_known(&header.hash()) { return Err(ImportError::AlreadyInChain); } - self.queue.import_block(bytes) + self.block_queue.write().unwrap().import_block(bytes) } fn queue_status(&self) -> BlockQueueStatus { @@ -341,7 +375,8 @@ impl BlockChainClient for Client { } } - fn clear_queue(&mut self) { + fn clear_queue(&self) { + self.block_queue.write().unwrap().clear(); } fn chain_info(&self) -> BlockChainInfo { diff --git a/src/lib.rs b/src/lib.rs index a5b6c3dae..58d84764a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -150,5 +150,6 @@ pub mod block; /// TODO [arkpar] Please document me pub mod verification; /// TODO [debris] Please document me -pub mod queue; +pub mod db_queue; +pub mod block_queue; pub mod ethereum; diff --git a/src/service.rs b/src/service.rs index b97c1cb69..4034ce841 100644 --- a/src/service.rs +++ b/src/service.rs @@ -5,10 +5,22 @@ use error::*; use std::env; use client::Client; +/// Message type for external and internal events +#[derive(Clone)] +pub enum SyncMessage { + /// New block has been imported into the blockchain + NewChainBlock(Bytes), //TODO: use Cow + /// A block is ready + BlockVerified, +} + +/// TODO [arkpar] Please document me +pub type NetSyncMessage = NetworkIoMessage; + /// Client service setup. Creates and registers client and network services with the IO subsystem. pub struct ClientService { net_service: NetworkService, - client: Arc>, + client: Arc, } impl ClientService { @@ -20,7 +32,7 @@ impl ClientService { let mut dir = env::home_dir().unwrap(); dir.push(".parity"); dir.push(H64::from(spec.genesis_header().hash()).hex()); - let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel())))); + let client = try!(Client::new(spec, &dir, net_service.io().channel())); EthSync::register(&mut net_service, client.clone()); let client_io = Arc::new(ClientIoHandler { client: client.clone() @@ -39,14 +51,14 @@ impl ClientService { } /// TODO [arkpar] Please document me - pub fn client(&self) -> Arc> { + pub fn client(&self) -> Arc { self.client.clone() } } /// IO interface for the Client handler struct ClientIoHandler { - client: Arc> + client: Arc } const CLIENT_TICK_TIMER: TimerToken = 0; @@ -59,16 +71,16 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if timer == CLIENT_TICK_TIMER { - self.client.read().unwrap().tick(); + self.client.tick(); } } - fn message(&self, _io: &IoContext, net_message: &NetSyncMessage) { + fn message(&self, io: &IoContext, net_message: &NetSyncMessage) { match net_message { &UserMessage(ref message) => { match message { &SyncMessage::BlockVerified => { - self.client.write().unwrap().import_verified_blocks(); + self.client.import_verified_blocks(&io.channel()); }, _ => {}, // ignore other messages } diff --git a/src/sync/io.rs b/src/sync/io.rs index f49591a9f..754e3add5 100644 --- a/src/sync/io.rs +++ b/src/sync/io.rs @@ -1,7 +1,7 @@ use client::BlockChainClient; use util::{NetworkContext, PeerId, PacketId,}; use util::error::UtilError; -use sync::SyncMessage; +use service::SyncMessage; /// IO interface for the syning handler. /// Provides peer connection management and an interface to the blockchain client. @@ -14,7 +14,7 @@ pub trait SyncIo { /// Send a packet to a peer. 
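In the service handler above, `BlockVerified` now arrives over the IO channel and triggers `import_verified_blocks(&io.channel())` instead of a direct call on a write-locked client. A tiny sketch of that notification flow, using a plain `mpsc` channel and a simplified `SyncMessage` in place of the real channel and message types:

```rust
use std::sync::mpsc;
use std::thread;

// Simplified stand-in for the service-level message enum.
#[derive(Clone, Debug)]
enum SyncMessage {
    NewChainBlock(Vec<u8>),
    BlockVerified,
}

fn main() {
    let (tx, rx) = mpsc::channel::<SyncMessage>();

    // A verifier thread announces finished work instead of calling the client directly.
    let verifier = thread::spawn(move || {
        tx.send(SyncMessage::NewChainBlock(vec![0xf9])).unwrap();
        tx.send(SyncMessage::BlockVerified).unwrap();
    });

    // The handler side reacts only to the messages it cares about.
    for msg in rx.iter().take(2) {
        match msg {
            SyncMessage::BlockVerified => println!("draining verified blocks into the chain"),
            other => println!("ignoring {:?}", other),
        }
    }
    verifier.join().unwrap();
}
```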
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec) -> Result<(), UtilError>; /// Get the blockchain - fn chain<'s>(&'s mut self) -> &'s mut BlockChainClient; + fn chain<'s>(&'s self) -> &'s BlockChainClient; /// Returns peer client identifier string fn peer_info(&self, peer_id: PeerId) -> String { peer_id.to_string() @@ -24,12 +24,12 @@ pub trait SyncIo { /// Wraps `NetworkContext` and the blockchain client pub struct NetSyncIo<'s, 'h> where 'h: 's { network: &'s NetworkContext<'h, SyncMessage>, - chain: &'s mut BlockChainClient + chain: &'s BlockChainClient } impl<'s, 'h> NetSyncIo<'s, 'h> { /// Creates a new instance from the `NetworkContext` and the blockchain client reference. - pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s, 'h> { + pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> { NetSyncIo { network: network, chain: chain, @@ -50,7 +50,7 @@ impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> { self.network.send(peer_id, packet_id, data) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain<'a>(&'a self) -> &'a BlockChainClient { self.chain } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index 9bb18a1c0..c87dee569 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -25,9 +25,10 @@ use std::ops::*; use std::sync::*; use client::Client; -use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage}; +use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use sync::chain::ChainSync; -use util::{Bytes, TimerToken}; +use util::TimerToken; +use service::SyncMessage; use sync::io::NetSyncIo; mod chain; @@ -39,22 +40,10 @@ mod tests; const SYNC_TIMER: usize = 0; -/// Message type for external events -#[derive(Clone)] -pub enum SyncMessage { - /// New block has been imported into the blockchain - NewChainBlock(Bytes), //TODO: use Cow - /// A block is ready - BlockVerified, -} - -/// TODO [arkpar] Please document me -pub type NetSyncMessage = NetworkIoMessage; - /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
TODO: this should evetually become an IPC endpoint - chain: Arc>, + chain: Arc, /// Sync strategy sync: RwLock } @@ -63,7 +52,7 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc>) { + pub fn register(service: &mut NetworkService, chain: Arc) { let sync = Arc::new(EthSync { chain: chain, sync: RwLock::new(ChainSync::new()), @@ -78,12 +67,12 @@ impl EthSync { /// Stop sync pub fn stop(&mut self, io: &mut NetworkContext) { - self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref())); } /// Restart sync pub fn restart(&mut self, io: &mut NetworkContext) { - self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref())); } } @@ -93,20 +82,20 @@ impl NetworkProtocolHandler for EthSync { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { - self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data); + self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.deref()) , *peer, packet_id, data); } fn connected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { - self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer); + self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } fn timeout(&self, io: &NetworkContext, timer: TimerToken) { if timer == SYNC_TIMER { - self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut())); + self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); } } } diff --git a/util/src/io/service.rs b/util/src/io/service.rs index a229e4022..fab0f113d 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -102,6 +102,11 @@ impl IoContext where Message: Send + Clone + 'static { pub fn message(&self, message: Message) { self.channel.send(message).expect("Error seding message"); } + + /// Get message channel + pub fn channel(&self) -> IoChannel { + self.channel.clone() + } } #[derive(Clone)] diff --git a/util/src/journaldb.rs b/util/src/journaldb.rs index ada9c0d2b..9115c4362 100644 --- a/util/src/journaldb.rs +++ b/util/src/journaldb.rs @@ -34,6 +34,16 @@ impl JournalDB { } } + /// Create a new instance given a shared `backing` database. + pub fn new_with_arc(backing: Arc) -> JournalDB { + JournalDB { + forward: OverlayDB::new_with_arc(backing.clone()), + backing: backing, + inserts: vec![], + removes: vec![], + } + } + /// Create a new instance with an anonymous temporary database. 
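`new_with_arc` lets a short-lived `JournalDB`, used above only to seed the genesis state, and the long-lived one share a single backing database handle. The sketch below captures just the ownership shape, with an in-memory `Backing` standing in for the RocksDB handle; none of these names are the crate's real API.

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Stand-in for the shared backing store; the real code holds a database handle here.
struct Backing {
    data: Mutex<HashMap<Vec<u8>, Vec<u8>>>,
}

// Two overlay views over one shared backing, mirroring the `new_with_arc` idea.
struct Journal {
    backing: Arc<Backing>,
    pending: Vec<(Vec<u8>, Vec<u8>)>,
}

impl Journal {
    fn new_with_arc(backing: Arc<Backing>) -> Journal {
        Journal { backing: backing, pending: Vec::new() }
    }
    fn insert(&mut self, key: &[u8], value: &[u8]) {
        self.pending.push((key.to_vec(), value.to_vec()));
    }
    fn commit(&mut self) {
        let mut data = self.backing.data.lock().unwrap();
        for (k, v) in self.pending.drain(..) {
            data.insert(k, v);
        }
    }
}

fn main() {
    let backing = Arc::new(Backing { data: Mutex::new(HashMap::new()) });
    {
        // Short-lived instance used once, e.g. to seed the genesis state.
        let mut seed = Journal::new_with_arc(backing.clone());
        seed.insert(b"genesis", b"state");
        seed.commit();
    }
    // The long-lived instance sees everything the first one committed.
    let main_db = Journal::new_with_arc(backing.clone());
    assert!(main_db.backing.data.lock().unwrap().contains_key(&b"genesis".to_vec()));
}
```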
pub fn new_temp() -> JournalDB { let mut dir = env::temp_dir(); diff --git a/util/src/network/host.rs b/util/src/network/host.rs index f83cb1908..b9b9496c4 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -675,13 +675,13 @@ impl IoHandler> for Host where Messa if let Some(connection) = self.connections.read().unwrap().get(stream).map(|c| c.clone()) { match connection.lock().unwrap().deref() { &ConnectionEntry::Handshake(ref h) => h.register_socket(reg, event_loop).expect("Error registering socket"), - _ => warn!("Unexpected session stream registration") + &ConnectionEntry::Session(_) => warn!("Unexpected session stream registration") } - } else { warn!("Unexpected stream registration")} + } else {} // expired } NODETABLE_RECEIVE => event_loop.register(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), TCP_ACCEPT => event_loop.register(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error registering stream"), - _ => warn!("Unexpected stream regitration") + _ => warn!("Unexpected stream registration") } } @@ -693,7 +693,7 @@ impl IoHandler> for Host where Messa &ConnectionEntry::Handshake(ref h) => h.update_socket(reg, event_loop).expect("Error updating socket"), &ConnectionEntry::Session(ref s) => s.update_socket(reg, event_loop).expect("Error updating socket"), } - } else { warn!("Unexpected stream update")} + } else {} // expired } NODETABLE_RECEIVE => event_loop.reregister(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), TCP_ACCEPT => event_loop.reregister(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"), From 67286901091f75dcfaf5b5de0030d215b05ccec6 Mon Sep 17 00:00:00 2001 From: arkpar Date: Thu, 21 Jan 2016 23:34:06 +0100 Subject: [PATCH 03/10] DB commit queue --- src/db_queue.rs | 111 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 src/db_queue.rs diff --git a/src/db_queue.rs b/src/db_queue.rs new file mode 100644 index 000000000..242fd9cc4 --- /dev/null +++ b/src/db_queue.rs @@ -0,0 +1,111 @@ +//! A queue of state changes that are written to database in background. +use std::thread::{JoinHandle, self}; +use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; +use util::*; +use engine::Engine; +use client::Client; + +/// State DB commit params +pub struct StateDBCommit { + /// Database to commit + pub db: JournalDB, + /// Starting block number + pub now: u64, + /// Block ahash + pub hash: H256, + /// End block number + hash + pub end: Option<(u64, H256)>, +} + +/// A queue of state changes that are written to database in background. +pub struct DbQueue { + more_to_write: Arc, + queue: Arc>>, + writer: Mutex>>, + deleting: Arc, +} + +impl DbQueue { + /// Creates a new queue instance. 
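The writer thread started by the queue below is handed a `Weak<Client>` so it can call back into the client without keeping it alive, which is what lets the whole service be torn down. A compact sketch of that ownership arrangement, with a toy `Client` and illustrative names only:

```rust
use std::sync::{Arc, Mutex, Weak};
use std::thread;

struct Client {
    committed: Mutex<Vec<u64>>,
}

// The worker holds only a Weak reference; the Client can still be dropped,
// and the worker stops instead of keeping its owner alive.
fn start_writer(client: Weak<Client>, blocks: Vec<u64>) -> thread::JoinHandle<()> {
    thread::Builder::new()
        .name("DB Writer".to_string())
        .spawn(move || {
            for block in blocks {
                match client.upgrade() {
                    Some(c) => c.committed.lock().unwrap().push(block),
                    None => return, // owner gone, stop quietly
                }
            }
        })
        .expect("Error creating db writer thread")
}

fn main() {
    let client = Arc::new(Client { committed: Mutex::new(Vec::new()) });
    let writer = start_writer(Arc::downgrade(&client), vec![1, 2, 3]);
    writer.join().unwrap();
    assert_eq!(*client.committed.lock().unwrap(), vec![1, 2, 3]);
}
```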
+ pub fn new() -> DbQueue { + let queue = Arc::new(Mutex::new(VecDeque::new())); + let more_to_write = Arc::new(Condvar::new()); + let deleting = Arc::new(AtomicBool::new(false)); + + DbQueue { + more_to_write: more_to_write.clone(), + queue: queue.clone(), + writer: Mutex::new(None), + deleting: deleting.clone(), + } + } + + /// Start processing the queue + pub fn start(&self, client: Weak) { + let writer = { + let queue = self.queue.clone(); + let client = client.clone(); + let more_to_write = self.more_to_write.clone(); + let deleting = self.deleting.clone(); + thread::Builder::new().name("DB Writer".to_string()).spawn(move || DbQueue::writer_loop(client, queue, more_to_write, deleting)).expect("Error creating db writer thread") + }; + mem::replace(self.writer.lock().unwrap().deref_mut(), Some(writer)); + } + + fn writer_loop(client: Weak, queue: Arc>>, wait: Arc, deleting: Arc) { + while !deleting.load(AtomicOrdering::Relaxed) { + let mut batch = { + let mut locked = queue.lock().unwrap(); + while locked.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { + locked = wait.wait(locked).unwrap(); + } + + if deleting.load(AtomicOrdering::Relaxed) { + return; + } + mem::replace(locked.deref_mut(), VecDeque::new()) + }; + + for mut state in batch.drain(..) { //TODO: make this a single write transaction + match state.db.commit(state.now, &state.hash, state.end.clone()) { + Ok(_) => (), + Err(e) => { + warn!(target: "client", "State DB commit failed: {:?}", e); + } + } + client.upgrade().unwrap().clear_state(&state.hash); + } + + } + } + + /// Add a state to the queue + pub fn queue(&self, state: StateDBCommit) { + let mut queue = self.queue.lock().unwrap(); + queue.push_back(state); + self.more_to_write.notify_all(); + } +} + +impl Drop for DbQueue { + fn drop(&mut self) { + self.deleting.store(true, AtomicOrdering::Relaxed); + self.more_to_write.notify_all(); + mem::replace(self.writer.lock().unwrap().deref_mut(), None).unwrap().join().unwrap(); + } +} + +#[cfg(test)] +mod tests { + use util::*; + use spec::*; + use queue::*; + + #[test] + fn test_block_queue() { + // TODO better test + let spec = Spec::new_test(); + let engine = spec.to_engine().unwrap(); + let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected()); + } +} From ccf1cc4d54b3df779f28c9c1d94d7f99813dafab Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 00:11:19 +0100 Subject: [PATCH 04/10] Removed obsolete code and added documentation --- Cargo.toml | 1 + src/bin/client/main.rs | 19 +++++++++++-------- util/src/io/service.rs | 10 +++++++--- util/src/network/error.rs | 6 ++++++ util/src/network/host.rs | 31 +++++++------------------------ util/src/network/service.rs | 13 +------------ 6 files changed, 33 insertions(+), 47 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 04c4bf956..ee04e6fdf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,6 +20,7 @@ time = "0.1" evmjit = { path = "rust-evmjit", optional = true } ethash = { path = "ethash" } num_cpus = "0.2" +ctrlc = "1.0" [features] jit = ["evmjit"] diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 3ebf4e080..7d673f8d3 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -3,11 +3,12 @@ extern crate ethcore; extern crate rustc_serialize; extern crate log; extern crate env_logger; +extern crate ctrlc; -use std::io::stdin; use std::env; use log::{LogLevelFilter}; use env_logger::LogBuilder; +use ctrlc::CtrlC; use util::*; use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; @@ -31,13 +32,15 
@@ fn main() { let mut service = ClientService::start(spec).unwrap(); let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); - loop { - let mut cmd = String::new(); - stdin().read_line(&mut cmd).unwrap(); - if cmd == "quit\n" || cmd == "exit\n" || cmd == "q\n" { - break; - } - } + + let exit = Arc::new(Condvar::new()); + let e = exit.clone(); + CtrlC::set_handler(move || { + e.notify_all(); + }); + + let mutex = Mutex::new(()); + let _ = exit.wait(mutex.lock().unwrap()).unwrap(); } struct Informant { diff --git a/util/src/io/service.rs b/util/src/io/service.rs index fab0f113d..7df064794 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -10,11 +10,14 @@ use arrayvec::*; use crossbeam::sync::chase_lev; use io::worker::{Worker, Work, WorkType}; +/// Timer ID pub type TimerToken = usize; +/// Timer ID pub type StreamToken = usize; +/// IO Hadndler ID pub type HandlerId = usize; -// Tokens +/// Maximum number of tokens a handler can use pub const TOKENS_PER_HANDLER: usize = 16384; /// Messages used to communicate with the event loop from other threads. @@ -49,8 +52,8 @@ pub enum IoMessage where Message: Send + Clone + Sized { /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem. pub struct IoContext where Message: Send + Clone + 'static { - pub channel: IoChannel, - pub handler: HandlerId, + channel: IoChannel, + handler: HandlerId, } impl IoContext where Message: Send + Clone + 'static { @@ -249,6 +252,7 @@ impl IoChannel where Message: Send + Clone { Ok(()) } + /// Send low level io message pub fn send_io(&self, message: IoMessage) -> Result<(), IoError> { if let Some(ref channel) = self.channel { try!(channel.send(message)) diff --git a/util/src/network/error.rs b/util/src/network/error.rs index d255cb043..b9dfdc892 100644 --- a/util/src/network/error.rs +++ b/util/src/network/error.rs @@ -19,11 +19,17 @@ pub enum DisconnectReason } #[derive(Debug)] +/// Network error. pub enum NetworkError { + /// Authentication error. Auth, + /// Unrecognised protocol. BadProtocol, + /// Peer not found. PeerNotFound, + /// Peer is diconnected. Disconnect(DisconnectReason), + /// Socket IO error. Io(IoError), } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index b9b9496c4..55ade8090 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -69,22 +69,22 @@ pub type ProtocolId = &'static str; pub enum NetworkIoMessage where Message: Send + Sync + Clone { /// Register a new protocol handler. AddHandler { + /// Handler shared instance. handler: Arc + Sync>, + /// Protocol Id. protocol: ProtocolId, + /// Supported protocol versions. versions: Vec, }, + /// Register a new protocol timer AddTimer { + /// Protocol Id. protocol: ProtocolId, + /// Timer token. token: TimerToken, + /// Timer delay in milliseconds. delay: u64, }, - /// Send data over the network. 
// TODO: remove this - Send { - peer: PeerId, - packet_id: PacketId, - protocol: ProtocolId, - data: Vec, - }, /// User message User(Message), } @@ -644,23 +644,6 @@ impl IoHandler> for Host where Messa self.timers.write().unwrap().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token }); io.register_timer(handler_token, *delay).expect("Error registering timer"); }, - &NetworkIoMessage::Send { - ref peer, - ref packet_id, - ref protocol, - ref data, - } => { - if let Some(connection) = self.connections.read().unwrap().get(*peer).map(|c| c.clone()) { - match connection.lock().unwrap().deref_mut() { - &mut ConnectionEntry::Session(ref mut s) => { - s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| { - warn!(target: "net", "Send error: {:?}", e); - }); //TODO: don't copy vector data - }, - _ => { warn!(target: "net", "Send: Peer session not exist"); } - } - } else { warn!(target: "net", "Send: Peer does not exist"); } - }, &NetworkIoMessage::User(ref message) => { for (p, h) in self.handlers.read().unwrap().iter() { h.message(&mut NetworkContext::new(io, p, None, self.connections.clone()), &message); diff --git a/util/src/network/service.rs b/util/src/network/service.rs index 1318737a3..67d2b55e2 100644 --- a/util/src/network/service.rs +++ b/util/src/network/service.rs @@ -2,7 +2,7 @@ use std::sync::*; use error::*; use network::{NetworkProtocolHandler}; use network::error::{NetworkError}; -use network::host::{Host, NetworkIoMessage, PeerId, PacketId, ProtocolId}; +use network::host::{Host, NetworkIoMessage, ProtocolId}; use io::*; /// IO Service with networking @@ -26,17 +26,6 @@ impl NetworkService where Message: Send + Sync + Clone + 'stat }) } - /// Send a message over the network. Normaly `HostIo::send` should be used. This can be used from non-io threads. - pub fn send(&mut self, peer: &PeerId, packet_id: PacketId, protocol: ProtocolId, data: &[u8]) -> Result<(), NetworkError> { - try!(self.io_service.send_message(NetworkIoMessage::Send { - peer: *peer, - packet_id: packet_id, - protocol: protocol, - data: data.to_vec() - })); - Ok(()) - } - /// Regiter a new protocol handler with the event loop. 
pub fn register_protocol(&mut self, handler: Arc+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> { try!(self.io_service.send_message(NetworkIoMessage::AddHandler { From d431854421d024da69c22ac3bc1ed0725f79aaf2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 00:47:45 +0100 Subject: [PATCH 05/10] Fixed tests --- util/src/io/mod.rs | 23 +++++++++------- util/src/network/mod.rs | 58 +++++++++++++++++++++++++++++++++++------ 2 files changed, 63 insertions(+), 18 deletions(-) diff --git a/util/src/io/mod.rs b/util/src/io/mod.rs index 4406c751f..48c02f6ee 100644 --- a/util/src/io/mod.rs +++ b/util/src/io/mod.rs @@ -8,27 +8,28 @@ /// /// struct MyHandler; /// +/// #[derive(Clone)] /// struct MyMessage { /// data: u32 /// } /// /// impl IoHandler for MyHandler { -/// fn initialize(&mut self, io: &mut IoContext) { -/// io.register_timer(1000).unwrap(); +/// fn initialize(&self, io: &IoContext) { +/// io.register_timer(0, 1000).unwrap(); /// } /// -/// fn timeout(&mut self, _io: &mut IoContext, timer: TimerToken) { +/// fn timeout(&self, _io: &IoContext, timer: TimerToken) { /// println!("Timeout {}", timer); /// } /// -/// fn message(&mut self, _io: &mut IoContext, message: &mut MyMessage) { +/// fn message(&self, _io: &IoContext, message: &MyMessage) { /// println!("Message {}", message.data); /// } /// } /// /// fn main () { /// let mut service = IoService::::start().expect("Error creating network service"); -/// service.register_handler(Box::new(MyHandler)).unwrap(); +/// service.register_handler(Arc::new(MyHandler)).unwrap(); /// /// // Wait for quit condition /// // ... @@ -93,24 +94,26 @@ pub use io::service::TOKENS_PER_HANDLER; #[cfg(test)] mod tests { + use std::sync::Arc; use io::*; struct MyHandler; + #[derive(Clone)] struct MyMessage { data: u32 } impl IoHandler for MyHandler { - fn initialize(&mut self, io: &mut IoContext) { - io.register_timer(1000).unwrap(); + fn initialize(&self, io: &IoContext) { + io.register_timer(0, 1000).unwrap(); } - fn timeout(&mut self, _io: &mut IoContext, timer: TimerToken) { + fn timeout(&self, _io: &IoContext, timer: TimerToken) { println!("Timeout {}", timer); } - fn message(&mut self, _io: &mut IoContext, message: &mut MyMessage) { + fn message(&self, _io: &IoContext, message: &MyMessage) { println!("Message {}", message.data); } } @@ -118,7 +121,7 @@ mod tests { #[test] fn test_service_register_handler () { let mut service = IoService::::start().expect("Error creating network service"); - service.register_handler(Box::new(MyHandler)).unwrap(); + service.register_handler(Arc::new(MyHandler)).unwrap(); } } diff --git a/util/src/network/mod.rs b/util/src/network/mod.rs index 32296d476..0c734442d 100644 --- a/util/src/network/mod.rs +++ b/util/src/network/mod.rs @@ -8,39 +8,40 @@ /// /// struct MyHandler; /// +/// #[derive(Clone)] /// struct MyMessage { /// data: u32 /// } /// /// impl NetworkProtocolHandler for MyHandler { -/// fn initialize(&mut self, io: &mut NetworkContext) { -/// io.register_timer(1000); +/// fn initialize(&self, io: &NetworkContext) { +/// io.register_timer(0, 1000); /// } /// -/// fn read(&mut self, io: &mut NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { +/// fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { /// println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); /// } /// -/// fn connected(&mut self, io: &mut NetworkContext, peer: &PeerId) { +/// fn connected(&self, io: &NetworkContext, peer: &PeerId) { /// 
println!("Connected {}", peer); /// } /// -/// fn disconnected(&mut self, io: &mut NetworkContext, peer: &PeerId) { +/// fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { /// println!("Disconnected {}", peer); /// } /// -/// fn timeout(&mut self, io: &mut NetworkContext, timer: TimerToken) { +/// fn timeout(&self, io: &NetworkContext, timer: TimerToken) { /// println!("Timeout {}", timer); /// } /// -/// fn message(&mut self, io: &mut NetworkContext, message: &MyMessage) { +/// fn message(&self, io: &NetworkContext, message: &MyMessage) { /// println!("Message {}", message.data); /// } /// } /// /// fn main () { /// let mut service = NetworkService::::start().expect("Error creating network service"); -/// service.register_protocol(Box::new(MyHandler), "myproto", &[1u8]); +/// service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]); /// /// // Wait for quit condition /// // ... @@ -91,3 +92,44 @@ pub trait NetworkProtocolHandler: Sync + Send where Message: Send + Syn fn message(&self, _io: &NetworkContext, _message: &Message) {} } + +#[test] +fn test_net_service() { + + use std::sync::Arc; + struct MyHandler; + + #[derive(Clone)] + struct MyMessage { + data: u32 + } + + impl NetworkProtocolHandler for MyHandler { + fn initialize(&self, io: &NetworkContext) { + io.register_timer(0, 1000).unwrap(); + } + + fn read(&self, _io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { + println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer); + } + + fn connected(&self, _io: &NetworkContext, peer: &PeerId) { + println!("Connected {}", peer); + } + + fn disconnected(&self, _io: &NetworkContext, peer: &PeerId) { + println!("Disconnected {}", peer); + } + + fn timeout(&self, _io: &NetworkContext, timer: TimerToken) { + println!("Timeout {}", timer); + } + + fn message(&self, _io: &NetworkContext, message: &MyMessage) { + println!("Message {}", message.data); + } + } + + let mut service = NetworkService::::start().expect("Error creating network service"); + service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]).unwrap(); +} From 81bb86d0ed1d379d9879138139339ca362cbf20d Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 01:27:51 +0100 Subject: [PATCH 06/10] Removed obsolete warnings --- util/src/io/service.rs | 2 +- util/src/io/worker.rs | 2 +- util/src/network/host.rs | 6 ++---- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/util/src/io/service.rs b/util/src/io/service.rs index 7df064794..6daaf8cc6 100644 --- a/util/src/io/service.rs +++ b/util/src/io/service.rs @@ -313,7 +313,7 @@ impl IoService where Message: Send + Sync + Clone + 'static { impl Drop for IoService where Message: Send + Sync + Clone { fn drop(&mut self) { self.host_channel.send(IoMessage::Shutdown).unwrap(); - self.thread.take().unwrap().join().unwrap(); + self.thread.take().unwrap().join().ok(); } } diff --git a/util/src/io/worker.rs b/util/src/io/worker.rs index 8527b245a..d4418afe0 100644 --- a/util/src/io/worker.rs +++ b/util/src/io/worker.rs @@ -99,6 +99,6 @@ impl Drop for Worker { self.deleting.store(true, AtomicOrdering::Relaxed); self.wait.notify_all(); let thread = mem::replace(&mut self.thread, None).unwrap(); - thread.join().unwrap(); + thread.join().ok(); } } diff --git a/util/src/network/host.rs b/util/src/network/host.rs index 55ade8090..9203e73b6 100644 --- a/util/src/network/host.rs +++ b/util/src/network/host.rs @@ -438,7 +438,7 @@ impl Host where Message: Send + Sync + Clone { io.update_registration(token).unwrap_or_else(|e| 
debug!(target: "net", "Session registration error: {:?}", e)); } } - } else { warn!(target: "net", "Received event for unknown connection") } + } if kill { self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; @@ -498,9 +498,7 @@ impl Host where Message: Send + Sync + Clone { } } } - } else { - warn!(target: "net", "Received event for unknown connection"); - } + } if kill { self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection return; From 9bcb720f1f6ef9f33000345a17d335b031e8ccb7 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 04:54:38 +0100 Subject: [PATCH 07/10] Minor optimizations --- src/bin/client/main.rs | 19 ++++++++++++++----- src/block_queue.rs | 29 +++++++++++++++++++++++++---- src/blockchain.rs | 24 +++++++++++++----------- src/client.rs | 37 ++++++------------------------------- src/service.rs | 10 +++++++++- src/sync/chain.rs | 14 ++++++++++---- src/sync/mod.rs | 15 +++------------ 7 files changed, 80 insertions(+), 68 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 7d673f8d3..147ea2be2 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -14,6 +14,7 @@ use ethcore::client::*; use ethcore::service::{ClientService, NetSyncMessage}; use ethcore::ethereum; use ethcore::blockchain::CacheSize; +use ethcore::sync::EthSync; fn setup_log() { let mut builder = LogBuilder::new(); @@ -30,7 +31,7 @@ fn main() { setup_log(); let spec = ethereum::new_frontier(); let mut service = ClientService::start(spec).unwrap(); - let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default() }); + let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: service.sync() }); service.io().register_handler(io_handler).expect("Error registering IO handler"); let exit = Arc::new(Condvar::new()); @@ -60,22 +61,29 @@ impl Default for Informant { } impl Informant { - pub fn tick(&self, client: &Client) { + pub fn tick(&self, client: &Client, sync: &EthSync) { // 5 seconds betwen calls. TODO: calculate this properly. 
let dur = 5usize; let chain_info = client.chain_info(); + let queue_info = client.queue_info(); let cache_info = client.cache_info(); let report = client.report(); + let sync_info = sync.status(); if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) { - println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //···{}···// {} ({}) bl {} ({}) ex ]", + println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {} queued ···// {} ({}) bl {} ({}) ex ]", chain_info.best_block_number, chain_info.best_block_hash, (report.blocks_imported - last_report.blocks_imported) / dur, (report.transactions_applied - last_report.transactions_applied) / dur, (report.gas_processed - last_report.gas_processed) / From::from(dur), - 0, // TODO: peers + + sync_info.num_active_peers, + sync_info.num_peers, + sync_info.blocks_received, + queue_info.queue_size, + cache_info.blocks, cache_info.blocks as isize - last_cache_info.blocks as isize, cache_info.block_details, @@ -93,6 +101,7 @@ const INFO_TIMER: TimerToken = 0; struct ClientIoHandler { client: Arc, + sync: Arc, info: Informant, } @@ -103,7 +112,7 @@ impl IoHandler for ClientIoHandler { fn timeout(&self, _io: &IoContext, timer: TimerToken) { if INFO_TIMER == timer { - self.info.tick(&self.client); + self.info.tick(&self.client, &self.sync); } } } diff --git a/src/block_queue.rs b/src/block_queue.rs index 0bb184a1b..1ffd0f7ec 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -10,6 +10,15 @@ use views::*; use header::*; use service::*; +/// Block queue status +#[derive(Debug)] +pub struct BlockQueueInfo { + /// Indicates that queue is full + pub full: bool, + /// Number of queued blocks + pub queue_size: usize, +} + /// A queue of blocks. Sits between network or other I/O and the BlockChain. /// Sorts them ready for blockchain insertion. pub struct BlockQueue { @@ -65,14 +74,15 @@ impl BlockQueue { let deleting = Arc::new(AtomicBool::new(false)); let mut verifiers: Vec> = Vec::new(); - let thread_count = max(::num_cpus::get(), 2) - 1; - for _ in 0..thread_count { + let thread_count = max(::num_cpus::get(), 3) - 2; + for i in 0..thread_count { let verification = verification.clone(); let engine = engine.clone(); let more_to_verify = more_to_verify.clone(); let ready_signal = ready_signal.clone(); let deleting = deleting.clone(); - verifiers.push(thread::spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting))); + verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting)) + .expect("Error starting block verification thread")); } BlockQueue { engine: engine, @@ -206,7 +216,7 @@ impl BlockQueue { verification.verified = new_verified; } - /// TODO [arkpar] Please document me + /// Removes up to `max` verified blocks from the queue pub fn drain(&mut self, max: usize) -> Vec { let mut verification = self.verification.lock().unwrap(); let count = min(max, verification.verified.len()); @@ -217,8 +227,19 @@ impl BlockQueue { result.push(block); } self.ready_signal.reset(); + if !verification.verified.is_empty() { + self.ready_signal.set(); + } result } + + /// Get queue status. 
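`drain` above now re-arms the ready signal whenever verified blocks are still waiting, so a client that only takes 128 blocks per pass is prodded to come back for the rest rather than stalling until new blocks arrive. A small sketch of that pattern, with an `mpsc` channel standing in for the ready signal:

```rust
use std::collections::VecDeque;
use std::sync::mpsc::Sender;

// Drain at most `max` items; if work is still queued, send another wake-up so
// the consumer comes back for the rest instead of waiting for fresh input.
fn drain_chunk<T>(queue: &mut VecDeque<T>, max: usize, notify: &Sender<()>) -> Vec<T> {
    let count = std::cmp::min(max, queue.len());
    let drained: Vec<T> = queue.drain(..count).collect();
    if !queue.is_empty() {
        let _ = notify.send(());
    }
    drained
}

fn main() {
    let (tx, rx) = std::sync::mpsc::channel();
    let mut queue: VecDeque<u32> = (0..300).collect();
    let first = drain_chunk(&mut queue, 128, &tx);
    assert_eq!(first.len(), 128);
    assert!(rx.try_recv().is_ok()); // a follow-up wake-up was queued
}
```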
+ pub fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { + full: false, + queue_size: self.verification.lock().unwrap().unverified.len(), + } + } } impl Drop for BlockQueue { diff --git a/src/blockchain.rs b/src/blockchain.rs index 27abe9ee3..0720d7229 100644 --- a/src/blockchain.rs +++ b/src/blockchain.rs @@ -342,19 +342,19 @@ impl BlockChain { Some(h) => h, None => return None, }; - Some(self._tree_route((from_details, from), (to_details, to))) + Some(self._tree_route((&from_details, &from), (&to_details, &to))) } /// Similar to `tree_route` function, but can be used to return a route /// between blocks which may not be in database yet. - fn _tree_route(&self, from: (BlockDetails, H256), to: (BlockDetails, H256)) -> TreeRoute { + fn _tree_route(&self, from: (&BlockDetails, &H256), to: (&BlockDetails, &H256)) -> TreeRoute { let mut from_branch = vec![]; let mut to_branch = vec![]; - let mut from_details = from.0; - let mut to_details = to.0; - let mut current_from = from.1; - let mut current_to = to.1; + let mut from_details = from.0.clone(); + let mut to_details = to.0.clone(); + let mut current_from = from.1.clone(); + let mut current_to = to.1.clone(); // reset from && to to the same level while from_details.number > to_details.number { @@ -409,7 +409,7 @@ impl BlockChain { // store block in db self.blocks_db.put(&hash, &bytes).unwrap(); - let (batch, new_best) = self.block_to_extras_insert_batch(bytes); + let (batch, new_best, details) = self.block_to_extras_insert_batch(bytes); // update best block let mut best_block = self.best_block.write().unwrap(); @@ -420,6 +420,8 @@ impl BlockChain { // update caches let mut write = self.block_details.write().unwrap(); write.remove(&header.parent_hash()); + write.insert(hash.clone(), details); + self.note_used(CacheID::Block(hash)); // update extras database self.extras_db.write(batch).unwrap(); @@ -427,7 +429,7 @@ impl BlockChain { /// Transforms block into WriteBatch that may be written into database /// Additionally, if it's new best block it returns new best block object. - fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option) { + fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option, BlockDetails) { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); @@ -459,7 +461,7 @@ impl BlockChain { // if it's not new best block, just return if !is_new_best { - return (batch, None); + return (batch, None, details); } // if its new best block we need to make sure that all ancestors @@ -467,7 +469,7 @@ impl BlockChain { // find the route between old best block and the new one let best_hash = self.best_block_hash(); let best_details = self.block_details(&best_hash).expect("best block hash is invalid!"); - let route = self._tree_route((best_details, best_hash), (details, hash.clone())); + let route = self._tree_route((&best_details, &best_hash), (&details, &hash)); match route.blocks.len() { // its our parent @@ -494,7 +496,7 @@ impl BlockChain { total_difficulty: total_difficulty }; - (batch, Some(best_block)) + (batch, Some(best_block), details) } /// Returns true if transaction is known. 
diff --git a/src/client.rs b/src/client.rs index cf8b0fd7c..04d372786 100644 --- a/src/client.rs +++ b/src/client.rs @@ -6,8 +6,8 @@ use error::*; use header::BlockNumber; use spec::Spec; use engine::Engine; -use block_queue::BlockQueue; -use db_queue::{DbQueue, StateDBCommit}; +use block_queue::{BlockQueue, BlockQueueInfo}; +use db_queue::{DbQueue}; use service::NetSyncMessage; use env_info::LastHashes; use verification::*; @@ -47,13 +47,6 @@ impl fmt::Display for BlockChainInfo { } } -/// Block queue status -#[derive(Debug)] -pub struct BlockQueueStatus { - /// TODO [arkpar] Please document me - pub full: bool, -} - /// TODO [arkpar] Please document me pub type TreeRoute = ::blockchain::TreeRoute; @@ -99,7 +92,7 @@ pub trait BlockChainClient : Sync + Send { fn import_block(&self, bytes: Bytes) -> ImportResult; /// Get block queue information. - fn queue_status(&self) -> BlockQueueStatus; + fn queue_info(&self) -> BlockQueueInfo; /// Clear block queue and abort all import activity. fn clear_queue(&self); @@ -149,8 +142,6 @@ impl Client { let mut opts = Options::new(); opts.set_max_open_files(256); opts.create_if_missing(true); - opts.set_disable_data_sync(true); - opts.set_disable_auto_compactions(true); /*opts.set_use_fsync(false); opts.set_bytes_per_sync(8388608); opts.set_disable_data_sync(false); @@ -199,7 +190,6 @@ impl Client { /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self, _io: &IoChannel) { - let mut bad = HashSet::new(); let _import_lock = self.import_lock.lock(); let blocks = self.block_queue.write().unwrap().drain(128); @@ -243,11 +233,7 @@ impl Client { } } - let db = match self.uncommited_states.read().unwrap().get(&header.parent_hash) { - Some(db) => db.clone(), - None => self.state_db.clone(), - }; - + let db = self.state_db.clone(); let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) { Ok(b) => b, Err(e) => { @@ -272,15 +258,6 @@ impl Client { return; } } - /* - let db = result.drain(); - self.uncommited_states.write().unwrap().insert(header.hash(), db.clone()); - self.db_queue.write().unwrap().queue(StateDBCommit { - now: header.number(), - hash: header.hash().clone(), - end: ancient.map(|n|(n, self.chain.read().unwrap().block_hash(n).unwrap())), - db: db, - });*/ self.report.write().unwrap().accrue_block(&block); trace!(target: "client", "Imported #{} ({})", header.number(), header.hash()); } @@ -369,10 +346,8 @@ impl BlockChainClient for Client { self.block_queue.write().unwrap().import_block(bytes) } - fn queue_status(&self) -> BlockQueueStatus { - BlockQueueStatus { - full: false - } + fn queue_info(&self) -> BlockQueueInfo { + self.block_queue.read().unwrap().queue_info() } fn clear_queue(&self) { diff --git a/src/service.rs b/src/service.rs index 4034ce841..b9b510d5e 100644 --- a/src/service.rs +++ b/src/service.rs @@ -21,6 +21,7 @@ pub type NetSyncMessage = NetworkIoMessage; pub struct ClientService { net_service: NetworkService, client: Arc, + sync: Arc, } impl ClientService { @@ -33,7 +34,7 @@ impl ClientService { dir.push(".parity"); dir.push(H64::from(spec.genesis_header().hash()).hex()); let client = try!(Client::new(spec, &dir, net_service.io().channel())); - EthSync::register(&mut net_service, client.clone()); + let sync = EthSync::register(&mut net_service, client.clone()); let client_io = Arc::new(ClientIoHandler { client: client.clone() }); @@ -42,6 +43,7 @@ impl ClientService { Ok(ClientService { net_service: net_service, 
client: client, + sync: sync, }) } @@ -53,6 +55,12 @@ impl ClientService { /// TODO [arkpar] Please document me pub fn client(&self) -> Arc { self.client.clone() + + } + + /// Get shared sync handler + pub fn sync(&self) -> Arc { + self.sync.clone() } } diff --git a/src/sync/chain.rs b/src/sync/chain.rs index ce748da08..f44f058c8 100644 --- a/src/sync/chain.rs +++ b/src/sync/chain.rs @@ -107,6 +107,10 @@ pub struct SyncStatus { pub blocks_total: usize, /// Number of blocks downloaded so far. pub blocks_received: usize, + /// Total number of connected peers + pub num_peers: usize, + /// Total number of active peers + pub num_active_peers: usize, } #[derive(PartialEq, Eq, Debug)] @@ -195,8 +199,10 @@ impl ChainSync { start_block_number: self.starting_block, last_imported_block_number: self.last_imported_block, highest_block_number: self.highest_block, - blocks_total: (self.last_imported_block - self.starting_block) as usize, - blocks_received: (self.highest_block - self.starting_block) as usize, + blocks_received: (self.last_imported_block - self.starting_block) as usize, + blocks_total: (self.highest_block - self.starting_block) as usize, + num_peers: self.peers.len(), + num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(), } } @@ -544,7 +550,7 @@ impl ChainSync { fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) { self.clear_peer_download(peer_id); - if io.chain().queue_status().full { + if io.chain().queue_info().full { self.pause_sync(); return; } @@ -971,7 +977,7 @@ impl ChainSync { } /// Maintain other peers. Send out any new blocks and transactions - pub fn maintain_sync(&mut self, _io: &mut SyncIo) { + pub fn _maintain_sync(&mut self, _io: &mut SyncIo) { } } diff --git a/src/sync/mod.rs b/src/sync/mod.rs index c87dee569..078100084 100644 --- a/src/sync/mod.rs +++ b/src/sync/mod.rs @@ -27,7 +27,6 @@ use std::sync::*; use client::Client; use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId}; use sync::chain::ChainSync; -use util::TimerToken; use service::SyncMessage; use sync::io::NetSyncIo; @@ -38,8 +37,6 @@ mod range_collection; #[cfg(test)] mod tests; -const SYNC_TIMER: usize = 0; - /// Ethereum network protocol handler pub struct EthSync { /// Shared blockchain client. 
TODO: this should evetually become an IPC endpoint @@ -52,12 +49,13 @@ pub use self::chain::SyncStatus; impl EthSync { /// Creates and register protocol with the network service - pub fn register(service: &mut NetworkService, chain: Arc) { + pub fn register(service: &mut NetworkService, chain: Arc) -> Arc { let sync = Arc::new(EthSync { chain: chain, sync: RwLock::new(ChainSync::new()), }); service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler"); + sync } /// Get sync status @@ -77,8 +75,7 @@ impl EthSync { } impl NetworkProtocolHandler for EthSync { - fn initialize(&self, io: &NetworkContext) { - io.register_timer(SYNC_TIMER, 1000).unwrap(); + fn initialize(&self, _io: &NetworkContext) { } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -92,12 +89,6 @@ impl NetworkProtocolHandler for EthSync { fn disconnected(&self, io: &NetworkContext, peer: &PeerId) { self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer); } - - fn timeout(&self, io: &NetworkContext, timer: TimerToken) { - if timer == SYNC_TIMER { - self.sync.write().unwrap().maintain_sync(&mut NetSyncIo::new(io, self.chain.deref())); - } - } } From 0ce15af91e3c38ba6f86ee9689b2184ed85fa3d0 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 04:57:02 +0100 Subject: [PATCH 08/10] Removed db_queue --- src/client.rs | 11 +---- src/db_queue.rs | 111 ------------------------------------------------ src/lib.rs | 2 - 3 files changed, 2 insertions(+), 122 deletions(-) delete mode 100644 src/db_queue.rs diff --git a/src/client.rs b/src/client.rs index 04d372786..6f47d0601 100644 --- a/src/client.rs +++ b/src/client.rs @@ -7,7 +7,6 @@ use header::BlockNumber; use spec::Spec; use engine::Engine; use block_queue::{BlockQueue, BlockQueueInfo}; -use db_queue::{DbQueue}; use service::NetSyncMessage; use env_info::LastHashes; use verification::*; @@ -127,7 +126,6 @@ pub struct Client { engine: Arc>, state_db: JournalDB, block_queue: RwLock, - db_queue: RwLock, report: RwLock, uncommited_states: RwLock>, import_lock: Mutex<()> @@ -172,20 +170,15 @@ impl Client { } let state_db = JournalDB::new_with_arc(db); - let client = Arc::new(Client { + Ok(Arc::new(Client { chain: chain, engine: engine.clone(), state_db: state_db, block_queue: RwLock::new(BlockQueue::new(engine, message_channel)), - db_queue: RwLock::new(DbQueue::new()), report: RwLock::new(Default::default()), uncommited_states: RwLock::new(HashMap::new()), import_lock: Mutex::new(()), - }); - - let weak = Arc::downgrade(&client); - client.db_queue.read().unwrap().start(weak); - Ok(client) + })) } /// This is triggered by a message coming from a block queue when the block is ready for insertion diff --git a/src/db_queue.rs b/src/db_queue.rs deleted file mode 100644 index 242fd9cc4..000000000 --- a/src/db_queue.rs +++ /dev/null @@ -1,111 +0,0 @@ -//! A queue of state changes that are written to database in background. -use std::thread::{JoinHandle, self}; -use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering}; -use util::*; -use engine::Engine; -use client::Client; - -/// State DB commit params -pub struct StateDBCommit { - /// Database to commit - pub db: JournalDB, - /// Starting block number - pub now: u64, - /// Block ahash - pub hash: H256, - /// End block number + hash - pub end: Option<(u64, H256)>, -} - -/// A queue of state changes that are written to database in background. 
-pub struct DbQueue { - more_to_write: Arc, - queue: Arc>>, - writer: Mutex>>, - deleting: Arc, -} - -impl DbQueue { - /// Creates a new queue instance. - pub fn new() -> DbQueue { - let queue = Arc::new(Mutex::new(VecDeque::new())); - let more_to_write = Arc::new(Condvar::new()); - let deleting = Arc::new(AtomicBool::new(false)); - - DbQueue { - more_to_write: more_to_write.clone(), - queue: queue.clone(), - writer: Mutex::new(None), - deleting: deleting.clone(), - } - } - - /// Start processing the queue - pub fn start(&self, client: Weak) { - let writer = { - let queue = self.queue.clone(); - let client = client.clone(); - let more_to_write = self.more_to_write.clone(); - let deleting = self.deleting.clone(); - thread::Builder::new().name("DB Writer".to_string()).spawn(move || DbQueue::writer_loop(client, queue, more_to_write, deleting)).expect("Error creating db writer thread") - }; - mem::replace(self.writer.lock().unwrap().deref_mut(), Some(writer)); - } - - fn writer_loop(client: Weak, queue: Arc>>, wait: Arc, deleting: Arc) { - while !deleting.load(AtomicOrdering::Relaxed) { - let mut batch = { - let mut locked = queue.lock().unwrap(); - while locked.is_empty() && !deleting.load(AtomicOrdering::Relaxed) { - locked = wait.wait(locked).unwrap(); - } - - if deleting.load(AtomicOrdering::Relaxed) { - return; - } - mem::replace(locked.deref_mut(), VecDeque::new()) - }; - - for mut state in batch.drain(..) { //TODO: make this a single write transaction - match state.db.commit(state.now, &state.hash, state.end.clone()) { - Ok(_) => (), - Err(e) => { - warn!(target: "client", "State DB commit failed: {:?}", e); - } - } - client.upgrade().unwrap().clear_state(&state.hash); - } - - } - } - - /// Add a state to the queue - pub fn queue(&self, state: StateDBCommit) { - let mut queue = self.queue.lock().unwrap(); - queue.push_back(state); - self.more_to_write.notify_all(); - } -} - -impl Drop for DbQueue { - fn drop(&mut self) { - self.deleting.store(true, AtomicOrdering::Relaxed); - self.more_to_write.notify_all(); - mem::replace(self.writer.lock().unwrap().deref_mut(), None).unwrap().join().unwrap(); - } -} - -#[cfg(test)] -mod tests { - use util::*; - use spec::*; - use queue::*; - - #[test] - fn test_block_queue() { - // TODO better test - let spec = Spec::new_test(); - let engine = spec.to_engine().unwrap(); - let _ = BlockQueue::new(Arc::new(engine), IoChannel::disconnected()); - } -} diff --git a/src/lib.rs b/src/lib.rs index 58d84764a..68c658267 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -149,7 +149,5 @@ pub mod sync; pub mod block; /// TODO [arkpar] Please document me pub mod verification; -/// TODO [debris] Please document me -pub mod db_queue; pub mod block_queue; pub mod ethereum; From 74d34614cfb55226ef082661ee7f1537e6cb9ab2 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 05:20:47 +0100 Subject: [PATCH 09/10] Tests compiling again --- src/block_queue.rs | 2 +- src/sync/tests.rs | 99 ++++++++++++++++++++++++---------------------- 2 files changed, 52 insertions(+), 49 deletions(-) diff --git a/src/block_queue.rs b/src/block_queue.rs index 1ffd0f7ec..c2cbb35b6 100644 --- a/src/block_queue.rs +++ b/src/block_queue.rs @@ -257,7 +257,7 @@ impl Drop for BlockQueue { mod tests { use util::*; use spec::*; - use queue::*; + use block_queue::*; #[test] fn test_block_queue() { diff --git a/src/sync/tests.rs b/src/sync/tests.rs index 05d7ac317..b0c93a790 100644 --- a/src/sync/tests.rs +++ b/src/sync/tests.rs @@ -1,38 +1,40 @@ use util::*; -use client::{BlockChainClient, 
BlockStatus, TreeRoute, BlockQueueStatus, BlockChainInfo}; +use client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo}; +use block_queue::BlockQueueInfo; use header::{Header as BlockHeader, BlockNumber}; use error::*; use sync::io::SyncIo; use sync::chain::ChainSync; struct TestBlockChainClient { - blocks: HashMap, - numbers: HashMap, + blocks: RwLock>, + numbers: RwLock>, genesis_hash: H256, - last_hash: H256, - difficulty: U256 + last_hash: RwLock, + difficulty: RwLock, } impl TestBlockChainClient { fn new() -> TestBlockChainClient { let mut client = TestBlockChainClient { - blocks: HashMap::new(), - numbers: HashMap::new(), + blocks: RwLock::new(HashMap::new()), + numbers: RwLock::new(HashMap::new()), genesis_hash: H256::new(), - last_hash: H256::new(), - difficulty: From::from(0), + last_hash: RwLock::new(H256::new()), + difficulty: RwLock::new(From::from(0)), }; client.add_blocks(1, true); // add genesis block - client.genesis_hash = client.last_hash.clone(); + client.genesis_hash = client.last_hash.read().unwrap().clone(); client } pub fn add_blocks(&mut self, count: usize, empty: bool) { - for n in self.numbers.len()..(self.numbers.len() + count) { + let len = self.numbers.read().unwrap().len(); + for n in len..(len + count) { let mut header = BlockHeader::new(); header.difficulty = From::from(n); - header.parent_hash = self.last_hash.clone(); + header.parent_hash = self.last_hash.read().unwrap().clone(); header.number = n as BlockNumber; let mut uncles = RlpStream::new_list(if empty {0} else {1}); if !empty { @@ -50,12 +52,12 @@ impl TestBlockChainClient { impl BlockChainClient for TestBlockChainClient { fn block_header(&self, h: &H256) -> Option { - self.blocks.get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec()) + self.blocks.read().unwrap().get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec()) } fn block_body(&self, h: &H256) -> Option { - self.blocks.get(h).map(|r| { + self.blocks.read().unwrap().get(h).map(|r| { let mut stream = RlpStream::new_list(2); stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1); stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1); @@ -64,30 +66,30 @@ impl BlockChainClient for TestBlockChainClient { } fn block(&self, h: &H256) -> Option { - self.blocks.get(h).map(|b| b.clone()) + self.blocks.read().unwrap().get(h).map(|b| b.clone()) } fn block_status(&self, h: &H256) -> BlockStatus { - match self.blocks.get(h) { + match self.blocks.read().unwrap().get(h) { Some(_) => BlockStatus::InChain, None => BlockStatus::Unknown } } fn block_header_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).and_then(|h| self.block_header(h)) + self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_header(h)) } fn block_body_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).and_then(|h| self.block_body(h)) + self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_body(h)) } fn block_at(&self, n: BlockNumber) -> Option { - self.numbers.get(&(n as usize)).map(|h| self.blocks.get(h).unwrap().clone()) + self.numbers.read().unwrap().get(&(n as usize)).map(|h| self.blocks.read().unwrap().get(h).unwrap().clone()) } fn block_status_at(&self, n: BlockNumber) -> BlockStatus { - if (n as usize) < self.blocks.len() { + if (n as usize) < self.blocks.read().unwrap().len() { BlockStatus::InChain } else { BlockStatus::Unknown @@ -110,14 +112,14 @@ impl BlockChainClient for TestBlockChainClient { None } - fn import_block(&mut self, b: Bytes) -> ImportResult { + fn import_block(&self, b: Bytes) -> ImportResult { 
let header = Rlp::new(&b).val_at::(0); let number: usize = header.number as usize; - if number > self.blocks.len() { - panic!("Unexpected block number. Expected {}, got {}", self.blocks.len(), number); + if number > self.blocks.read().unwrap().len() { + panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number); } if number > 0 { - match self.blocks.get(&header.parent_hash) { + match self.blocks.read().unwrap().get(&header.parent_hash) { Some(parent) => { let parent = Rlp::new(parent).val_at::(0); if parent.number != (header.number - 1) { @@ -129,43 +131,44 @@ impl BlockChainClient for TestBlockChainClient { } } } - if number == self.numbers.len() { - self.difficulty = self.difficulty + header.difficulty; - self.last_hash = header.hash(); - self.blocks.insert(header.hash(), b); - self.numbers.insert(number, header.hash()); + if number == self.numbers.read().unwrap().len() { + *self.difficulty.write().unwrap().deref_mut() += header.difficulty; + mem::replace(self.last_hash.write().unwrap().deref_mut(), header.hash()); + self.blocks.write().unwrap().insert(header.hash(), b); + self.numbers.write().unwrap().insert(number, header.hash()); let mut parent_hash = header.parent_hash; if number > 0 { let mut n = number - 1; - while n > 0 && self.numbers[&n] != parent_hash { - *self.numbers.get_mut(&n).unwrap() = parent_hash.clone(); + while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash { + *self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone(); n -= 1; - parent_hash = Rlp::new(&self.blocks[&parent_hash]).val_at::(0).parent_hash; + parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::(0).parent_hash; } } } else { - self.blocks.insert(header.hash(), b.to_vec()); + self.blocks.write().unwrap().insert(header.hash(), b.to_vec()); } Ok(()) } - fn queue_status(&self) -> BlockQueueStatus { - BlockQueueStatus { + fn queue_info(&self) -> BlockQueueInfo { + BlockQueueInfo { full: false, + queue_size: 0, } } - fn clear_queue(&mut self) { + fn clear_queue(&self) { } fn chain_info(&self) -> BlockChainInfo { BlockChainInfo { - total_difficulty: self.difficulty, - pending_total_difficulty: self.difficulty, + total_difficulty: *self.difficulty.read().unwrap(), + pending_total_difficulty: *self.difficulty.read().unwrap(), genesis_hash: self.genesis_hash.clone(), - best_block_hash: self.last_hash.clone(), - best_block_number: self.blocks.len() as BlockNumber - 1, + best_block_hash: self.last_hash.read().unwrap().clone(), + best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1, } } } @@ -208,7 +211,7 @@ impl<'p> SyncIo for TestIo<'p> { Ok(()) } - fn chain<'a>(&'a mut self) -> &'a mut BlockChainClient { + fn chain<'a>(&'a self) -> &'a BlockChainClient { self.chain } } @@ -275,7 +278,7 @@ impl TestNet { None => {} } let mut p = self.peers.get_mut(peer).unwrap(); - p.sync.maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); + p.sync._maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None)); } } @@ -300,7 +303,7 @@ fn full_sync_two_peers() { net.peer_mut(2).chain.add_blocks(1000, false); net.sync(); assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks, net.peer(1).chain.blocks); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); } #[test] @@ -313,7 +316,7 @@ fn full_sync_empty_blocks() { } net.sync(); assert!(net.peer(0).chain.block_at(1000).is_some()); - assert_eq!(net.peer(0).chain.blocks, 
net.peer(1).chain.blocks); + assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref()); } #[test] @@ -329,9 +332,9 @@ fn forked_sync() { net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2 net.peer_mut(2).chain.add_blocks(10, true); // peer 1 has the best chain of 601 blocks - let peer1_chain = net.peer(1).chain.numbers.clone(); + let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone(); net.sync(); - assert_eq!(net.peer(0).chain.numbers, peer1_chain); - assert_eq!(net.peer(1).chain.numbers, peer1_chain); - assert_eq!(net.peer(2).chain.numbers, peer1_chain); + assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain); + assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain); } From 9159d3fea0873b766329c77e10273044d0d4f427 Mon Sep 17 00:00:00 2001 From: arkpar Date: Fri, 22 Jan 2016 14:03:42 +0100 Subject: [PATCH 10/10] Indent --- src/bin/client/main.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/bin/client/main.rs b/src/bin/client/main.rs index 147ea2be2..638ac8216 100644 --- a/src/bin/client/main.rs +++ b/src/bin/client/main.rs @@ -36,10 +36,7 @@ fn main() { let exit = Arc::new(Condvar::new()); let e = exit.clone(); - CtrlC::set_handler(move || { - e.notify_all(); - }); - + CtrlC::set_handler(move || { e.notify_all(); }); let mutex = Mutex::new(()); let _ = exit.wait(mutex.lock().unwrap()).unwrap(); }
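
Note (not part of the patches): PATCH 07 replaces the old queue status call with queue_info() returning BlockQueueInfo { full, queue_size }, and ChainSync::request_blocks now checks that flag to pause downloading while verification is backed up. The sketch below is a minimal, self-contained Rust illustration of that backpressure idea only; BlockQueue, Sync and the MAX_UNVERIFIED cap are hypothetical stand-ins, not the real types from the series.

// Simplified stand-in for the BlockQueueInfo added in src/block_queue.rs.
#[derive(Debug)]
pub struct BlockQueueInfo {
    /// Indicates that the queue is full
    pub full: bool,
    /// Number of queued blocks
    pub queue_size: usize,
}

// Hypothetical cap used only for this sketch; the real queue decides `full` itself.
const MAX_UNVERIFIED: usize = 4096;

struct BlockQueue {
    unverified: Vec<Vec<u8>>,
}

impl BlockQueue {
    fn queue_info(&self) -> BlockQueueInfo {
        BlockQueueInfo {
            full: self.unverified.len() >= MAX_UNVERIFIED,
            queue_size: self.unverified.len(),
        }
    }
}

struct Sync {
    paused: bool,
}

impl Sync {
    // Mirrors the shape of request_blocks: stop asking peers for more blocks
    // while the verification queue reports itself as full.
    fn request_blocks(&mut self, queue: &BlockQueue) {
        if queue.queue_info().full {
            self.paused = true;
            return;
        }
        self.paused = false;
        // ... issue header/body requests to peers here ...
    }
}

fn main() {
    let queue = BlockQueue { unverified: vec![vec![0u8; 8]; 3] };
    let mut sync = Sync { paused: false };
    sync.request_blocks(&queue);
    println!("paused: {}, queue: {:?}", sync.paused, queue.queue_info());
}

In the real code the queue itself decides when it is full; the point of the sketch is only the call shape: consult queue_info() before requesting more blocks, and resume once the queue drains.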
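Note (not part of the patches): PATCH 07 also gives the verifier workers names via thread::Builder ("Verifier #{i}") and resizes the pool from max(num_cpus, 2) - 1 to max(num_cpus, 3) - 2. Below is a standalone sketch of just that spawning pattern; num_cpus_guess() and the worker body are placeholders introduced for this illustration, not code from the series.

use std::cmp::max;
use std::thread;

// Stand-in for ::num_cpus::get() so the sketch needs no external crate.
fn num_cpus_guess() -> usize {
    thread::available_parallelism().map(|n| n.get()).unwrap_or(1)
}

fn main() {
    // Same sizing rule as the patch: at least one verifier, leaving roughly
    // two cores for the rest of the process.
    let thread_count = max(num_cpus_guess(), 3) - 2;

    let mut verifiers = Vec::new();
    for i in 0..thread_count {
        verifiers.push(
            thread::Builder::new()
                .name(format!("Verifier #{}", i))
                .spawn(move || {
                    // Placeholder body; the real BlockQueue::verify loops,
                    // draining unverified blocks and signalling readiness.
                    println!("{} started", thread::current().name().unwrap_or("?"));
                })
                .expect("Error starting block verification thread"),
        );
    }
    for v in verifiers {
        v.join().unwrap();
    }
}

Named threads make the per-verifier stacks easy to tell apart in a debugger or in panic messages, which is presumably the motivation for the change.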