commit
f999e5eda2
@ -20,6 +20,7 @@ time = "0.1"
|
|||||||
evmjit = { path = "rust-evmjit", optional = true }
|
evmjit = { path = "rust-evmjit", optional = true }
|
||||||
ethash = { path = "ethash" }
|
ethash = { path = "ethash" }
|
||||||
num_cpus = "0.2"
|
num_cpus = "0.2"
|
||||||
|
ctrlc = "1.0"
|
||||||
clippy = "*" # Always newest, since we use nightly
|
clippy = "*" # Always newest, since we use nightly
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
|
@ -30,11 +30,13 @@ impl EthashManager {
|
|||||||
/// `nonce` - The nonce to pack into the mix
|
/// `nonce` - The nonce to pack into the mix
|
||||||
pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork {
|
pub fn compute_light(&self, block_number: u64, header_hash: &H256, nonce: u64) -> ProofOfWork {
|
||||||
let epoch = block_number / ETHASH_EPOCH_LENGTH;
|
let epoch = block_number / ETHASH_EPOCH_LENGTH;
|
||||||
if !self.lights.read().unwrap().contains_key(&epoch) {
|
while !self.lights.read().unwrap().contains_key(&epoch) {
|
||||||
let mut lights = self.lights.write().unwrap(); // obtain write lock
|
if let Ok(mut lights) = self.lights.try_write()
|
||||||
if !lights.contains_key(&epoch) {
|
{
|
||||||
let light = Light::new(block_number);
|
if !lights.contains_key(&epoch) {
|
||||||
lights.insert(epoch, light);
|
let light = Light::new(block_number);
|
||||||
|
lights.insert(epoch, light);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.lights.read().unwrap().get(&epoch).unwrap().compute(header_hash, nonce)
|
self.lights.read().unwrap().get(&epoch).unwrap().compute(header_hash, nonce)
|
||||||
|
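The hunk above replaces the single `if` + blocking `write()` path with a `while` loop that uses `try_write()` plus a second `contains_key` check under the write guard, so a thread never blocks on the write lock while a read guard may still be alive and the `Light` is built at most once. A minimal, std-only sketch of that lazily-filled cache pattern (assumed simplification: `u64` values and `key * 2` stand in for `Light` and `Light::new(block_number)`):

    use std::collections::HashMap;
    use std::sync::RwLock;

    struct Cache {
        map: RwLock<HashMap<u64, u64>>,
    }

    impl Cache {
        fn get_or_compute(&self, key: u64) -> u64 {
            // Spin until the entry exists. try_write() never blocks while another
            // guard is alive, and the re-check under the write guard ensures the
            // expensive value is computed at most once.
            while !self.map.read().unwrap().contains_key(&key) {
                if let Ok(mut map) = self.map.try_write() {
                    if !map.contains_key(&key) {
                        let value = key * 2; // stands in for Light::new(block_number)
                        map.insert(key, value);
                    }
                }
            }
            *self.map.read().unwrap().get(&key).unwrap()
        }
    }

    fn main() {
        let cache = Cache { map: RwLock::new(HashMap::new()) };
        assert_eq!(cache.get_or_compute(21), 42);
    }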
@ -3,17 +3,18 @@ extern crate ethcore;
|
|||||||
extern crate rustc_serialize;
|
extern crate rustc_serialize;
|
||||||
extern crate log;
|
extern crate log;
|
||||||
extern crate env_logger;
|
extern crate env_logger;
|
||||||
|
extern crate ctrlc;
|
||||||
|
|
||||||
use std::io::stdin;
|
|
||||||
use std::env;
|
use std::env;
|
||||||
use log::{LogLevelFilter};
|
use log::{LogLevelFilter};
|
||||||
use env_logger::LogBuilder;
|
use env_logger::LogBuilder;
|
||||||
|
use ctrlc::CtrlC;
|
||||||
use util::*;
|
use util::*;
|
||||||
use ethcore::client::*;
|
use ethcore::client::*;
|
||||||
use ethcore::service::ClientService;
|
use ethcore::service::{ClientService, NetSyncMessage};
|
||||||
use ethcore::ethereum;
|
use ethcore::ethereum;
|
||||||
use ethcore::blockchain::CacheSize;
|
use ethcore::blockchain::CacheSize;
|
||||||
use ethcore::sync::*;
|
use ethcore::sync::EthSync;
|
||||||
|
|
||||||
fn setup_log() {
|
fn setup_log() {
|
||||||
let mut builder = LogBuilder::new();
|
let mut builder = LogBuilder::new();
|
||||||
@ -30,41 +31,56 @@ fn main() {
|
|||||||
setup_log();
|
setup_log();
|
||||||
let spec = ethereum::new_frontier();
|
let spec = ethereum::new_frontier();
|
||||||
let mut service = ClientService::start(spec).unwrap();
|
let mut service = ClientService::start(spec).unwrap();
|
||||||
let io_handler = Box::new(ClientIoHandler { client: service.client(), timer: 0, info: Default::default() });
|
let io_handler = Arc::new(ClientIoHandler { client: service.client(), info: Default::default(), sync: service.sync() });
|
||||||
service.io().register_handler(io_handler).expect("Error registering IO handler");
|
service.io().register_handler(io_handler).expect("Error registering IO handler");
|
||||||
loop {
|
|
||||||
let mut cmd = String::new();
|
let exit = Arc::new(Condvar::new());
|
||||||
stdin().read_line(&mut cmd).unwrap();
|
let e = exit.clone();
|
||||||
if cmd == "quit\n" || cmd == "exit\n" || cmd == "q\n" {
|
CtrlC::set_handler(move || { e.notify_all(); });
|
||||||
break;
|
let mutex = Mutex::new(());
|
||||||
|
let _ = exit.wait(mutex.lock().unwrap()).unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Informant {
|
||||||
|
chain_info: RwLock<Option<BlockChainInfo>>,
|
||||||
|
cache_info: RwLock<Option<CacheSize>>,
|
||||||
|
report: RwLock<Option<ClientReport>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for Informant {
|
||||||
|
fn default() -> Self {
|
||||||
|
Informant {
|
||||||
|
chain_info: RwLock::new(None),
|
||||||
|
cache_info: RwLock::new(None),
|
||||||
|
report: RwLock::new(None),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, Debug)]
|
|
||||||
struct Informant {
|
|
||||||
chain_info: Option<BlockChainInfo>,
|
|
||||||
cache_info: Option<CacheSize>,
|
|
||||||
report: Option<ClientReport>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Informant {
|
impl Informant {
|
||||||
pub fn tick(&mut self, client: &Client) {
|
pub fn tick(&self, client: &Client, sync: &EthSync) {
|
||||||
// 5 seconds between calls. TODO: calculate this properly.
|
// 5 seconds between calls. TODO: calculate this properly.
|
||||||
let dur = 5usize;
|
let dur = 5usize;
|
||||||
|
|
||||||
let chain_info = client.chain_info();
|
let chain_info = client.chain_info();
|
||||||
|
let queue_info = client.queue_info();
|
||||||
let cache_info = client.cache_info();
|
let cache_info = client.cache_info();
|
||||||
let report = client.report();
|
let report = client.report();
|
||||||
|
let sync_info = sync.status();
|
||||||
|
|
||||||
if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (&self.chain_info, &self.cache_info, &self.report) {
|
if let (_, &Some(ref last_cache_info), &Some(ref last_report)) = (self.chain_info.read().unwrap().deref(), self.cache_info.read().unwrap().deref(), self.report.read().unwrap().deref()) {
|
||||||
println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //···{}···// {} ({}) bl {} ({}) ex ]",
|
println!("[ {} {} ]---[ {} blk/s | {} tx/s | {} gas/s //··· {}/{} peers, {} downloaded, {} queued ···// {} ({}) bl {} ({}) ex ]",
|
||||||
chain_info.best_block_number,
|
chain_info.best_block_number,
|
||||||
chain_info.best_block_hash,
|
chain_info.best_block_hash,
|
||||||
(report.blocks_imported - last_report.blocks_imported) / dur,
|
(report.blocks_imported - last_report.blocks_imported) / dur,
|
||||||
(report.transactions_applied - last_report.transactions_applied) / dur,
|
(report.transactions_applied - last_report.transactions_applied) / dur,
|
||||||
(report.gas_processed - last_report.gas_processed) / From::from(dur),
|
(report.gas_processed - last_report.gas_processed) / From::from(dur),
|
||||||
0, // TODO: peers
|
|
||||||
|
sync_info.num_active_peers,
|
||||||
|
sync_info.num_peers,
|
||||||
|
sync_info.blocks_received,
|
||||||
|
queue_info.queue_size,
|
||||||
|
|
||||||
cache_info.blocks,
|
cache_info.blocks,
|
||||||
cache_info.blocks as isize - last_cache_info.blocks as isize,
|
cache_info.blocks as isize - last_cache_info.blocks as isize,
|
||||||
cache_info.block_details,
|
cache_info.block_details,
|
||||||
@ -72,28 +88,28 @@ impl Informant {
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.chain_info = Some(chain_info);
|
*self.chain_info.write().unwrap().deref_mut() = Some(chain_info);
|
||||||
self.cache_info = Some(cache_info);
|
*self.cache_info.write().unwrap().deref_mut() = Some(cache_info);
|
||||||
self.report = Some(report);
|
*self.report.write().unwrap().deref_mut() = Some(report);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const INFO_TIMER: TimerToken = 0;
|
||||||
|
|
||||||
struct ClientIoHandler {
|
struct ClientIoHandler {
|
||||||
client: Arc<RwLock<Client>>,
|
client: Arc<Client>,
|
||||||
timer: TimerToken,
|
sync: Arc<EthSync>,
|
||||||
info: Informant,
|
info: Informant,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IoHandler<NetSyncMessage> for ClientIoHandler {
|
impl IoHandler<NetSyncMessage> for ClientIoHandler {
|
||||||
fn initialize<'s>(&'s mut self, io: &mut IoContext<'s, NetSyncMessage>) {
|
fn initialize(&self, io: &IoContext<NetSyncMessage>) {
|
||||||
self.timer = io.register_timer(5000).expect("Error registering timer");
|
io.register_timer(INFO_TIMER, 5000).expect("Error registering timer");
|
||||||
}
|
}
|
||||||
|
|
||||||
fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, timer: TimerToken) {
|
fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) {
|
||||||
if self.timer == timer {
|
if INFO_TIMER == timer {
|
||||||
let client = self.client.read().unwrap();
|
self.info.tick(&self.client, &self.sync);
|
||||||
client.tick();
|
|
||||||
self.info.tick(client.deref());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
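In the `main` hunk above, the stdin polling loop gives way to a `Condvar` that the Ctrl-C handler notifies. A std-only sketch of that shutdown wait follows; the spawned thread is an assumed stand-in for the `ctrlc` callback:

    use std::sync::{Arc, Condvar, Mutex};
    use std::thread;
    use std::time::Duration;

    fn main() {
        // The pair plays the role of `exit` above; the spawned thread stands in
        // for the Ctrl-C callback that calls notify_all().
        let exit = Arc::new((Mutex::new(()), Condvar::new()));
        let handler = exit.clone();
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(50)); // pretend the signal arrived
            handler.1.notify_all();
        });
        // Block the main thread until notified (spurious wakeups are ignored
        // here, just as in the original code).
        let guard = exit.0.lock().unwrap();
        let _guard = exit.1.wait(guard).unwrap();
        println!("shutting down");
    }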
@ -1,12 +1,23 @@
|
|||||||
|
//! A queue of blocks. Sits between network or other I/O and the BlockChain.
|
||||||
|
//! Sorts them ready for blockchain insertion.
|
||||||
use std::thread::{JoinHandle, self};
|
use std::thread::{JoinHandle, self};
|
||||||
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
|
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
|
||||||
use util::*;
|
use util::*;
|
||||||
use verification::*;
|
use verification::*;
|
||||||
use error::*;
|
use error::*;
|
||||||
use engine::Engine;
|
use engine::Engine;
|
||||||
use sync::*;
|
|
||||||
use views::*;
|
use views::*;
|
||||||
use header::*;
|
use header::*;
|
||||||
|
use service::*;
|
||||||
|
|
||||||
|
/// Block queue status
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct BlockQueueInfo {
|
||||||
|
/// Indicates that queue is full
|
||||||
|
pub full: bool,
|
||||||
|
/// Number of queued blocks
|
||||||
|
pub queue_size: usize,
|
||||||
|
}
|
||||||
|
|
||||||
/// A queue of blocks. Sits between network or other I/O and the BlockChain.
|
/// A queue of blocks. Sits between network or other I/O and the BlockChain.
|
||||||
/// Sorts them ready for blockchain insertion.
|
/// Sorts them ready for blockchain insertion.
|
||||||
@ -63,14 +74,15 @@ impl BlockQueue {
|
|||||||
let deleting = Arc::new(AtomicBool::new(false));
|
let deleting = Arc::new(AtomicBool::new(false));
|
||||||
|
|
||||||
let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
|
let mut verifiers: Vec<JoinHandle<()>> = Vec::new();
|
||||||
let thread_count = max(::num_cpus::get(), 2) - 1;
|
let thread_count = max(::num_cpus::get(), 3) - 2;
|
||||||
for _ in 0..thread_count {
|
for i in 0..thread_count {
|
||||||
let verification = verification.clone();
|
let verification = verification.clone();
|
||||||
let engine = engine.clone();
|
let engine = engine.clone();
|
||||||
let more_to_verify = more_to_verify.clone();
|
let more_to_verify = more_to_verify.clone();
|
||||||
let ready_signal = ready_signal.clone();
|
let ready_signal = ready_signal.clone();
|
||||||
let deleting = deleting.clone();
|
let deleting = deleting.clone();
|
||||||
verifiers.push(thread::spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting)));
|
verifiers.push(thread::Builder::new().name(format!("Verifier #{}", i)).spawn(move || BlockQueue::verify(verification, engine, more_to_verify, ready_signal, deleting))
|
||||||
|
.expect("Error starting block verification thread"));
|
||||||
}
|
}
|
||||||
BlockQueue {
|
BlockQueue {
|
||||||
engine: engine,
|
engine: engine,
|
||||||
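The verifier pool above is now sized as `max(num_cpus::get(), 3) - 2` and each worker gets a name via `thread::Builder`. A small sketch of that spawning pattern, with the CPU count hard-coded so the example stays std-only:

    use std::cmp::max;
    use std::thread;

    fn main() {
        // Same sizing rule as the hunk above: always at least one verifier,
        // and on larger machines leave two cores for the rest of the node.
        let cpus = 4; // stand-in for ::num_cpus::get()
        let thread_count = max(cpus, 3) - 2;

        let verifiers: Vec<thread::JoinHandle<()>> = (0..thread_count)
            .map(|i| {
                thread::Builder::new()
                    .name(format!("Verifier #{}", i))
                    .spawn(move || {
                        // the verification loop would live here
                        println!("{} started", thread::current().name().unwrap());
                    })
                    .expect("Error starting block verification thread")
            })
            .collect();

        for handle in verifiers {
            handle.join().unwrap();
        }
    }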
@ -204,7 +216,7 @@ impl BlockQueue {
|
|||||||
verification.verified = new_verified;
|
verification.verified = new_verified;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO [arkpar] Please document me
|
/// Removes up to `max` verified blocks from the queue
|
||||||
pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> {
|
pub fn drain(&mut self, max: usize) -> Vec<PreVerifiedBlock> {
|
||||||
let mut verification = self.verification.lock().unwrap();
|
let mut verification = self.verification.lock().unwrap();
|
||||||
let count = min(max, verification.verified.len());
|
let count = min(max, verification.verified.len());
|
||||||
@ -215,8 +227,19 @@ impl BlockQueue {
|
|||||||
result.push(block);
|
result.push(block);
|
||||||
}
|
}
|
||||||
self.ready_signal.reset();
|
self.ready_signal.reset();
|
||||||
|
if !verification.verified.is_empty() {
|
||||||
|
self.ready_signal.set();
|
||||||
|
}
|
||||||
result
|
result
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Get queue status.
|
||||||
|
pub fn queue_info(&self) -> BlockQueueInfo {
|
||||||
|
BlockQueueInfo {
|
||||||
|
full: false,
|
||||||
|
queue_size: self.verification.lock().unwrap().unverified.len(),
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Drop for BlockQueue {
|
impl Drop for BlockQueue {
|
||||||
@ -234,7 +257,7 @@ impl Drop for BlockQueue {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use util::*;
|
use util::*;
|
||||||
use spec::*;
|
use spec::*;
|
||||||
use queue::*;
|
use block_queue::*;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn test_block_queue() {
|
fn test_block_queue() {
|
@ -341,19 +341,19 @@ impl BlockChain {
|
|||||||
Some(h) => h,
|
Some(h) => h,
|
||||||
None => return None,
|
None => return None,
|
||||||
};
|
};
|
||||||
Some(self._tree_route((from_details, from), (to_details, to)))
|
Some(self._tree_route((&from_details, &from), (&to_details, &to)))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Similar to `tree_route` function, but can be used to return a route
|
/// Similar to `tree_route` function, but can be used to return a route
|
||||||
/// between blocks which may not be in database yet.
|
/// between blocks which may not be in database yet.
|
||||||
fn _tree_route(&self, from: (BlockDetails, H256), to: (BlockDetails, H256)) -> TreeRoute {
|
fn _tree_route(&self, from: (&BlockDetails, &H256), to: (&BlockDetails, &H256)) -> TreeRoute {
|
||||||
let mut from_branch = vec![];
|
let mut from_branch = vec![];
|
||||||
let mut to_branch = vec![];
|
let mut to_branch = vec![];
|
||||||
|
|
||||||
let mut from_details = from.0;
|
let mut from_details = from.0.clone();
|
||||||
let mut to_details = to.0;
|
let mut to_details = to.0.clone();
|
||||||
let mut current_from = from.1;
|
let mut current_from = from.1.clone();
|
||||||
let mut current_to = to.1;
|
let mut current_to = to.1.clone();
|
||||||
|
|
||||||
// reset from && to to the same level
|
// reset from && to to the same level
|
||||||
while from_details.number > to_details.number {
|
while from_details.number > to_details.number {
|
||||||
@ -408,7 +408,7 @@ impl BlockChain {
|
|||||||
|
|
||||||
// store block in db
|
// store block in db
|
||||||
self.blocks_db.put(&hash, &bytes).unwrap();
|
self.blocks_db.put(&hash, &bytes).unwrap();
|
||||||
let (batch, new_best) = self.block_to_extras_insert_batch(bytes);
|
let (batch, new_best, details) = self.block_to_extras_insert_batch(bytes);
|
||||||
|
|
||||||
// update best block
|
// update best block
|
||||||
let mut best_block = self.best_block.write().unwrap();
|
let mut best_block = self.best_block.write().unwrap();
|
||||||
@ -419,6 +419,8 @@ impl BlockChain {
|
|||||||
// update caches
|
// update caches
|
||||||
let mut write = self.block_details.write().unwrap();
|
let mut write = self.block_details.write().unwrap();
|
||||||
write.remove(&header.parent_hash());
|
write.remove(&header.parent_hash());
|
||||||
|
write.insert(hash.clone(), details);
|
||||||
|
self.note_used(CacheID::Block(hash));
|
||||||
|
|
||||||
// update extras database
|
// update extras database
|
||||||
self.extras_db.write(batch).unwrap();
|
self.extras_db.write(batch).unwrap();
|
||||||
@ -426,7 +428,7 @@ impl BlockChain {
|
|||||||
|
|
||||||
/// Transforms block into WriteBatch that may be written into database
|
/// Transforms block into WriteBatch that may be written into database
|
||||||
/// Additionally, if it's a new best block it returns the new best block object.
|
/// Additionally, if it's a new best block it returns the new best block object.
|
||||||
fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option<BestBlock>) {
|
fn block_to_extras_insert_batch(&self, bytes: &[u8]) -> (WriteBatch, Option<BestBlock>, BlockDetails) {
|
||||||
// create views onto rlp
|
// create views onto rlp
|
||||||
let block = BlockView::new(bytes);
|
let block = BlockView::new(bytes);
|
||||||
let header = block.header_view();
|
let header = block.header_view();
|
||||||
@ -458,7 +460,7 @@ impl BlockChain {
|
|||||||
|
|
||||||
// if it's not new best block, just return
|
// if it's not new best block, just return
|
||||||
if !is_new_best {
|
if !is_new_best {
|
||||||
return (batch, None);
|
return (batch, None, details);
|
||||||
}
|
}
|
||||||
|
|
||||||
// if it's the new best block we need to make sure that all ancestors
|
// if it's the new best block we need to make sure that all ancestors
|
||||||
@ -466,7 +468,7 @@ impl BlockChain {
|
|||||||
// find the route between old best block and the new one
|
// find the route between old best block and the new one
|
||||||
let best_hash = self.best_block_hash();
|
let best_hash = self.best_block_hash();
|
||||||
let best_details = self.block_details(&best_hash).expect("best block hash is invalid!");
|
let best_details = self.block_details(&best_hash).expect("best block hash is invalid!");
|
||||||
let route = self._tree_route((best_details, best_hash), (details, hash.clone()));
|
let route = self._tree_route((&best_details, &best_hash), (&details, &hash));
|
||||||
|
|
||||||
match route.blocks.len() {
|
match route.blocks.len() {
|
||||||
// its our parent
|
// its our parent
|
||||||
@ -493,7 +495,7 @@ impl BlockChain {
|
|||||||
total_difficulty: total_difficulty
|
total_difficulty: total_difficulty
|
||||||
};
|
};
|
||||||
|
|
||||||
(batch, Some(best_block))
|
(batch, Some(best_block), details)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns true if transaction is known.
|
/// Returns true if transaction is known.
|
||||||
|
@ -6,8 +6,8 @@ use error::*;
|
|||||||
use header::BlockNumber;
|
use header::BlockNumber;
|
||||||
use spec::Spec;
|
use spec::Spec;
|
||||||
use engine::Engine;
|
use engine::Engine;
|
||||||
use queue::BlockQueue;
|
use block_queue::{BlockQueue, BlockQueueInfo};
|
||||||
use sync::NetSyncMessage;
|
use service::NetSyncMessage;
|
||||||
use env_info::LastHashes;
|
use env_info::LastHashes;
|
||||||
use verification::*;
|
use verification::*;
|
||||||
use block::*;
|
use block::*;
|
||||||
@ -46,13 +46,6 @@ impl fmt::Display for BlockChainInfo {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Block queue status
|
|
||||||
#[derive(Debug)]
|
|
||||||
pub struct BlockQueueStatus {
|
|
||||||
/// TODO [arkpar] Please document me
|
|
||||||
pub full: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type TreeRoute = ::blockchain::TreeRoute;
|
pub type TreeRoute = ::blockchain::TreeRoute;
|
||||||
|
|
||||||
@ -95,13 +88,13 @@ pub trait BlockChainClient : Sync + Send {
|
|||||||
fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
|
fn block_receipts(&self, hash: &H256) -> Option<Bytes>;
|
||||||
|
|
||||||
/// Import a block into the blockchain.
|
/// Import a block into the blockchain.
|
||||||
fn import_block(&mut self, bytes: Bytes) -> ImportResult;
|
fn import_block(&self, bytes: Bytes) -> ImportResult;
|
||||||
|
|
||||||
/// Get block queue information.
|
/// Get block queue information.
|
||||||
fn queue_status(&self) -> BlockQueueStatus;
|
fn queue_info(&self) -> BlockQueueInfo;
|
||||||
|
|
||||||
/// Clear block queue and abort all import activity.
|
/// Clear block queue and abort all import activity.
|
||||||
fn clear_queue(&mut self);
|
fn clear_queue(&self);
|
||||||
|
|
||||||
/// Get blockchain information.
|
/// Get blockchain information.
|
||||||
fn chain_info(&self) -> BlockChainInfo;
|
fn chain_info(&self) -> BlockChainInfo;
|
||||||
@ -132,19 +125,21 @@ pub struct Client {
|
|||||||
chain: Arc<RwLock<BlockChain>>,
|
chain: Arc<RwLock<BlockChain>>,
|
||||||
engine: Arc<Box<Engine>>,
|
engine: Arc<Box<Engine>>,
|
||||||
state_db: JournalDB,
|
state_db: JournalDB,
|
||||||
queue: BlockQueue,
|
block_queue: RwLock<BlockQueue>,
|
||||||
report: ClientReport,
|
report: RwLock<ClientReport>,
|
||||||
|
uncommited_states: RwLock<HashMap<H256, JournalDB>>,
|
||||||
|
import_lock: Mutex<()>
|
||||||
}
|
}
|
||||||
|
|
||||||
const HISTORY: u64 = 1000;
|
const HISTORY: u64 = 1000;
|
||||||
|
|
||||||
impl Client {
|
impl Client {
|
||||||
/// Create a new client with given spec and DB path.
|
/// Create a new client with given spec and DB path.
|
||||||
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Client, Error> {
|
pub fn new(spec: Spec, path: &Path, message_channel: IoChannel<NetSyncMessage> ) -> Result<Arc<Client>, Error> {
|
||||||
let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path)));
|
let chain = Arc::new(RwLock::new(BlockChain::new(&spec.genesis_block(), path)));
|
||||||
let mut opts = Options::new();
|
let mut opts = Options::new();
|
||||||
opts.create_if_missing(true);
|
|
||||||
opts.set_max_open_files(256);
|
opts.set_max_open_files(256);
|
||||||
|
opts.create_if_missing(true);
|
||||||
/*opts.set_use_fsync(false);
|
/*opts.set_use_fsync(false);
|
||||||
opts.set_bytes_per_sync(8388608);
|
opts.set_bytes_per_sync(8388608);
|
||||||
opts.set_disable_data_sync(false);
|
opts.set_disable_data_sync(false);
|
||||||
@ -164,37 +159,40 @@ impl Client {
|
|||||||
|
|
||||||
let mut state_path = path.to_path_buf();
|
let mut state_path = path.to_path_buf();
|
||||||
state_path.push("state");
|
state_path.push("state");
|
||||||
let db = DB::open(&opts, state_path.to_str().unwrap()).unwrap();
|
let db = Arc::new(DB::open(&opts, state_path.to_str().unwrap()).unwrap());
|
||||||
let mut state_db = JournalDB::new(db);
|
|
||||||
|
|
||||||
let engine = Arc::new(try!(spec.to_engine()));
|
let engine = Arc::new(try!(spec.to_engine()));
|
||||||
if engine.spec().ensure_db_good(&mut state_db) {
|
{
|
||||||
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
|
let mut state_db = JournalDB::new_with_arc(db.clone());
|
||||||
|
if engine.spec().ensure_db_good(&mut state_db) {
|
||||||
|
state_db.commit(0, &engine.spec().genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
let state_db = JournalDB::new_with_arc(db);
|
||||||
|
|
||||||
// chain.write().unwrap().ensure_good(&state_db);
|
Ok(Arc::new(Client {
|
||||||
|
|
||||||
Ok(Client {
|
|
||||||
chain: chain,
|
chain: chain,
|
||||||
engine: engine.clone(),
|
engine: engine.clone(),
|
||||||
state_db: state_db,
|
state_db: state_db,
|
||||||
queue: BlockQueue::new(engine, message_channel),
|
block_queue: RwLock::new(BlockQueue::new(engine, message_channel)),
|
||||||
report: Default::default(),
|
report: RwLock::new(Default::default()),
|
||||||
})
|
uncommited_states: RwLock::new(HashMap::new()),
|
||||||
|
import_lock: Mutex::new(()),
|
||||||
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// This is triggered by a message coming from a block queue when the block is ready for insertion
|
/// This is triggered by a message coming from a block queue when the block is ready for insertion
|
||||||
pub fn import_verified_blocks(&mut self) {
|
pub fn import_verified_blocks(&self, _io: &IoChannel<NetSyncMessage>) {
|
||||||
|
|
||||||
let mut bad = HashSet::new();
|
let mut bad = HashSet::new();
|
||||||
let blocks = self.queue.drain(128);
|
let _import_lock = self.import_lock.lock();
|
||||||
|
let blocks = self.block_queue.write().unwrap().drain(128);
|
||||||
if blocks.is_empty() {
|
if blocks.is_empty() {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
for block in blocks {
|
for block in blocks {
|
||||||
if bad.contains(&block.header.parent_hash) {
|
if bad.contains(&block.header.parent_hash) {
|
||||||
self.queue.mark_as_bad(&block.header.hash());
|
self.block_queue.write().unwrap().mark_as_bad(&block.header.hash());
|
||||||
bad.insert(block.header.hash());
|
bad.insert(block.header.hash());
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@ -202,7 +200,7 @@ impl Client {
|
|||||||
let header = &block.header;
|
let header = &block.header;
|
||||||
if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) {
|
if let Err(e) = verify_block_family(&header, &block.bytes, self.engine.deref().deref(), self.chain.read().unwrap().deref()) {
|
||||||
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
self.queue.mark_as_bad(&header.hash());
|
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
|
||||||
bad.insert(block.header.hash());
|
bad.insert(block.header.hash());
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
@ -210,7 +208,7 @@ impl Client {
|
|||||||
Some(p) => p,
|
Some(p) => p,
|
||||||
None => {
|
None => {
|
||||||
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
|
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
|
||||||
self.queue.mark_as_bad(&header.hash());
|
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
|
||||||
bad.insert(block.header.hash());
|
bad.insert(block.header.hash());
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
@ -228,18 +226,19 @@ impl Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let result = match enact_verified(&block, self.engine.deref().deref(), self.state_db.clone(), &parent, &last_hashes) {
|
let db = self.state_db.clone();
|
||||||
|
let result = match enact_verified(&block, self.engine.deref().deref(), db, &parent, &last_hashes) {
|
||||||
Ok(b) => b,
|
Ok(b) => b,
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
bad.insert(block.header.hash());
|
bad.insert(block.header.hash());
|
||||||
self.queue.mark_as_bad(&header.hash());
|
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
if let Err(e) = verify_block_final(&header, result.block().header()) {
|
if let Err(e) = verify_block_final(&header, result.block().header()) {
|
||||||
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
self.queue.mark_as_bad(&header.hash());
|
self.block_queue.write().unwrap().mark_as_bad(&header.hash());
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -252,12 +251,16 @@ impl Client {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
self.report.accrue_block(&block);
|
self.report.write().unwrap().accrue_block(&block);
|
||||||
|
|
||||||
trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
|
trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Clear cached state overlay
|
||||||
|
pub fn clear_state(&self, hash: &H256) {
|
||||||
|
self.uncommited_states.write().unwrap().remove(hash);
|
||||||
|
}
|
||||||
|
|
||||||
/// Get info on the cache.
|
/// Get info on the cache.
|
||||||
pub fn cache_info(&self) -> CacheSize {
|
pub fn cache_info(&self) -> CacheSize {
|
||||||
self.chain.read().unwrap().cache_size()
|
self.chain.read().unwrap().cache_size()
|
||||||
@ -265,7 +268,7 @@ impl Client {
|
|||||||
|
|
||||||
/// Get the report.
|
/// Get the report.
|
||||||
pub fn report(&self) -> ClientReport {
|
pub fn report(&self) -> ClientReport {
|
||||||
self.report.clone()
|
self.report.read().unwrap().clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Tick the client.
|
/// Tick the client.
|
||||||
@ -328,21 +331,20 @@ impl BlockChainClient for Client {
|
|||||||
unimplemented!();
|
unimplemented!();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn import_block(&mut self, bytes: Bytes) -> ImportResult {
|
fn import_block(&self, bytes: Bytes) -> ImportResult {
|
||||||
let header = BlockView::new(&bytes).header();
|
let header = BlockView::new(&bytes).header();
|
||||||
if self.chain.read().unwrap().is_known(&header.hash()) {
|
if self.chain.read().unwrap().is_known(&header.hash()) {
|
||||||
return Err(ImportError::AlreadyInChain);
|
return Err(ImportError::AlreadyInChain);
|
||||||
}
|
}
|
||||||
self.queue.import_block(bytes)
|
self.block_queue.write().unwrap().import_block(bytes)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queue_status(&self) -> BlockQueueStatus {
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
BlockQueueStatus {
|
self.block_queue.read().unwrap().queue_info()
|
||||||
full: false
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn clear_queue(&mut self) {
|
fn clear_queue(&self) {
|
||||||
|
self.block_queue.write().unwrap().clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn chain_info(&self) -> BlockChainInfo {
|
fn chain_info(&self) -> BlockChainInfo {
|
||||||
|
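The Client hunks above turn `&mut self` methods into `&self` methods by moving the mutable pieces (`block_queue`, `report`, the new `import_lock`) behind `RwLock`/`Mutex` fields, which is what lets the service hand out a plain `Arc<Client>`. A reduced sketch of that interior-mutability pattern, with a counter standing in for `ClientReport`:

    use std::sync::{Arc, Mutex, RwLock};
    use std::thread;

    #[derive(Default)]
    struct Client {
        report: RwLock<u64>,    // stands in for RwLock<ClientReport>
        import_lock: Mutex<()>, // serialises import_verified_blocks
    }

    impl Client {
        fn import_verified_blocks(&self) {
            let _import_lock = self.import_lock.lock().unwrap(); // one importer at a time
            *self.report.write().unwrap() += 1;
        }

        fn report(&self) -> u64 {
            *self.report.read().unwrap()
        }
    }

    fn main() {
        let client = Arc::new(Client::default());
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let c = client.clone();
                thread::spawn(move || c.import_verified_blocks())
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(client.report(), 4);
    }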
@ -151,6 +151,5 @@ pub mod sync;
|
|||||||
pub mod block;
|
pub mod block;
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub mod verification;
|
pub mod verification;
|
||||||
/// TODO [debris] Please document me
|
pub mod block_queue;
|
||||||
pub mod queue;
|
|
||||||
pub mod ethereum;
|
pub mod ethereum;
|
||||||
|
@ -5,10 +5,23 @@ use error::*;
|
|||||||
use std::env;
|
use std::env;
|
||||||
use client::Client;
|
use client::Client;
|
||||||
|
|
||||||
|
/// Message type for external and internal events
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub enum SyncMessage {
|
||||||
|
/// New block has been imported into the blockchain
|
||||||
|
NewChainBlock(Bytes), //TODO: use Cow
|
||||||
|
/// A block is ready
|
||||||
|
BlockVerified,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// TODO [arkpar] Please document me
|
||||||
|
pub type NetSyncMessage = NetworkIoMessage<SyncMessage>;
|
||||||
|
|
||||||
/// Client service setup. Creates and registers client and network services with the IO subsystem.
|
/// Client service setup. Creates and registers client and network services with the IO subsystem.
|
||||||
pub struct ClientService {
|
pub struct ClientService {
|
||||||
net_service: NetworkService<SyncMessage>,
|
net_service: NetworkService<SyncMessage>,
|
||||||
client: Arc<RwLock<Client>>,
|
client: Arc<Client>,
|
||||||
|
sync: Arc<EthSync>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ClientService {
|
impl ClientService {
|
||||||
@ -20,9 +33,9 @@ impl ClientService {
|
|||||||
let mut dir = env::home_dir().unwrap();
|
let mut dir = env::home_dir().unwrap();
|
||||||
dir.push(".parity");
|
dir.push(".parity");
|
||||||
dir.push(H64::from(spec.genesis_header().hash()).hex());
|
dir.push(H64::from(spec.genesis_header().hash()).hex());
|
||||||
let client = Arc::new(RwLock::new(try!(Client::new(spec, &dir, net_service.io().channel()))));
|
let client = try!(Client::new(spec, &dir, net_service.io().channel()));
|
||||||
EthSync::register(&mut net_service, client.clone());
|
let sync = EthSync::register(&mut net_service, client.clone());
|
||||||
let client_io = Box::new(ClientIoHandler {
|
let client_io = Arc::new(ClientIoHandler {
|
||||||
client: client.clone()
|
client: client.clone()
|
||||||
});
|
});
|
||||||
try!(net_service.io().register_handler(client_io));
|
try!(net_service.io().register_handler(client_io));
|
||||||
@ -30,6 +43,7 @@ impl ClientService {
|
|||||||
Ok(ClientService {
|
Ok(ClientService {
|
||||||
net_service: net_service,
|
net_service: net_service,
|
||||||
client: client,
|
client: client,
|
||||||
|
sync: sync,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -39,25 +53,45 @@ impl ClientService {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub fn client(&self) -> Arc<RwLock<Client>> {
|
pub fn client(&self) -> Arc<Client> {
|
||||||
self.client.clone()
|
self.client.clone()
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get shared sync handler
|
||||||
|
pub fn sync(&self) -> Arc<EthSync> {
|
||||||
|
self.sync.clone()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// IO interface for the Client handler
|
/// IO interface for the Client handler
|
||||||
struct ClientIoHandler {
|
struct ClientIoHandler {
|
||||||
client: Arc<RwLock<Client>>
|
client: Arc<Client>
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const CLIENT_TICK_TIMER: TimerToken = 0;
|
||||||
|
const CLIENT_TICK_MS: u64 = 5000;
|
||||||
|
|
||||||
impl IoHandler<NetSyncMessage> for ClientIoHandler {
|
impl IoHandler<NetSyncMessage> for ClientIoHandler {
|
||||||
fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>) {
|
fn initialize(&self, io: &IoContext<NetSyncMessage>) {
|
||||||
|
io.register_timer(CLIENT_TICK_TIMER, CLIENT_TICK_MS).expect("Error registering client timer");
|
||||||
|
}
|
||||||
|
|
||||||
|
fn timeout(&self, _io: &IoContext<NetSyncMessage>, timer: TimerToken) {
|
||||||
|
if timer == CLIENT_TICK_TIMER {
|
||||||
|
self.client.tick();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(match_ref_pats)]
|
#[allow(match_ref_pats)]
|
||||||
fn message<'s>(&'s mut self, _io: &mut IoContext<'s, NetSyncMessage>, net_message: &'s mut NetSyncMessage) {
|
#[allow(single_match)]
|
||||||
if let &mut UserMessage(ref mut message) = net_message {
|
fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
|
||||||
if let &mut SyncMessage::BlockVerified= message {
|
if let &UserMessage(ref message) = net_message {
|
||||||
self.client.write().unwrap().import_verified_blocks();
|
match message {
|
||||||
|
&SyncMessage::BlockVerified => {
|
||||||
|
self.client.import_verified_blocks(&io.channel());
|
||||||
|
},
|
||||||
|
_ => {}, // ignore other messages
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -107,6 +107,10 @@ pub struct SyncStatus {
|
|||||||
pub blocks_total: usize,
|
pub blocks_total: usize,
|
||||||
/// Number of blocks downloaded so far.
|
/// Number of blocks downloaded so far.
|
||||||
pub blocks_received: usize,
|
pub blocks_received: usize,
|
||||||
|
/// Total number of connected peers
|
||||||
|
pub num_peers: usize,
|
||||||
|
/// Total number of active peers
|
||||||
|
pub num_active_peers: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Debug)]
|
#[derive(PartialEq, Eq, Debug)]
|
||||||
@ -195,8 +199,10 @@ impl ChainSync {
|
|||||||
start_block_number: self.starting_block,
|
start_block_number: self.starting_block,
|
||||||
last_imported_block_number: self.last_imported_block,
|
last_imported_block_number: self.last_imported_block,
|
||||||
highest_block_number: self.highest_block,
|
highest_block_number: self.highest_block,
|
||||||
blocks_total: (self.last_imported_block - self.starting_block) as usize,
|
blocks_received: (self.last_imported_block - self.starting_block) as usize,
|
||||||
blocks_received: (self.highest_block - self.starting_block) as usize,
|
blocks_total: (self.highest_block - self.starting_block) as usize,
|
||||||
|
num_peers: self.peers.len(),
|
||||||
|
num_active_peers: self.peers.values().filter(|p| p.asking != PeerAsking::Nothing).count(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -425,6 +431,10 @@ impl ChainSync {
|
|||||||
let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
|
let peer_difficulty = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer").difficulty;
|
||||||
if difficulty > peer_difficulty {
|
if difficulty > peer_difficulty {
|
||||||
trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
|
trace!(target: "sync", "Received block {:?} with no known parent. Peer needs syncing...", h);
|
||||||
|
{
|
||||||
|
let peer = self.peers.get_mut(&peer_id).expect("ChainSync: unknown peer");
|
||||||
|
peer.latest = header_view.sha3();
|
||||||
|
}
|
||||||
self.sync_peer(io, peer_id, true);
|
self.sync_peer(io, peer_id, true);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -541,7 +551,7 @@ impl ChainSync {
|
|||||||
fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) {
|
fn request_blocks(&mut self, io: &mut SyncIo, peer_id: PeerId) {
|
||||||
self.clear_peer_download(peer_id);
|
self.clear_peer_download(peer_id);
|
||||||
|
|
||||||
if io.chain().queue_status().full {
|
if io.chain().queue_info().full {
|
||||||
self.pause_sync();
|
self.pause_sync();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@ -950,7 +960,7 @@ impl ChainSync {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Maintain other peers. Send out any new blocks and transactions
|
/// Maintain other peers. Send out any new blocks and transactions
|
||||||
pub fn maintain_sync(&mut self, _io: &mut SyncIo) {
|
pub fn _maintain_sync(&mut self, _io: &mut SyncIo) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,7 +1,7 @@
|
|||||||
use client::BlockChainClient;
|
use client::BlockChainClient;
|
||||||
use util::{NetworkContext, PeerId, PacketId,};
|
use util::{NetworkContext, PeerId, PacketId,};
|
||||||
use util::error::UtilError;
|
use util::error::UtilError;
|
||||||
use sync::SyncMessage;
|
use service::SyncMessage;
|
||||||
|
|
||||||
/// IO interface for the syncing handler.
|
/// IO interface for the syncing handler.
|
||||||
/// Provides peer connection management and an interface to the blockchain client.
|
/// Provides peer connection management and an interface to the blockchain client.
|
||||||
@ -14,7 +14,7 @@ pub trait SyncIo {
|
|||||||
/// Send a packet to a peer.
|
/// Send a packet to a peer.
|
||||||
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
|
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError>;
|
||||||
/// Get the blockchain
|
/// Get the blockchain
|
||||||
fn chain(&mut self) -> &mut BlockChainClient;
|
fn chain(&self) -> &BlockChainClient;
|
||||||
/// Returns peer client identifier string
|
/// Returns peer client identifier string
|
||||||
fn peer_info(&self, peer_id: PeerId) -> String {
|
fn peer_info(&self, peer_id: PeerId) -> String {
|
||||||
peer_id.to_string()
|
peer_id.to_string()
|
||||||
@ -22,14 +22,14 @@ pub trait SyncIo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Wraps `NetworkContext` and the blockchain client
|
/// Wraps `NetworkContext` and the blockchain client
|
||||||
pub struct NetSyncIo<'s, 'h, 'io> where 'h: 's, 'io: 'h {
|
pub struct NetSyncIo<'s, 'h> where 'h: 's {
|
||||||
network: &'s mut NetworkContext<'h, 'io, SyncMessage>,
|
network: &'s NetworkContext<'h, SyncMessage>,
|
||||||
chain: &'s mut BlockChainClient
|
chain: &'s BlockChainClient
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> {
|
impl<'s, 'h> NetSyncIo<'s, 'h> {
|
||||||
/// Creates a new instance from the `NetworkContext` and the blockchain client reference.
|
/// Creates a new instance from the `NetworkContext` and the blockchain client reference.
|
||||||
pub fn new(network: &'s mut NetworkContext<'h, 'io, SyncMessage>, chain: &'s mut BlockChainClient) -> NetSyncIo<'s,'h,'io> {
|
pub fn new(network: &'s NetworkContext<'h, SyncMessage>, chain: &'s BlockChainClient) -> NetSyncIo<'s, 'h> {
|
||||||
NetSyncIo {
|
NetSyncIo {
|
||||||
network: network,
|
network: network,
|
||||||
chain: chain,
|
chain: chain,
|
||||||
@ -37,7 +37,7 @@ impl<'s, 'h, 'io> NetSyncIo<'s, 'h, 'io> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> {
|
impl<'s, 'h> SyncIo for NetSyncIo<'s, 'h> {
|
||||||
fn disable_peer(&mut self, peer_id: PeerId) {
|
fn disable_peer(&mut self, peer_id: PeerId) {
|
||||||
self.network.disable_peer(peer_id);
|
self.network.disable_peer(peer_id);
|
||||||
}
|
}
|
||||||
@ -50,7 +50,7 @@ impl<'s, 'h, 'op> SyncIo for NetSyncIo<'s, 'h, 'op> {
|
|||||||
self.network.send(peer_id, packet_id, data)
|
self.network.send(peer_id, packet_id, data)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn chain(&mut self) -> &mut BlockChainClient {
|
fn chain(&self) -> &BlockChainClient {
|
||||||
self.chain
|
self.chain
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -25,10 +25,9 @@
|
|||||||
use std::ops::*;
|
use std::ops::*;
|
||||||
use std::sync::*;
|
use std::sync::*;
|
||||||
use client::Client;
|
use client::Client;
|
||||||
use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkIoMessage};
|
use util::network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId};
|
||||||
use util::TimerToken;
|
|
||||||
use util::Bytes;
|
|
||||||
use sync::chain::ChainSync;
|
use sync::chain::ChainSync;
|
||||||
|
use service::SyncMessage;
|
||||||
use sync::io::NetSyncIo;
|
use sync::io::NetSyncIo;
|
||||||
|
|
||||||
mod chain;
|
mod chain;
|
||||||
@ -38,76 +37,57 @@ mod range_collection;
|
|||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests;
|
mod tests;
|
||||||
|
|
||||||
/// Message type for external events
|
|
||||||
pub enum SyncMessage {
|
|
||||||
/// New block has been imported into the blockchain
|
|
||||||
NewChainBlock(Bytes),
|
|
||||||
/// A block is ready
|
|
||||||
BlockVerified,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO [arkpar] Please document me
|
|
||||||
pub type NetSyncMessage = NetworkIoMessage<SyncMessage>;
|
|
||||||
|
|
||||||
/// Ethereum network protocol handler
|
/// Ethereum network protocol handler
|
||||||
pub struct EthSync {
|
pub struct EthSync {
|
||||||
/// Shared blockchain client. TODO: this should eventually become an IPC endpoint
|
/// Shared blockchain client. TODO: this should eventually become an IPC endpoint
|
||||||
chain: Arc<RwLock<Client>>,
|
chain: Arc<Client>,
|
||||||
/// Sync strategy
|
/// Sync strategy
|
||||||
sync: ChainSync
|
sync: RwLock<ChainSync>
|
||||||
}
|
}
|
||||||
|
|
||||||
pub use self::chain::SyncStatus;
|
pub use self::chain::SyncStatus;
|
||||||
|
|
||||||
impl EthSync {
|
impl EthSync {
|
||||||
/// Creates and registers the protocol with the network service
|
/// Creates and registers the protocol with the network service
|
||||||
pub fn register(service: &mut NetworkService<SyncMessage>, chain: Arc<RwLock<Client>>) {
|
pub fn register(service: &mut NetworkService<SyncMessage>, chain: Arc<Client>) -> Arc<EthSync> {
|
||||||
let sync = Box::new(EthSync {
|
let sync = Arc::new(EthSync {
|
||||||
chain: chain,
|
chain: chain,
|
||||||
sync: ChainSync::new(),
|
sync: RwLock::new(ChainSync::new()),
|
||||||
});
|
});
|
||||||
service.register_protocol(sync, "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler");
|
service.register_protocol(sync.clone(), "eth", &[62u8, 63u8]).expect("Error registering eth protocol handler");
|
||||||
|
sync
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get sync status
|
/// Get sync status
|
||||||
pub fn status(&self) -> SyncStatus {
|
pub fn status(&self) -> SyncStatus {
|
||||||
self.sync.status()
|
self.sync.read().unwrap().status()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Stop sync
|
/// Stop sync
|
||||||
pub fn stop(&mut self, io: &mut NetworkContext<SyncMessage>) {
|
pub fn stop(&mut self, io: &mut NetworkContext<SyncMessage>) {
|
||||||
self.sync.abort(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()));
|
self.sync.write().unwrap().abort(&mut NetSyncIo::new(io, self.chain.deref()));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Restart sync
|
/// Restart sync
|
||||||
pub fn restart(&mut self, io: &mut NetworkContext<SyncMessage>) {
|
pub fn restart(&mut self, io: &mut NetworkContext<SyncMessage>) {
|
||||||
self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()));
|
self.sync.write().unwrap().restart(&mut NetSyncIo::new(io, self.chain.deref()));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl NetworkProtocolHandler<SyncMessage> for EthSync {
|
impl NetworkProtocolHandler<SyncMessage> for EthSync {
|
||||||
fn initialize(&mut self, io: &mut NetworkContext<SyncMessage>) {
|
fn initialize(&self, _io: &NetworkContext<SyncMessage>) {
|
||||||
self.sync.restart(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()));
|
|
||||||
io.register_timer(1000).unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn read(&mut self, io: &mut NetworkContext<SyncMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
|
fn read(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
|
||||||
self.sync.on_packet(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()) , *peer, packet_id, data);
|
self.sync.write().unwrap().on_packet(&mut NetSyncIo::new(io, self.chain.deref()) , *peer, packet_id, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn connected(&mut self, io: &mut NetworkContext<SyncMessage>, peer: &PeerId) {
|
fn connected(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId) {
|
||||||
self.sync.on_peer_connected(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer);
|
self.sync.write().unwrap().on_peer_connected(&mut NetSyncIo::new(io, self.chain.deref()), *peer);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn disconnected(&mut self, io: &mut NetworkContext<SyncMessage>, peer: &PeerId) {
|
fn disconnected(&self, io: &NetworkContext<SyncMessage>, peer: &PeerId) {
|
||||||
self.sync.on_peer_aborting(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()), *peer);
|
self.sync.write().unwrap().on_peer_aborting(&mut NetSyncIo::new(io, self.chain.deref()), *peer);
|
||||||
}
|
|
||||||
|
|
||||||
fn timeout(&mut self, io: &mut NetworkContext<SyncMessage>, _timer: TimerToken) {
|
|
||||||
self.sync.maintain_sync(&mut NetSyncIo::new(io, self.chain.write().unwrap().deref_mut()));
|
|
||||||
}
|
|
||||||
|
|
||||||
fn message(&mut self, _io: &mut NetworkContext<SyncMessage>, _message: &SyncMessage) {
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
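Above, `EthSync::register` now builds the handler as an `Arc`, hands a clone to `register_protocol`, and returns the same `Arc` so other components can keep polling `status()`; the per-protocol state moves into `RwLock<ChainSync>` because the handler methods now take `&self`. A toy sketch of that shared-handler shape; all names below are illustrative stand-ins, not the real network API:

    use std::sync::{Arc, RwLock};

    struct EthSyncLike {
        sync: RwLock<u32>, // stands in for RwLock<ChainSync>
    }

    impl EthSyncLike {
        fn register(handlers: &mut Vec<Arc<EthSyncLike>>) -> Arc<EthSyncLike> {
            let sync = Arc::new(EthSyncLike { sync: RwLock::new(0) });
            handlers.push(sync.clone()); // what register_protocol(sync.clone(), ..) does
            sync                         // returned so callers can keep a handle
        }

        fn on_packet(&self) {
            *self.sync.write().unwrap() += 1; // handler methods take &self now
        }

        fn status(&self) -> u32 {
            *self.sync.read().unwrap()
        }
    }

    fn main() {
        let mut service: Vec<Arc<EthSyncLike>> = Vec::new();
        let sync = EthSyncLike::register(&mut service);
        service[0].on_packet();
        assert_eq!(sync.status(), 1); // the caller sees updates through its Arc
    }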
@ -1,38 +1,40 @@
|
|||||||
use util::*;
|
use util::*;
|
||||||
use client::{BlockChainClient, BlockStatus, TreeRoute, BlockQueueStatus, BlockChainInfo};
|
use client::{BlockChainClient, BlockStatus, TreeRoute, BlockChainInfo};
|
||||||
|
use block_queue::BlockQueueInfo;
|
||||||
use header::{Header as BlockHeader, BlockNumber};
|
use header::{Header as BlockHeader, BlockNumber};
|
||||||
use error::*;
|
use error::*;
|
||||||
use sync::io::SyncIo;
|
use sync::io::SyncIo;
|
||||||
use sync::chain::ChainSync;
|
use sync::chain::ChainSync;
|
||||||
|
|
||||||
struct TestBlockChainClient {
|
struct TestBlockChainClient {
|
||||||
blocks: HashMap<H256, Bytes>,
|
blocks: RwLock<HashMap<H256, Bytes>>,
|
||||||
numbers: HashMap<usize, H256>,
|
numbers: RwLock<HashMap<usize, H256>>,
|
||||||
genesis_hash: H256,
|
genesis_hash: H256,
|
||||||
last_hash: H256,
|
last_hash: RwLock<H256>,
|
||||||
difficulty: U256
|
difficulty: RwLock<U256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TestBlockChainClient {
|
impl TestBlockChainClient {
|
||||||
fn new() -> TestBlockChainClient {
|
fn new() -> TestBlockChainClient {
|
||||||
|
|
||||||
let mut client = TestBlockChainClient {
|
let mut client = TestBlockChainClient {
|
||||||
blocks: HashMap::new(),
|
blocks: RwLock::new(HashMap::new()),
|
||||||
numbers: HashMap::new(),
|
numbers: RwLock::new(HashMap::new()),
|
||||||
genesis_hash: H256::new(),
|
genesis_hash: H256::new(),
|
||||||
last_hash: H256::new(),
|
last_hash: RwLock::new(H256::new()),
|
||||||
difficulty: From::from(0),
|
difficulty: RwLock::new(From::from(0)),
|
||||||
};
|
};
|
||||||
client.add_blocks(1, true); // add genesis block
|
client.add_blocks(1, true); // add genesis block
|
||||||
client.genesis_hash = client.last_hash.clone();
|
client.genesis_hash = client.last_hash.read().unwrap().clone();
|
||||||
client
|
client
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn add_blocks(&mut self, count: usize, empty: bool) {
|
pub fn add_blocks(&mut self, count: usize, empty: bool) {
|
||||||
for n in self.numbers.len()..(self.numbers.len() + count) {
|
let len = self.numbers.read().unwrap().len();
|
||||||
|
for n in len..(len + count) {
|
||||||
let mut header = BlockHeader::new();
|
let mut header = BlockHeader::new();
|
||||||
header.difficulty = From::from(n);
|
header.difficulty = From::from(n);
|
||||||
header.parent_hash = self.last_hash.clone();
|
header.parent_hash = self.last_hash.read().unwrap().clone();
|
||||||
header.number = n as BlockNumber;
|
header.number = n as BlockNumber;
|
||||||
let mut uncles = RlpStream::new_list(if empty {0} else {1});
|
let mut uncles = RlpStream::new_list(if empty {0} else {1});
|
||||||
if !empty {
|
if !empty {
|
||||||
@ -50,12 +52,12 @@ impl TestBlockChainClient {
|
|||||||
|
|
||||||
impl BlockChainClient for TestBlockChainClient {
|
impl BlockChainClient for TestBlockChainClient {
|
||||||
fn block_header(&self, h: &H256) -> Option<Bytes> {
|
fn block_header(&self, h: &H256) -> Option<Bytes> {
|
||||||
self.blocks.get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec())
|
self.blocks.read().unwrap().get(h).map(|r| Rlp::new(r).at(0).as_raw().to_vec())
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_body(&self, h: &H256) -> Option<Bytes> {
|
fn block_body(&self, h: &H256) -> Option<Bytes> {
|
||||||
self.blocks.get(h).map(|r| {
|
self.blocks.read().unwrap().get(h).map(|r| {
|
||||||
let mut stream = RlpStream::new_list(2);
|
let mut stream = RlpStream::new_list(2);
|
||||||
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
|
stream.append_raw(Rlp::new(&r).at(1).as_raw(), 1);
|
||||||
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
|
stream.append_raw(Rlp::new(&r).at(2).as_raw(), 1);
|
||||||
@ -64,30 +66,30 @@ impl BlockChainClient for TestBlockChainClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn block(&self, h: &H256) -> Option<Bytes> {
|
fn block(&self, h: &H256) -> Option<Bytes> {
|
||||||
self.blocks.get(h).cloned()
|
self.blocks.read().unwrap().get(h).cloned()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_status(&self, h: &H256) -> BlockStatus {
|
fn block_status(&self, h: &H256) -> BlockStatus {
|
||||||
match self.blocks.get(h) {
|
match self.blocks.read().unwrap().get(h) {
|
||||||
Some(_) => BlockStatus::InChain,
|
Some(_) => BlockStatus::InChain,
|
||||||
None => BlockStatus::Unknown
|
None => BlockStatus::Unknown
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_header_at(&self, n: BlockNumber) -> Option<Bytes> {
|
fn block_header_at(&self, n: BlockNumber) -> Option<Bytes> {
|
||||||
self.numbers.get(&(n as usize)).and_then(|h| self.block_header(h))
|
self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_header(h))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_body_at(&self, n: BlockNumber) -> Option<Bytes> {
|
fn block_body_at(&self, n: BlockNumber) -> Option<Bytes> {
|
||||||
self.numbers.get(&(n as usize)).and_then(|h| self.block_body(h))
|
self.numbers.read().unwrap().get(&(n as usize)).and_then(|h| self.block_body(h))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_at(&self, n: BlockNumber) -> Option<Bytes> {
|
fn block_at(&self, n: BlockNumber) -> Option<Bytes> {
|
||||||
self.numbers.get(&(n as usize)).map(|h| self.blocks.get(h).unwrap().clone())
|
self.numbers.read().unwrap().get(&(n as usize)).map(|h| self.blocks.read().unwrap().get(h).unwrap().clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_status_at(&self, n: BlockNumber) -> BlockStatus {
|
fn block_status_at(&self, n: BlockNumber) -> BlockStatus {
|
||||||
if (n as usize) < self.blocks.len() {
|
if (n as usize) < self.blocks.read().unwrap().len() {
|
||||||
BlockStatus::InChain
|
BlockStatus::InChain
|
||||||
} else {
|
} else {
|
||||||
BlockStatus::Unknown
|
BlockStatus::Unknown
|
||||||
@ -110,14 +112,14 @@ impl BlockChainClient for TestBlockChainClient {
|
|||||||
None
|
None
|
||||||
}
|
}
|
||||||
|
|
||||||
fn import_block(&mut self, b: Bytes) -> ImportResult {
|
fn import_block(&self, b: Bytes) -> ImportResult {
|
||||||
let header = Rlp::new(&b).val_at::<BlockHeader>(0);
|
let header = Rlp::new(&b).val_at::<BlockHeader>(0);
|
||||||
let number: usize = header.number as usize;
|
let number: usize = header.number as usize;
|
||||||
if number > self.blocks.len() {
|
if number > self.blocks.read().unwrap().len() {
|
||||||
panic!("Unexpected block number. Expected {}, got {}", self.blocks.len(), number);
|
panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().unwrap().len(), number);
|
||||||
}
|
}
|
||||||
if number > 0 {
|
if number > 0 {
|
||||||
match self.blocks.get(&header.parent_hash) {
|
match self.blocks.read().unwrap().get(&header.parent_hash) {
|
||||||
Some(parent) => {
|
Some(parent) => {
|
||||||
let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
|
let parent = Rlp::new(parent).val_at::<BlockHeader>(0);
|
||||||
if parent.number != (header.number - 1) {
|
if parent.number != (header.number - 1) {
|
||||||
@ -129,43 +131,44 @@ impl BlockChainClient for TestBlockChainClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if number == self.numbers.len() {
|
if number == self.numbers.read().unwrap().len() {
|
||||||
self.difficulty = self.difficulty + header.difficulty;
|
*self.difficulty.write().unwrap().deref_mut() += header.difficulty;
|
||||||
self.last_hash = header.hash();
|
mem::replace(self.last_hash.write().unwrap().deref_mut(), header.hash());
|
||||||
self.blocks.insert(header.hash(), b);
|
self.blocks.write().unwrap().insert(header.hash(), b);
|
||||||
self.numbers.insert(number, header.hash());
|
self.numbers.write().unwrap().insert(number, header.hash());
|
||||||
let mut parent_hash = header.parent_hash;
|
let mut parent_hash = header.parent_hash;
|
||||||
if number > 0 {
|
if number > 0 {
|
||||||
let mut n = number - 1;
|
let mut n = number - 1;
|
||||||
while n > 0 && self.numbers[&n] != parent_hash {
|
while n > 0 && self.numbers.read().unwrap()[&n] != parent_hash {
|
||||||
*self.numbers.get_mut(&n).unwrap() = parent_hash.clone();
|
*self.numbers.write().unwrap().get_mut(&n).unwrap() = parent_hash.clone();
|
||||||
n -= 1;
|
n -= 1;
|
||||||
parent_hash = Rlp::new(&self.blocks[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
|
parent_hash = Rlp::new(&self.blocks.read().unwrap()[&parent_hash]).val_at::<BlockHeader>(0).parent_hash;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
self.blocks.insert(header.hash(), b.to_vec());
|
self.blocks.write().unwrap().insert(header.hash(), b.to_vec());
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queue_status(&self) -> BlockQueueStatus {
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
BlockQueueStatus {
|
BlockQueueInfo {
|
||||||
full: false,
|
full: false,
|
||||||
|
queue_size: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn clear_queue(&mut self) {
|
fn clear_queue(&self) {
|
||||||
}
|
}
|
||||||
|
|
||||||
fn chain_info(&self) -> BlockChainInfo {
|
fn chain_info(&self) -> BlockChainInfo {
|
||||||
BlockChainInfo {
|
BlockChainInfo {
|
||||||
total_difficulty: self.difficulty,
|
total_difficulty: *self.difficulty.read().unwrap(),
|
||||||
pending_total_difficulty: self.difficulty,
|
pending_total_difficulty: *self.difficulty.read().unwrap(),
|
||||||
genesis_hash: self.genesis_hash.clone(),
|
genesis_hash: self.genesis_hash.clone(),
|
||||||
best_block_hash: self.last_hash.clone(),
|
best_block_hash: self.last_hash.read().unwrap().clone(),
|
||||||
best_block_number: self.blocks.len() as BlockNumber - 1,
|
best_block_number: self.blocks.read().unwrap().len() as BlockNumber - 1,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -208,7 +211,7 @@ impl<'p> SyncIo for TestIo<'p> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn chain(&mut self) -> &mut BlockChainClient {
|
fn chain(&self) -> &BlockChainClient {
|
||||||
self.chain
|
self.chain
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -272,7 +275,7 @@ impl TestNet {
|
|||||||
trace!("----------------");
|
trace!("----------------");
|
||||||
}
|
}
|
||||||
let mut p = self.peers.get_mut(peer).unwrap();
|
let mut p = self.peers.get_mut(peer).unwrap();
|
||||||
p.sync.maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None));
|
p.sync._maintain_sync(&mut TestIo::new(&mut p.chain, &mut p.queue, None));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -297,7 +300,7 @@ fn full_sync_two_peers() {
|
|||||||
net.peer_mut(2).chain.add_blocks(1000, false);
|
net.peer_mut(2).chain.add_blocks(1000, false);
|
||||||
net.sync();
|
net.sync();
|
||||||
assert!(net.peer(0).chain.block_at(1000).is_some());
|
assert!(net.peer(0).chain.block_at(1000).is_some());
|
||||||
assert_eq!(net.peer(0).chain.blocks, net.peer(1).chain.blocks);
|
assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -310,7 +313,7 @@ fn full_sync_empty_blocks() {
|
|||||||
}
|
}
|
||||||
net.sync();
|
net.sync();
|
||||||
assert!(net.peer(0).chain.block_at(1000).is_some());
|
assert!(net.peer(0).chain.block_at(1000).is_some());
|
||||||
assert_eq!(net.peer(0).chain.blocks, net.peer(1).chain.blocks);
|
assert_eq!(net.peer(0).chain.blocks.read().unwrap().deref(), net.peer(1).chain.blocks.read().unwrap().deref());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -326,9 +329,9 @@ fn forked_sync() {
|
|||||||
net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2
|
net.peer_mut(1).chain.add_blocks(100, false); //fork between 1 and 2
|
||||||
net.peer_mut(2).chain.add_blocks(10, true);
|
net.peer_mut(2).chain.add_blocks(10, true);
|
||||||
// peer 1 has the best chain of 601 blocks
|
// peer 1 has the best chain of 601 blocks
|
||||||
let peer1_chain = net.peer(1).chain.numbers.clone();
|
let peer1_chain = net.peer(1).chain.numbers.read().unwrap().clone();
|
||||||
net.sync();
|
net.sync();
|
||||||
assert_eq!(net.peer(0).chain.numbers, peer1_chain);
|
assert_eq!(net.peer(0).chain.numbers.read().unwrap().deref(), &peer1_chain);
|
||||||
assert_eq!(net.peer(1).chain.numbers, peer1_chain);
|
assert_eq!(net.peer(1).chain.numbers.read().unwrap().deref(), &peer1_chain);
|
||||||
assert_eq!(net.peer(2).chain.numbers, peer1_chain);
|
assert_eq!(net.peer(2).chain.numbers.read().unwrap().deref(), &peer1_chain);
|
||||||
}
|
}
|
||||||
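
The test assertions above now compare `blocks.read().unwrap().deref()` rather than the maps directly, because the test client's shared state has moved behind `RwLock`s so it can be used from `&self` across threads. A minimal, self-contained sketch of that pattern; `TestClient` here is an illustrative stand-in, not the actual `TestBlockChainClient`:

    use std::collections::HashMap;
    use std::ops::Deref;
    use std::sync::RwLock;

    // Hypothetical stand-in for the test client: interior mutability lets
    // `&self` methods mutate state that is shared between threads.
    struct TestClient {
        blocks: RwLock<HashMap<u64, Vec<u8>>>,
    }

    impl TestClient {
        fn new() -> TestClient {
            TestClient { blocks: RwLock::new(HashMap::new()) }
        }

        // Writers take the write lock for the shortest possible scope.
        fn insert_block(&self, number: u64, body: Vec<u8>) {
            self.blocks.write().unwrap().insert(number, body);
        }
    }

    fn main() {
        let a = TestClient::new();
        let b = TestClient::new();
        a.insert_block(1, vec![0xde, 0xad]);
        b.insert_block(1, vec![0xde, 0xad]);
        // Compare the guarded maps the same way the tests above do:
        // lock for reading, then compare the dereferenced contents.
        assert_eq!(a.blocks.read().unwrap().deref(),
                   b.blocks.read().unwrap().deref());
        println!("maps match");
    }
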
|
@ -22,6 +22,7 @@ rust-crypto = "0.2.34"
|
|||||||
elastic-array = "0.4"
|
elastic-array = "0.4"
|
||||||
heapsize = "0.2"
|
heapsize = "0.2"
|
||||||
itertools = "0.4"
|
itertools = "0.4"
|
||||||
|
crossbeam = "0.2"
|
||||||
slab = { git = "https://github.com/arkpar/slab.git" }
|
slab = { git = "https://github.com/arkpar/slab.git" }
|
||||||
sha3 = { path = "sha3" }
|
sha3 = { path = "sha3" }
|
||||||
clippy = "*" # Always newest, since we use nightly
|
clippy = "*" # Always newest, since we use nightly
|
||||||
|
@ -8,27 +8,28 @@
|
|||||||
///
|
///
|
||||||
/// struct MyHandler;
|
/// struct MyHandler;
|
||||||
///
|
///
|
||||||
|
/// #[derive(Clone)]
|
||||||
/// struct MyMessage {
|
/// struct MyMessage {
|
||||||
/// data: u32
|
/// data: u32
|
||||||
/// }
|
/// }
|
||||||
///
|
///
|
||||||
/// impl IoHandler<MyMessage> for MyHandler {
|
/// impl IoHandler<MyMessage> for MyHandler {
|
||||||
/// fn initialize(&mut self, io: &mut IoContext<MyMessage>) {
|
/// fn initialize(&self, io: &IoContext<MyMessage>) {
|
||||||
/// io.register_timer(1000).unwrap();
|
/// io.register_timer(0, 1000).unwrap();
|
||||||
/// }
|
/// }
|
||||||
///
|
///
|
||||||
/// fn timeout(&mut self, _io: &mut IoContext<MyMessage>, timer: TimerToken) {
|
/// fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
|
||||||
/// println!("Timeout {}", timer);
|
/// println!("Timeout {}", timer);
|
||||||
/// }
|
/// }
|
||||||
///
|
///
|
||||||
/// fn message(&mut self, _io: &mut IoContext<MyMessage>, message: &mut MyMessage) {
|
/// fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
|
||||||
/// println!("Message {}", message.data);
|
/// println!("Message {}", message.data);
|
||||||
/// }
|
/// }
|
||||||
/// }
|
/// }
|
||||||
///
|
///
|
||||||
/// fn main () {
|
/// fn main () {
|
||||||
/// let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
|
/// let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
|
||||||
/// service.register_handler(Box::new(MyHandler)).unwrap();
|
/// service.register_handler(Arc::new(MyHandler)).unwrap();
|
||||||
///
|
///
|
||||||
/// // Wait for quit condition
|
/// // Wait for quit condition
|
||||||
/// // ...
|
/// // ...
|
||||||
@ -36,6 +37,9 @@
|
|||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
mod service;
|
mod service;
|
||||||
|
mod worker;
|
||||||
|
|
||||||
|
use mio::{EventLoop, Token};
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
@ -44,7 +48,7 @@ pub enum IoError {
|
|||||||
Mio(::std::io::Error),
|
Mio(::std::io::Error),
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send {
|
impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError where Message: Send + Clone {
|
||||||
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
|
fn from(_err: ::mio::NotifyError<service::IoMessage<Message>>) -> IoError {
|
||||||
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
|
IoError::Mio(::std::io::Error::new(::std::io::ErrorKind::ConnectionAborted, "Network IO notification error"))
|
||||||
}
|
}
|
||||||
@ -53,54 +57,63 @@ impl<Message> From<::mio::NotifyError<service::IoMessage<Message>>> for IoError
|
|||||||
/// Generic IO handler.
|
/// Generic IO handler.
|
||||||
/// All the handler functions are called from within the IO event loop.
|
/// All the handler functions are called from within the IO event loop.
|
||||||
/// `Message` type is used as notification data
|
/// `Message` type is used as notification data
|
||||||
pub trait IoHandler<Message>: Send where Message: Send + 'static {
|
pub trait IoHandler<Message>: Send + Sync where Message: Send + Sync + Clone + 'static {
|
||||||
/// Initialize the handler
|
/// Initialize the handler
|
||||||
fn initialize<'s>(&'s mut self, _io: &mut IoContext<'s, Message>) {}
|
fn initialize(&self, _io: &IoContext<Message>) {}
|
||||||
/// Timer function called after a timeout created with `HandlerIo::timeout`.
|
/// Timer function called after a timeout created with `HandlerIo::timeout`.
|
||||||
fn timeout<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _timer: TimerToken) {}
|
fn timeout(&self, _io: &IoContext<Message>, _timer: TimerToken) {}
|
||||||
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
|
/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
|
||||||
fn message<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _message: &'s mut Message) {} // TODO: make message immutable and provide internal channel for adding network handler
|
fn message(&self, _io: &IoContext<Message>, _message: &Message) {}
|
||||||
/// Called when an IO stream gets closed
|
/// Called when an IO stream gets closed
|
||||||
fn stream_hup<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
|
fn stream_hup(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
|
||||||
/// Called when an IO stream can be read from
|
/// Called when an IO stream can be read from
|
||||||
fn stream_readable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
|
fn stream_readable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
|
||||||
/// Called when an IO stream can be written to
|
/// Called when an IO stream can be written to
|
||||||
fn stream_writable<'s>(&'s mut self, _io: &mut IoContext<'s, Message>, _stream: StreamToken) {}
|
fn stream_writable(&self, _io: &IoContext<Message>, _stream: StreamToken) {}
|
||||||
|
/// Register a new stream with the event loop
|
||||||
|
fn register_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
|
||||||
|
/// Re-register a stream with the event loop
|
||||||
|
fn update_stream(&self, _stream: StreamToken, _reg: Token, _event_loop: &mut EventLoop<IoManager<Message>>) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type TimerToken = service::TimerToken;
|
pub use io::service::TimerToken;
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type StreamToken = service::StreamToken;
|
pub use io::service::StreamToken;
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type IoContext<'s, M> = service::IoContext<'s, M>;
|
pub use io::service::IoContext;
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type IoService<M> = service::IoService<M>;
|
pub use io::service::IoService;
|
||||||
/// TODO [arkpar] Please document me
|
/// TODO [arkpar] Please document me
|
||||||
pub type IoChannel<M> = service::IoChannel<M>;
|
pub use io::service::IoChannel;
|
||||||
//pub const USER_TOKEN_START: usize = service::USER_TOKEN; // TODO: ICE in rustc 1.7.0-nightly (49c382779 2016-01-12)
|
/// TODO [arkpar] Please document me
|
||||||
|
pub use io::service::IoManager;
|
||||||
|
/// TODO [arkpar] Please document me
|
||||||
|
pub use io::service::TOKENS_PER_HANDLER;
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
|
|
||||||
|
use std::sync::Arc;
|
||||||
use io::*;
|
use io::*;
|
||||||
|
|
||||||
struct MyHandler;
|
struct MyHandler;
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
struct MyMessage {
|
struct MyMessage {
|
||||||
data: u32
|
data: u32
|
||||||
}
|
}
|
||||||
|
|
||||||
impl IoHandler<MyMessage> for MyHandler {
|
impl IoHandler<MyMessage> for MyHandler {
|
||||||
fn initialize(&mut self, io: &mut IoContext<MyMessage>) {
|
fn initialize(&self, io: &IoContext<MyMessage>) {
|
||||||
io.register_timer(1000).unwrap();
|
io.register_timer(0, 1000).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn timeout(&mut self, _io: &mut IoContext<MyMessage>, timer: TimerToken) {
|
fn timeout(&self, _io: &IoContext<MyMessage>, timer: TimerToken) {
|
||||||
println!("Timeout {}", timer);
|
println!("Timeout {}", timer);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn message(&mut self, _io: &mut IoContext<MyMessage>, message: &mut MyMessage) {
|
fn message(&self, _io: &IoContext<MyMessage>, message: &MyMessage) {
|
||||||
println!("Message {}", message.data);
|
println!("Message {}", message.data);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -108,7 +121,7 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn test_service_register_handler () {
|
fn test_service_register_handler () {
|
||||||
let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
|
let mut service = IoService::<MyMessage>::start().expect("Error creating network service");
|
||||||
service.register_handler(Box::new(MyHandler)).unwrap();
|
service.register_handler(Arc::new(MyHandler)).unwrap();
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
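
The `IoHandler` trait above switches every callback from `&mut self` to `&self`, and handlers are now registered as shared `Arc`s, so a handler that keeps state needs interior mutability. A hedged sketch of the idea using a stand-alone trait that only mirrors the new shape; `MiniHandler`, `CountingHandler` and its field are illustrative names, not part of the crate:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    // Mirrors the shape of the new callbacks: shared reference, Send + Sync.
    trait MiniHandler: Send + Sync {
        fn timeout(&self, timer: usize);
    }

    struct CountingHandler {
        // Interior mutability: state can change behind `&self`.
        counter: AtomicUsize,
    }

    impl MiniHandler for CountingHandler {
        fn timeout(&self, timer: usize) {
            let n = self.counter.fetch_add(1, Ordering::SeqCst) + 1;
            println!("timer {} fired, {} timeouts handled so far", timer, n);
        }
    }

    fn main() {
        let handler: Arc<dyn MiniHandler> =
            Arc::new(CountingHandler { counter: AtomicUsize::new(0) });
        // Several worker threads can call into one shared handler concurrently,
        // which is what the Arc-based registration above allows.
        let threads: Vec<_> = (0..4usize)
            .map(|i| {
                let h = handler.clone();
                thread::spawn(move || h.timeout(i))
            })
            .collect();
        for t in threads {
            t.join().unwrap();
        }
    }
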
|
@ -1,148 +1,229 @@
|
|||||||
|
use std::sync::*;
|
||||||
use std::thread::{self, JoinHandle};
|
use std::thread::{self, JoinHandle};
|
||||||
|
use std::collections::HashMap;
|
||||||
use mio::*;
|
use mio::*;
|
||||||
use mio::util::{Slab};
|
|
||||||
use hash::*;
|
use hash::*;
|
||||||
use rlp::*;
|
use rlp::*;
|
||||||
use error::*;
|
use error::*;
|
||||||
use io::{IoError, IoHandler};
|
use io::{IoError, IoHandler};
|
||||||
|
use arrayvec::*;
|
||||||
|
use crossbeam::sync::chase_lev;
|
||||||
|
use io::worker::{Worker, Work, WorkType};
|
||||||
|
|
||||||
|
/// Timer ID
|
||||||
pub type TimerToken = usize;
|
pub type TimerToken = usize;
|
||||||
|
/// Stream ID
|
||||||
pub type StreamToken = usize;
|
pub type StreamToken = usize;
|
||||||
|
/// IO Handler ID
|
||||||
|
pub type HandlerId = usize;
|
||||||
|
|
||||||
// Tokens
|
/// Maximum number of tokens a handler can use
|
||||||
const MAX_USER_TIMERS: usize = 32;
|
pub const TOKENS_PER_HANDLER: usize = 16384;
|
||||||
const USER_TIMER: usize = 0;
|
|
||||||
const LAST_USER_TIMER: usize = USER_TIMER + MAX_USER_TIMERS - 1;
|
|
||||||
//const USER_TOKEN: usize = LAST_USER_TIMER + 1;
|
|
||||||
|
|
||||||
/// Messages used to communicate with the event loop from other threads.
|
/// Messages used to communicate with the event loop from other threads.
|
||||||
pub enum IoMessage<Message> where Message: Send + Sized {
|
#[derive(Clone)]
|
||||||
|
pub enum IoMessage<Message> where Message: Send + Clone + Sized {
|
||||||
/// Shutdown the event loop
|
/// Shutdown the event loop
|
||||||
Shutdown,
|
Shutdown,
|
||||||
/// Register a new protocol handler.
|
/// Register a new protocol handler.
|
||||||
AddHandler {
|
AddHandler {
|
||||||
handler: Box<IoHandler<Message>+Send>,
|
handler: Arc<IoHandler<Message>+Send>,
|
||||||
|
},
|
||||||
|
AddTimer {
|
||||||
|
handler_id: HandlerId,
|
||||||
|
token: TimerToken,
|
||||||
|
delay: u64,
|
||||||
|
},
|
||||||
|
RemoveTimer {
|
||||||
|
handler_id: HandlerId,
|
||||||
|
token: TimerToken,
|
||||||
|
},
|
||||||
|
RegisterStream {
|
||||||
|
handler_id: HandlerId,
|
||||||
|
token: StreamToken,
|
||||||
|
},
|
||||||
|
UpdateStreamRegistration {
|
||||||
|
handler_id: HandlerId,
|
||||||
|
token: StreamToken,
|
||||||
},
|
},
|
||||||
/// Broadcast a message across all protocol handlers.
|
/// Broadcast a message across all protocol handlers.
|
||||||
UserMessage(Message)
|
UserMessage(Message)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
|
/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
|
||||||
pub struct IoContext<'s, Message> where Message: Send + 'static {
|
pub struct IoContext<Message> where Message: Send + Clone + 'static {
|
||||||
timers: &'s mut Slab<UserTimer>,
|
channel: IoChannel<Message>,
|
||||||
/// Low leve MIO Event loop for custom handler registration.
|
handler: HandlerId,
|
||||||
pub event_loop: &'s mut EventLoop<IoManager<Message>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'s, Message> IoContext<'s, Message> where Message: Send + 'static {
|
impl<Message> IoContext<Message> where Message: Send + Clone + 'static {
|
||||||
/// Create a new IO access point. Takes references to all the data that can be updated within the IO handler.
|
/// Create a new IO access point. Takes references to all the data that can be updated within the IO handler.
|
||||||
fn new(event_loop: &'s mut EventLoop<IoManager<Message>>, timers: &'s mut Slab<UserTimer>) -> IoContext<'s, Message> {
|
pub fn new(channel: IoChannel<Message>, handler: HandlerId) -> IoContext<Message> {
|
||||||
IoContext {
|
IoContext {
|
||||||
event_loop: event_loop,
|
handler: handler,
|
||||||
timers: timers,
|
channel: channel,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Register a new IO timer. Returns a new timer token. 'IoHandler::timeout' will be called with the token.
|
/// Register a new IO timer. 'IoHandler::timeout' will be called with the token.
|
||||||
pub fn register_timer(&mut self, ms: u64) -> Result<TimerToken, UtilError> {
|
pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> {
|
||||||
match self.timers.insert(UserTimer {
|
try!(self.channel.send_io(IoMessage::AddTimer {
|
||||||
|
token: token,
|
||||||
delay: ms,
|
delay: ms,
|
||||||
}) {
|
handler_id: self.handler,
|
||||||
Ok(token) => {
|
}));
|
||||||
self.event_loop.timeout_ms(token, ms).expect("Error registering user timer");
|
Ok(())
|
||||||
Ok(token.as_usize())
|
}
|
||||||
},
|
|
||||||
_ => { panic!("Max timers reached") }
|
/// Delete a timer.
|
||||||
}
|
pub fn clear_timer(&self, token: TimerToken) -> Result<(), UtilError> {
|
||||||
|
try!(self.channel.send_io(IoMessage::RemoveTimer {
|
||||||
|
token: token,
|
||||||
|
handler_id: self.handler,
|
||||||
|
}));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
/// Register a new IO stream.
|
||||||
|
pub fn register_stream(&self, token: StreamToken) -> Result<(), UtilError> {
|
||||||
|
try!(self.channel.send_io(IoMessage::RegisterStream {
|
||||||
|
token: token,
|
||||||
|
handler_id: self.handler,
|
||||||
|
}));
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reregister an IO stream.
|
||||||
|
pub fn update_registration(&self, token: StreamToken) -> Result<(), UtilError> {
|
||||||
|
try!(self.channel.send_io(IoMessage::UpdateStreamRegistration {
|
||||||
|
token: token,
|
||||||
|
handler_id: self.handler,
|
||||||
|
}));
|
||||||
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Broadcast a message to other IO clients
|
/// Broadcast a message to other IO clients
|
||||||
pub fn message(&mut self, message: Message) {
|
pub fn message(&self, message: Message) {
|
||||||
match self.event_loop.channel().send(IoMessage::UserMessage(message)) {
|
self.channel.send(message).expect("Error sending message");
|
||||||
Ok(_) => {}
|
}
|
||||||
Err(e) => { panic!("Error sending io message {:?}", e); }
|
|
||||||
}
|
/// Get message channel
|
||||||
|
pub fn channel(&self) -> IoChannel<Message> {
|
||||||
|
self.channel.clone()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
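
In the new `IoContext` above, registering or clearing a timer or stream no longer touches the event loop directly; the context just sends an `IoMessage` describing the request over the notification channel, which is why it can be used from any thread. A minimal std-only sketch of that request-message pattern; the `Msg` enum and `Ctx` type are illustrative, not the crate's types:

    use std::sync::mpsc::{channel, Sender};

    // Illustrative stand-in for IoMessage: the context only describes the
    // change it wants; the event-loop thread is the one that applies it.
    enum Msg {
        AddTimer { handler_id: usize, token: usize, delay_ms: u64 },
        RemoveTimer { handler_id: usize, token: usize },
    }

    struct Ctx {
        handler_id: usize,
        channel: Sender<Msg>,
    }

    impl Ctx {
        fn register_timer(&self, token: usize, delay_ms: u64) {
            self.channel
                .send(Msg::AddTimer { handler_id: self.handler_id, token: token, delay_ms: delay_ms })
                .expect("event loop gone");
        }

        fn clear_timer(&self, token: usize) {
            self.channel
                .send(Msg::RemoveTimer { handler_id: self.handler_id, token: token })
                .expect("event loop gone");
        }
    }

    fn main() {
        let (tx, rx) = channel();
        let ctx = Ctx { handler_id: 3, channel: tx };
        ctx.register_timer(7, 1000);
        ctx.clear_timer(7);
        // The event-loop side drains the queue and applies each request.
        for msg in rx.try_iter() {
            match msg {
                Msg::AddTimer { handler_id, token, delay_ms } =>
                    println!("arm timer {} of handler {}: every {} ms", token, handler_id, delay_ms),
                Msg::RemoveTimer { handler_id, token } =>
                    println!("disarm timer {} of handler {}", token, handler_id),
            }
        }
    }
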
|
|
||||||
|
#[derive(Clone)]
|
||||||
struct UserTimer {
|
struct UserTimer {
|
||||||
delay: u64,
|
delay: u64,
|
||||||
|
timeout: Timeout,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Root IO handler. Manages user handlers, messages and IO timers.
|
/// Root IO handler. Manages user handlers, messages and IO timers.
|
||||||
pub struct IoManager<Message> where Message: Send {
|
pub struct IoManager<Message> where Message: Send + Sync {
|
||||||
timers: Slab<UserTimer>,
|
timers: Arc<RwLock<HashMap<HandlerId, UserTimer>>>,
|
||||||
handlers: Vec<Box<IoHandler<Message>>>,
|
handlers: Vec<Arc<IoHandler<Message>>>,
|
||||||
|
_workers: Vec<Worker>,
|
||||||
|
worker_channel: chase_lev::Worker<Work<Message>>,
|
||||||
|
work_ready: Arc<Condvar>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> IoManager<Message> where Message: Send + 'static {
|
impl<Message> IoManager<Message> where Message: Send + Sync + Clone + 'static {
|
||||||
/// Creates a new instance and registers it with the event loop.
|
/// Creates a new instance and registers it with the event loop.
|
||||||
pub fn start(event_loop: &mut EventLoop<IoManager<Message>>) -> Result<(), UtilError> {
|
pub fn start(event_loop: &mut EventLoop<IoManager<Message>>) -> Result<(), UtilError> {
|
||||||
|
let (worker, stealer) = chase_lev::deque();
|
||||||
|
let num_workers = 4;
|
||||||
|
let work_ready_mutex = Arc::new(Mutex::new(()));
|
||||||
|
let work_ready = Arc::new(Condvar::new());
|
||||||
|
let workers = (0..num_workers).map(|i|
|
||||||
|
Worker::new(i, stealer.clone(), IoChannel::new(event_loop.channel()), work_ready.clone(), work_ready_mutex.clone())).collect();
|
||||||
|
|
||||||
let mut io = IoManager {
|
let mut io = IoManager {
|
||||||
timers: Slab::new_starting_at(Token(USER_TIMER), MAX_USER_TIMERS),
|
timers: Arc::new(RwLock::new(HashMap::new())),
|
||||||
handlers: Vec::new(),
|
handlers: Vec::new(),
|
||||||
|
worker_channel: worker,
|
||||||
|
_workers: workers,
|
||||||
|
work_ready: work_ready,
|
||||||
};
|
};
|
||||||
try!(event_loop.run(&mut io));
|
try!(event_loop.run(&mut io));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> Handler for IoManager<Message> where Message: Send + 'static {
|
impl<Message> Handler for IoManager<Message> where Message: Send + Clone + Sync + 'static {
|
||||||
type Timeout = Token;
|
type Timeout = Token;
|
||||||
type Message = IoMessage<Message>;
|
type Message = IoMessage<Message>;
|
||||||
|
|
||||||
fn ready(&mut self, event_loop: &mut EventLoop<Self>, token: Token, events: EventSet) {
|
fn ready(&mut self, _event_loop: &mut EventLoop<Self>, token: Token, events: EventSet) {
|
||||||
|
let handler_index = token.as_usize() / TOKENS_PER_HANDLER;
|
||||||
|
let token_id = token.as_usize() % TOKENS_PER_HANDLER;
|
||||||
|
if handler_index >= self.handlers.len() {
|
||||||
|
panic!("Unexpected stream token: {}", token.as_usize());
|
||||||
|
}
|
||||||
|
let handler = self.handlers[handler_index].clone();
|
||||||
|
|
||||||
if events.is_hup() {
|
if events.is_hup() {
|
||||||
for h in &mut self.handlers {
|
self.worker_channel.push(Work { work_type: WorkType::Hup, token: token_id, handler: handler.clone(), handler_id: handler_index });
|
||||||
h.stream_hup(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
|
}
|
||||||
}
|
else {
|
||||||
}
|
if events.is_readable() {
|
||||||
else if events.is_readable() {
|
self.worker_channel.push(Work { work_type: WorkType::Readable, token: token_id, handler: handler.clone(), handler_id: handler_index });
|
||||||
for h in &mut self.handlers {
|
}
|
||||||
h.stream_readable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
|
if events.is_writable() {
|
||||||
}
|
self.worker_channel.push(Work { work_type: WorkType::Writable, token: token_id, handler: handler.clone(), handler_id: handler_index });
|
||||||
}
|
|
||||||
else if events.is_writable() {
|
|
||||||
for h in &mut self.handlers {
|
|
||||||
h.stream_writable(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
self.work_ready.notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
fn timeout(&mut self, event_loop: &mut EventLoop<Self>, token: Token) {
|
fn timeout(&mut self, event_loop: &mut EventLoop<Self>, token: Token) {
|
||||||
match token.as_usize() {
|
let handler_index = token.as_usize() / TOKENS_PER_HANDLER;
|
||||||
USER_TIMER ... LAST_USER_TIMER => {
|
let token_id = token.as_usize() % TOKENS_PER_HANDLER;
|
||||||
let delay = {
|
if handler_index >= self.handlers.len() {
|
||||||
let timer = self.timers.get_mut(token).expect("Unknown user timer token");
|
panic!("Unexpected timer token: {}", token.as_usize());
|
||||||
timer.delay
|
}
|
||||||
};
|
if let Some(timer) = self.timers.read().unwrap().get(&token.as_usize()) {
|
||||||
for h in &mut self.handlers {
|
event_loop.timeout_ms(token, timer.delay).expect("Error re-registering user timer");
|
||||||
h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
|
let handler = self.handlers[handler_index].clone();
|
||||||
}
|
self.worker_channel.push(Work { work_type: WorkType::Timeout, token: token_id, handler: handler, handler_id: handler_index });
|
||||||
event_loop.timeout_ms(token, delay).expect("Error re-registering user timer");
|
self.work_ready.notify_all();
|
||||||
}
|
|
||||||
_ => { // Just pass the event down. IoHandler is supposed to re-register it if required.
|
|
||||||
for h in &mut self.handlers {
|
|
||||||
h.timeout(&mut IoContext::new(event_loop, &mut self.timers), token.as_usize());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
|
fn notify(&mut self, event_loop: &mut EventLoop<Self>, msg: Self::Message) {
|
||||||
let mut m = msg;
|
match msg {
|
||||||
match m {
|
|
||||||
IoMessage::Shutdown => event_loop.shutdown(),
|
IoMessage::Shutdown => event_loop.shutdown(),
|
||||||
IoMessage::AddHandler {
|
IoMessage::AddHandler { handler } => {
|
||||||
handler,
|
let handler_id = {
|
||||||
} => {
|
self.handlers.push(handler.clone());
|
||||||
self.handlers.push(handler);
|
self.handlers.len() - 1
|
||||||
self.handlers.last_mut().unwrap().initialize(&mut IoContext::new(event_loop, &mut self.timers));
|
};
|
||||||
|
handler.initialize(&IoContext::new(IoChannel::new(event_loop.channel()), handler_id));
|
||||||
},
|
},
|
||||||
IoMessage::UserMessage(ref mut data) => {
|
IoMessage::AddTimer { handler_id, token, delay } => {
|
||||||
for h in &mut self.handlers {
|
let timer_id = token + handler_id * TOKENS_PER_HANDLER;
|
||||||
h.message(&mut IoContext::new(event_loop, &mut self.timers), data);
|
let timeout = event_loop.timeout_ms(Token(timer_id), delay).expect("Error registering user timer");
|
||||||
|
self.timers.write().unwrap().insert(timer_id, UserTimer { delay: delay, timeout: timeout });
|
||||||
|
},
|
||||||
|
IoMessage::RemoveTimer { handler_id, token } => {
|
||||||
|
let timer_id = token + handler_id * TOKENS_PER_HANDLER;
|
||||||
|
if let Some(timer) = self.timers.write().unwrap().remove(&timer_id) {
|
||||||
|
event_loop.clear_timeout(timer.timeout);
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
IoMessage::RegisterStream { handler_id, token } => {
|
||||||
|
let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone();
|
||||||
|
handler.register_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop);
|
||||||
|
},
|
||||||
|
IoMessage::UpdateStreamRegistration { handler_id, token } => {
|
||||||
|
let handler = self.handlers.get(handler_id).expect("Unknown handler id").clone();
|
||||||
|
handler.update_stream(token, Token(token + handler_id * TOKENS_PER_HANDLER), event_loop);
|
||||||
|
},
|
||||||
|
IoMessage::UserMessage(data) => {
|
||||||
|
for n in 0 .. self.handlers.len() {
|
||||||
|
let handler = self.handlers[n].clone();
|
||||||
|
self.worker_channel.push(Work { work_type: WorkType::Message(data.clone()), token: 0, handler: handler, handler_id: n });
|
||||||
|
}
|
||||||
|
self.work_ready.notify_all();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
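
Timer and stream tokens are now namespaced per handler: each handler owns `TOKENS_PER_HANDLER` consecutive values, and `ready`/`timeout` above recover the handler index and the handler-local token with a division and a remainder. A small worked example of that arithmetic:

    const TOKENS_PER_HANDLER: usize = 16384;

    // handler-local token -> global event-loop token
    fn global_token(handler_id: usize, local: usize) -> usize {
        assert!(local < TOKENS_PER_HANDLER, "token outside the handler's range");
        handler_id * TOKENS_PER_HANDLER + local
    }

    // global event-loop token -> (handler index, handler-local token)
    fn split_token(global: usize) -> (usize, usize) {
        (global / TOKENS_PER_HANDLER, global % TOKENS_PER_HANDLER)
    }

    fn main() {
        let g = global_token(2, 7);          // handler 2, its timer/stream 7
        assert_eq!(g, 32775);
        assert_eq!(split_token(g), (2, 7));  // what ready()/timeout() recover
        println!("global token {} -> handler 2, local token 7", g);
    }
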
@ -150,11 +231,19 @@ impl<Message> Handler for IoManager<Message> where Message: Send + 'static {
|
|||||||
|
|
||||||
/// Allows sending messages into the event loop. All the IO handlers will get the message
|
/// Allows sending messages into the event loop. All the IO handlers will get the message
|
||||||
/// in the `message` callback.
|
/// in the `message` callback.
|
||||||
pub struct IoChannel<Message> where Message: Send {
|
pub struct IoChannel<Message> where Message: Send + Clone{
|
||||||
channel: Option<Sender<IoMessage<Message>>>
|
channel: Option<Sender<IoMessage<Message>>>
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> IoChannel<Message> where Message: Send {
|
impl<Message> Clone for IoChannel<Message> where Message: Send + Clone {
|
||||||
|
fn clone(&self) -> IoChannel<Message> {
|
||||||
|
IoChannel {
|
||||||
|
channel: self.channel.clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Message> IoChannel<Message> where Message: Send + Clone {
|
||||||
/// Send a msessage through the channel
|
/// Send a msessage through the channel
|
||||||
pub fn send(&self, message: Message) -> Result<(), IoError> {
|
pub fn send(&self, message: Message) -> Result<(), IoError> {
|
||||||
if let Some(ref channel) = self.channel {
|
if let Some(ref channel) = self.channel {
|
||||||
@ -163,20 +252,31 @@ impl<Message> IoChannel<Message> where Message: Send {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Send a low-level IO message
|
||||||
|
pub fn send_io(&self, message: IoMessage<Message>) -> Result<(), IoError> {
|
||||||
|
if let Some(ref channel) = self.channel {
|
||||||
|
try!(channel.send(message))
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
/// Create a new channel that is not connected to any event loop.
|
/// Create a new channel that is not connected to any event loop.
|
||||||
pub fn disconnected() -> IoChannel<Message> {
|
pub fn disconnected() -> IoChannel<Message> {
|
||||||
IoChannel { channel: None }
|
IoChannel { channel: None }
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn new(channel: Sender<IoMessage<Message>>) -> IoChannel<Message> {
|
||||||
|
IoChannel { channel: Some(channel) }
|
||||||
|
}
|
||||||
}
|
}
|
||||||
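
`IoChannel` above holds an `Option<Sender>` so that a `disconnected()` channel silently drops messages, and it implements `Clone` by hand. A self-contained sketch of the same shape; `MiniChannel` is an illustrative type, and the hand-written `Clone` shown here avoids a `T: Clone` bound, which may or may not be the motivation in the crate itself:

    use std::sync::mpsc::{channel, SendError, Sender};

    // Illustrative mini version of the Option<Sender> idea above: a
    // "disconnected" handle silently drops messages instead of erroring.
    struct MiniChannel<T> {
        channel: Option<Sender<T>>,
    }

    // Hand-written Clone: only the optional sender is cloned, so no
    // `T: Clone` bound is required.
    impl<T> Clone for MiniChannel<T> {
        fn clone(&self) -> MiniChannel<T> {
            MiniChannel { channel: self.channel.clone() }
        }
    }

    impl<T> MiniChannel<T> {
        fn connected(sender: Sender<T>) -> MiniChannel<T> {
            MiniChannel { channel: Some(sender) }
        }
        fn disconnected() -> MiniChannel<T> {
            MiniChannel { channel: None }
        }
        fn send(&self, msg: T) -> Result<(), SendError<T>> {
            if let Some(ref ch) = self.channel {
                ch.send(msg)?;
            }
            Ok(())
        }
    }

    fn main() {
        let (tx, rx) = channel::<u32>();
        let live = MiniChannel::connected(tx);
        let dead = MiniChannel::<u32>::disconnected();
        live.clone().send(1).unwrap(); // delivered
        dead.send(2).unwrap();         // silently dropped, no receiver needed
        assert_eq!(rx.recv().unwrap(), 1);
        println!("ok");
    }
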
|
|
||||||
/// General IO Service. Starts an event loop and dispatches IO requests.
|
/// General IO Service. Starts an event loop and dispatches IO requests.
|
||||||
/// 'Message' is a notification message type
|
/// 'Message' is a notification message type
|
||||||
pub struct IoService<Message> where Message: Send + 'static {
|
pub struct IoService<Message> where Message: Send + Sync + Clone + 'static {
|
||||||
thread: Option<JoinHandle<()>>,
|
thread: Option<JoinHandle<()>>,
|
||||||
host_channel: Sender<IoMessage<Message>>
|
host_channel: Sender<IoMessage<Message>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> IoService<Message> where Message: Send + 'static {
|
impl<Message> IoService<Message> where Message: Send + Sync + Clone + 'static {
|
||||||
/// Starts IO event loop
|
/// Starts IO event loop
|
||||||
pub fn start() -> Result<IoService<Message>, UtilError> {
|
pub fn start() -> Result<IoService<Message>, UtilError> {
|
||||||
let mut event_loop = EventLoop::new().unwrap();
|
let mut event_loop = EventLoop::new().unwrap();
|
||||||
@ -191,7 +291,7 @@ impl<Message> IoService<Message> where Message: Send + 'static {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Register an IO handler with the event loop.
|
/// Register an IO handler with the event loop.
|
||||||
pub fn register_handler(&mut self, handler: Box<IoHandler<Message>+Send>) -> Result<(), IoError> {
|
pub fn register_handler(&mut self, handler: Arc<IoHandler<Message>+Send>) -> Result<(), IoError> {
|
||||||
try!(self.host_channel.send(IoMessage::AddHandler {
|
try!(self.host_channel.send(IoMessage::AddHandler {
|
||||||
handler: handler,
|
handler: handler,
|
||||||
}));
|
}));
|
||||||
@ -210,10 +310,10 @@ impl<Message> IoService<Message> where Message: Send + 'static {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Message> Drop for IoService<Message> where Message: Send {
|
impl<Message> Drop for IoService<Message> where Message: Send + Sync + Clone {
|
||||||
fn drop(&mut self) {
|
fn drop(&mut self) {
|
||||||
self.host_channel.send(IoMessage::Shutdown).unwrap();
|
self.host_channel.send(IoMessage::Shutdown).unwrap();
|
||||||
self.thread.take().unwrap().join().unwrap();
|
self.thread.take().unwrap().join().ok();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
util/src/io/worker.rs (new file, 99 lines)
@ -0,0 +1,99 @@
|
|||||||
|
use std::sync::*;
|
||||||
|
use std::mem;
|
||||||
|
use std::thread::{JoinHandle, self};
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
|
||||||
|
use crossbeam::sync::chase_lev;
|
||||||
|
use io::service::{HandlerId, IoChannel, IoContext};
|
||||||
|
use io::{IoHandler};
|
||||||
|
|
||||||
|
pub enum WorkType<Message> {
|
||||||
|
Readable,
|
||||||
|
Writable,
|
||||||
|
Hup,
|
||||||
|
Timeout,
|
||||||
|
Message(Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct Work<Message> {
|
||||||
|
pub work_type: WorkType<Message>,
|
||||||
|
pub token: usize,
|
||||||
|
pub handler_id: HandlerId,
|
||||||
|
pub handler: Arc<IoHandler<Message>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An IO worker thread
|
||||||
|
/// Executes IO handler work items stolen from the shared queue.
|
||||||
|
pub struct Worker {
|
||||||
|
thread: Option<JoinHandle<()>>,
|
||||||
|
wait: Arc<Condvar>,
|
||||||
|
deleting: Arc<AtomicBool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Worker {
|
||||||
|
/// Creates a new worker instance.
|
||||||
|
pub fn new<Message>(index: usize,
|
||||||
|
stealer: chase_lev::Stealer<Work<Message>>,
|
||||||
|
channel: IoChannel<Message>,
|
||||||
|
wait: Arc<Condvar>,
|
||||||
|
wait_mutex: Arc<Mutex<()>>) -> Worker
|
||||||
|
where Message: Send + Sync + Clone + 'static {
|
||||||
|
let deleting = Arc::new(AtomicBool::new(false));
|
||||||
|
let mut worker = Worker {
|
||||||
|
thread: None,
|
||||||
|
wait: wait.clone(),
|
||||||
|
deleting: deleting.clone(),
|
||||||
|
};
|
||||||
|
worker.thread = Some(thread::Builder::new().name(format!("IO Worker #{}", index)).spawn(
|
||||||
|
move || Worker::work_loop(stealer, channel.clone(), wait, wait_mutex.clone(), deleting))
|
||||||
|
.expect("Error creating worker thread"));
|
||||||
|
worker
|
||||||
|
}
|
||||||
|
|
||||||
|
fn work_loop<Message>(stealer: chase_lev::Stealer<Work<Message>>,
|
||||||
|
channel: IoChannel<Message>, wait: Arc<Condvar>,
|
||||||
|
wait_mutex: Arc<Mutex<()>>,
|
||||||
|
deleting: Arc<AtomicBool>)
|
||||||
|
where Message: Send + Sync + Clone + 'static {
|
||||||
|
while !deleting.load(AtomicOrdering::Relaxed) {
|
||||||
|
{
|
||||||
|
let lock = wait_mutex.lock().unwrap();
|
||||||
|
let _ = wait.wait(lock).unwrap();
|
||||||
|
if deleting.load(AtomicOrdering::Relaxed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
while let chase_lev::Steal::Data(work) = stealer.steal() {
|
||||||
|
Worker::do_work(work, channel.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn do_work<Message>(work: Work<Message>, channel: IoChannel<Message>) where Message: Send + Sync + Clone + 'static {
|
||||||
|
match work.work_type {
|
||||||
|
WorkType::Readable => {
|
||||||
|
work.handler.stream_readable(&IoContext::new(channel, work.handler_id), work.token);
|
||||||
|
},
|
||||||
|
WorkType::Writable => {
|
||||||
|
work.handler.stream_writable(&IoContext::new(channel, work.handler_id), work.token);
|
||||||
|
}
|
||||||
|
WorkType::Hup => {
|
||||||
|
work.handler.stream_hup(&IoContext::new(channel, work.handler_id), work.token);
|
||||||
|
}
|
||||||
|
WorkType::Timeout => {
|
||||||
|
work.handler.timeout(&IoContext::new(channel, work.handler_id), work.token);
|
||||||
|
}
|
||||||
|
WorkType::Message(message) => {
|
||||||
|
work.handler.message(&IoContext::new(channel, work.handler_id), &message);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for Worker {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.deleting.store(true, AtomicOrdering::Relaxed);
|
||||||
|
self.wait.notify_all();
|
||||||
|
let thread = mem::replace(&mut self.thread, None).unwrap();
|
||||||
|
thread.join().ok();
|
||||||
|
}
|
||||||
|
}
|
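
The new worker threads in worker.rs above sleep on a `Condvar` until the event loop pushes work and calls `notify_all`, and `Drop` stops them by setting an `AtomicBool` and notifying again. A std-only sketch of that wake-up and shutdown pattern (no work-stealing deque here, just the synchronisation; `wait_timeout` is used so this simplified version cannot hang on a missed wake-up):

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::{Arc, Condvar, Mutex};
    use std::thread::{self, JoinHandle};
    use std::time::Duration;

    // Illustrative worker: parks on a Condvar until notified, exits when the
    // shared `deleting` flag is set.
    struct Worker {
        thread: Option<JoinHandle<()>>,
        wait: Arc<Condvar>,
        deleting: Arc<AtomicBool>,
    }

    impl Worker {
        fn new(wait: Arc<Condvar>, wait_mutex: Arc<Mutex<()>>) -> Worker {
            let deleting = Arc::new(AtomicBool::new(false));
            let (d, w) = (deleting.clone(), wait.clone());
            let thread = thread::spawn(move || {
                while !d.load(Ordering::Relaxed) {
                    let guard = wait_mutex.lock().unwrap();
                    let _ = w.wait_timeout(guard, Duration::from_millis(50)).unwrap();
                    // A real worker would now steal Work items off the deque.
                }
            });
            Worker { thread: Some(thread), wait: wait, deleting: deleting }
        }
    }

    impl Drop for Worker {
        // The same shutdown dance as in worker.rs above: flag, notify, join.
        fn drop(&mut self) {
            self.deleting.store(true, Ordering::Relaxed);
            self.wait.notify_all();
            self.thread.take().unwrap().join().ok();
        }
    }

    fn main() {
        let wait = Arc::new(Condvar::new());
        let mutex = Arc::new(Mutex::new(()));
        let _worker = Worker::new(wait.clone(), mutex);
        wait.notify_all(); // pretend the event loop queued some work
        thread::sleep(Duration::from_millis(10));
        // _worker drops here: the thread is signalled and joined cleanly.
    }
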
@ -34,6 +34,16 @@ impl JournalDB {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Create a new instance given a shared `backing` database.
|
||||||
|
pub fn new_with_arc(backing: Arc<DB>) -> JournalDB {
|
||||||
|
JournalDB {
|
||||||
|
forward: OverlayDB::new_with_arc(backing.clone()),
|
||||||
|
backing: backing,
|
||||||
|
inserts: vec![],
|
||||||
|
removes: vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Create a new instance with an anonymous temporary database.
|
/// Create a new instance with an anonymous temporary database.
|
||||||
pub fn new_temp() -> JournalDB {
|
pub fn new_temp() -> JournalDB {
|
||||||
let mut dir = env::temp_dir();
|
let mut dir = env::temp_dir();
|
||||||
|
@ -54,6 +54,7 @@ extern crate crypto as rcrypto;
|
|||||||
extern crate secp256k1;
|
extern crate secp256k1;
|
||||||
extern crate arrayvec;
|
extern crate arrayvec;
|
||||||
extern crate elastic_array;
|
extern crate elastic_array;
|
||||||
|
extern crate crossbeam;
|
||||||
|
|
||||||
/// TODO [Gav Wood] Please document me
|
/// TODO [Gav Wood] Please document me
|
||||||
pub mod standard;
|
pub mod standard;
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
use std::collections::VecDeque;
|
use std::collections::VecDeque;
|
||||||
use mio::{Handler, Token, EventSet, EventLoop, Timeout, PollOpt, TryRead, TryWrite};
|
use mio::{Handler, Token, EventSet, EventLoop, PollOpt, TryRead, TryWrite};
|
||||||
use mio::tcp::*;
|
use mio::tcp::*;
|
||||||
use hash::*;
|
use hash::*;
|
||||||
use sha3::*;
|
use sha3::*;
|
||||||
@ -7,6 +7,7 @@ use bytes::*;
|
|||||||
use rlp::*;
|
use rlp::*;
|
||||||
use std::io::{self, Cursor, Read};
|
use std::io::{self, Cursor, Read};
|
||||||
use error::*;
|
use error::*;
|
||||||
|
use io::{IoContext, StreamToken};
|
||||||
use network::error::NetworkError;
|
use network::error::NetworkError;
|
||||||
use network::handshake::Handshake;
|
use network::handshake::Handshake;
|
||||||
use crypto;
|
use crypto;
|
||||||
@ -17,11 +18,12 @@ use rcrypto::buffer::*;
|
|||||||
use tiny_keccak::Keccak;
|
use tiny_keccak::Keccak;
|
||||||
|
|
||||||
const ENCRYPTED_HEADER_LEN: usize = 32;
|
const ENCRYPTED_HEADER_LEN: usize = 32;
|
||||||
|
const RECIEVE_PAYLOAD_TIMEOUT: u64 = 30000;
|
||||||
|
|
||||||
/// Low level tcp connection
|
/// Low level tcp connection
|
||||||
pub struct Connection {
|
pub struct Connection {
|
||||||
/// Connection id (token)
|
/// Connection id (token)
|
||||||
pub token: Token,
|
pub token: StreamToken,
|
||||||
/// Network socket
|
/// Network socket
|
||||||
pub socket: TcpStream,
|
pub socket: TcpStream,
|
||||||
/// Receive buffer
|
/// Receive buffer
|
||||||
@ -45,14 +47,14 @@ pub enum WriteStatus {
|
|||||||
|
|
||||||
impl Connection {
|
impl Connection {
|
||||||
/// Create a new connection with given id and socket.
|
/// Create a new connection with given id and socket.
|
||||||
pub fn new(token: Token, socket: TcpStream) -> Connection {
|
pub fn new(token: StreamToken, socket: TcpStream) -> Connection {
|
||||||
Connection {
|
Connection {
|
||||||
token: token,
|
token: token,
|
||||||
socket: socket,
|
socket: socket,
|
||||||
send_queue: VecDeque::new(),
|
send_queue: VecDeque::new(),
|
||||||
rec_buf: Bytes::new(),
|
rec_buf: Bytes::new(),
|
||||||
rec_size: 0,
|
rec_size: 0,
|
||||||
interest: EventSet::hup(),
|
interest: EventSet::hup() | EventSet::readable(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -132,20 +134,19 @@ impl Connection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Register this connection with the IO event loop.
|
/// Register this connection with the IO event loop.
|
||||||
pub fn register<Host: Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
|
pub fn register_socket<Host: Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
|
||||||
trace!(target: "net", "connection register; token={:?}", self.token);
|
trace!(target: "net", "connection register; token={:?}", reg);
|
||||||
self.interest.insert(EventSet::readable());
|
event_loop.register(&self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
|
||||||
event_loop.register(&self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
|
error!("Failed to register {:?}, {:?}", reg, e);
|
||||||
error!("Failed to register {:?}, {:?}", self.token, e);
|
|
||||||
Err(e)
|
Err(e)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Update connection registration. Should be called at the end of the IO handler.
|
/// Update connection registration. Should be called at the end of the IO handler.
|
||||||
pub fn reregister<Host: Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
|
pub fn update_socket<Host: Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> io::Result<()> {
|
||||||
trace!(target: "net", "connection reregister; token={:?}", self.token);
|
trace!(target: "net", "connection reregister; token={:?}", reg);
|
||||||
event_loop.reregister( &self.socket, self.token, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
|
event_loop.reregister( &self.socket, reg, self.interest, PollOpt::edge() | PollOpt::oneshot()).or_else(|e| {
|
||||||
error!("Failed to reregister {:?}, {:?}", self.token, e);
|
error!("Failed to reregister {:?}, {:?}", reg, e);
|
||||||
Err(e)
|
Err(e)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
@ -182,8 +183,6 @@ pub struct EncryptedConnection {
|
|||||||
ingress_mac: Keccak,
|
ingress_mac: Keccak,
|
||||||
/// Read state
|
/// Read state
|
||||||
read_state: EncryptedConnectionState,
|
read_state: EncryptedConnectionState,
|
||||||
/// Disconnect timeout
|
|
||||||
idle_timeout: Option<Timeout>,
|
|
||||||
/// Protocol id for the last received packet
|
/// Protocol id for the last received packet
|
||||||
protocol_id: u16,
|
protocol_id: u16,
|
||||||
/// Payload expected to be received for the last header.
|
/// Payload expected to be received for the last header.
|
||||||
@ -192,7 +191,7 @@ pub struct EncryptedConnection {
|
|||||||
|
|
||||||
impl EncryptedConnection {
|
impl EncryptedConnection {
|
||||||
/// Create an encrypted connection out of the handshake. Consumes a handshake object.
|
/// Create an encrypted connection out of the handshake. Consumes a handshake object.
|
||||||
pub fn new(handshake: Handshake) -> Result<EncryptedConnection, UtilError> {
|
pub fn new(mut handshake: Handshake) -> Result<EncryptedConnection, UtilError> {
|
||||||
let shared = try!(crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_public));
|
let shared = try!(crypto::ecdh::agree(handshake.ecdhe.secret(), &handshake.remote_public));
|
||||||
let mut nonce_material = H512::new();
|
let mut nonce_material = H512::new();
|
||||||
if handshake.originated {
|
if handshake.originated {
|
||||||
@ -227,6 +226,7 @@ impl EncryptedConnection {
|
|||||||
ingress_mac.update(&mac_material);
|
ingress_mac.update(&mac_material);
|
||||||
ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher });
|
ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher });
|
||||||
|
|
||||||
|
handshake.connection.expect(ENCRYPTED_HEADER_LEN);
|
||||||
Ok(EncryptedConnection {
|
Ok(EncryptedConnection {
|
||||||
connection: handshake.connection,
|
connection: handshake.connection,
|
||||||
encoder: encoder,
|
encoder: encoder,
|
||||||
@ -235,7 +235,6 @@ impl EncryptedConnection {
|
|||||||
egress_mac: egress_mac,
|
egress_mac: egress_mac,
|
||||||
ingress_mac: ingress_mac,
|
ingress_mac: ingress_mac,
|
||||||
read_state: EncryptedConnectionState::Header,
|
read_state: EncryptedConnectionState::Header,
|
||||||
idle_timeout: None,
|
|
||||||
protocol_id: 0,
|
protocol_id: 0,
|
||||||
payload_len: 0
|
payload_len: 0
|
||||||
})
|
})
|
||||||
@ -337,13 +336,14 @@ impl EncryptedConnection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Readable IO handler. Tracks receive status and returns a decoded packet if available.
|
/// Readable IO handler. Tracks receive status and returns a decoded packet if available.
|
||||||
pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<Option<Packet>, UtilError> {
|
pub fn readable<Message>(&mut self, io: &IoContext<Message>) -> Result<Option<Packet>, UtilError> where Message: Send + Clone{
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
io.clear_timer(self.connection.token).unwrap();
|
||||||
match self.read_state {
|
match self.read_state {
|
||||||
EncryptedConnectionState::Header => {
|
EncryptedConnectionState::Header => {
|
||||||
if let Some(data) = try!(self.connection.readable()) {
|
if let Some(data) = try!(self.connection.readable()) {
|
||||||
try!(self.read_header(&data));
|
try!(self.read_header(&data));
|
||||||
};
|
try!(io.register_timer(self.connection.token, RECIEVE_PAYLOAD_TIMEOUT));
|
||||||
|
}
|
||||||
Ok(None)
|
Ok(None)
|
||||||
},
|
},
|
||||||
EncryptedConnectionState::Payload => {
|
EncryptedConnectionState::Payload => {
|
||||||
@ -360,24 +360,15 @@ impl EncryptedConnection {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Writable IO handler. Processes the send queue.
|
/// Writable IO handler. Processes the send queue.
|
||||||
pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
pub fn writable<Message>(&mut self, io: &IoContext<Message>) -> Result<(), UtilError> where Message: Send + Clone {
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
io.clear_timer(self.connection.token).unwrap();
|
||||||
try!(self.connection.writable());
|
try!(self.connection.writable());
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Register this connection with the event handler.
|
|
||||||
pub fn register<Host:Handler<Timeout=Token>>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
|
||||||
self.connection.expect(ENCRYPTED_HEADER_LEN);
|
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
|
||||||
self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok();
|
|
||||||
try!(self.connection.reregister(event_loop));
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Update connection registration. This should be called at the end of the event loop.
|
/// Update connection registration. This should be called at the end of the event loop.
|
||||||
pub fn reregister<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
pub fn update_socket<Host:Handler>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
||||||
try!(self.connection.reregister(event_loop));
|
try!(self.connection.update_socket(reg, event_loop));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
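
The connection code above now re-arms a receive timer with `io.register_timer(self.connection.token, RECIEVE_PAYLOAD_TIMEOUT)` and cancels it with `io.clear_timer(...)`, keyed by the connection's own stream token. A std-only sketch of tracking per-token deadlines that way; the `Timers` type is illustrative, not how the event loop actually stores them:

    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    // Illustrative deadline table keyed by stream token, mirroring the
    // register_timer / clear_timer calls made by the connection code above.
    struct Timers {
        deadlines: HashMap<usize, Instant>,
    }

    impl Timers {
        fn new() -> Timers { Timers { deadlines: HashMap::new() } }

        // register_timer(token, ms): (re)arm the deadline for this connection
        fn register_timer(&mut self, token: usize, ms: u64) {
            self.deadlines.insert(token, Instant::now() + Duration::from_millis(ms));
        }

        // clear_timer(token): the connection made progress, drop the deadline
        fn clear_timer(&mut self, token: usize) {
            self.deadlines.remove(&token);
        }

        // Which connections have gone silent for too long?
        fn expired(&self, now: Instant) -> Vec<usize> {
            self.deadlines.iter()
                .filter(|&(_, deadline)| *deadline <= now)
                .map(|(token, _)| *token)
                .collect()
        }
    }

    fn main() {
        let mut timers = Timers::new();
        timers.register_timer(5, 30_000);  // connection 5 must deliver a payload in 30 s
        timers.clear_timer(5);             // it did; timer disarmed
        timers.register_timer(9, 0);       // connection 9 is already overdue
        println!("expired: {:?}", timers.expired(Instant::now()));
    }
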
|
@ -141,7 +141,7 @@ impl Discovery {
|
|||||||
{
|
{
|
||||||
if count < BUCKET_SIZE {
|
if count < BUCKET_SIZE {
|
||||||
count += 1;
|
count += 1;
|
||||||
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
|
found.entry(Discovery::distance(target, &n)).or_insert_with(Vec::new).push(n);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
@ -151,7 +151,7 @@ impl Discovery {
|
|||||||
for n in &buckets[tail as usize].nodes {
|
for n in &buckets[tail as usize].nodes {
|
||||||
if count < BUCKET_SIZE {
|
if count < BUCKET_SIZE {
|
||||||
count += 1;
|
count += 1;
|
||||||
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
|
found.entry(Discovery::distance(target, &n)).or_insert_with(Vec::new).push(n);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
@ -170,7 +170,7 @@ impl Discovery {
|
|||||||
for n in &buckets[head as usize].nodes {
|
for n in &buckets[head as usize].nodes {
|
||||||
if count < BUCKET_SIZE {
|
if count < BUCKET_SIZE {
|
||||||
count += 1;
|
count += 1;
|
||||||
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
|
found.entry(Discovery::distance(target, &n)).or_insert_with(Vec::new).push(n);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
@ -184,7 +184,7 @@ impl Discovery {
|
|||||||
for n in &buckets[tail as usize].nodes {
|
for n in &buckets[tail as usize].nodes {
|
||||||
if count < BUCKET_SIZE {
|
if count < BUCKET_SIZE {
|
||||||
count += 1;
|
count += 1;
|
||||||
found.entry(Discovery::distance(target, &n)).or_insert(Vec::new()).push(n);
|
found.entry(Discovery::distance(target, &n)).or_insert_with(Vec::new).push(n);
|
||||||
}
|
}
|
||||||
else {
|
else {
|
||||||
break;
|
break;
|
||||||
|
@ -19,11 +19,17 @@ pub enum DisconnectReason
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
|
/// Network error.
|
||||||
pub enum NetworkError {
|
pub enum NetworkError {
|
||||||
|
/// Authentication error.
|
||||||
Auth,
|
Auth,
|
||||||
|
/// Unrecognised protocol.
|
||||||
BadProtocol,
|
BadProtocol,
|
||||||
|
/// Peer not found.
|
||||||
PeerNotFound,
|
PeerNotFound,
|
||||||
|
/// Peer is disconnected.
|
||||||
Disconnect(DisconnectReason),
|
Disconnect(DisconnectReason),
|
||||||
|
/// Socket IO error.
|
||||||
Io(IoError),
|
Io(IoError),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -10,6 +10,7 @@ use network::host::{HostInfo};
|
|||||||
use network::node::NodeId;
|
use network::node::NodeId;
|
||||||
use error::*;
|
use error::*;
|
||||||
use network::error::NetworkError;
|
use network::error::NetworkError;
|
||||||
|
use io::{IoContext, StreamToken};
|
||||||
|
|
||||||
#[derive(PartialEq, Eq, Debug)]
|
#[derive(PartialEq, Eq, Debug)]
|
||||||
enum HandshakeState {
|
enum HandshakeState {
|
||||||
@ -33,8 +34,6 @@ pub struct Handshake {
|
|||||||
state: HandshakeState,
|
state: HandshakeState,
|
||||||
/// Outgoing or incoming connection
|
/// Outgoing or incoming connection
|
||||||
pub originated: bool,
|
pub originated: bool,
|
||||||
/// Disconnect timeout
|
|
||||||
idle_timeout: Option<Timeout>,
|
|
||||||
/// ECDH ephemeral
|
/// ECDH ephemeral
|
||||||
pub ecdhe: KeyPair,
|
pub ecdhe: KeyPair,
|
||||||
/// Connection nonce
|
/// Connection nonce
|
||||||
@ -51,16 +50,16 @@ pub struct Handshake {
|
|||||||
|
|
||||||
const AUTH_PACKET_SIZE: usize = 307;
|
const AUTH_PACKET_SIZE: usize = 307;
|
||||||
const ACK_PACKET_SIZE: usize = 210;
|
const ACK_PACKET_SIZE: usize = 210;
|
||||||
|
const HANDSHAKE_TIMEOUT: u64 = 30000;
|
||||||
|
|
||||||
impl Handshake {
|
impl Handshake {
|
||||||
/// Create a new handshake object
|
/// Create a new handshake object
|
||||||
pub fn new(token: Token, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result<Handshake, UtilError> {
|
pub fn new(token: StreamToken, id: &NodeId, socket: TcpStream, nonce: &H256) -> Result<Handshake, UtilError> {
|
||||||
Ok(Handshake {
|
Ok(Handshake {
|
||||||
id: id.clone(),
|
id: id.clone(),
|
||||||
connection: Connection::new(token, socket),
|
connection: Connection::new(token, socket),
|
||||||
originated: false,
|
originated: false,
|
||||||
state: HandshakeState::New,
|
state: HandshakeState::New,
|
||||||
idle_timeout: None,
|
|
||||||
ecdhe: try!(KeyPair::create()),
|
ecdhe: try!(KeyPair::create()),
|
||||||
nonce: nonce.clone(),
|
nonce: nonce.clone(),
|
||||||
remote_public: Public::new(),
|
remote_public: Public::new(),
|
||||||
@ -71,8 +70,9 @@ impl Handshake {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Start a handshake
|
/// Start a handshake
|
||||||
pub fn start(&mut self, host: &HostInfo, originated: bool) -> Result<(), UtilError> {
|
pub fn start<Message>(&mut self, io: &IoContext<Message>, host: &HostInfo, originated: bool) -> Result<(), UtilError> where Message: Send + Clone{
|
||||||
self.originated = originated;
|
self.originated = originated;
|
||||||
|
io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT).ok();
|
||||||
if originated {
|
if originated {
|
||||||
try!(self.write_auth(host));
|
try!(self.write_auth(host));
|
||||||
}
|
}
|
||||||
@ -89,8 +89,8 @@ impl Handshake {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Readable IO handler. Drives the state change.
|
/// Readable IO handler. Drives the state change.
|
||||||
pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<(), UtilError> {
|
pub fn readable<Message>(&mut self, io: &IoContext<Message>, host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone {
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
io.clear_timer(self.connection.token).unwrap();
|
||||||
match self.state {
|
match self.state {
|
||||||
HandshakeState::ReadingAuth => {
|
HandshakeState::ReadingAuth => {
|
||||||
if let Some(data) = try!(self.connection.readable()) {
|
if let Some(data) = try!(self.connection.readable()) {
|
||||||
@ -104,29 +104,33 @@ impl Handshake {
|
|||||||
self.state = HandshakeState::StartSession;
|
self.state = HandshakeState::StartSession;
|
||||||
};
|
};
|
||||||
},
|
},
|
||||||
|
HandshakeState::StartSession => {},
|
||||||
_ => { panic!("Unexpected state"); }
|
_ => { panic!("Unexpected state"); }
|
||||||
}
|
}
|
||||||
if self.state != HandshakeState::StartSession {
|
if self.state != HandshakeState::StartSession {
|
||||||
try!(self.connection.reregister(event_loop));
|
try!(io.update_registration(self.connection.token));
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Writable IO handler.
|
/// Writable IO handler.
|
||||||
pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, _host: &HostInfo) -> Result<(), UtilError> {
|
pub fn writable<Message>(&mut self, io: &IoContext<Message>, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Clone {
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
io.clear_timer(self.connection.token).unwrap();
|
||||||
try!(self.connection.writable());
|
try!(self.connection.writable());
|
||||||
if self.state != HandshakeState::StartSession {
|
if self.state != HandshakeState::StartSession {
|
||||||
try!(self.connection.reregister(event_loop));
|
io.update_registration(self.connection.token).unwrap();
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Register the IO handler with the event loop
|
/// Register the socket with the event loop
|
||||||
pub fn register<Host:Handler<Timeout=Token>>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
pub fn register_socket<Host:Handler<Timeout=Token>>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
||||||
self.idle_timeout.map(|t| event_loop.clear_timeout(t));
|
try!(self.connection.register_socket(reg, event_loop));
|
||||||
self.idle_timeout = event_loop.timeout_ms(self.connection.token, 1800).ok();
|
Ok(())
|
||||||
try!(self.connection.register(event_loop));
|
}
|
||||||
|
|
||||||
|
pub fn update_socket<Host:Handler<Timeout=Token>>(&self, reg: Token, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
|
||||||
|
try!(self.connection.update_socket(reg, event_loop));
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
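
`Handshake::readable` above drives a small state machine and now treats a stray readable event in the `StartSession` state as a no-op instead of hitting the `panic!`. A compact, simplified sketch of that shape; the `State` enum and transitions below are illustrative and do not reproduce the real RLPx auth/ack exchange:

    #[derive(Debug, PartialEq, Clone, Copy)]
    enum State {
        New,
        ReadingAuth,
        ReadingAck,
        StartSession,
    }

    // One "readable" step of a handshake-style state machine: consume whatever
    // the current state expects and advance; spurious wake-ups in a finished
    // state are ignored instead of panicking.
    fn on_readable(state: &mut State, packet_arrived: bool) {
        match *state {
            State::ReadingAuth if packet_arrived => *state = State::StartSession,
            State::ReadingAck if packet_arrived => *state = State::StartSession,
            State::ReadingAuth | State::ReadingAck => {} // partial read, stay put
            State::StartSession => {}                    // late event, ignore
            State::New => panic!("readable before the handshake was started"),
        }
    }

    fn main() {
        let mut state = State::ReadingAck;
        on_readable(&mut state, false); // nothing complete yet
        assert_eq!(state, State::ReadingAck);
        on_readable(&mut state, true);  // ack packet fully read
        assert_eq!(state, State::StartSession);
        on_readable(&mut state, true);  // stray readable after completion: no-op
        println!("final state: {:?}", state);
    }
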
|
@ -1,8 +1,9 @@
|
|||||||
use std::mem;
|
|
||||||
use std::net::{SocketAddr};
|
use std::net::{SocketAddr};
|
||||||
use std::collections::{HashMap};
|
use std::collections::{HashMap};
|
||||||
use std::hash::{Hasher};
|
use std::hash::{Hasher};
|
||||||
use std::str::{FromStr};
|
use std::str::{FromStr};
|
||||||
|
use std::sync::*;
|
||||||
|
use std::ops::*;
|
||||||
use mio::*;
|
use mio::*;
|
||||||
use mio::tcp::*;
|
use mio::tcp::*;
|
||||||
use mio::udp::*;
|
use mio::udp::*;
|
||||||
@ -64,19 +65,25 @@ pub type PacketId = u8;
|
|||||||
pub type ProtocolId = &'static str;
|
pub type ProtocolId = &'static str;
|
||||||
|
|
||||||
/// Messages used to communicate with the event loop from other threads.
|
/// Messages used to communicate with the event loop from other threads.
|
||||||
pub enum NetworkIoMessage<Message> where Message: Send {
|
#[derive(Clone)]
|
||||||
|
pub enum NetworkIoMessage<Message> where Message: Send + Sync + Clone {
|
||||||
/// Register a new protocol handler.
|
/// Register a new protocol handler.
|
||||||
AddHandler {
|
AddHandler {
|
||||||
handler: Option<Box<NetworkProtocolHandler<Message>+Send>>,
|
/// Handler shared instance.
|
||||||
|
handler: Arc<NetworkProtocolHandler<Message> + Sync>,
|
||||||
|
/// Protocol Id.
|
||||||
protocol: ProtocolId,
|
protocol: ProtocolId,
|
||||||
|
/// Supported protocol versions.
|
||||||
versions: Vec<u8>,
|
versions: Vec<u8>,
|
||||||
},
|
},
|
||||||
/// Send data over the network.
|
/// Register a new protocol timer
|
||||||
Send {
|
AddTimer {
|
||||||
peer: PeerId,
|
/// Protocol Id.
|
||||||
packet_id: PacketId,
|
|
||||||
protocol: ProtocolId,
|
protocol: ProtocolId,
|
||||||
data: Vec<u8>,
|
/// Timer token.
|
||||||
|
token: TimerToken,
|
||||||
|
/// Timer delay in milliseconds.
|
||||||
|
delay: u64,
|
||||||
},
|
},
|
||||||
/// User message
|
/// User message
|
||||||
User(Message),
|
User(Message),
|
||||||
@@ -104,46 +111,45 @@ impl Encodable for CapabilityInfo {
 }

 /// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
-pub struct NetworkContext<'s, 'io, Message> where Message: Send + 'static, 'io: 's {
-	io: &'s mut IoContext<'io, NetworkIoMessage<Message>>,
+pub struct NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, 's {
+	io: &'s IoContext<NetworkIoMessage<Message>>,
 	protocol: ProtocolId,
-	connections: &'s mut Slab<ConnectionEntry>,
-	timers: &'s mut HashMap<TimerToken, ProtocolId>,
+	connections: Arc<RwLock<Slab<SharedConnectionEntry>>>,
 	session: Option<StreamToken>,
 }

-impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 'static, {
+impl<'s, Message> NetworkContext<'s, Message> where Message: Send + Sync + Clone + 'static, {
 	/// Create a new network IO access point. Takes references to all the data that can be updated within the IO handler.
-	fn new(io: &'s mut IoContext<'io, NetworkIoMessage<Message>>,
+	fn new(io: &'s IoContext<NetworkIoMessage<Message>>,
 		protocol: ProtocolId,
-		session: Option<StreamToken>, connections: &'s mut Slab<ConnectionEntry>,
-		timers: &'s mut HashMap<TimerToken, ProtocolId>) -> NetworkContext<'s, 'io, Message> {
+		session: Option<StreamToken>, connections: Arc<RwLock<Slab<SharedConnectionEntry>>>) -> NetworkContext<'s, Message> {
 		NetworkContext {
 			io: io,
 			protocol: protocol,
 			session: session,
 			connections: connections,
-			timers: timers,
 		}
 	}

 	/// Send a packet over the network to another peer.
-	pub fn send(&mut self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
-		match self.connections.get_mut(peer) {
-			Some(&mut ConnectionEntry::Session(ref mut s)) => {
-				s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| {
-					warn!(target: "net", "Send error: {:?}", e);
-				}); //TODO: don't copy vector data
-			},
-			_ => {
-				warn!(target: "net", "Send: Peer does not exist");
+	pub fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
+		if let Some(connection) = self.connections.read().unwrap().get(peer).cloned() {
+			match *connection.lock().unwrap().deref_mut() {
+				ConnectionEntry::Session(ref mut s) => {
+					s.send_packet(self.protocol, packet_id as u8, &data).unwrap_or_else(|e| {
+						warn!(target: "net", "Send error: {:?}", e);
+					}); //TODO: don't copy vector data
+				},
+				_ => warn!(target: "net", "Send: Peer is not connected yet")
 			}
+		} else {
+			warn!(target: "net", "Send: Peer does not exist")
 		}
 		Ok(())
 	}

 	/// Respond to a current network message. Panics if no there is no packet in the context.
-	pub fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
+	pub fn respond(&self, packet_id: PacketId, data: Vec<u8>) -> Result<(), UtilError> {
 		match self.session {
 			Some(session) => self.send(session, packet_id, data),
 			None => {
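The rewritten `send` and `peer_info` above share one locking pattern: hold the connections slab's read lock only long enough to clone the `Arc` for a single entry, then release it and work on that entry through its own `Mutex`. A minimal, self-contained sketch of the same pattern using std types only (the `Registry` alias and `String` payload are illustrative, not part of this change):

use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

// A registry of shared entries, analogous to the connections slab.
type Registry = Arc<RwLock<HashMap<usize, Arc<Mutex<String>>>>>;

fn send_to(registry: &Registry, id: usize, data: &str) {
    // The read guard is a temporary of this statement, so the registry
    // lock is already released before the match below runs.
    let entry = registry.read().unwrap().get(&id).cloned();
    match entry {
        Some(entry) => entry.lock().unwrap().push_str(data),
        None => println!("no such entry: {}", id),
    }
}

fn main() {
    let registry: Registry = Arc::new(RwLock::new(HashMap::new()));
    registry.write().unwrap().insert(7, Arc::new(Mutex::new(String::new())));
    send_to(&registry, 7, "hello");
    let entry = registry.read().unwrap().get(&7).cloned().unwrap();
    println!("{}", entry.lock().unwrap());
}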
@@ -153,31 +159,28 @@ impl<'s, 'io, Message> NetworkContext<'s, 'io, Message> where Message: Send + 's
 	}

 	/// Disable current protocol capability for given peer. If no capabilities left peer gets disconnected.
-	pub fn disable_peer(&mut self, _peer: PeerId) {
+	pub fn disable_peer(&self, _peer: PeerId) {
 		//TODO: remove capability, disconnect if no capabilities left
 	}

-	/// Register a new IO timer. Returns a new timer token. 'NetworkProtocolHandler::timeout' will be called with the token.
-	pub fn register_timer(&mut self, ms: u64) -> Result<TimerToken, UtilError>{
-		match self.io.register_timer(ms) {
-			Ok(token) => {
-				self.timers.insert(token, self.protocol);
-				Ok(token)
-			},
-			e => e,
-		}
+	/// Register a new IO timer. 'IoHandler::timeout' will be called with the token.
+	pub fn register_timer(&self, token: TimerToken, ms: u64) -> Result<(), UtilError> {
+		self.io.message(NetworkIoMessage::AddTimer {
+			token: token,
+			delay: ms,
+			protocol: self.protocol,
+		});
+		Ok(())
 	}

 	/// Returns peer identification string
 	pub fn peer_info(&self, peer: PeerId) -> String {
-		match self.connections.get(peer) {
-			Some(&ConnectionEntry::Session(ref s)) => {
-				s.info.client_version.clone()
-			},
-			_ => {
-				"unknown".to_owned()
+		if let Some(connection) = self.connections.read().unwrap().get(peer).cloned() {
+			if let ConnectionEntry::Session(ref s) = *connection.lock().unwrap().deref() {
+				return s.info.client_version.clone()
 			}
 		}
+		"unknown".to_owned()
 	}
 }
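With this change a handler no longer receives a token back from `register_timer`; it chooses its own handler-level token up front and the delay is delivered to the host via the `AddTimer` message. A fragment consistent with the doc and test examples later in this diff (it assumes the crate types `NetworkContext`, `TimerToken` and the doc example's `MyMessage`, so it is not standalone):

	fn initialize(&self, io: &NetworkContext<MyMessage>) {
		// 0 is the handler-chosen token; 1000 is the delay in milliseconds.
		io.register_timer(0, 1000).unwrap();
	}

	fn timeout(&self, _io: &NetworkContext<MyMessage>, timer: TimerToken) {
		// Called back with the same handler-level token (0), not the host's
		// internal io-level token.
		println!("timer {} fired", timer);
	}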
@@ -222,26 +225,35 @@ enum ConnectionEntry {
 	Session(Session)
 }

-/// Root IO handler. Manages protocol handlers, IO timers and network connections.
-pub struct Host<Message> where Message: Send {
-	pub info: HostInfo,
-	udp_socket: UdpSocket,
-	listener: TcpListener,
-	connections: Slab<ConnectionEntry>,
-	timers: HashMap<TimerToken, ProtocolId>,
-	nodes: HashMap<NodeId, Node>,
-	handlers: HashMap<ProtocolId, Box<NetworkProtocolHandler<Message>>>,
+type SharedConnectionEntry = Arc<Mutex<ConnectionEntry>>;
+
+#[derive(Copy, Clone)]
+struct ProtocolTimer {
+	pub protocol: ProtocolId,
+	pub token: TimerToken, // Handler level token
 }

-impl<Message> Host<Message> where Message: Send {
+/// Root IO handler. Manages protocol handlers, IO timers and network connections.
+pub struct Host<Message> where Message: Send + Sync + Clone {
+	pub info: RwLock<HostInfo>,
+	udp_socket: Mutex<UdpSocket>,
+	tcp_listener: Mutex<TcpListener>,
+	connections: Arc<RwLock<Slab<SharedConnectionEntry>>>,
+	nodes: RwLock<HashMap<NodeId, Node>>,
+	handlers: RwLock<HashMap<ProtocolId, Arc<NetworkProtocolHandler<Message>>>>,
+	timers: RwLock<HashMap<TimerToken, ProtocolTimer>>,
+	timer_counter: RwLock<usize>,
+}
+
+impl<Message> Host<Message> where Message: Send + Sync + Clone {
 	pub fn new() -> Host<Message> {
 		let config = NetworkConfiguration::new();
 		let addr = config.listen_address;
 		// Setup the server socket
-		let listener = TcpListener::bind(&addr).unwrap();
+		let tcp_listener = TcpListener::bind(&addr).unwrap();
 		let udp_socket = UdpSocket::bound(&addr).unwrap();
-		Host::<Message> {
-			info: HostInfo {
+		let mut host = Host::<Message> {
+			info: RwLock::new(HostInfo {
 				keys: KeyPair::create().unwrap(),
 				config: config,
 				nonce: H256::random(),
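`Host` now keeps every mutable field behind `RwLock` or `Mutex`, so the `IoHandler` methods can take `&self` and be shared across IO threads; mutation happens through short-lived lock guards instead of `&mut self`. A self-contained illustration of the same interior-mutability pattern (the `Counters` type and field names are illustrative only):

use std::sync::RwLock;

struct Counters {
    // Shared state mutated through &self, like Host::timer_counter.
    next_token: RwLock<usize>,
    listen_port: RwLock<u16>,
}

impl Counters {
    fn new() -> Counters {
        Counters { next_token: RwLock::new(100), listen_port: RwLock::new(0) }
    }

    // &self, not &mut self: the lock provides the mutability.
    fn allocate_token(&self) -> usize {
        let mut counter = self.next_token.write().unwrap();
        let token = *counter;
        *counter += 1;
        token
    }

    fn set_port(&self, port: u16) {
        *self.listen_port.write().unwrap() = port;
    }
}

fn main() {
    let c = Counters::new();
    c.set_port(30303);
    println!("tokens: {} {}, port: {}",
        c.allocate_token(), c.allocate_token(), *c.listen_port.read().unwrap());
}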
@@ -249,39 +261,64 @@ impl<Message> Host<Message> where Message: Send {
 				client_version: "parity".to_owned(),
 				listen_port: 0,
 				capabilities: Vec::new(),
-			},
-			udp_socket: udp_socket,
-			listener: listener,
-			connections: Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS),
-			timers: HashMap::new(),
-			nodes: HashMap::new(),
-			handlers: HashMap::new(),
-		}
+			}),
+			udp_socket: Mutex::new(udp_socket),
+			tcp_listener: Mutex::new(tcp_listener),
+			connections: Arc::new(RwLock::new(Slab::new_starting_at(FIRST_CONNECTION, MAX_CONNECTIONS))),
+			nodes: RwLock::new(HashMap::new()),
+			handlers: RwLock::new(HashMap::new()),
+			timers: RwLock::new(HashMap::new()),
+			timer_counter: RwLock::new(LAST_CONNECTION + 1),
+		};
+		let port = host.info.read().unwrap().config.listen_address.port();
+		host.info.write().unwrap().deref_mut().listen_port = port;
+
+		/*
+		match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() {
+		Some(iface) => config.public_address = iface.addr.unwrap(),
+		None => warn!("No public network interface"),
+		*/
+
+		// self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303");
+		// GO bootnodes
+		host.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE
+		host.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR
+		host.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG
+		// ETH/DEV cpp-ethereum (poc-9.ethdev.com)
+		host.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");
+		host
 	}

-	fn add_node(&mut self, id: &str) {
+	pub fn add_node(&mut self, id: &str) {
 		match Node::from_str(id) {
 			Err(e) => { warn!("Could not add node: {:?}", e); },
 			Ok(n) => {
-				self.nodes.insert(n.id.clone(), n);
+				self.nodes.write().unwrap().insert(n.id.clone(), n);
 			}
 		}
 	}

-	fn maintain_network(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
+	pub fn client_version(&self) -> String {
+		self.info.read().unwrap().client_version.clone()
+	}
+
+	pub fn client_id(&self) -> NodeId {
+		self.info.read().unwrap().id().clone()
+	}
+
+	fn maintain_network(&self, io: &IoContext<NetworkIoMessage<Message>>) {
 		self.connect_peers(io);
-		io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap();
 	}

 	fn have_session(&self, id: &NodeId) -> bool {
-		self.connections.iter().any(|e| match *e { ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false })
+		self.connections.read().unwrap().iter().any(|e| match *e.lock().unwrap().deref() { ConnectionEntry::Session(ref s) => s.info.id.eq(&id), _ => false })
 	}

 	fn connecting_to(&self, id: &NodeId) -> bool {
-		self.connections.iter().any(|e| match *e { ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false })
+		self.connections.read().unwrap().iter().any(|e| match *e.lock().unwrap().deref() { ConnectionEntry::Handshake(ref h) => h.id.eq(&id), _ => false })
 	}

-	fn connect_peers(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
+	fn connect_peers(&self, io: &IoContext<NetworkIoMessage<Message>>) {
 		struct NodeInfo {
 			id: NodeId,
 			peer_type: PeerType
@@ -292,13 +329,14 @@ impl<Message> Host<Message> where Message: Send {
 		let mut req_conn = 0;
 		//TODO: use nodes from discovery here
 		//for n in self.node_buckets.iter().flat_map(|n| &n.nodes).map(|id| NodeInfo { id: id.clone(), peer_type: self.nodes.get(id).unwrap().peer_type}) {
-		for n in self.nodes.values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) {
+		let pin = self.info.read().unwrap().deref().config.pin;
+		for n in self.nodes.read().unwrap().values().map(|n| NodeInfo { id: n.id.clone(), peer_type: n.peer_type }) {
 			let connected = self.have_session(&n.id) || self.connecting_to(&n.id);
 			let required = n.peer_type == PeerType::Required;
 			if connected && required {
 				req_conn += 1;
 			}
-			else if !connected && (!self.info.config.pin || required) {
+			else if !connected && (!pin || required) {
 				to_connect.push(n);
 			}
 		}
@@ -312,8 +350,7 @@ impl<Message> Host<Message> where Message: Send {
 			}
 		}

-		if !self.info.config.pin
-		{
+		if !pin {
 			let pending_count = 0; //TODO:
 			let peer_count = 0;
 			let mut open_slots = IDEAL_PEERS - peer_count - pending_count + req_conn;
@@ -329,23 +366,26 @@ impl<Message> Host<Message> where Message: Send {
 	}

 	#[allow(single_match)]
-	fn connect_peer(&mut self, id: &NodeId, io: &mut IoContext<NetworkIoMessage<Message>>) {
+	#[allow(block_in_if_condition_stmt)]
+	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage<Message>>) {
 		if self.have_session(id)
 		{
 			warn!("Aborted connect. Node already connected.");
 			return;
 		}
-		if self.connecting_to(id)
-		{
+		if self.connecting_to(id) {
 			warn!("Aborted connect. Node already connecting.");
 			return;
 		}

 		let socket = {
-			let node = self.nodes.get_mut(id).unwrap();
-			node.last_attempted = Some(::time::now());
-			match TcpStream::connect(&node.endpoint.address) {
+			let address = {
+				let mut nodes = self.nodes.write().unwrap();
+				let node = nodes.get_mut(id).unwrap();
+				node.last_attempted = Some(::time::now());
+				node.endpoint.address
+			};
+			match TcpStream::connect(&address) {
 				Ok(socket) => socket,
 				Err(_) => {
 					warn!("Cannot connect to node");
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
let nonce = self.info.next_nonce();
|
let nonce = self.info.write().unwrap().next_nonce();
|
||||||
match self.connections.insert_with(|token| ConnectionEntry::Handshake(Handshake::new(Token(token), id, socket, &nonce).expect("Can't create handshake"))) {
|
if self.connections.write().unwrap().insert_with(|token| {
|
||||||
Some(token) => {
|
let mut handshake = Handshake::new(token, id, socket, &nonce).expect("Can't create handshake");
|
||||||
match self.connections.get_mut(token) {
|
handshake.start(io, &self.info.read().unwrap(), true).and_then(|_| io.register_stream(token)).unwrap_or_else (|e| {
|
||||||
Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
|
debug!(target: "net", "Handshake create error: {:?}", e);
|
||||||
h.start(&self.info, true)
|
});
|
||||||
.and_then(|_| h.register(io.event_loop))
|
Arc::new(Mutex::new(ConnectionEntry::Handshake(handshake)))
|
||||||
.unwrap_or_else (|e| {
|
}).is_none() {
|
||||||
debug!(target: "net", "Handshake create error: {:?}", e);
|
warn!("Max connections reached");
|
||||||
});
|
|
||||||
},
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
None => { warn!("Max connections reached") }
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn accept(&self, _io: &IoContext<NetworkIoMessage<Message>>) {
|
||||||
fn accept(&mut self, _io: &mut IoContext<NetworkIoMessage<Message>>) {
|
|
||||||
trace!(target: "net", "accept");
|
trace!(target: "net", "accept");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(single_match)]
|
#[allow(single_match)]
|
||||||
fn connection_writable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
|
fn connection_writable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
|
||||||
let mut kill = false;
|
|
||||||
let mut create_session = false;
|
let mut create_session = false;
|
||||||
match self.connections.get_mut(token) {
|
let mut kill = false;
|
||||||
Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
|
if let Some(connection) = self.connections.read().unwrap().get(token).cloned() {
|
||||||
h.writable(io.event_loop, &self.info).unwrap_or_else(|e| {
|
match *connection.lock().unwrap().deref_mut() {
|
||||||
debug!(target: "net", "Handshake write error: {:?}", e);
|
ConnectionEntry::Handshake(ref mut h) => {
|
||||||
kill = true;
|
match h.writable(io, &self.info.read().unwrap()) {
|
||||||
});
|
Err(e) => {
|
||||||
create_session = h.done();
|
debug!(target: "net", "Handshake write error: {:?}", e);
|
||||||
},
|
kill = true;
|
||||||
Some(&mut ConnectionEntry::Session(ref mut s)) => {
|
},
|
||||||
s.writable(io.event_loop, &self.info).unwrap_or_else(|e| {
|
Ok(_) => ()
|
||||||
debug!(target: "net", "Session write error: {:?}", e);
|
}
|
||||||
kill = true;
|
if h.done() {
|
||||||
});
|
create_session = true;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
ConnectionEntry::Session(ref mut s) => {
|
||||||
|
match s.writable(io, &self.info.read().unwrap()) {
|
||||||
|
Err(e) => {
|
||||||
|
debug!(target: "net", "Session write error: {:?}", e);
|
||||||
|
kill = true;
|
||||||
|
},
|
||||||
|
Ok(_) => ()
|
||||||
|
}
|
||||||
|
io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
_ => {
|
}
|
||||||
warn!(target: "net", "Received event for unknown connection");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if kill {
|
if kill {
|
||||||
self.kill_connection(token, io);
|
self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection
|
||||||
return;
|
return;
|
||||||
} else if create_session {
|
} else if create_session {
|
||||||
self.start_session(token, io);
|
self.start_session(token, io);
|
||||||
}
|
io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
|
||||||
match self.connections.get_mut(token) {
|
|
||||||
Some(&mut ConnectionEntry::Session(ref mut s)) => {
|
|
||||||
s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
|
|
||||||
},
|
|
||||||
_ => (),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
-	fn connection_closed<'s>(&'s mut self, token: TimerToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
+	fn connection_closed(&self, token: TimerToken, io: &IoContext<NetworkIoMessage<Message>>) {
 		self.kill_connection(token, io);
 	}

-	fn connection_readable<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
-		let mut kill = false;
-		let mut create_session = false;
+	fn connection_readable(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
 		let mut ready_data: Vec<ProtocolId> = Vec::new();
 		let mut packet_data: Option<(ProtocolId, PacketId, Vec<u8>)> = None;
-		match self.connections.get_mut(token) {
-			Some(&mut ConnectionEntry::Handshake(ref mut h)) => {
-				h.readable(io.event_loop, &self.info).unwrap_or_else(|e| {
-					debug!(target: "net", "Handshake read error: {:?}", e);
-					kill = true;
-				});
-				create_session = h.done();
-			},
-			Some(&mut ConnectionEntry::Session(ref mut s)) => {
-				let sd = { s.readable(io.event_loop, &self.info).unwrap_or_else(|e| {
-					debug!(target: "net", "Session read error: {:?}", e);
-					kill = true;
-					SessionData::None
-				}) };
-				match sd {
-					SessionData::Ready => {
-						for (p, _) in &mut self.handlers {
-							if s.have_capability(p) {
-								ready_data.push(p);
-							}
-						}
-					},
-					SessionData::Packet {
-						data,
-						protocol,
-						packet_id,
-					} => {
-						match self.handlers.get_mut(protocol) {
-							None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) },
-							Some(_) => packet_data = Some((protocol, packet_id, data)),
-						}
-					},
-					SessionData::None => {},
-				}
-			}
-			_ => {
-				warn!(target: "net", "Received event for unknown connection");
-			}
-		}
-		if kill {
-			self.kill_connection(token, io);
-			return;
-		}
-		if create_session {
-			self.start_session(token, io);
-		}
-		for p in ready_data {
-			let mut h = self.handlers.get_mut(p).unwrap();
-			h.connected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token);
-		}
-		if let Some((p, packet_id, data)) = packet_data {
-			let mut h = self.handlers.get_mut(p).unwrap();
-			h.read(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token, packet_id, &data[1..]);
-		}
-
-		if let Some(&mut ConnectionEntry::Session(ref mut s)) = self.connections.get_mut(token) {
-			s.reregister(io.event_loop).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
-		}
-	}
-
-	fn start_session(&mut self, token: StreamToken, io: &mut IoContext<NetworkIoMessage<Message>>) {
-		let info = &self.info;
-		// TODO: use slab::replace_with (currently broken)
-		/*
-		match self.connections.remove(token) {
-			Some(ConnectionEntry::Handshake(h)) => {
-				match Session::new(h, io.event_loop, info) {
-					Ok(session) => {
-						assert!(token == self.connections.insert(ConnectionEntry::Session(session)).ok().unwrap());
-					},
-					Err(e) => {
-						debug!(target: "net", "Session construction error: {:?}", e);
-					}
-				}
-			},
-			_ => panic!("Error updating slab with session")
-		}*/
-		self.connections.replace_with(token, |c| {
-			match c {
-				ConnectionEntry::Handshake(h) => Session::new(h, io.event_loop, info)
-					.map(|s| Some(ConnectionEntry::Session(s)))
-					.unwrap_or_else(|e| {
-						debug!(target: "net", "Session construction error: {:?}", e);
-						None
-					}),
-				_ => { panic!("No handshake to create a session from"); }
-			}
-		}).expect("Error updating slab with session");
+		let mut create_session = false;
+		let mut kill = false;
+		if let Some(connection) = self.connections.read().unwrap().get(token).cloned() {
+			match *connection.lock().unwrap().deref_mut() {
+				ConnectionEntry::Handshake(ref mut h) => {
+					if let Err(e) = h.readable(io, &self.info.read().unwrap()) {
+						debug!(target: "net", "Handshake read error: {:?}", e);
+						kill = true;
+					}
+					if h.done() {
+						create_session = true;
+					}
+				},
+				ConnectionEntry::Session(ref mut s) => {
+					match s.readable(io, &self.info.read().unwrap()) {
+						Err(e) => {
+							debug!(target: "net", "Handshake read error: {:?}", e);
+							kill = true;
+						},
+						Ok(SessionData::Ready) => {
+							for (p, _) in self.handlers.read().unwrap().iter() {
+								if s.have_capability(p) {
+									ready_data.push(p);
+								}
+							}
+						},
+						Ok(SessionData::Packet {
+							data,
+							protocol,
+							packet_id,
+						}) => {
+							match self.handlers.read().unwrap().get(protocol) {
+								None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) },
+								Some(_) => packet_data = Some((protocol, packet_id, data)),
+							}
+						},
+						Ok(SessionData::None) => {},
+					}
+				}
+			}
+		}
+		if kill {
+			self.kill_connection(token, io); //TODO: mark connection as dead an check in kill_connection
+			return;
+		} else if create_session {
+			self.start_session(token, io);
+			io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Session registration error: {:?}", e));
+		}
+		for p in ready_data {
+			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
+			h.connected(&NetworkContext::new(io, p, Some(token), self.connections.clone()), &token);
+		}
+		if let Some((p, packet_id, data)) = packet_data {
+			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
+			h.read(&NetworkContext::new(io, p, Some(token), self.connections.clone()), &token, packet_id, &data[1..]);
+		}
+		io.update_registration(token).unwrap_or_else(|e| debug!(target: "net", "Token registration error: {:?}", e));
 	}

+	fn start_session(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
+		self.connections.write().unwrap().replace_with(token, |c| {
+			match Arc::try_unwrap(c).ok().unwrap().into_inner().unwrap() {
+				ConnectionEntry::Handshake(h) => {
+					let session = Session::new(h, io, &self.info.read().unwrap()).expect("Session creation error");
+					io.update_registration(token).expect("Error updating session registration");
+					Some(Arc::new(Mutex::new(ConnectionEntry::Session(session))))
+				},
+				_ => { None } // handshake expired
+			}
+		}).ok();
+	}
+
-	fn connection_timeout<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
+	fn connection_timeout(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
 		self.kill_connection(token, io)
 	}

-	fn kill_connection<'s>(&'s mut self, token: StreamToken, io: &mut IoContext<'s, NetworkIoMessage<Message>>) {
+	fn kill_connection(&self, token: StreamToken, io: &IoContext<NetworkIoMessage<Message>>) {
 		let mut to_disconnect: Vec<ProtocolId> = Vec::new();
-		let mut remove = true;
-		match self.connections.get_mut(token) {
-			Some(&mut ConnectionEntry::Handshake(_)) => (), // just abandon handshake
-			Some(&mut ConnectionEntry::Session(ref mut s)) if s.is_ready() => {
-				for (p, _) in &mut self.handlers {
-					if s.have_capability(p) {
-						to_disconnect.push(p);
-					}
-				}
-			},
-			_ => {
-				remove = false;
-			},
-		}
+		{
+			let mut connections = self.connections.write().unwrap();
+			if let Some(connection) = connections.get(token).cloned() {
+				match *connection.lock().unwrap().deref_mut() {
+					ConnectionEntry::Handshake(_) => {
+						connections.remove(token);
+					},
+					ConnectionEntry::Session(ref mut s) if s.is_ready() => {
+						for (p, _) in self.handlers.read().unwrap().iter() {
+							if s.have_capability(p) {
+								to_disconnect.push(p);
+							}
+						}
+						connections.remove(token);
+					},
+					_ => {},
+				}
+			}
+		}
 		for p in to_disconnect {
-			let mut h = self.handlers.get_mut(p).unwrap();
-			h.disconnected(&mut NetworkContext::new(io, p, Some(token), &mut self.connections, &mut self.timers), &token);
+			let h = self.handlers.read().unwrap().get(p).unwrap().clone();
+			h.disconnected(&NetworkContext::new(io, p, Some(token), self.connections.clone()), &token);
 		}
-		if remove {
-			self.connections.remove(token);
-		}
 	}
 }

-impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Message: Send + 'static {
+impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Message: Send + Sync + Clone + 'static {
 	/// Initialize networking
-	fn initialize(&mut self, io: &mut IoContext<NetworkIoMessage<Message>>) {
-		/*
-		match ::ifaces::Interface::get_all().unwrap().into_iter().filter(|x| x.kind == ::ifaces::Kind::Packet && x.addr.is_some()).next() {
-		Some(iface) => config.public_address = iface.addr.unwrap(),
-		None => warn!("No public network interface"),
-		*/
-
-		// Start listening for incoming connections
-		io.event_loop.register(&self.listener, Token(TCP_ACCEPT), EventSet::readable(), PollOpt::edge()).unwrap();
-		io.event_loop.timeout_ms(Token(IDLE), MAINTENANCE_TIMEOUT).unwrap();
-		// open the udp socket
-		io.event_loop.register(&self.udp_socket, Token(NODETABLE_RECEIVE), EventSet::readable(), PollOpt::edge()).unwrap();
-		io.event_loop.timeout_ms(Token(NODETABLE_MAINTAIN), 7200).unwrap();
-		let port = self.info.config.listen_address.port();
-		self.info.listen_port = port;
-
-		self.add_node("enode://a9a921de2ff09a9a4d38b623c67b2d6b477a8e654ae95d874750cbbcb31b33296496a7b4421934e2629269e180823e52c15c2b19fc59592ec51ffe4f2de76ed7@127.0.0.1:30303");
-		/* // GO bootnodes
-		self.add_node("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@52.16.188.185:30303"); // IE
-		self.add_node("enode://de471bccee3d042261d52e9bff31458daecc406142b401d4cd848f677479f73104b9fdeb090af9583d3391b7f10cb2ba9e26865dd5fca4fcdc0fb1e3b723c786@54.94.239.50:30303"); // BR
-		self.add_node("enode://1118980bf48b0a3640bdba04e0fe78b1add18e1cd99bf22d53daac1fd9972ad650df52176e7c7d89d1114cfef2bc23a2959aa54998a46afcf7d91809f0855082@52.74.57.123:30303"); // SG
-		// ETH/DEV cpp-ethereum (poc-9.ethdev.com)
-		self.add_node("enode://979b7fa28feeb35a4741660a16076f1943202cb72b6af70d327f053e248bab9ba81760f39d0701ef1d8f89cc1fbd2cacba0710a12cd5314d5e0c9021aa3637f9@5.1.83.226:30303");*/
+	fn initialize(&self, io: &IoContext<NetworkIoMessage<Message>>) {
+		io.register_stream(TCP_ACCEPT).expect("Error registering TCP listener");
+		io.register_stream(NODETABLE_RECEIVE).expect("Error registering UDP listener");
+		io.register_timer(IDLE, MAINTENANCE_TIMEOUT).expect("Error registering Network idle timer");
+		//io.register_timer(NODETABLE_MAINTAIN, 7200);
 	}

-	fn stream_hup<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
+	fn stream_hup(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
 		trace!(target: "net", "Hup: {}", stream);
 		match stream {
 			FIRST_CONNECTION ... LAST_CONNECTION => self.connection_closed(stream, io),
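The new `start_session` consumes the slab entry by value: `Arc::try_unwrap` only succeeds when the IO handler holds the last clone of the entry, and `Mutex::into_inner` then yields the owned `ConnectionEntry` that `Session::new` consumes. A standalone sketch of those two std calls (the `State` enum is illustrative):

use std::sync::{Arc, Mutex};

#[derive(Debug)]
enum State {
    Handshake(String),
    Session(String),
}

fn promote(entry: Arc<Mutex<State>>) -> Option<Arc<Mutex<State>>> {
    // try_unwrap returns the Arc back (Err) if any other clone is still alive.
    let owned: State = Arc::try_unwrap(entry).ok()?.into_inner().unwrap();
    match owned {
        State::Handshake(peer) => Some(Arc::new(Mutex::new(State::Session(peer)))),
        _ => None, // already a session, or expired
    }
}

fn main() {
    let entry = Arc::new(Mutex::new(State::Handshake("peer-1".to_owned())));
    let promoted = promote(entry).expect("sole owner, promotion succeeds");
    println!("{:?}", *promoted.lock().unwrap());
}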
@@ -577,7 +577,7 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
 		};
 	}

-	fn stream_readable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
+	fn stream_readable(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
 		match stream {
 			FIRST_CONNECTION ... LAST_CONNECTION => self.connection_readable(stream, io),
 			NODETABLE_RECEIVE => {},
@@ -586,66 +586,97 @@ impl<Message> IoHandler<NetworkIoMessage<Message>> for Host<Message> where Messa
 		}
 	}

-	fn stream_writable<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, stream: StreamToken) {
+	fn stream_writable(&self, io: &IoContext<NetworkIoMessage<Message>>, stream: StreamToken) {
 		match stream {
 			FIRST_CONNECTION ... LAST_CONNECTION => self.connection_writable(stream, io),
+			NODETABLE_RECEIVE => {},
 			_ => panic!("Received unknown writable token"),
 		}
 	}

-	fn timeout<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, token: TimerToken) {
+	fn timeout(&self, io: &IoContext<NetworkIoMessage<Message>>, token: TimerToken) {
 		match token {
 			IDLE => self.maintain_network(io),
 			FIRST_CONNECTION ... LAST_CONNECTION => self.connection_timeout(token, io),
 			NODETABLE_DISCOVERY => {},
 			NODETABLE_MAINTAIN => {},
-			_ => {
-				if let Some(protocol) = self.timers.get_mut(&token).map(|p| *p) {
-					match self.handlers.get_mut(protocol) {
-						None => { warn!(target: "net", "No handler found for protocol: {:?}", protocol) },
-						Some(h) => { h.timeout(&mut NetworkContext::new(io, protocol, Some(token), &mut self.connections, &mut self.timers), token); }
-					};
-				} // else time not registerd through us
+			_ => match self.timers.read().unwrap().get(&token).cloned() {
+				Some(timer) => match self.handlers.read().unwrap().get(timer.protocol).cloned() {
+					None => { warn!(target: "net", "No handler found for protocol: {:?}", timer.protocol) },
+					Some(h) => { h.timeout(&NetworkContext::new(io, timer.protocol, None, self.connections.clone()), timer.token); }
+				},
+				None => { warn!("Unknown timer token: {}", token); } // timer is not registerd through us
 			}
 		}
 	}

-	fn message<'s>(&'s mut self, io: &mut IoContext<'s, NetworkIoMessage<Message>>, message: &'s mut NetworkIoMessage<Message>) {
+	fn message(&self, io: &IoContext<NetworkIoMessage<Message>>, message: &NetworkIoMessage<Message>) {
 		match *message {
 			NetworkIoMessage::AddHandler {
-				ref mut handler,
+				ref handler,
 				ref protocol,
 				ref versions
 			} => {
-				let mut h = mem::replace(handler, None).unwrap();
-				h.initialize(&mut NetworkContext::new(io, protocol, None, &mut self.connections, &mut self.timers));
-				self.handlers.insert(protocol, h);
+				let h = handler.clone();
+				h.initialize(&NetworkContext::new(io, protocol, None, self.connections.clone()));
+				self.handlers.write().unwrap().insert(protocol, h);
+				let mut info = self.info.write().unwrap();
 				for v in versions {
-					self.info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 });
+					info.capabilities.push(CapabilityInfo { protocol: protocol, version: *v, packet_count:0 });
 				}
 			},
-			NetworkIoMessage::Send {
-				ref peer,
-				ref packet_id,
+			NetworkIoMessage::AddTimer {
 				ref protocol,
-				ref data,
+				ref delay,
+				ref token,
 			} => {
-				match self.connections.get_mut(*peer as usize) {
-					Some(&mut ConnectionEntry::Session(ref mut s)) => {
-						s.send_packet(protocol, *packet_id as u8, &data).unwrap_or_else(|e| {
-							warn!(target: "net", "Send error: {:?}", e);
-						}); //TODO: don't copy vector data
-					},
-					_ => {
-						warn!(target: "net", "Send: Peer does not exist");
-					}
-				}
+				let handler_token = {
+					let mut timer_counter = self.timer_counter.write().unwrap();
+					let counter = timer_counter.deref_mut();
+					let handler_token = *counter;
+					*counter += 1;
+					handler_token
+				};
+				self.timers.write().unwrap().insert(handler_token, ProtocolTimer { protocol: protocol, token: *token });
+				io.register_timer(handler_token, *delay).expect("Error registering timer");
 			},
 			NetworkIoMessage::User(ref message) => {
-				for (p, h) in &mut self.handlers {
-					h.message(&mut NetworkContext::new(io, p, None, &mut self.connections, &mut self.timers), &message);
+				for (p, h) in self.handlers.read().unwrap().iter() {
+					h.message(&NetworkContext::new(io, p, None, self.connections.clone()), &message);
 				}
 			}
 		}
 	}
+
+	fn register_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage<Message>>>) {
+		match stream {
+			FIRST_CONNECTION ... LAST_CONNECTION => {
+				if let Some(connection) = self.connections.read().unwrap().get(stream).cloned() {
+					match *connection.lock().unwrap().deref() {
+						ConnectionEntry::Handshake(ref h) => h.register_socket(reg, event_loop).expect("Error registering socket"),
+						ConnectionEntry::Session(_) => warn!("Unexpected session stream registration")
+					}
+				} else {} // expired
+			}
+			NODETABLE_RECEIVE => event_loop.register(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error registering stream"),
+			TCP_ACCEPT => event_loop.register(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error registering stream"),
+			_ => warn!("Unexpected stream registration")
+		}
+	}
+
+	fn update_stream(&self, stream: StreamToken, reg: Token, event_loop: &mut EventLoop<IoManager<NetworkIoMessage<Message>>>) {
+		match stream {
+			FIRST_CONNECTION ... LAST_CONNECTION => {
+				if let Some(connection) = self.connections.read().unwrap().get(stream).cloned() {
+					match *connection.lock().unwrap().deref() {
+						ConnectionEntry::Handshake(ref h) => h.update_socket(reg, event_loop).expect("Error updating socket"),
+						ConnectionEntry::Session(ref s) => s.update_socket(reg, event_loop).expect("Error updating socket"),
+					}
+				} else {} // expired
+			}
+			NODETABLE_RECEIVE => event_loop.reregister(self.udp_socket.lock().unwrap().deref(), Token(NODETABLE_RECEIVE), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"),
+			TCP_ACCEPT => event_loop.reregister(self.tcp_listener.lock().unwrap().deref(), Token(TCP_ACCEPT), EventSet::all(), PollOpt::edge()).expect("Error reregistering stream"),
+			_ => warn!("Unexpected stream update")
+		}
+	}
 }
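The `AddTimer` arm above keeps two token spaces apart: handlers pick small protocol-local tokens, while the host allocates fresh io-level tokens from `timer_counter` and records the mapping in `timers`, so `timeout` can route each callback to the right protocol with the handler's own token. A std-only sketch of that bookkeeping (names are illustrative):

use std::collections::HashMap;
use std::sync::RwLock;

struct TimerTable {
    counter: RwLock<usize>,                                 // next io-level token
    timers: RwLock<HashMap<usize, (&'static str, usize)>>,  // io token -> (protocol, handler token)
}

impl TimerTable {
    fn new(first: usize) -> TimerTable {
        TimerTable { counter: RwLock::new(first), timers: RwLock::new(HashMap::new()) }
    }

    fn add_timer(&self, protocol: &'static str, handler_token: usize) -> usize {
        let io_token = {
            let mut counter = self.counter.write().unwrap();
            let t = *counter;
            *counter += 1;
            t
        };
        self.timers.write().unwrap().insert(io_token, (protocol, handler_token));
        io_token
    }

    fn timeout(&self, io_token: usize) {
        match self.timers.read().unwrap().get(&io_token) {
            Some(&(protocol, handler_token)) =>
                println!("dispatch timer {} to protocol {:?}", handler_token, protocol),
            None => println!("unknown timer token {}", io_token),
        }
    }
}

fn main() {
    let table = TimerTable::new(100);
    let t = table.add_timer("eth", 0);
    table.timeout(t);
}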
@@ -8,39 +8,40 @@
 ///
 /// struct MyHandler;
 ///
+/// #[derive(Clone)]
 /// struct MyMessage {
 /// 	data: u32
 /// }
 ///
 /// impl NetworkProtocolHandler<MyMessage> for MyHandler {
-/// 	fn initialize(&mut self, io: &mut NetworkContext<MyMessage>) {
-/// 		io.register_timer(1000);
+/// 	fn initialize(&self, io: &NetworkContext<MyMessage>) {
+/// 		io.register_timer(0, 1000);
 /// 	}
 ///
-/// 	fn read(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
+/// 	fn read(&self, io: &NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
 /// 		println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer);
 /// 	}
 ///
-/// 	fn connected(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId) {
+/// 	fn connected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
 /// 		println!("Connected {}", peer);
 /// 	}
 ///
-/// 	fn disconnected(&mut self, io: &mut NetworkContext<MyMessage>, peer: &PeerId) {
+/// 	fn disconnected(&self, io: &NetworkContext<MyMessage>, peer: &PeerId) {
 /// 		println!("Disconnected {}", peer);
 /// 	}
 ///
-/// 	fn timeout(&mut self, io: &mut NetworkContext<MyMessage>, timer: TimerToken) {
+/// 	fn timeout(&self, io: &NetworkContext<MyMessage>, timer: TimerToken) {
 /// 		println!("Timeout {}", timer);
 /// 	}
 ///
-/// 	fn message(&mut self, io: &mut NetworkContext<MyMessage>, message: &MyMessage) {
+/// 	fn message(&self, io: &NetworkContext<MyMessage>, message: &MyMessage) {
 /// 		println!("Message {}", message.data);
 /// 	}
 /// }
 ///
 /// fn main () {
 /// 	let mut service = NetworkService::<MyMessage>::start().expect("Error creating network service");
-/// 	service.register_protocol(Box::new(MyHandler), "myproto", &[1u8]);
+/// 	service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]);
 ///
 /// 	// Wait for quit condition
 /// 	// ...
@@ -57,36 +58,78 @@ mod error;
 mod node;

 /// TODO [arkpar] Please document me
-pub type PeerId = host::PeerId;
+pub use network::host::PeerId;
 /// TODO [arkpar] Please document me
-pub type PacketId = host::PacketId;
+pub use network::host::PacketId;
 /// TODO [arkpar] Please document me
-pub type NetworkContext<'s,'io, Message> = host::NetworkContext<'s, 'io, Message>;
+pub use network::host::NetworkContext;
 /// TODO [arkpar] Please document me
-pub type NetworkService<Message> = service::NetworkService<Message>;
+pub use network::service::NetworkService;
+/// TODO [arkpar] Please document me
+pub use network::host::NetworkIoMessage;
 /// TODO [arkpar] Please document me
-pub type NetworkIoMessage<Message> = host::NetworkIoMessage<Message>;
 pub use network::host::NetworkIoMessage::User as UserMessage;
 /// TODO [arkpar] Please document me
-pub type NetworkError = error::NetworkError;
+pub use network::error::NetworkError;

-use io::*;
+use io::TimerToken;

 /// Network IO protocol handler. This needs to be implemented for each new subprotocol.
 /// All the handler function are called from within IO event loop.
 /// `Message` is the type for message data.
-pub trait NetworkProtocolHandler<Message>: Send where Message: Send {
+pub trait NetworkProtocolHandler<Message>: Sync + Send where Message: Send + Sync + Clone {
 	/// Initialize the handler
-	fn initialize(&mut self, _io: &mut NetworkContext<Message>) {}
+	fn initialize(&self, _io: &NetworkContext<Message>) {}
 	/// Called when new network packet received.
-	fn read(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId, packet_id: u8, data: &[u8]);
+	fn read(&self, io: &NetworkContext<Message>, peer: &PeerId, packet_id: u8, data: &[u8]);
 	/// Called when new peer is connected. Only called when peer supports the same protocol.
-	fn connected(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId);
+	fn connected(&self, io: &NetworkContext<Message>, peer: &PeerId);
 	/// Called when a previously connected peer disconnects.
-	fn disconnected(&mut self, io: &mut NetworkContext<Message>, peer: &PeerId);
+	fn disconnected(&self, io: &NetworkContext<Message>, peer: &PeerId);
 	/// Timer function called after a timeout created with `NetworkContext::timeout`.
-	fn timeout(&mut self, _io: &mut NetworkContext<Message>, _timer: TimerToken) {}
+	fn timeout(&self, _io: &NetworkContext<Message>, _timer: TimerToken) {}
 	/// Called when a broadcasted message is received. The message can only be sent from a different IO handler.
-	fn message(&mut self, _io: &mut NetworkContext<Message>, _message: &Message) {}
+	fn message(&self, _io: &NetworkContext<Message>, _message: &Message) {}
 }
+
+#[test]
+fn test_net_service() {
+	use std::sync::Arc;
+	struct MyHandler;
+
+	#[derive(Clone)]
+	struct MyMessage {
+		data: u32
+	}
+
+	impl NetworkProtocolHandler<MyMessage> for MyHandler {
+		fn initialize(&self, io: &NetworkContext<MyMessage>) {
+			io.register_timer(0, 1000).unwrap();
+		}
+
+		fn read(&self, _io: &NetworkContext<MyMessage>, peer: &PeerId, packet_id: u8, data: &[u8]) {
+			println!("Received {} ({} bytes) from {}", packet_id, data.len(), peer);
+		}
+
+		fn connected(&self, _io: &NetworkContext<MyMessage>, peer: &PeerId) {
+			println!("Connected {}", peer);
+		}
+
+		fn disconnected(&self, _io: &NetworkContext<MyMessage>, peer: &PeerId) {
+			println!("Disconnected {}", peer);
+		}
+
+		fn timeout(&self, _io: &NetworkContext<MyMessage>, timer: TimerToken) {
+			println!("Timeout {}", timer);
+		}
+
+		fn message(&self, _io: &NetworkContext<MyMessage>, message: &MyMessage) {
+			println!("Message {}", message.data);
+		}
+	}
+
+	let mut service = NetworkService::<MyMessage>::start().expect("Error creating network service");
+	service.register_protocol(Arc::new(MyHandler), "myproto", &[1u8]).unwrap();
+}
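Because every trait method now takes `&self`, a handler that keeps mutable state has to reach for interior mutability (atomics, `Mutex`, `RwLock`) rather than `&mut self`. A std-only sketch of the idea, independent of the trait itself (`PacketStats` is illustrative):

use std::sync::atomic::{AtomicUsize, Ordering};

// State a shareable (&self) protocol handler might carry.
struct PacketStats {
    packets: AtomicUsize,
    bytes: AtomicUsize,
}

impl PacketStats {
    fn new() -> PacketStats {
        PacketStats { packets: AtomicUsize::new(0), bytes: AtomicUsize::new(0) }
    }

    // Mirrors the shape of NetworkProtocolHandler::read: &self, data by reference.
    fn read(&self, data: &[u8]) {
        self.packets.fetch_add(1, Ordering::SeqCst);
        self.bytes.fetch_add(data.len(), Ordering::SeqCst);
    }
}

fn main() {
    let stats = PacketStats::new();
    stats.read(b"ping");
    stats.read(b"pong!");
    println!("{} packets, {} bytes",
        stats.packets.load(Ordering::SeqCst), stats.bytes.load(Ordering::SeqCst));
}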
@@ -1,23 +1,24 @@
+use std::sync::*;
 use error::*;
 use network::{NetworkProtocolHandler};
 use network::error::{NetworkError};
-use network::host::{Host, NetworkIoMessage, PeerId, PacketId, ProtocolId};
+use network::host::{Host, NetworkIoMessage, ProtocolId};
 use io::*;

 /// IO Service with networking
 /// `Message` defines a notification data type.
-pub struct NetworkService<Message> where Message: Send + 'static {
+pub struct NetworkService<Message> where Message: Send + Sync + Clone + 'static {
 	io_service: IoService<NetworkIoMessage<Message>>,
 	host_info: String,
 }

-impl<Message> NetworkService<Message> where Message: Send + 'static {
+impl<Message> NetworkService<Message> where Message: Send + Sync + Clone + 'static {
 	/// Starts IO event loop
 	pub fn start() -> Result<NetworkService<Message>, UtilError> {
 		let mut io_service = try!(IoService::<NetworkIoMessage<Message>>::start());
-		let host = Box::new(Host::new());
-		let host_info = host.info.client_version.clone();
-		info!("NetworkService::start(): id={:?}", host.info.id());
+		let host = Arc::new(Host::new());
+		let host_info = host.client_version();
+		info!("NetworkService::start(): id={:?}", host.client_id());
 		try!(io_service.register_handler(host));
 		Ok(NetworkService {
 			io_service: io_service,
@@ -25,21 +26,10 @@ impl<Message> NetworkService<Message> where Message: Send + 'static {
 		})
 	}

-	/// Send a message over the network. Normaly `HostIo::send` should be used. This can be used from non-io threads.
-	pub fn send(&mut self, peer: &PeerId, packet_id: PacketId, protocol: ProtocolId, data: &[u8]) -> Result<(), NetworkError> {
-		try!(self.io_service.send_message(NetworkIoMessage::Send {
-			peer: *peer,
-			packet_id: packet_id,
-			protocol: protocol,
-			data: data.to_vec()
-		}));
-		Ok(())
-	}
-
 	/// Regiter a new protocol handler with the event loop.
-	pub fn register_protocol(&mut self, handler: Box<NetworkProtocolHandler<Message>+Send>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> {
+	pub fn register_protocol(&mut self, handler: Arc<NetworkProtocolHandler<Message>+Send + Sync>, protocol: ProtocolId, versions: &[u8]) -> Result<(), NetworkError> {
 		try!(self.io_service.send_message(NetworkIoMessage::AddHandler {
-			handler: Some(handler),
+			handler: handler,
 			protocol: protocol,
 			versions: versions.to_vec(),
 		}));
@@ -55,7 +45,5 @@ impl<Message> NetworkService<Message> where Message: Send + 'static {
 	pub fn io(&mut self) -> &mut IoService<NetworkIoMessage<Message>> {
 		&mut self.io_service
 	}
-
-
 }
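With the `Box` replaced by an `Arc` in `register_protocol`, the caller may keep a clone of the handler for itself while the service stores another. A fragment consistent with the doc and test examples earlier in this diff (it assumes the crate's `NetworkService`, plus the example `MyHandler`/`MyMessage` types, so it is not standalone):

	let mut service = NetworkService::<MyMessage>::start().expect("Error creating network service");
	let handler = Arc::new(MyHandler);
	// One clone goes to the service; the other stays with the caller for later inspection.
	service.register_protocol(handler.clone(), "myproto", &[1u8]).unwrap();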
@@ -4,6 +4,7 @@ use rlp::*;
 use network::connection::{EncryptedConnection, Packet};
 use network::handshake::Handshake;
 use error::*;
+use io::{IoContext};
 use network::error::{NetworkError, DisconnectReason};
 use network::host::*;
 use network::node::NodeId;
@@ -84,7 +85,7 @@ const PACKET_LAST: u8 = 0x7f;

 impl Session {
 	/// Create a new session out of comepleted handshake. Consumes handshake object.
-	pub fn new<Host:Handler<Timeout=Token>>(h: Handshake, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<Session, UtilError> {
+	pub fn new<Message>(h: Handshake, _io: &IoContext<Message>, host: &HostInfo) -> Result<Session, UtilError> where Message: Send + Sync + Clone {
 		let id = h.id.clone();
 		let connection = try!(EncryptedConnection::new(h));
 		let mut session = Session {
@@ -99,7 +100,6 @@ impl Session {
 		};
 		try!(session.write_hello(host));
 		try!(session.write_ping());
-		try!(session.connection.register(event_loop));
 		Ok(session)
 	}

@@ -109,16 +109,16 @@ impl Session {
 	}

 	/// Readable IO handler. Returns packet data if available.
-	pub fn readable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, host: &HostInfo) -> Result<SessionData, UtilError> {
-		match try!(self.connection.readable(event_loop)) {
+	pub fn readable<Message>(&mut self, io: &IoContext<Message>, host: &HostInfo) -> Result<SessionData, UtilError> where Message: Send + Sync + Clone {
+		match try!(self.connection.readable(io)) {
 			Some(data) => Ok(try!(self.read_packet(data, host))),
 			None => Ok(SessionData::None)
 		}
 	}

 	/// Writable IO handler. Sends pending packets.
-	pub fn writable<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>, _host: &HostInfo) -> Result<(), UtilError> {
-		self.connection.writable(event_loop)
+	pub fn writable<Message>(&mut self, io: &IoContext<Message>, _host: &HostInfo) -> Result<(), UtilError> where Message: Send + Sync + Clone {
+		self.connection.writable(io)
 	}

 	/// Checks if peer supports given capability
@@ -127,8 +127,8 @@ impl Session {
 	}

 	/// Update registration with the event loop. Should be called at the end of the IO handler.
-	pub fn reregister<Host:Handler>(&mut self, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
-		self.connection.reregister(event_loop)
+	pub fn update_socket<Host:Handler>(&self, reg:Token, event_loop: &mut EventLoop<Host>) -> Result<(), UtilError> {
+		self.connection.update_socket(reg, event_loop)
 	}

 	/// Send a protocol packet to peer.