openethereum/ethcore/light/src/net/mod.rs

// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! PIP Protocol Version 1 implementation.
//!
//! This uses a "Provider" to answer requests.
use ethcore::transaction::{Action, UnverifiedTransaction};
use ethcore::receipt::Receipt;

use io::TimerToken;
use network::{NetworkProtocolHandler, NetworkContext, PeerId};
use rlp::{RlpStream, Stream, UntrustedRlp, View};
use util::hash::H256;
use util::{Bytes, DBValue, Mutex, RwLock, U256};
use time::{Duration, SteadyTime};

use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};

use provider::Provider;
use request::{self, HashOrNumber, Request, Response};

use self::request_credits::{Credits, FlowParams};
use self::context::{Ctx, TickCtx};
use self::error::Punishment;
use self::request_set::RequestSet;
use self::id_guard::IdGuard;
mod context;
mod error;
mod status;
mod request_set;

#[cfg(test)]
mod tests;

pub mod request_credits;

pub use self::error::Error;
pub use self::context::{BasicContext, EventContext, IoContext};
pub use self::status::{Status, Capabilities, Announcement};
const TIMEOUT: TimerToken = 0;
const TIMEOUT_INTERVAL_MS: u64 = 1000;
const TICK_TIMEOUT: TimerToken = 1;
const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000;
// minimum interval between updates.
const UPDATE_INTERVAL_MS: i64 = 5000;
/// Supported protocol versions.
pub const PROTOCOL_VERSIONS: &'static [u8] = &[1];
/// Max protocol version.
pub const MAX_PROTOCOL_VERSION: u8 = 1;
/// Packet count for LES.
pub const PACKET_COUNT: u8 = 17;
// packet ID definitions.
mod packet {
	// the status packet.
	pub const STATUS: u8 = 0x00;

	// announcement of new block hashes or capabilities.
	pub const ANNOUNCE: u8 = 0x01;

	// request and response.
	pub const REQUEST: u8 = 0x02;
	pub const RESPONSE: u8 = 0x03;

	// relay transactions to peers.
	pub const SEND_TRANSACTIONS: u8 = 0x04;

	// request and response for transaction proof.
	// TODO: merge with request/response.
	pub const GET_TRANSACTION_PROOF: u8 = 0x05;
	pub const TRANSACTION_PROOF: u8 = 0x06;
}
// timeouts for different kinds of requests. all values are in milliseconds.
// TODO: variable timeouts based on request count.
mod timeout {
pub const HANDSHAKE: i64 = 2500;
pub const HEADERS: i64 = 2500;
pub const BODIES: i64 = 5000;
pub const RECEIPTS: i64 = 3500;
pub const PROOFS: i64 = 4000;
pub const CONTRACT_CODES: i64 = 5000;
pub const HEADER_PROOFS: i64 = 3500;
pub const TRANSACTION_PROOF: i64 = 5000;
}
/// A request id.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct ReqId(usize);

impl fmt::Display for ReqId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "Request #{}", self.0)
	}
}
// A pending peer: one we've sent our status to, but
// may not have received theirs yet.
struct PendingPeer {
	sent_head: H256,
	last_update: SteadyTime,
}
/// Data relevant to each peer. Not accessible publicly, only `pub` due to
/// limitations of the privacy system.
pub struct Peer {
	local_credits: Credits, // their credits relative to us
	status: Status,
	capabilities: Capabilities,
	remote_flow: Option<(Credits, FlowParams)>,
	sent_head: H256, // last chain head we've given them.
	last_update: SteadyTime,
	pending_requests: RequestSet,
	failed_requests: Vec<ReqId>,
}
impl Peer {
	// refund credits for a request. returns new amount of credits.
	fn refund(&mut self, flow_params: &FlowParams, amount: U256) -> U256 {
		flow_params.refund(&mut self.local_credits, amount);

		self.local_credits.current()
	}
}
/// An event handler for the light protocol.
///
/// Each handler function takes a context which describes the relevant peer
/// and gives references to the IO layer and protocol structure so new messages
/// can be dispatched immediately.
///
/// Request responses are not guaranteed to be complete or valid, but passed IDs will be correct.
/// Response handlers are not given a copy of the original request; it is assumed
/// that relevant data will be stored by interested handlers.
pub trait Handler: Send + Sync {
	/// Called when a peer connects.
	fn on_connect(&self, _ctx: &EventContext, _status: &Status, _capabilities: &Capabilities) { }
	/// Called when a peer disconnects, with a list of request IDs which
	/// remain unfulfilled.
	fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) { }
	/// Called when a peer makes an announcement.
	fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { }
	/// Called when a peer requests relay of some transactions.
	fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { }
	/// Called when a peer responds to requests.
	fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { }
	/// Called when a peer responds with a transaction proof. Each proof is a vector of state items.
	fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { }
	/// Called to "tick" the handler periodically.
	fn tick(&self, _ctx: &BasicContext) { }
	/// Called on abort. This signals to handlers that they should clean up
	/// and ignore peers.
	// TODO: corresponding `on_activate`?
	fn on_abort(&self) { }
}
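
// Illustrative sketch only (not part of the protocol itself): a minimal
// `Handler` implementation that relies on the default no-op methods and
// merely counts announcements. Gated behind `cfg(test)` so it carries no
// runtime footprint; the struct name is hypothetical.
#[cfg(test)]
mod handler_example {
	use std::sync::atomic::{AtomicUsize, Ordering};

	use super::{Announcement, EventContext, Handler};

	// Counts how many announcements have been observed.
	#[allow(dead_code)]
	struct AnnouncementCounter(AtomicUsize);

	impl Handler for AnnouncementCounter {
		fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) {
			// every other `Handler` method keeps its default empty body.
			self.0.fetch_add(1, Ordering::SeqCst);
		}
	}
}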
/// Protocol parameters.
pub struct Params {
	/// Network id.
	pub network_id: u64,
	/// Request credits parameters.
	pub flow_params: FlowParams,
	/// Initial capabilities.
	pub capabilities: Capabilities,
}

/// Type alias for convenience.
pub type PeerMap = HashMap<PeerId, Mutex<Peer>>;
mod id_guard {
use network::PeerId;
use util::RwLockReadGuard;
use super::{PeerMap, ReqId};
// Guards success or failure of given request.
// On drop, inserts the req_id into the "failed requests"
// set for the peer unless defused. In separate module to enforce correct usage.
pub struct IdGuard<'a> {
peers: RwLockReadGuard<'a, PeerMap>,
peer_id: PeerId,
req_id: ReqId,
active: bool,
}
impl<'a> IdGuard<'a> {
		/// Create a new `IdGuard`, which will prevent access to the inner `ReqId`
		/// (for forming responses, triggering handlers) until defused.
pub fn new(peers: RwLockReadGuard<'a, PeerMap>, peer_id: PeerId, req_id: ReqId) -> Self {
IdGuard {
peers: peers,
peer_id: peer_id,
req_id: req_id,
active: true,
}
}
/// Defuse the guard, signalling that the request has been successfully decoded.
pub fn defuse(mut self) -> ReqId {
// can't use the mem::forget trick here since we need the
// read guard to drop.
self.active = false;
self.req_id
}
}
impl<'a> Drop for IdGuard<'a> {
fn drop(&mut self) {
if !self.active { return }
if let Some(p) = self.peers.get(&self.peer_id) {
p.lock().failed_requests.push(self.req_id);
}
}
}
}
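
// Illustrative usage sketch of the `IdGuard` contract (test-only, assuming the
// parking_lot-style `util::RwLock` re-export used elsewhere in this module):
// defusing the guard yields the request id, while dropping it undefused would
// mark the request as failed for the peer.
#[cfg(test)]
mod id_guard_usage {
	use std::collections::HashMap;

	use network::PeerId;
	use util::RwLock;

	use super::id_guard::IdGuard;
	use super::{PeerMap, ReqId};

	#[test]
	fn defused_guard_yields_request_id() {
		let peers: RwLock<PeerMap> = RwLock::new(HashMap::new());
		let guard = IdGuard::new(peers.read(), PeerId::default(), ReqId(42));
		assert_eq!(guard.defuse(), ReqId(42));
	}
}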
/// This is an implementation of the light ethereum network protocol, abstracted
/// over a `Provider` of data and a p2p network.
///
/// This is simply designed for request-response purposes. Higher level uses
/// of the protocol, such as synchronization, will function as wrappers around
/// this system.
//
// LOCK ORDER:
// Locks must be acquired in the order declared, and when holding a read lock
// on the peers, only one peer may be held at a time.
pub struct LightProtocol {
	provider: Arc<Provider>,
	genesis_hash: H256,
	network_id: u64,
	pending_peers: RwLock<HashMap<PeerId, PendingPeer>>,
	peers: RwLock<PeerMap>,
	capabilities: RwLock<Capabilities>,
	flow_params: FlowParams, // assumed static and same for every peer.
	handlers: Vec<Arc<Handler>>,
	req_id: AtomicUsize,
}
impl LightProtocol {
	/// Create a new instance of the protocol manager.
	pub fn new(provider: Arc<Provider>, params: Params) -> Self {
		debug!(target: "pip", "Initializing light protocol handler");

		let genesis_hash = provider.chain_info().genesis_hash;
		LightProtocol {
			provider: provider,
			genesis_hash: genesis_hash,
			network_id: params.network_id,
			pending_peers: RwLock::new(HashMap::new()),
			peers: RwLock::new(HashMap::new()),
			capabilities: RwLock::new(params.capabilities),
			flow_params: params.flow_params,
			handlers: Vec::new(),
			req_id: AtomicUsize::new(0),
		}
	}
	/// Attempt to get peer status.
	pub fn peer_status(&self, peer: &PeerId) -> Option<Status> {
		self.peers.read().get(peer)
			.map(|peer| peer.lock().status.clone())
	}
/// Get number of (connected, active) peers.
pub fn peer_count(&self) -> (usize, usize) {
let num_pending = self.pending_peers.read().len();
let peers = self.peers.read();
(
num_pending + peers.len(),
peers.values().filter(|p| !p.lock().pending_requests.is_empty()).count(),
)
}
	/// Make a request to a peer.
	///
	/// Fails on: nonexistent peer, network error, peer not server,
	/// insufficient credits. Does not check capabilities before sending.
	/// On success, returns a request id which can later be coordinated
	/// with an event.
	// TODO: pass `Requests`.
	pub fn request_from(&self, _io: &IoContext, _peer_id: &PeerId, _request: Request) -> Result<ReqId, Error> {
		unimplemented!()
	}
/// Make an announcement of new chain head and capabilities to all peers.
/// The announcement is expected to be valid.
pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) {
let mut reorgs_map = HashMap::new();
let now = SteadyTime::now();
// update stored capabilities
self.capabilities.write().update_from(&announcement);
// calculate reorg info and send packets
for (peer_id, peer_info) in self.peers.read().iter() {
let mut peer_info = peer_info.lock();
// TODO: "urgent" announcements like new blocks?
			// the timer approach will skip 1 (possibly 2) announcements on rare occasions.
if peer_info.sent_head == announcement.head_hash ||
peer_info.status.head_num >= announcement.head_num ||
now - peer_info.last_update < Duration::milliseconds(UPDATE_INTERVAL_MS) {
continue
}
peer_info.last_update = now;
let reorg_depth = reorgs_map.entry(peer_info.sent_head)
.or_insert_with(|| {
match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) {
Some(depth) => depth,
None => {
// both values will always originate locally -- this means something
// has gone really wrong
debug!(target: "pip", "couldn't compute reorganization depth between {:?} and {:?}",
&announcement.head_hash, &peer_info.sent_head);
0
}
}
});
peer_info.sent_head = announcement.head_hash;
announcement.reorg_depth = *reorg_depth;
io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement));
}
}
/// Add an event handler.
///
/// These are intended to be added when the protocol structure
/// is initialized as a means of customizing its behavior,
/// and dispatching requests immediately upon events.
pub fn add_handler(&mut self, handler: Arc<Handler>) {
self.handlers.push(handler);
}
/// Signal to handlers that network activity is being aborted
/// and clear peer data.
pub fn abort(&self) {
for handler in &self.handlers {
handler.on_abort();
}
// acquire in order and hold.
let mut pending_peers = self.pending_peers.write();
let mut peers = self.peers.write();
pending_peers.clear();
peers.clear();
}
	// Does the common pre-verification of responses before the response itself
	// is actually decoded:
	// - check whether peer exists
	// - check whether request was made
	// - check whether request kinds match
	// The response packet is expected to lead with the request id, followed by
	// the responder's updated credit amount.
	fn pre_verify_response(&self, peer: &PeerId, kind: request::Kind, raw: &UntrustedRlp) -> Result<IdGuard, Error> {
		let req_id = ReqId(raw.val_at(0)?);
		let cur_credits: U256 = raw.val_at(1)?;

		trace!(target: "pip", "pre-verifying response from peer {}, kind={:?}", peer, kind);

		let peers = self.peers.read();
		let res = match peers.get(peer) {
			Some(peer_info) => {
				let mut peer_info = peer_info.lock();
				let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now());
				let flow_info = peer_info.remote_flow.as_mut();

				match (req_info, flow_info) {
					(Some(_request), Some(flow_info)) => {
						// never accept a credit value above the limit in the peer's flow parameters.
						let &mut (ref mut c, ref mut flow) = flow_info;
						let actual_credits = ::std::cmp::min(cur_credits, *flow.limit());
						c.update_to(actual_credits);

						Ok(())
					}
					(None, _) => Err(Error::UnsolicitedResponse),
					(_, None) => Err(Error::NotServer), // really should be impossible.
				}
			}
			None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind.
		};

		res.map(|_| IdGuard::new(peers, *peer, req_id))
	}
	/// Handle a packet using the given io context.
	/// Packet data is _untrusted_: invalid or malicious data will not cause a
	/// panic, but will lead to the offending peer being punished.
	pub fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
		let rlp = UntrustedRlp::new(data);

		trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer);

		// handle the packet
		let res = match packet_id {
			packet::STATUS => self.status(peer, io, rlp),
			packet::ANNOUNCE => self.announcement(peer, io, rlp),
			packet::REQUEST => self.request(peer, io, rlp),
			packet::RESPONSE => self.response(peer, io, rlp),
			packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),
			other => Err(Error::UnrecognizedPacket(other)),
		};

		if let Err(e) = res {
			punish(*peer, io, e);
		}
	}
// check timeouts and punish peers.
fn timeout_check(&self, io: &IoContext) {
let now = SteadyTime::now();
// handshake timeout
{
let mut pending = self.pending_peers.write();
let slowpokes: Vec<_> = pending.iter()
.filter(|&(_, ref peer)| {
peer.last_update + Duration::milliseconds(timeout::HANDSHAKE) <= now
})
.map(|(&p, _)| p)
.collect();
for slowpoke in slowpokes {
debug!(target: "pip", "Peer {} handshake timed out", slowpoke);
pending.remove(&slowpoke);
io.disconnect_peer(slowpoke);
}
}
// request timeouts
{
for (peer_id, peer) in self.peers.read().iter() {
if peer.lock().pending_requests.check_timeout(now) {
debug!(target: "pip", "Peer {} request timeout", peer_id);
io.disconnect_peer(*peer_id);
}
}
}
}
/// called when a peer connects.
pub fn on_connect(&self, peer: &PeerId, io: &IoContext) {
let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) {
Ok(pv) => pv,
Err(e) => { punish(*peer, io, e); return }
};
if PROTOCOL_VERSIONS.iter().find(|x| **x == proto_version).is_none() {
punish(*peer, io, Error::UnsupportedProtocolVersion(proto_version));
return;
}
let chain_info = self.provider.chain_info();
let status = Status {
head_td: chain_info.total_difficulty,
head_hash: chain_info.best_block_hash,
head_num: chain_info.best_block_number,
genesis_hash: chain_info.genesis_hash,
protocol_version: proto_version as u32, // match peer proto version
network_id: self.network_id,
last_head: None,
};
let capabilities = self.capabilities.read().clone();
let status_packet = status::write_handshake(&status, &capabilities, Some(&self.flow_params));
self.pending_peers.write().insert(*peer, PendingPeer {
sent_head: chain_info.best_block_hash,
last_update: SteadyTime::now(),
});
io.send(*peer, packet::STATUS, status_packet);
}
/// called when a peer disconnects.
pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) {
trace!(target: "pip", "Peer {} disconnecting", peer);
self.pending_peers.write().remove(&peer);
let unfulfilled = match self.peers.write().remove(&peer) {
None => return,
Some(peer_info) => {
let peer_info = peer_info.into_inner();
let mut unfulfilled: Vec<_> = peer_info.pending_requests.collect_ids();
unfulfilled.extend(peer_info.failed_requests);
unfulfilled
}
};
for handler in &self.handlers {
handler.on_disconnect(&Ctx {
peer: peer,
io: io,
proto: self,
}, &unfulfilled)
}
}
/// Execute the given closure with a basic context derived from the I/O context.
pub fn with_context<F, T>(&self, io: &IoContext, f: F) -> T
where F: FnOnce(&BasicContext) -> T
{
f(&TickCtx {
io: io,
proto: self,
})
}
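
	// Hypothetical usage sketch (the names `proto` and `io` are assumptions,
	// not items in this module): callers outside the event loop can borrow a
	// `BasicContext` like so:
	//
	//     proto.with_context(&io, |ctx| {
	//         // `ctx: &BasicContext` can be used here to inspect peers or
	//         // dispatch requests before the closure returns.
	//     });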
fn tick_handlers(&self, io: &IoContext) {
for handler in &self.handlers {
handler.tick(&TickCtx {
io: io,
proto: self,
})
}
}
}
impl LightProtocol {
	// Handle status message from peer.
	fn status(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> {
		let pending = match self.pending_peers.write().remove(peer) {
			Some(pending) => pending,
			None => {
				return Err(Error::UnexpectedHandshake);
			}
		};

		let (status, capabilities, flow_params) = status::parse_handshake(data)?;

		trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num));

		if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) {
			return Err(Error::WrongNetwork);
		}

		if Some(status.protocol_version as u8) != io.protocol_version(*peer) {
			return Err(Error::BadProtocolVersion);
		}

		let remote_flow = flow_params.map(|params| (params.create_credits(), params));

		self.peers.write().insert(*peer, Mutex::new(Peer {
			local_credits: self.flow_params.create_credits(),
			status: status.clone(),
			capabilities: capabilities.clone(),
			remote_flow: remote_flow,
			sent_head: pending.sent_head,
			last_update: pending.last_update,
			pending_requests: RequestSet::default(),
			failed_requests: Vec::new(),
		}));

		for handler in &self.handlers {
			handler.on_connect(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &status, &capabilities)
		}

		Ok(())
	}
	// Handle an announcement.
	fn announcement(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> {
		if !self.peers.read().contains_key(peer) {
			debug!(target: "pip", "Ignoring announcement from unknown peer");
			return Ok(())
		}

		let announcement = status::parse_announcement(data)?;

		// scope to ensure locks are dropped before moving into handler-space.
		{
			let peers = self.peers.read();
			let peer_info = match peers.get(peer) {
				Some(info) => info,
				None => return Ok(()),
			};

			let mut peer_info = peer_info.lock();

			// update status.
			{
				// TODO: punish peer if they've moved backwards.
				let status = &mut peer_info.status;
				let last_head = status.head_hash;
				status.head_hash = announcement.head_hash;
				status.head_td = announcement.head_td;
				status.head_num = announcement.head_num;
				status.last_head = Some((last_head, announcement.reorg_depth));
			}

			// update capabilities.
			peer_info.capabilities.update_from(&announcement);
		}

		for handler in &self.handlers {
			handler.on_announcement(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &announcement);
		}

		Ok(())
	}
	// Receive requests from a peer.
	fn request(&self, peer: &PeerId, io: &IoContext, raw: UntrustedRlp) -> Result<(), Error> {
		// the maximum number of requests we'll fill in a single packet.
		const MAX_REQUESTS: usize = 256;

		use ::request_builder::RequestBuilder;
		use ::request::CompleteRequest;

		let peers = self.peers.read();
		let peer = match peers.get(peer) {
			Some(peer) => peer,
			None => {
				debug!(target: "pip", "Ignoring request from unknown peer");
				return Ok(())
			}
		};
		let mut peer = peer.lock();

		let req_id: u64 = raw.val_at(0)?;
		let mut cumulative_cost = U256::from(0);
		let cur_credits = peer.local_credits.current();

		// deserialize requests, check costs and back-references.
		let mut request_builder = RequestBuilder::default();
		for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) {
			let request: Request = request_rlp.as_val()?;
			cumulative_cost = cumulative_cost + self.flow_params.compute_cost(&request);
			if cumulative_cost > cur_credits { return Err(Error::NoCredits) }
			request_builder.push(request).map_err(|_| Error::BadBackReference)?;
		}

		let requests = request_builder.build();

		// respond to all requests until one fails.
		let responses = requests.respond_to_all(|complete_req| {
			match complete_req {
				CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers),
				CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof),
				CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body),
				CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts),
				CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account),
				CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage),
				CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code),
				CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution),
			}
		});

		// TODO: echo `req_id` and the peer's updated credits in the response packet,
		// as expected by `pre_verify_response` on the requesting side.
		io.respond(packet::RESPONSE, ::rlp::encode(&responses).to_vec());
		Ok(())
	}
	// Receive a response packet from a peer. Not yet implemented; see
	// `pre_verify_response` for the shared validation logic.
	fn response(&self, _peer: &PeerId, _io: &IoContext, _raw: UntrustedRlp) -> Result<(), Error> {
		unimplemented!()
	}
	// Receive a set of transactions to relay.
	fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: UntrustedRlp) -> Result<(), Error> {
		const MAX_TRANSACTIONS: usize = 256;

		let txs: Vec<_> = data.iter()
			.take(MAX_TRANSACTIONS)
			.map(|x| x.as_val::<UnverifiedTransaction>())
			.collect::<Result<_,_>>()?;

		debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer);

		for handler in &self.handlers {
			handler.on_transactions(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &txs);
		}

		Ok(())
	}
}
// if something went wrong, figure out how much to punish the peer.
fn punish(peer: PeerId, io: &IoContext, e: Error) {
match e.punishment() {
Punishment::None => {}
Punishment::Disconnect => {
debug!(target: "pip", "Disconnecting peer {}: {}", peer, e);
io.disconnect_peer(peer)
}
Punishment::Disable => {
debug!(target: "pip", "Disabling peer {}: {}", peer, e);
io.disable_peer(peer)
}
}
}
impl NetworkProtocolHandler for LightProtocol {
	fn initialize(&self, io: &NetworkContext) {
		io.register_timer(TIMEOUT, TIMEOUT_INTERVAL_MS)
			.expect("Error registering sync timer.");
		io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS)
			.expect("Error registering sync timer.");
	}

	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
		self.handle_packet(io, peer, packet_id, data);
	}

	fn connected(&self, io: &NetworkContext, peer: &PeerId) {
		self.on_connect(peer, io);
	}

	fn disconnected(&self, io: &NetworkContext, peer: &PeerId) {
		self.on_disconnect(*peer, io);
	}

	fn timeout(&self, io: &NetworkContext, timer: TimerToken) {
		match timer {
			TIMEOUT => self.timeout_check(io),
			TICK_TIMEOUT => self.tick_handlers(io),
			_ => warn!(target: "pip", "received timeout on unknown token {}", timer),
		}
	}
}