// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! PLP Protocol Version 1 implementation.
//!
//! This uses a "Provider" to answer requests.

use transaction::UnverifiedTransaction;

use io::TimerToken;
use network::{NetworkProtocolHandler, NetworkContext, PeerId};
use rlp::{RlpStream, Rlp};
use ethereum_types::{H256, U256};
use kvdb::DBValue;
use parking_lot::{Mutex, RwLock};
use std::time::{Duration, Instant};

use std::collections::{HashMap, HashSet};
use std::fmt;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::ops::{BitOr, BitAnd, Not};

use provider::Provider;
use request::{Request, NetworkRequests as Requests, Response};

use self::request_credits::{Credits, FlowParams};
use self::context::{Ctx, TickCtx};
use self::error::Punishment;
use self::load_timer::{LoadDistribution, NullStore};
use self::request_set::RequestSet;
use self::id_guard::IdGuard;

mod context;
mod error;
mod load_timer;
mod status;
mod request_set;

#[cfg(test)]
mod tests;

pub mod request_credits;

pub use self::context::{BasicContext, EventContext, IoContext};
pub use self::error::Error;
pub use self::load_timer::{SampleStore, FileStore};
pub use self::status::{Status, Capabilities, Announcement};

const TIMEOUT: TimerToken = 0;
const TIMEOUT_INTERVAL: Duration = Duration::from_secs(1);

const TICK_TIMEOUT: TimerToken = 1;
const TICK_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5);

const PROPAGATE_TIMEOUT: TimerToken = 2;
const PROPAGATE_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5);

const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
const RECALCULATE_COSTS_INTERVAL: Duration = Duration::from_secs(60 * 60);

// minimum interval between updates.
const UPDATE_INTERVAL: Duration = Duration::from_millis(5000);

/// Packet count for PIP.
const PACKET_COUNT_V1: u8 = 9;

/// Supported protocol versions.
pub const PROTOCOL_VERSIONS: &'static [(u8, u8)] = &[
	(1, PACKET_COUNT_V1),
];

/// Max protocol version.
pub const MAX_PROTOCOL_VERSION: u8 = 1;

// packet ID definitions.
mod packet {
	// the status packet.
	pub const STATUS: u8 = 0x00;

	// announcement of new block hashes or capabilities.
	pub const ANNOUNCE: u8 = 0x01;

	// request and response.
	pub const REQUEST: u8 = 0x02;
	pub const RESPONSE: u8 = 0x03;

	// request credits update and acknowledgement.
	pub const UPDATE_CREDITS: u8 = 0x04;
	pub const ACKNOWLEDGE_UPDATE: u8 = 0x05;

	// relay transactions to peers.
	pub const SEND_TRANSACTIONS: u8 = 0x06;

	// two packets were previously meant to be reserved for epoch proofs.
	// these have since been moved to requests.
}

// timeouts for different kinds of requests. all `u64` values are in milliseconds.
mod timeout {
	use std::time::Duration;

	pub const HANDSHAKE: Duration = Duration::from_millis(4_000);
	pub const ACKNOWLEDGE_UPDATE: Duration = Duration::from_millis(5_000);
	pub const BASE: u64 = 2_500; // base timeout for packet.

	// timeouts per request within packet.
	pub const HEADERS: u64 = 250; // per header?
	pub const TRANSACTION_INDEX: u64 = 100;
	pub const BODY: u64 = 50;
	pub const RECEIPT: u64 = 50;
	pub const PROOF: u64 = 100; // state proof
	pub const CONTRACT_CODE: u64 = 100;
	pub const HEADER_PROOF: u64 = 100;
	pub const TRANSACTION_PROOF: u64 = 1000; // per gas?
	pub const EPOCH_SIGNAL: u64 = 200;
}
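
// Illustrative sketch (not part of the original module): the timeout applied to a
// whole request packet is the per-packet BASE plus the per-request components above;
// the actual computation lives in the `request_set` module. A hypothetical
// headers-only example:
//
//     fn example_headers_timeout(max_headers: u64) -> std::time::Duration {
//         std::time::Duration::from_millis(timeout::BASE + timeout::HEADERS * max_headers)
//     }
//
// e.g. a packet requesting 100 headers would be allowed 2_500 + 250 * 100 = 27_500 ms.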

/// A request id.
#[cfg(not(test))]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct ReqId(usize);

#[cfg(test)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash)]
pub struct ReqId(pub usize);

impl fmt::Display for ReqId {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "Request #{}", self.0)
	}
}

// A pending peer: one we've sent our status to, but from whom
// we may not yet have received one.
struct PendingPeer {
	sent_head: H256,
	last_update: Instant,
}

/// Relevant data to each peer. Not accessible publicly, only `pub` due to
/// limitations of the privacy system.
pub struct Peer {
	local_credits: Credits, // their credits relative to us
	status: Status,
	capabilities: Capabilities,
	remote_flow: Option<(Credits, FlowParams)>,
	sent_head: H256, // last chain head we've given them.
	last_update: Instant,
	pending_requests: RequestSet,
	failed_requests: Vec<ReqId>,
	propagated_transactions: HashSet<H256>,
	skip_update: bool,
	local_flow: Arc<FlowParams>,
	awaiting_acknowledge: Option<(Instant, Arc<FlowParams>)>,
}

/// Whether or not a peer was kept by a handler.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PeerStatus {
	/// The peer was kept.
	Kept,
	/// The peer was not kept.
	Unkept,
}

impl Not for PeerStatus {
	type Output = Self;

	fn not(self) -> Self {
		use self::PeerStatus::*;

		match self {
			Kept => Unkept,
			Unkept => Kept,
		}
	}
}

impl BitAnd for PeerStatus {
	type Output = Self;

	fn bitand(self, other: Self) -> Self {
		use self::PeerStatus::*;

		match (self, other) {
			(Kept, Kept) => Kept,
			_ => Unkept,
		}
	}
}

impl BitOr for PeerStatus {
	type Output = Self;

	fn bitor(self, other: Self) -> Self {
		use self::PeerStatus::*;

		match (self, other) {
			(_, Kept) | (Kept, _) => Kept,
			_ => Unkept,
		}
	}
}
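
// A quick illustration of the operator impls above (illustrative, not in the
// original source): `Kept` behaves like `true` under these bitwise operators.
//
//     assert_eq!(PeerStatus::Kept | PeerStatus::Unkept, PeerStatus::Kept);
//     assert_eq!(PeerStatus::Kept & PeerStatus::Unkept, PeerStatus::Unkept);
//     assert_eq!(!PeerStatus::Unkept, PeerStatus::Kept);
//
// `status()` below folds the verdicts of all registered handlers with `bitor`.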

/// A light protocol event handler.
///
/// Each handler function takes a context which describes the relevant peer
/// and gives references to the IO layer and protocol structure so new messages
/// can be dispatched immediately.
///
/// Request responses are not guaranteed to be complete or valid, but passed IDs will be correct.
/// Response handlers are not given a copy of the original request; it is assumed
/// that relevant data will be stored by interested handlers.
pub trait Handler: Send + Sync {
	/// Called when a peer connects.
	fn on_connect(
		&self,
		_ctx: &EventContext,
		_status: &Status,
		_capabilities: &Capabilities
	) -> PeerStatus { PeerStatus::Kept }
	/// Called when a peer disconnects, with a list of request IDs which
	/// remain unfulfilled.
	fn on_disconnect(&self, _ctx: &EventContext, _unfulfilled: &[ReqId]) { }
	/// Called when a peer makes an announcement.
	fn on_announcement(&self, _ctx: &EventContext, _announcement: &Announcement) { }
	/// Called when a peer requests relay of some transactions.
	fn on_transactions(&self, _ctx: &EventContext, _relay: &[UnverifiedTransaction]) { }
	/// Called when a peer responds to requests.
	/// Responses are not guaranteed to contain valid data and are not yet checked
	/// against the requests they correspond to.
	fn on_responses(&self, _ctx: &EventContext, _req_id: ReqId, _responses: &[Response]) { }
	/// Called when a peer responds with a transaction proof. Each proof is a vector of state items.
	fn on_transaction_proof(&self, _ctx: &EventContext, _req_id: ReqId, _state_items: &[DBValue]) { }
	/// Called to "tick" the handler periodically.
	fn tick(&self, _ctx: &BasicContext) { }
	/// Called on abort. This signals to handlers that they should clean up
	/// and ignore peers.
	// TODO: corresponding `on_activate`?
	fn on_abort(&self) { }
}
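
// Illustrative sketch of a handler implementation (hypothetical type, not part
// of this module): every method has a default body, so a handler only overrides
// the callbacks it cares about. Assumes `EventContext::peer` as declared in the
// `context` module.
//
//     struct LoggingHandler;
//
//     impl Handler for LoggingHandler {
//         fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) {
//             println!("peer {} announced new head {:?}", ctx.peer(), announcement.head_hash);
//         }
//     }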

/// Configuration.
pub struct Config {
	/// How many stored seconds of credits peers should be able to accumulate.
	pub max_stored_seconds: u64,
	/// How much of the total load capacity each peer should be allowed to take.
	pub load_share: f64,
}

impl Default for Config {
	fn default() -> Self {
		const LOAD_SHARE: f64 = 1.0 / 25.0;
		const MAX_ACCUMULATED: u64 = 60 * 5; // only charge for 5 minutes.

		Config {
			max_stored_seconds: MAX_ACCUMULATED,
			load_share: LOAD_SHARE,
		}
	}
}

/// Protocol initialization parameters.
pub struct Params {
	/// Network id.
	pub network_id: u64,
	/// Config.
	pub config: Config,
	/// Initial capabilities.
	pub capabilities: Capabilities,
	/// The sample store (`None` if data shouldn't persist between runs).
	pub sample_store: Option<Box<SampleStore>>,
}
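
// Illustrative sketch of wiring up `Params` (hypothetical values, and assuming
// `Capabilities` implements `Default`): `Config::default()` gives the 1/25 load
// share and five minutes of stored credits defined above, and `sample_store: None`
// keeps load samples in memory only.
//
//     let params = Params {
//         network_id: 1,
//         config: Config::default(),
//         capabilities: Capabilities::default(),
//         sample_store: None,
//     };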

/// Type alias for convenience.
pub type PeerMap = HashMap<PeerId, Mutex<Peer>>;

mod id_guard {

	use network::PeerId;
	use parking_lot::RwLockReadGuard;

	use super::{PeerMap, ReqId};

	// Guards success or failure of given request.
	// On drop, inserts the req_id into the "failed requests"
	// set for the peer unless defused. In separate module to enforce correct usage.
	pub struct IdGuard<'a> {
		peers: RwLockReadGuard<'a, PeerMap>,
		peer_id: PeerId,
		req_id: ReqId,
		active: bool,
	}

	impl<'a> IdGuard<'a> {
		/// Create a new `IdGuard`, which will prevent access of the inner ReqId
		/// (for forming responses, triggering handlers) until defused.
		pub fn new(peers: RwLockReadGuard<'a, PeerMap>, peer_id: PeerId, req_id: ReqId) -> Self {
			IdGuard {
				peers: peers,
				peer_id: peer_id,
				req_id: req_id,
				active: true,
			}
		}

		/// Defuse the guard, signalling that the request has been successfully decoded.
		pub fn defuse(mut self) -> ReqId {
			// can't use the mem::forget trick here since we need the
			// read guard to drop.
			self.active = false;
			self.req_id
		}
	}

	impl<'a> Drop for IdGuard<'a> {
		fn drop(&mut self) {
			if !self.active { return }
			if let Some(p) = self.peers.get(&self.peer_id) {
				p.lock().failed_requests.push(self.req_id);
			}
		}
	}
}
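
// Illustrative use of the guard, mirroring `response()` below: the guard is
// created once a response is matched to a pending request, and defused only
// after the body decodes, so a malformed body still marks the request as
// failed when the guard drops.
//
//     let id_guard = self.pre_verify_response(peer, &raw)?;
//     let responses: Vec<Response> = raw.list_at(2)?; // `?` drops (arms) the guard
//     let req_id = id_guard.defuse();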

/// This is an implementation of the light ethereum network protocol, abstracted
/// over a `Provider` of data and a p2p network.
///
/// This is simply designed for request-response purposes. Higher level uses
/// of the protocol, such as synchronization, will function as wrappers around
/// this system.
//
// LOCK ORDER:
// Locks must be acquired in the order declared, and when holding a read lock
// on the peers, only one peer may be held at a time.
pub struct LightProtocol {
	provider: Arc<Provider>,
	config: Config,
	genesis_hash: H256,
	network_id: u64,
	pending_peers: RwLock<HashMap<PeerId, PendingPeer>>,
	peers: RwLock<PeerMap>,
	capabilities: RwLock<Capabilities>,
	flow_params: RwLock<Arc<FlowParams>>,
	handlers: Vec<Arc<Handler>>,
	req_id: AtomicUsize,
	sample_store: Box<SampleStore>,
	load_distribution: LoadDistribution,
}

impl LightProtocol {
	/// Create a new instance of the protocol manager.
	pub fn new(provider: Arc<Provider>, params: Params) -> Self {
		debug!(target: "pip", "Initializing light protocol handler");

		let genesis_hash = provider.chain_info().genesis_hash;
		let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore));
		let load_distribution = LoadDistribution::load(&*sample_store);
		let flow_params = FlowParams::from_request_times(
			|kind| load_distribution.expected_time(kind),
			params.config.load_share,
			Duration::from_secs(params.config.max_stored_seconds),
		);

		LightProtocol {
			provider: provider,
			config: params.config,
			genesis_hash: genesis_hash,
			network_id: params.network_id,
			pending_peers: RwLock::new(HashMap::new()),
			peers: RwLock::new(HashMap::new()),
			capabilities: RwLock::new(params.capabilities),
			flow_params: RwLock::new(Arc::new(flow_params)),
			handlers: Vec::new(),
			req_id: AtomicUsize::new(0),
			sample_store: sample_store,
			load_distribution: load_distribution,
		}
	}

	/// Attempt to get peer status.
	pub fn peer_status(&self, peer: &PeerId) -> Option<Status> {
		self.peers.read().get(&peer)
			.map(|peer| peer.lock().status.clone())
	}

	/// Get number of (connected, active) peers.
	pub fn peer_count(&self) -> (usize, usize) {
		let num_pending = self.pending_peers.read().len();
		let peers = self.peers.read();
		(
			num_pending + peers.len(),
			peers.values().filter(|p| !p.lock().pending_requests.is_empty()).count(),
		)
	}

	/// Make a request to a peer.
	///
	/// Fails on: nonexistent peer, network error, peer not server,
	/// insufficient credits. Does not check capabilities before sending.
	/// On success, returns a request id which can later be coordinated
	/// with an event.
	pub fn request_from(&self, io: &IoContext, peer_id: &PeerId, requests: Requests) -> Result<ReqId, Error> {
		let peers = self.peers.read();
		let peer = match peers.get(peer_id) {
			Some(peer) => peer,
			None => return Err(Error::UnknownPeer),
		};

		let mut peer = peer.lock();
		let peer = &mut *peer;
		match peer.remote_flow {
			None => Err(Error::NotServer),
			Some((ref mut creds, ref params)) => {
				// apply recharge to credits if there are no pending requests.
				if peer.pending_requests.is_empty() {
					params.recharge(creds);
				}

				// compute and deduct cost.
				let pre_creds = creds.current();
				let cost = match params.compute_cost_multi(requests.requests()) {
					Some(cost) => cost,
					None => return Err(Error::NotServer),
				};

				creds.deduct_cost(cost)?;

				trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}",
					peer_id, cost, pre_creds);

				let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst));
				io.send(*peer_id, packet::REQUEST, {
					let mut stream = RlpStream::new_list(2);
					stream.append(&req_id.0).append_list(&requests.requests());
					stream.out()
				});

				// begin timeout.
				peer.pending_requests.insert(req_id, requests, cost, Instant::now());
				Ok(req_id)
			}
		}
	}
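
	// Illustrative call sequence (not in the original source): a caller assembles
	// a `Requests` batch, dispatches it, and remembers the returned id; the
	// matching responses later arrive through `Handler::on_responses` with the
	// same `ReqId`.
	//
	//     let req_id = protocol.request_from(io, &peer_id, requests)?;
	//     trace!(target: "pip", "dispatched {}", req_id); // prints "Request #<n>"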

	/// Make an announcement of new chain head and capabilities to all peers.
	/// The announcement is expected to be valid.
	pub fn make_announcement(&self, io: &IoContext, mut announcement: Announcement) {
		let mut reorgs_map = HashMap::new();
		let now = Instant::now();

		// update stored capabilities
		self.capabilities.write().update_from(&announcement);

		// calculate reorg info and send packets
		for (peer_id, peer_info) in self.peers.read().iter() {
			let mut peer_info = peer_info.lock();

			// TODO: "urgent" announcements like new blocks?
			// the timer approach will skip 1 (possibly 2) on rare occasions.
			if peer_info.sent_head == announcement.head_hash ||
				peer_info.status.head_num >= announcement.head_num ||
				now - peer_info.last_update < UPDATE_INTERVAL {
				continue
			}

			peer_info.last_update = now;

			let reorg_depth = reorgs_map.entry(peer_info.sent_head)
				.or_insert_with(|| {
					match self.provider.reorg_depth(&announcement.head_hash, &peer_info.sent_head) {
						Some(depth) => depth,
						None => {
							// both values will always originate locally -- this means something
							// has gone really wrong
							debug!(target: "pip", "couldn't compute reorganization depth between {:?} and {:?}",
								&announcement.head_hash, &peer_info.sent_head);
							0
						}
					}
				});

			peer_info.sent_head = announcement.head_hash;
			announcement.reorg_depth = *reorg_depth;

			io.send(*peer_id, packet::ANNOUNCE, status::write_announcement(&announcement));
		}
	}

	/// Add an event handler.
	///
	/// These are intended to be added when the protocol structure
	/// is initialized as a means of customizing its behavior,
	/// and dispatching requests immediately upon events.
	pub fn add_handler(&mut self, handler: Arc<Handler>) {
		self.handlers.push(handler);
	}

	/// Signal to handlers that network activity is being aborted
	/// and clear peer data.
	pub fn abort(&self) {
		for handler in &self.handlers {
			handler.on_abort();
		}

		// acquire in order and hold.
		let mut pending_peers = self.pending_peers.write();
		let mut peers = self.peers.write();

		pending_peers.clear();
		peers.clear();
	}

	// Does the common pre-verification of responses before the response itself
	// is actually decoded:
	// - check whether peer exists
	// - check whether request was made
	// - check whether request kinds match
	fn pre_verify_response(&self, peer: &PeerId, raw: &Rlp) -> Result<IdGuard, Error> {
		let req_id = ReqId(raw.val_at(0)?);
		let cur_credits: U256 = raw.val_at(1)?;

		trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer);

		let peers = self.peers.read();
		let res = match peers.get(peer) {
			Some(peer_info) => {
				let mut peer_info = peer_info.lock();
				let peer_info: &mut Peer = &mut *peer_info;
				let req_info = peer_info.pending_requests.remove(&req_id, Instant::now());
				let last_batched = peer_info.pending_requests.is_empty();
				let flow_info = peer_info.remote_flow.as_mut();

				match (req_info, flow_info) {
					(Some(_), Some(flow_info)) => {
						let &mut (ref mut c, ref mut flow) = flow_info;

						// only update if the cumulative cost of the request set is zero,
						// and this response wasn't from before request costs were updated.
						if !peer_info.skip_update && last_batched {
							let actual_credits = ::std::cmp::min(cur_credits, *flow.limit());
							c.update_to(actual_credits);
						}

						if last_batched { peer_info.skip_update = false }

						Ok(())
					}
					(None, _) => Err(Error::UnsolicitedResponse),
					(_, None) => Err(Error::NotServer), // really should be impossible.
				}
			}
			None => Err(Error::UnknownPeer), // probably only occurs in a race of some kind.
		};

		res.map(|_| IdGuard::new(peers, *peer, req_id))
	}

	/// Handle a packet using the given io context.
	/// Packet data is _untrusted_, which means that invalid data won't lead to
	/// issues.
	pub fn handle_packet(&self, io: &IoContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
		let rlp = Rlp::new(data);

		trace!(target: "pip", "Incoming packet {} from peer {}", packet_id, peer);

		// handle the packet
		let res = match packet_id {
			packet::STATUS => self.status(peer, io, rlp),
			packet::ANNOUNCE => self.announcement(peer, io, rlp),

			packet::REQUEST => self.request(peer, io, rlp),
			packet::RESPONSE => self.response(peer, io, rlp),

			packet::UPDATE_CREDITS => self.update_credits(peer, io, rlp),
			packet::ACKNOWLEDGE_UPDATE => self.acknowledge_update(peer, io, rlp),

			packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),

			other => {
				Err(Error::UnrecognizedPacket(other))
			}
		};

		if let Err(e) = res {
			punish(*peer, io, e);
		}
	}

	// check timeouts and punish peers.
	fn timeout_check(&self, io: &IoContext) {
		let now = Instant::now();

		// handshake timeout
		{
			let mut pending = self.pending_peers.write();
			let slowpokes: Vec<_> = pending.iter()
				.filter(|&(_, ref peer)| {
					peer.last_update + timeout::HANDSHAKE <= now
				})
				.map(|(&p, _)| p)
				.collect();

			for slowpoke in slowpokes {
				debug!(target: "pip", "Peer {} handshake timed out", slowpoke);
				pending.remove(&slowpoke);
				io.disconnect_peer(slowpoke);
			}
		}

		// request and update ack timeouts
		let ack_duration = timeout::ACKNOWLEDGE_UPDATE;
		{
			for (peer_id, peer) in self.peers.read().iter() {
				let peer = peer.lock();
				if peer.pending_requests.check_timeout(now) {
					debug!(target: "pip", "Peer {} request timeout", peer_id);
					io.disconnect_peer(*peer_id);
				}

				if let Some((ref start, _)) = peer.awaiting_acknowledge {
					if *start + ack_duration <= now {
						debug!(target: "pip", "Peer {} update acknowledgement timeout", peer_id);
						io.disconnect_peer(*peer_id);
					}
				}
			}
		}
	}

	// propagate transactions to relay peers.
	// if we aren't on the mainnet, we just propagate to all relay peers
	fn propagate_transactions(&self, io: &IoContext) {
		if self.capabilities.read().tx_relay { return }

		let ready_transactions = self.provider.ready_transactions();
		if ready_transactions.is_empty() { return }

		trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len());

		let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect();
		let mut buf = Vec::new();

		let peers = self.peers.read();
		for (peer_id, peer_info) in peers.iter() {
			let mut peer_info = peer_info.lock();
			if !peer_info.capabilities.tx_relay { continue }

			let prop_filter = &mut peer_info.propagated_transactions;
			*prop_filter = &*prop_filter & &all_transaction_hashes;

			// fill the buffer with all non-propagated transactions.
			let to_propagate = ready_transactions.iter()
				.filter(|tx| prop_filter.insert(tx.hash()))
				.map(|tx| &tx.transaction);

			buf.extend(to_propagate);

			// propagate to the given peer.
			if buf.is_empty() { continue }
			io.send(*peer_id, packet::SEND_TRANSACTIONS, {
				let mut stream = RlpStream::new_list(buf.len());
				for pending_tx in buf.drain(..) {
					stream.append(pending_tx);
				}

				stream.out()
			})
		}
	}
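
	// Illustrative sketch of the filter logic above (standalone, hypothetical
	// values): intersecting the per-peer set with the currently-ready hashes
	// evicts stale entries, and `insert` then doubles as a "not yet propagated"
	// check.
	//
	//     let mut sent: HashSet<u32> = [1, 2].iter().cloned().collect();
	//     let ready: HashSet<u32> = [2, 3].iter().cloned().collect();
	//     sent = &sent & &ready;                  // keep only still-ready entries: {2}
	//     let to_send: Vec<u32> = ready.iter().cloned()
	//         .filter(|h| sent.insert(*h))        // true only for unsent entries
	//         .collect();                         // yields [3]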

	/// Called when a peer connects.
	pub fn on_connect(&self, peer: &PeerId, io: &IoContext) {
		let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) {
			Ok(pv) => pv,
			Err(e) => { punish(*peer, io, e); return }
		};

		if PROTOCOL_VERSIONS.iter().find(|x| x.0 == proto_version).is_none() {
			punish(*peer, io, Error::UnsupportedProtocolVersion(proto_version));
			return;
		}

		let chain_info = self.provider.chain_info();

		let status = Status {
			head_td: chain_info.total_difficulty,
			head_hash: chain_info.best_block_hash,
			head_num: chain_info.best_block_number,
			genesis_hash: chain_info.genesis_hash,
			protocol_version: proto_version as u32, // match peer proto version
			network_id: self.network_id,
			last_head: None,
		};

		let capabilities = self.capabilities.read().clone();
		let local_flow = self.flow_params.read();
		let status_packet = status::write_handshake(&status, &capabilities, Some(&**local_flow));

		self.pending_peers.write().insert(*peer, PendingPeer {
			sent_head: chain_info.best_block_hash,
			last_update: Instant::now(),
		});

		trace!(target: "pip", "Sending status to peer {}", peer);
		io.send(*peer, packet::STATUS, status_packet);
	}

	/// Called when a peer disconnects.
	pub fn on_disconnect(&self, peer: PeerId, io: &IoContext) {
		trace!(target: "pip", "Peer {} disconnecting", peer);

		self.pending_peers.write().remove(&peer);
		let unfulfilled = match self.peers.write().remove(&peer) {
			None => return,
			Some(peer_info) => {
				let peer_info = peer_info.into_inner();
				let mut unfulfilled: Vec<_> = peer_info.pending_requests.collect_ids();
				unfulfilled.extend(peer_info.failed_requests);

				unfulfilled
			}
		};

		for handler in &self.handlers {
			handler.on_disconnect(&Ctx {
				peer: peer,
				io: io,
				proto: self,
			}, &unfulfilled)
		}
	}

	/// Execute the given closure with a basic context derived from the I/O context.
	pub fn with_context<F, T>(&self, io: &IoContext, f: F) -> T
		where F: FnOnce(&BasicContext) -> T
	{
		f(&TickCtx {
			io: io,
			proto: self,
		})
	}

	fn tick_handlers(&self, io: &IoContext) {
		for handler in &self.handlers {
			handler.tick(&TickCtx {
				io: io,
				proto: self,
			})
		}
	}

	fn begin_new_cost_period(&self, io: &IoContext) {
		self.load_distribution.end_period(&*self.sample_store);

		let new_params = Arc::new(FlowParams::from_request_times(
			|kind| self.load_distribution.expected_time(kind),
			self.config.load_share,
			Duration::from_secs(self.config.max_stored_seconds),
		));
		*self.flow_params.write() = new_params.clone();

		let peers = self.peers.read();
		let now = Instant::now();

		let packet_body = {
			let mut stream = RlpStream::new_list(3);
			stream.append(new_params.limit())
				.append(new_params.recharge_rate())
				.append(new_params.cost_table());
			stream.out()
		};

		for (peer_id, peer_info) in peers.iter() {
			let mut peer_info = peer_info.lock();

			io.send(*peer_id, packet::UPDATE_CREDITS, packet_body.clone());
			peer_info.awaiting_acknowledge = Some((now.clone(), new_params.clone()));
		}
	}
}

impl LightProtocol {
	// Handle status message from peer.
	fn status(&self, peer: &PeerId, io: &IoContext, data: Rlp) -> Result<(), Error> {
		let pending = match self.pending_peers.write().remove(peer) {
			Some(pending) => pending,
			None => {
				return Err(Error::UnexpectedHandshake);
			}
		};

		let (status, capabilities, flow_params) = status::parse_handshake(data)?;

		trace!(target: "pip", "Connected peer with chain head {:?}", (status.head_hash, status.head_num));

		if (status.network_id, status.genesis_hash) != (self.network_id, self.genesis_hash) {
			trace!(target: "pip", "peer {} wrong network: network_id is {} vs our {}, gh is {} vs our {}",
				peer, status.network_id, self.network_id, status.genesis_hash, self.genesis_hash);

			return Err(Error::WrongNetwork);
		}

		if Some(status.protocol_version as u8) != io.protocol_version(*peer) {
			return Err(Error::BadProtocolVersion);
		}

		let remote_flow = flow_params.map(|params| (params.create_credits(), params));
		let local_flow = self.flow_params.read().clone();

		self.peers.write().insert(*peer, Mutex::new(Peer {
			local_credits: local_flow.create_credits(),
			status: status.clone(),
			capabilities: capabilities.clone(),
			remote_flow: remote_flow,
			sent_head: pending.sent_head,
			last_update: pending.last_update,
			pending_requests: RequestSet::default(),
			failed_requests: Vec::new(),
			propagated_transactions: HashSet::new(),
			skip_update: false,
			local_flow: local_flow,
			awaiting_acknowledge: None,
		}));

		let any_kept = self.handlers.iter().map(
			|handler| handler.on_connect(
				&Ctx {
					peer: *peer,
					io: io,
					proto: self,
				},
				&status,
				&capabilities
			)
		).fold(PeerStatus::Kept, PeerStatus::bitor);

		if any_kept == PeerStatus::Unkept {
			Err(Error::RejectedByHandlers)
		} else {
			Ok(())
		}
	}

	// Handle an announcement.
	fn announcement(&self, peer: &PeerId, io: &IoContext, data: Rlp) -> Result<(), Error> {
		if !self.peers.read().contains_key(peer) {
			debug!(target: "pip", "Ignoring announcement from unknown peer");
			return Ok(())
		}

		let announcement = status::parse_announcement(data)?;

		// scope to ensure locks are dropped before moving into handler-space.
		{
			let peers = self.peers.read();
			let peer_info = match peers.get(peer) {
				Some(info) => info,
				None => return Ok(()),
			};

			let mut peer_info = peer_info.lock();

			// update status.
			{
				// TODO: punish peer if they've moved backwards.
				let status = &mut peer_info.status;
				let last_head = status.head_hash;
				status.head_hash = announcement.head_hash;
				status.head_td = announcement.head_td;
				status.head_num = announcement.head_num;
				status.last_head = Some((last_head, announcement.reorg_depth));
			}

			// update capabilities.
			peer_info.capabilities.update_from(&announcement);
		}

		for handler in &self.handlers {
			handler.on_announcement(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &announcement);
		}

		Ok(())
	}

	// Receive requests from a peer.
	fn request(&self, peer_id: &PeerId, io: &IoContext, raw: Rlp) -> Result<(), Error> {
		// the maximum number of requests we'll fill in a single packet.
		const MAX_REQUESTS: usize = 256;

		use ::request::Builder;
		use ::request::CompleteRequest;

		let peers = self.peers.read();
		let peer = match peers.get(peer_id) {
			Some(peer) => peer,
			None => {
				debug!(target: "pip", "Ignoring request from unknown peer");
				return Ok(())
			}
		};
		let mut peer = peer.lock();
		let peer: &mut Peer = &mut *peer;

		let req_id: u64 = raw.val_at(0)?;
		let mut request_builder = Builder::default();

		trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id);

		// deserialize requests, check costs and request validity.
		peer.local_flow.recharge(&mut peer.local_credits);

		peer.local_credits.deduct_cost(peer.local_flow.base_cost())?;
		for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) {
			let request: Request = request_rlp.as_val()?;
			let cost = peer.local_flow.compute_cost(&request).ok_or(Error::NotServer)?;
			peer.local_credits.deduct_cost(cost)?;
			request_builder.push(request).map_err(|_| Error::BadBackReference)?;
		}

		let requests = request_builder.build();
		let num_requests = requests.requests().len();
		trace!(target: "pip", "Beginning to respond to requests (id: {}) from peer {}", req_id, peer_id);

		// respond to all requests until one fails.
		let responses = requests.respond_to_all(|complete_req| {
			let _timer = self.load_distribution.begin_timer(&complete_req);
			match complete_req {
				CompleteRequest::Headers(req) => self.provider.block_headers(req).map(Response::Headers),
				CompleteRequest::HeaderProof(req) => self.provider.header_proof(req).map(Response::HeaderProof),
				CompleteRequest::TransactionIndex(req) => self.provider.transaction_index(req).map(Response::TransactionIndex),
				CompleteRequest::Body(req) => self.provider.block_body(req).map(Response::Body),
				CompleteRequest::Receipts(req) => self.provider.block_receipts(req).map(Response::Receipts),
				CompleteRequest::Account(req) => self.provider.account_proof(req).map(Response::Account),
				CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage),
				CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code),
				CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution),
				CompleteRequest::Signal(req) => self.provider.epoch_signal(req).map(Response::Signal),
			}
		});

		trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id);
		trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current());

		io.respond(packet::RESPONSE, {
			let mut stream = RlpStream::new_list(3);
			let cur_credits = peer.local_credits.current();
			stream.append(&req_id).append(&cur_credits).append_list(&responses);
			stream.out()
		});
		Ok(())
	}

	// handle a packet with responses.
	fn response(&self, peer: &PeerId, io: &IoContext, raw: Rlp) -> Result<(), Error> {
		let (req_id, responses) = {
			let id_guard = self.pre_verify_response(peer, &raw)?;
			let responses: Vec<Response> = raw.list_at(2)?;
			(id_guard.defuse(), responses)
		};

		for handler in &self.handlers {
			handler.on_responses(&Ctx {
				io: io,
				proto: self,
				peer: *peer,
			}, req_id, &responses);
		}

		Ok(())
	}

	// handle an update of request credits parameters.
	fn update_credits(&self, peer_id: &PeerId, io: &IoContext, raw: Rlp) -> Result<(), Error> {
		let peers = self.peers.read();

		let peer = peers.get(peer_id).ok_or(Error::UnknownPeer)?;
		let mut peer = peer.lock();

		trace!(target: "pip", "Received an update to request credit params from peer {}", peer_id);

		{
			let &mut (ref mut credits, ref mut old_params) = peer.remote_flow.as_mut().ok_or(Error::NotServer)?;
			old_params.recharge(credits);

			let new_params = FlowParams::new(
				raw.val_at(0)?, // limit
				raw.val_at(2)?, // cost table
				raw.val_at(1)?, // recharge.
			);

			// preserve ratio of current : limit when updating params.
			credits.maintain_ratio(*old_params.limit(), *new_params.limit());
			*old_params = new_params;
		}

		// set flag to true when there is an in-flight request
		// corresponding to old flow params.
		if !peer.pending_requests.is_empty() {
			peer.skip_update = true;
		}

		// let peer know we've acknowledged the update.
		io.respond(packet::ACKNOWLEDGE_UPDATE, Vec::new());
		Ok(())
	}
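
	// Worked example of the ratio bookkeeping above (illustrative, assuming
	// `maintain_ratio` scales the balance linearly): a peer holding 50 credits
	// against an old limit of 100 is moved to 500 against a new limit of 1000,
	// so the fraction of credits already spent survives the parameter change.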

	// handle an acknowledgement of request credits update.
	fn acknowledge_update(&self, peer_id: &PeerId, _io: &IoContext, _raw: Rlp) -> Result<(), Error> {
		let peers = self.peers.read();
		let peer = peers.get(peer_id).ok_or(Error::UnknownPeer)?;
		let mut peer = peer.lock();

		trace!(target: "pip", "Received an acknowledgement for new request credit params from peer {}", peer_id);

		let (_, new_params) = match peer.awaiting_acknowledge.take() {
			Some(x) => x,
			None => return Err(Error::UnsolicitedResponse),
		};

		let old_limit = *peer.local_flow.limit();
		peer.local_credits.maintain_ratio(old_limit, *new_params.limit());
		peer.local_flow = new_params;
		Ok(())
	}

	// Receive a set of transactions to relay.
	fn relay_transactions(&self, peer: &PeerId, io: &IoContext, data: Rlp) -> Result<(), Error> {
		const MAX_TRANSACTIONS: usize = 256;

		let txs: Vec<_> = data.iter()
			.take(MAX_TRANSACTIONS)
			.map(|x| x.as_val::<UnverifiedTransaction>())
			.collect::<Result<_,_>>()?;

		debug!(target: "pip", "Received {} transactions to relay from peer {}", txs.len(), peer);

		for handler in &self.handlers {
			handler.on_transactions(&Ctx {
				peer: *peer,
				io: io,
				proto: self,
			}, &txs);
		}

		Ok(())
	}
}

// if something went wrong, figure out how much to punish the peer.
fn punish(peer: PeerId, io: &IoContext, e: Error) {
	match e.punishment() {
		Punishment::None => {}
		Punishment::Disconnect => {
			debug!(target: "pip", "Disconnecting peer {}: {}", peer, e);
			io.disconnect_peer(peer)
		}
		Punishment::Disable => {
			debug!(target: "pip", "Disabling peer {}: {}", peer, e);
			io.disable_peer(peer)
		}
	}
}

impl NetworkProtocolHandler for LightProtocol {
	fn initialize(&self, io: &NetworkContext) {
		io.register_timer(TIMEOUT, TIMEOUT_INTERVAL)
			.expect("Error registering sync timer.");
		io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL)
			.expect("Error registering sync timer.");
		io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL)
			.expect("Error registering sync timer.");
		io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL)
			.expect("Error registering request timer interval token.");
	}

	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
		self.handle_packet(&io, peer, packet_id, data);
	}

	fn connected(&self, io: &NetworkContext, peer: &PeerId) {
		self.on_connect(peer, &io);
	}

	fn disconnected(&self, io: &NetworkContext, peer: &PeerId) {
		self.on_disconnect(*peer, &io);
	}

	fn timeout(&self, io: &NetworkContext, timer: TimerToken) {
		match timer {
			TIMEOUT => self.timeout_check(&io),
			TICK_TIMEOUT => self.tick_handlers(&io),
			PROPAGATE_TIMEOUT => self.propagate_transactions(&io),
			RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(&io),
			_ => warn!(target: "pip", "received timeout on unknown token {}", timer),
		}
	}
}