// Copyright 2015-2020 Parity Technologies (UK) Ltd.
// This file is part of Parity Ethereum.

// Parity Ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity Ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity Ethereum. If not, see <http://www.gnu.org/licenses/>.

#![recursion_limit="128"]

extern crate parity_crypto as crypto;
extern crate ethcore_io as io;
extern crate ethereum_types;
extern crate rlp;
extern crate ipnetwork;
extern crate parity_snappy as snappy;
extern crate libc;
extern crate semver;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[cfg(test)] #[macro_use]
extern crate assert_matches;
extern crate derive_more;
#[macro_use]
extern crate lazy_static;

pub mod client_version;

mod connection_filter;
mod error;

pub use connection_filter::{ConnectionFilter, ConnectionDirection};
pub use io::TimerToken;
pub use error::{Error, DisconnectReason};

use client_version::ClientVersion;
use std::cmp::Ordering;
use std::collections::HashMap;
use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr};
use std::str::{self, FromStr};
use std::sync::Arc;
use std::time::Duration;
use ipnetwork::{IpNetwork, IpNetworkError};
use crypto::publickey::Secret;
use ethereum_types::H512;
use rlp::{Decodable, DecoderError, Rlp};

/// Protocol handler level packet id
pub type PacketId = u8;
/// Protocol / handler id
pub type ProtocolId = [u8; 3];

/// Node public key
pub type NodeId = H512;

/// Local (temporary) peer session ID.
pub type PeerId = usize;

/// Messages used to communicate with the event loop from other threads.
#[derive(Clone)]
pub enum NetworkIoMessage {
	/// Register a new protocol handler.
	AddHandler {
		/// Handler shared instance.
		handler: Arc<dyn NetworkProtocolHandler + Sync>,
		/// Protocol Id.
		protocol: ProtocolId,
		/// Supported protocol versions and number of packet IDs reserved by the protocol (packet count).
		versions: Vec<(u8, u8)>,
	},
	/// Register a new protocol timer.
	AddTimer {
		/// Protocol Id.
		protocol: ProtocolId,
		/// Timer token.
		token: TimerToken,
		/// Timer delay.
		delay: Duration,
	},
	/// Initialize the public interface.
	InitPublicInterface,
	/// Disconnect a peer.
	Disconnect(PeerId),
	/// Disconnect and temporarily disable a peer.
	DisablePeer(PeerId),
	/// Network has been started with the host as the given enode.
	NetworkStarted(String),
}
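
// A minimal sketch of constructing and matching an event-loop message; the
// protocol id, token, and delay below are arbitrary example values.
#[cfg(test)]
mod io_message_examples {
	use super::*;

	#[test]
	fn build_and_match_add_timer() {
		let msg = NetworkIoMessage::AddTimer {
			protocol: *b"eth",
			token: 0,
			delay: Duration::from_secs(1),
		};
		match msg {
			NetworkIoMessage::AddTimer { protocol, delay, .. } => {
				assert_eq!(&protocol, b"eth");
				assert_eq!(delay, Duration::from_secs(1));
			}
			_ => panic!("expected AddTimer"),
		}
	}
}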

/// Shared session information
#[derive(Debug, Clone)]
pub struct SessionInfo {
	/// Peer public key
	pub id: Option<NodeId>,
	/// Peer client ID
	pub client_version: ClientVersion,
	/// Peer RLPx protocol version
	pub protocol_version: u32,
	/// Session protocol capabilities
	pub capabilities: Vec<SessionCapabilityInfo>,
	/// Peer protocol capabilities
	pub peer_capabilities: Vec<PeerCapabilityInfo>,
	/// Peer ping delay
	pub ping: Option<Duration>,
	/// True if this session was originated by us.
	pub originated: bool,
	/// Remote endpoint address of the session
	pub remote_address: String,
	/// Local endpoint address of the session
	pub local_address: String,
}

/// Protocol capability advertised by a peer.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PeerCapabilityInfo {
	pub protocol: ProtocolId,
	pub version: u8,
}

impl Decodable for PeerCapabilityInfo {
	fn decode(rlp: &Rlp) -> Result<Self, DecoderError> {
		let p: Vec<u8> = rlp.val_at(0)?;
		if p.len() != 3 {
			return Err(DecoderError::Custom("Invalid subprotocol string length. Should be 3"));
		}
		let mut p2: ProtocolId = [0u8; 3];
		p2.clone_from_slice(&p);
		Ok(PeerCapabilityInfo {
			protocol: p2,
			version: rlp.val_at(1)?
		})
	}
}
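
// A round-trip sketch of the wire encoding, assuming the `rlp` crate's
// `RlpStream` builder (not imported at the top of this file): a capability is
// a two-item list of [protocol, version].
#[cfg(test)]
mod capability_decode_examples {
	use super::*;
	use rlp::RlpStream;

	#[test]
	fn decodes_two_item_list() {
		let mut s = RlpStream::new_list(2);
		s.append(&&b"eth"[..]); // three-byte protocol id
		s.append(&63u8); // protocol version
		let bytes = s.out();
		let cap: PeerCapabilityInfo = Rlp::new(&bytes).as_val().unwrap();
		assert_eq!(cap, PeerCapabilityInfo { protocol: *b"eth", version: 63 });
	}
}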

impl ToString for PeerCapabilityInfo {
	fn to_string(&self) -> String {
		format!("{}/{}", str::from_utf8(&self.protocol[..]).unwrap_or("???"), self.version)
	}
}
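
// A small sketch of the display format: the three-byte protocol id and the
// version joined with a slash.
#[cfg(test)]
mod capability_display_examples {
	use super::*;

	#[test]
	fn formats_protocol_and_version() {
		let cap = PeerCapabilityInfo { protocol: *b"eth", version: 63 };
		assert_eq!(cap.to_string(), "eth/63");
	}
}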

/// Session protocol capability information.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SessionCapabilityInfo {
	pub protocol: [u8; 3],
	pub version: u8,
	pub packet_count: u8,
	pub id_offset: u8,
}

impl PartialOrd for SessionCapabilityInfo {
	fn partial_cmp(&self, other: &SessionCapabilityInfo) -> Option<Ordering> {
		Some(self.cmp(other))
	}
}

impl Ord for SessionCapabilityInfo {
	fn cmp(&self, b: &SessionCapabilityInfo) -> Ordering {
		// By protocol id first
		if self.protocol != b.protocol {
			return self.protocol.cmp(&b.protocol);
		}
		// By version
		self.version.cmp(&b.version)
	}
}
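
// A sketch of the ordering defined above: capabilities sort by protocol id
// bytes first, then by version; the packet counts and offsets below are
// arbitrary example values.
#[cfg(test)]
mod capability_ordering_examples {
	use super::*;

	#[test]
	fn orders_by_protocol_then_version() {
		let eth62 = SessionCapabilityInfo { protocol: *b"eth", version: 62, packet_count: 8, id_offset: 16 };
		let eth63 = SessionCapabilityInfo { protocol: *b"eth", version: 63, packet_count: 17, id_offset: 16 };
		let par1 = SessionCapabilityInfo { protocol: *b"par", version: 1, packet_count: 21, id_offset: 16 };
		assert!(eth62 < eth63); // same protocol: lower version first
		assert!(eth63 < par1); // "eth" < "par" byte-wise
	}
}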

/// Type of NAT resolving method
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum NatType {
	/// No NAT traversal.
	Nothing,
	/// Use any available method.
	Any,
	/// Use UPnP.
	UPnP,
	/// Use NAT-PMP.
	NatPMP,
}

/// Network service configuration
#[derive(Debug, PartialEq, Clone)]
pub struct NetworkConfiguration {
	/// Directory path to store general network configuration. None means nothing will be saved
	pub config_path: Option<String>,
	/// Directory path to store network-specific configuration. None means nothing will be saved
	pub net_config_path: Option<String>,
	/// IP address to listen for incoming connections. Listen to all connections by default
	pub listen_address: Option<SocketAddr>,
	/// IP address to advertise. Detected automatically if none.
	pub public_address: Option<SocketAddr>,
	/// Port for UDP connections, same as TCP by default
	pub udp_port: Option<u16>,
	/// Enable NAT configuration
	pub nat_enabled: bool,
	/// NAT type
	pub nat_type: NatType,
	/// Enable discovery
	pub discovery_enabled: bool,
	/// List of initial node addresses
	pub boot_nodes: Vec<String>,
	/// Use provided node key instead of default
	pub use_secret: Option<Secret>,
	/// Minimum number of connected peers to maintain
	pub min_peers: u32,
	/// Maximum allowed number of peers
	pub max_peers: u32,
	/// Maximum handshakes
	pub max_handshakes: u32,
	/// Reserved protocols. Peers with <key> protocol get additional <value> connection slots.
	pub reserved_protocols: HashMap<ProtocolId, u32>,
	/// List of reserved node addresses.
	pub reserved_nodes: Vec<String>,
	/// The non-reserved peer mode.
	pub non_reserved_mode: NonReservedPeerMode,
	/// IP filter
	pub ip_filter: IpFilter,
	/// Client identifier
	pub client_version: String,
}

impl Default for NetworkConfiguration {
	fn default() -> Self {
		NetworkConfiguration::new()
	}
}

impl NetworkConfiguration {
	/// Create a new instance of default settings.
	pub fn new() -> Self {
		NetworkConfiguration {
			config_path: None,
			net_config_path: None,
			listen_address: None,
			public_address: None,
			udp_port: None,
			nat_enabled: true,
			nat_type: NatType::Any,
			discovery_enabled: true,
			boot_nodes: Vec::new(),
			use_secret: None,
			min_peers: 25,
			max_peers: 50,
			max_handshakes: 64,
			reserved_protocols: HashMap::new(),
			ip_filter: IpFilter::default(),
			reserved_nodes: Vec::new(),
			non_reserved_mode: NonReservedPeerMode::Accept,
			client_version: "Parity-network".into(),
		}
	}

	/// Create new default configuration with specified listen port.
	pub fn new_with_port(port: u16) -> NetworkConfiguration {
		let mut config = NetworkConfiguration::new();
		config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), port)));
		config
	}

	/// Create new default configuration for a localhost-only connection with a random port (useful for testing).
	pub fn new_local() -> NetworkConfiguration {
		let mut config = NetworkConfiguration::new();
		config.listen_address = Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)));
		config.nat_enabled = false;
		config
	}
}
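
// A sketch of the two convenience constructors: `new_with_port` binds all
// interfaces on the given port, while `new_local` binds loopback on a random
// port and disables NAT.
#[cfg(test)]
mod configuration_examples {
	use super::*;

	#[test]
	fn convenience_constructors() {
		let with_port = NetworkConfiguration::new_with_port(30303);
		assert_eq!(
			with_port.listen_address,
			Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(0, 0, 0, 0), 30303)))
		);

		let local = NetworkConfiguration::new_local();
		assert_eq!(
			local.listen_address,
			Some(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 0)))
		);
		assert!(!local.nat_enabled);
	}
}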

/// IO access point. This is passed to all IO handlers and provides an interface to the IO subsystem.
pub trait NetworkContext {
	/// Send a packet over the network to another peer.
	fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error>;

	/// Send a packet over the network to another peer using specified protocol.
	fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error>;

	/// Respond to a current network message. Panics if there is no packet in the context. If the session has expired, this is a no-op.
	fn respond(&self, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error>;

	/// Disconnect a peer and prevent it from connecting again.
	fn disable_peer(&self, peer: PeerId);

	/// Disconnect a peer. Reconnect can be attempted later.
	fn disconnect_peer(&self, peer: PeerId);

	/// Check if the session is still active.
	fn is_expired(&self) -> bool;

	/// Register a new IO timer. `IoHandler::timeout` will be called with the token.
	fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error>;

	/// Returns the peer's client identification.
	fn peer_client_version(&self, peer: PeerId) -> ClientVersion;

	/// Returns information on the p2p session.
	fn session_info(&self, peer: PeerId) -> Option<SessionInfo>;

	/// Returns the max version for a given protocol.
	fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option<u8>;

	/// Returns this object's subprotocol name.
	fn subprotocol_name(&self) -> ProtocolId;

	/// Returns whether the given peer ID is a reserved peer.
	fn is_reserved_peer(&self, peer: PeerId) -> bool;

	/// Returns the size the payload shouldn't exceed.
	fn payload_soft_limit(&self) -> usize;
}

impl<'a, T> NetworkContext for &'a T where T: ?Sized + NetworkContext {
	fn send(&self, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error> {
		(**self).send(peer, packet_id, data)
	}

	fn send_protocol(&self, protocol: ProtocolId, peer: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error> {
		(**self).send_protocol(protocol, peer, packet_id, data)
	}

	fn respond(&self, packet_id: PacketId, data: Vec<u8>) -> Result<(), Error> {
		(**self).respond(packet_id, data)
	}

	fn disable_peer(&self, peer: PeerId) {
		(**self).disable_peer(peer)
	}

	fn disconnect_peer(&self, peer: PeerId) {
		(**self).disconnect_peer(peer)
	}

	fn is_expired(&self) -> bool {
		(**self).is_expired()
	}

	fn register_timer(&self, token: TimerToken, delay: Duration) -> Result<(), Error> {
		(**self).register_timer(token, delay)
	}

	fn peer_client_version(&self, peer: PeerId) -> ClientVersion {
		(**self).peer_client_version(peer)
	}

	fn session_info(&self, peer: PeerId) -> Option<SessionInfo> {
		(**self).session_info(peer)
	}

	fn protocol_version(&self, protocol: ProtocolId, peer: PeerId) -> Option<u8> {
		(**self).protocol_version(protocol, peer)
	}

	fn subprotocol_name(&self) -> ProtocolId {
		(**self).subprotocol_name()
	}

	fn is_reserved_peer(&self, peer: PeerId) -> bool {
		(**self).is_reserved_peer(peer)
	}

	fn payload_soft_limit(&self) -> usize {
		(**self).payload_soft_limit()
	}
}

/// Network IO protocol handler. This needs to be implemented for each new subprotocol.
/// All the handler functions are called from within the IO event loop.
pub trait NetworkProtocolHandler: Sync + Send {
	/// Initialize the handler.
	fn initialize(&self, _io: &dyn NetworkContext) {}
	/// Called when a new network packet is received.
	fn read(&self, io: &dyn NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]);
	/// Called when a new peer connects. Only called when the peer supports the same protocol.
	fn connected(&self, io: &dyn NetworkContext, peer: &PeerId);
	/// Called when a previously connected peer disconnects.
	fn disconnected(&self, io: &dyn NetworkContext, peer: &PeerId);
	/// Timer function called after a timeout created with `NetworkContext::register_timer`.
	fn timeout(&self, _io: &dyn NetworkContext, _timer: TimerToken) {}
}
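
// A minimal handler sketch: `EchoHandler` is a hypothetical example type (not
// part of this crate) showing how the trait is implemented and how handlers
// are shared as trait objects; real handlers live in the sync crates.
#[cfg(test)]
mod handler_examples {
	use super::*;

	struct EchoHandler;

	impl NetworkProtocolHandler for EchoHandler {
		fn read(&self, io: &dyn NetworkContext, _peer: &PeerId, packet_id: u8, data: &[u8]) {
			// Echo every packet back to its sender; errors are ignored in this sketch.
			let _ = io.respond(packet_id, data.to_vec());
		}
		fn connected(&self, _io: &dyn NetworkContext, _peer: &PeerId) {}
		fn disconnected(&self, _io: &dyn NetworkContext, _peer: &PeerId) {}
	}

	#[test]
	fn handler_is_object_safe() {
		// Handlers are registered as shared trait objects (see `NetworkIoMessage::AddHandler`).
		let _handler: Arc<dyn NetworkProtocolHandler + Sync> = Arc::new(EchoHandler);
	}
}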

/// Non-reserved peer modes.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum NonReservedPeerMode {
	/// Accept them. This is the default.
	Accept,
	/// Deny them.
	Deny,
}

impl NonReservedPeerMode {
	/// Attempt to parse the peer mode from a string.
	pub fn parse(s: &str) -> Option<Self> {
		match s {
			"accept" => Some(NonReservedPeerMode::Accept),
			"deny" => Some(NonReservedPeerMode::Deny),
			_ => None,
		}
	}
}
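
// A sketch of the accepted inputs: only the exact strings "accept" and "deny"
// parse; anything else yields `None`.
#[cfg(test)]
mod peer_mode_examples {
	use super::*;

	#[test]
	fn parses_known_modes_only() {
		assert_eq!(NonReservedPeerMode::parse("accept"), Some(NonReservedPeerMode::Accept));
		assert_eq!(NonReservedPeerMode::parse("deny"), Some(NonReservedPeerMode::Deny));
		assert_eq!(NonReservedPeerMode::parse("bogus"), None);
	}
}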

/// IP filter: a predefined address class plus custom allow and block lists.
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct IpFilter {
	pub predefined: AllowIP,
	pub custom_allow: Vec<IpNetwork>,
	pub custom_block: Vec<IpNetwork>,
}

impl Default for IpFilter {
	fn default() -> Self {
		IpFilter {
			predefined: AllowIP::All,
			custom_allow: vec![],
			custom_block: vec![],
		}
	}
}

impl IpFilter {
	/// Attempt to parse an IP filter from a string.
	pub fn parse(s: &str) -> Result<IpFilter, IpNetworkError> {
		let mut filter = IpFilter::default();
		for f in s.split_whitespace() {
			match f {
				"all" => filter.predefined = AllowIP::All,
				"private" => filter.predefined = AllowIP::Private,
				"public" => filter.predefined = AllowIP::Public,
				"none" => filter.predefined = AllowIP::None,
				custom => {
					if custom.starts_with("-") {
						filter.custom_block.push(IpNetwork::from_str(&custom.to_owned().split_off(1))?)
					} else {
						filter.custom_allow.push(IpNetwork::from_str(custom)?)
					}
				}
			}
		}
		Ok(filter)
	}
}
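
// A sketch of the filter grammar parsed above: whitespace-separated tokens,
// where a known keyword selects a predefined set, a leading '-' blocks a CIDR
// range, and any other token allows one.
#[cfg(test)]
mod ip_filter_examples {
	use super::*;

	#[test]
	fn parses_keywords_and_cidr_ranges() {
		let filter = IpFilter::parse("private -10.0.0.0/8 192.168.0.0/16").unwrap();
		assert_eq!(filter.predefined, AllowIP::Private);
		assert_eq!(filter.custom_block, vec![IpNetwork::from_str("10.0.0.0/8").unwrap()]);
		assert_eq!(filter.custom_allow, vec![IpNetwork::from_str("192.168.0.0/16").unwrap()]);
	}
}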

/// IP filter
#[derive(Clone, Debug, PartialEq, Eq)]
pub enum AllowIP {
	/// Connect to any address
	All,
	/// Connect to private network only
	Private,
	/// Connect to public network only
	Public,
	/// Block all addresses
	None,
}