Limit incoming connections. (#8060)
* Limit ingress connections
* Optimized handshakes logging
parent d716bae3d5
commit e8106016c8
@@ -18,9 +18,9 @@ use std::net::{SocketAddr, SocketAddrV4, Ipv4Addr};
 use std::collections::{HashMap, HashSet};
 use std::str::FromStr;
 use std::sync::Arc;
-use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
+use std::sync::atomic::{AtomicBool, Ordering as AtomicOrdering};
 use std::ops::*;
-use std::cmp::min;
+use std::cmp::{min, max};
 use std::path::{Path, PathBuf};
 use std::io::{Read, Write, self};
 use std::fs;
@@ -247,7 +247,6 @@ pub struct Host {
 	timer_counter: RwLock<usize>,
 	stats: Arc<NetworkStats>,
 	reserved_nodes: RwLock<HashSet<NodeId>>,
-	num_sessions: AtomicUsize,
 	stopping: AtomicBool,
 	filter: Option<Arc<ConnectionFilter>>,
 }
@@ -304,7 +303,6 @@ impl Host {
 			timer_counter: RwLock::new(USER_TIMER),
 			stats: stats,
 			reserved_nodes: RwLock::new(HashSet::new()),
-			num_sessions: AtomicUsize::new(0),
 			stopping: AtomicBool::new(false),
 			filter: filter,
 		};
@@ -359,7 +357,7 @@ impl Host {
 		// disconnect all non-reserved peers here.
 		let reserved: HashSet<NodeId> = self.reserved_nodes.read().clone();
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			{
 				let id = s.id();
@@ -399,7 +397,7 @@ impl Host {
 	pub fn stop(&self, io: &IoContext<NetworkIoMessage>) -> Result<(), Error> {
 		self.stopping.store(true, AtomicOrdering::Release);
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			s.disconnect(io, DisconnectReason::ClientQuit);
 			to_kill.push(s.token());
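Note on the write() → read() changes in the two hunks above (the same change recurs in the later hunks): each session sits behind its own Arc<Mutex<…>>, so mutating a session only needs that per-session mutex, never exclusive access to the session table itself. A minimal sketch of the pattern, assuming parking_lot locks (whose read()/lock() signatures match the usage in the diff) and a plain Vec standing in for the real session slab:

use parking_lot::{Mutex, RwLock};
use std::sync::Arc;

struct Session { alive: bool }

struct Host {
	// stand-in for the real session slab
	sessions: RwLock<Vec<Arc<Mutex<Session>>>>,
}

impl Host {
	fn drop_all(&self) {
		// read() is enough: the container is only iterated, and each
		// session is mutated through its own per-session Mutex, so
		// several threads may walk the list concurrently.
		for e in self.sessions.read().iter() {
			let mut s = e.lock();
			s.alive = false;
		}
	}
}

fn main() {
	let host = Host {
		sessions: RwLock::new(vec![Arc::new(Mutex::new(Session { alive: true }))]),
	};
	host.drop_all();
}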
@@ -490,22 +488,28 @@ impl Host {
 		self.sessions.read().iter().any(|e| e.lock().info.id == Some(id.clone()))
 	}

-	fn session_count(&self) -> usize {
-		self.num_sessions.load(AtomicOrdering::Relaxed)
+	// returns (handshakes, egress, ingress)
+	fn session_count(&self) -> (usize, usize, usize) {
+		let mut handshakes = 0;
+		let mut egress = 0;
+		let mut ingress = 0;
+		for s in self.sessions.read().iter() {
+			match s.try_lock() {
+				Some(ref s) if s.is_ready() && s.info.originated => egress += 1,
+				Some(ref s) if s.is_ready() && !s.info.originated => ingress += 1,
+				_ => handshakes += 1,
+			}
+		}
+		(handshakes, egress, ingress)
 	}

 	fn connecting_to(&self, id: &NodeId) -> bool {
 		self.sessions.read().iter().any(|e| e.lock().id() == Some(id))
 	}

-	fn handshake_count(&self) -> usize {
-		// session_count < total_count is possible because of the data race.
-		self.sessions.read().count().saturating_sub(self.session_count())
-	}
-
 	fn keep_alive(&self, io: &IoContext<NetworkIoMessage>) {
 		let mut to_kill = Vec::new();
-		for e in self.sessions.write().iter_mut() {
+		for e in self.sessions.read().iter() {
 			let mut s = e.lock();
 			if !s.keep_alive(io) {
 				s.disconnect(io, DisconnectReason::PingTimeout);
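The rewritten session_count classifies every live session in a single pass, replacing the racy num_sessions counter. try_lock never blocks: a session whose mutex is currently held elsewhere (typically because it is still handshaking or mid-I/O) is counted conservatively as a handshake. A standalone sketch of the same pattern, with simplified stand-in types for the real Session and slab (parking_lot's try_lock returns Option, matching the diff):

use parking_lot::{Mutex, RwLock};
use std::sync::Arc;

struct Session { originated: bool, ready: bool }
impl Session { fn is_ready(&self) -> bool { self.ready } }

struct Host { sessions: RwLock<Vec<Arc<Mutex<Session>>>> }

impl Host {
	// returns (handshakes, egress, ingress), as in the hunk above
	fn session_count(&self) -> (usize, usize, usize) {
		let (mut handshakes, mut egress, mut ingress) = (0, 0, 0);
		for s in self.sessions.read().iter() {
			match s.try_lock() {
				Some(ref s) if s.is_ready() && s.originated => egress += 1,
				Some(ref s) if s.is_ready() && !s.originated => ingress += 1,
				// a session whose mutex is held elsewhere, or one not
				// yet ready, still counts as an in-progress handshake
				_ => handshakes += 1,
			}
		}
		(handshakes, egress, ingress)
	}
}

fn main() {
	let mk = |originated, ready| Arc::new(Mutex::new(Session { originated, ready }));
	let host = Host { sessions: RwLock::new(vec![mk(true, true), mk(false, true), mk(false, false)]) };
	assert_eq!(host.session_count(), (1, 1, 1));
}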
@@ -529,9 +533,9 @@ impl Host {
 			(config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny, config.max_handshakes as usize, config.ip_filter.clone(), info.id().clone())
 		};

-		let session_count = self.session_count();
+		let (handshake_count, egress_count, ingress_count) = self.session_count();
 		let reserved_nodes = self.reserved_nodes.read();
-		if session_count >= min_peers as usize + reserved_nodes.len() {
+		if egress_count + ingress_count >= min_peers as usize + reserved_nodes.len() {
 			// check if all pinned nodes are connected.
 			if reserved_nodes.iter().all(|n| self.have_session(n) && self.connecting_to(n)) {
 				return;
@@ -541,7 +545,6 @@ impl Host {
 			pin = true;
 		}

-		let handshake_count = self.handshake_count();
 		// allow 16 slots for incoming connections
 		if handshake_count >= max_handshakes {
 			return;
@@ -566,7 +569,7 @@ impl Host {
 			self.connect_peer(&id, io);
 			started += 1;
 		}
-		debug!(target: "network", "Connecting peers: {} sessions, {} pending, {} started", self.session_count(), self.handshake_count(), started);
+		debug!(target: "network", "Connecting peers: {} sessions, {} pending + {} started", egress_count + ingress_count, handshake_count, started);
 	}

 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
@@ -676,11 +679,11 @@ impl Host {
 		let mut ready_id = None;
 		if let Some(session) = session.clone() {
 			{
-				let mut s = session.lock();
 				loop {
-					let session_result = s.readable(io, &self.info.read());
+					let session_result = session.lock().readable(io, &self.info.read());
 					match session_result {
 						Err(e) => {
+							let s = session.lock();
 							trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
 							if let ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) = *e.kind() {
 								if let Some(id) = s.id() {
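The read path now takes the session mutex per call instead of holding one guard across the whole loop, so the session stays lockable between iterations (session_count above try_locks every session). A rough sketch of the narrowed lock scope, with a stub Session type in place of the real one:

use parking_lot::Mutex;
use std::sync::Arc;

struct Session;

impl Session {
	fn readable(&mut self) -> Result<bool, ()> {
		Ok(false) // pretend there is nothing more to read
	}
}

fn session_readable(session: Arc<Mutex<Session>>) {
	loop {
		// lock only for the duration of the call; the temporary guard
		// is dropped before the match body runs
		let result = session.lock().readable();
		match result {
			Err(_) => {
				// re-acquire the lock just for error handling
				let _s = session.lock();
				break;
			}
			Ok(more) => if !more { break }, // real code dispatches data here
		}
	}
}

fn main() {
	session_readable(Arc::new(Mutex::new(Session)));
}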
@@ -693,9 +696,9 @@ impl Host {
 							break;
 						},
 						Ok(SessionData::Ready) => {
-							self.num_sessions.fetch_add(1, AtomicOrdering::SeqCst);
-							let session_count = self.session_count();
-							let (min_peers, max_peers, reserved_only, self_id) = {
+							let (_, egress_count, ingress_count) = self.session_count();
+							let mut s = session.lock();
+							let (min_peers, mut max_peers, reserved_only, self_id) = {
 								let info = self.info.read();
 								let mut max_peers = info.config.max_peers;
 								for cap in s.info.capabilities.iter() {
@@ -707,12 +710,17 @@ impl Host {
 								(info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny, info.id().clone())
 							};

+							max_peers = max(max_peers, min_peers);
+
 							let id = s.id().expect("Ready session always has id").clone();

-							// Check for the session limit. session_counts accounts for the new session.
+							// Check for the session limit.
+							// Outgoing connections are allowed as long as their count is <= min_peers
+							// Incoming connections are allowed to take all of the max_peers reserve, or at most half of the slots.
+							let max_ingress = max(max_peers - min_peers, min_peers / 2);
 							if reserved_only ||
-								(s.info.originated && session_count > min_peers) ||
-								(!s.info.originated && session_count > max_peers) {
+								(s.info.originated && egress_count > min_peers) ||
+								(!s.info.originated && ingress_count > max_ingress) {
 								// only proceed if the connecting peer is reserved.
 								if !self.reserved_nodes.read().contains(&id) {
 									s.disconnect(io, DisconnectReason::TooManyPeers);
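The ingress cap introduced above is max(max_peers - min_peers, min_peers / 2): egress sessions may fill the min_peers target, while ingress sessions get the whole max_peers reserve, but never fewer than half of min_peers slots. A small worked example of just this arithmetic (the peer counts are made up for illustration):

use std::cmp::max;

fn max_ingress(min_peers: usize, max_peers: usize) -> usize {
	let max_peers = max(max_peers, min_peers); // the same clamp as above
	max(max_peers - min_peers, min_peers / 2)
}

fn main() {
	// a large reserve: ingress may use all of it
	assert_eq!(max_ingress(25, 50), 25);
	// no reserve at all: ingress still gets half of min_peers
	assert_eq!(max_ingress(25, 25), 12);
	assert_eq!(max_ingress(10, 40), 30);
}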
@@ -816,13 +824,12 @@ impl Host {
 		let mut deregister = false;
 		let mut expired_session = None;
 		if let FIRST_SESSION ... LAST_SESSION = token {
-			let sessions = self.sessions.write();
+			let sessions = self.sessions.read();
 			if let Some(session) = sessions.get(token).cloned() {
 				expired_session = Some(session.clone());
 				let mut s = session.lock();
 				if !s.expired() {
 					if s.is_ready() {
-						self.num_sessions.fetch_sub(1, AtomicOrdering::SeqCst);
 						for (p, _) in self.handlers.read().iter() {
 							if s.have_capability(*p) {
 								to_disconnect.push(*p);
@@ -854,7 +861,7 @@ impl Host {
 	fn update_nodes(&self, _io: &IoContext<NetworkIoMessage>, node_changes: TableUpdates) {
 		let mut to_remove: Vec<PeerId> = Vec::new();
 		{
-			let sessions = self.sessions.write();
+			let sessions = self.sessions.read();
 			for c in sessions.iter() {
 				let s = c.lock();
 				if let Some(id) = s.id() {