v2.5.8-stable (#11041)

* add more tx tests (#11038)
* Fix parallel transactions race-condition (#10995)
* Add blake2_f precompile (#11017)
* [trace] introduce trace failed to Ext (#11019)
* Edit publish-onchain.sh to use https (#11016)
* Fix deadlock in network-devp2p (#11013)
* EIP 1108: Reduce alt_bn128 precompile gas costs (#11008)
* xDai chain support and nodes list update (#10989)
* EIP 2028: transaction gas lowered from 68 to 16 (#10987); see the calldata-gas sketch after this list
* EIP-1344 Add CHAINID op-code (#10983)
* manual publish jobs for releases, no changes for nightlies (#10977)
* [blooms-db] Fix benchmarks (#10974)
* Verify transaction against its block during import (#10954)
* Better error message for rpc gas price errors (#10931)
* tx-pool: accept local tx with higher gas price when pool full (#10901)
* Fix fork choice (#10837)
* Cleanup unused vm dependencies (#10787)
* Fix compilation on recent nightlies (#10991)
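
EIP 2028 (#10987) lowers the gas charged for each nonzero byte of transaction data from 68 to 16; zero bytes stay at 4 gas. A minimal sketch of that arithmetic, using an illustrative helper rather than the client's actual API:

fn calldata_gas(data: &[u8], nonzero_cost: u64) -> u64 {
	const ZERO_BYTE_COST: u64 = 4; // unchanged by EIP 2028
	data.iter()
		.map(|&b| if b == 0 { ZERO_BYTE_COST } else { nonzero_cost })
		.sum()
}

fn main() {
	let data = [0u8, 1, 2, 0, 3];
	assert_eq!(calldata_gas(&data, 68), 3 * 68 + 2 * 4); // 212 gas before Istanbul
	assert_eq!(calldata_gas(&data, 16), 3 * 16 + 2 * 4); // 56 gas with EIP 2028
}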
Author: s3krit
Date:   2019-09-12 18:43:53 +02:00
Committed by: GitHub
Parent: 6bd7db96fe
Commit: 45f27cec34

92 changed files with 2482 additions and 538 deletions

@@ -254,6 +254,8 @@ struct ProtocolTimer {
 }
 
 /// Root IO handler. Manages protocol handlers, IO timers and network connections.
+///
+/// NOTE: must keep the lock in order of: reserved_nodes (rwlock) -> session (mutex, from sessions)
 pub struct Host {
 	pub info: RwLock<HostInfo>,
 	udp_socket: Mutex<Option<UdpSocket>>,
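
The NOTE added above is the rule the rest of this diff enforces: any path that needs both locks must take reserved_nodes (an RwLock) before any session mutex. A minimal sketch of why, with hypothetical types and std::sync in place of the parking_lot-style guards the code above uses:

use std::sync::{Mutex, RwLock};

// Stand-in for Host: one RwLock and one Mutex whose guards can be held
// at the same time. If one thread locked session -> reserved_nodes while
// another locked reserved_nodes -> session, each could hold one lock
// while blocking forever on the other.
struct HostSketch {
	reserved_nodes: RwLock<Vec<u64>>, // ids of reserved peers
	session: Mutex<String>,           // some per-session state
}

impl HostSketch {
	fn respects_lock_order(&self) {
		// Correct order on every path: RwLock first, Mutex second.
		let reserved = self.reserved_nodes.read().unwrap();
		let mut s = self.session.lock().unwrap();
		if !reserved.contains(&42) {
			s.push_str("disconnect non-reserved peer");
		}
		// Both guards drop here, in reverse order of acquisition.
	}
}

fn main() {
	let host = HostSketch {
		reserved_nodes: RwLock::new(vec![42]),
		session: Mutex::new(String::new()),
	};
	host.respects_lock_order();
}

This is also why the hunks below hoist the reserved_nodes read guard into a local above session.lock(): the guard is taken once, at a point that satisfies the ordering, and reused rather than re-acquired mid-path.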
@@ -715,12 +717,13 @@ impl Host {
 		let session_result = session.lock().readable(io, &self.info.read());
 		match session_result {
 			Err(e) => {
+				let reserved_nodes = self.reserved_nodes.read();
 				let s = session.lock();
 				trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
 				match *e.kind() {
 					ErrorKind::Disconnect(DisconnectReason::IncompatibleProtocol) | ErrorKind::Disconnect(DisconnectReason::UselessPeer) => {
 						if let Some(id) = s.id() {
-							if !self.reserved_nodes.read().contains(id) {
+							if !reserved_nodes.contains(id) {
 								let mut nodes = self.nodes.write();
 								nodes.note_failure(&id);
 								nodes.mark_as_useless(id);
@@ -734,6 +737,7 @@ impl Host {
 			},
 			Ok(SessionData::Ready) => {
 				let (_, egress_count, ingress_count) = self.session_count();
+				let reserved_nodes = self.reserved_nodes.read();
 				let mut s = session.lock();
 				let (min_peers, mut max_peers, reserved_only, self_id) = {
 					let info = self.info.read();
@@ -758,7 +762,7 @@ impl Host {
 					if reserved_only ||
 						(s.info.originated && egress_count > min_peers) ||
 						(!s.info.originated && ingress_count > max_ingress) {
-						if !self.reserved_nodes.read().contains(&id) {
+						if !reserved_nodes.contains(&id) {
 							// only proceed if the connecting peer is reserved.
 							trace!(target: "network", "Disconnecting non-reserved peer {:?}", id);
 							s.disconnect(io, DisconnectReason::TooManyPeers);
@@ -973,7 +977,8 @@ impl Host {
 		for i in to_remove {
 			trace!(target: "network", "Removed from node table: {}", i);
 		}
-		self.nodes.write().update(node_changes, &*self.reserved_nodes.read());
+		let reserved_nodes = self.reserved_nodes.read();
+		self.nodes.write().update(node_changes, &*reserved_nodes);
 	}
 
 	pub fn with_context<F>(&self, protocol: ProtocolId, io: &IoContext<NetworkIoMessage>, action: F) where F: FnOnce(&NetworkContextTrait) {
@@ -1059,8 +1064,9 @@ impl IoHandler<NetworkIoMessage> for Host {
 			},
 			NODE_TABLE => {
 				trace!(target: "network", "Refreshing node table");
-				self.nodes.write().clear_useless();
-				self.nodes.write().save();
+				let mut nodes = self.nodes.write();
+				nodes.clear_useless();
+				nodes.save();
 			},
 			_ => match self.timers.read().get(&token).cloned() {
 				Some(timer) => match self.handlers.read().get(&timer.protocol).cloned() {
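
The NODE_TABLE hunk fixes a different hazard: calling self.nodes.write() twice back to back takes and releases the write lock twice, doing redundant work and leaving a window between the clear and the save. A minimal sketch of the hoisted-guard pattern, with illustrative names and std::sync:

use std::sync::RwLock;

fn refresh_node_table(nodes: &RwLock<Vec<&'static str>>) {
	// Before: each statement acquired the write lock separately, so another
	// thread could observe the cleared-but-unsaved state in between:
	//   nodes.write().unwrap().clear();
	//   nodes.write().unwrap().push("saved");

	// After: take the guard once and hold it across both operations.
	let mut guard = nodes.write().unwrap();
	guard.clear();
	guard.push("saved");
}

fn main() {
	let nodes = RwLock::new(vec!["stale"]);
	refresh_node_table(&nodes);
	assert_eq!(*nodes.read().unwrap(), vec!["saved"]);
}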