diff --git a/js/src/modals/UpgradeParity/store.js b/js/src/modals/UpgradeParity/store.js
index 587872407..5cf3ddc85 100644
--- a/js/src/modals/UpgradeParity/store.js
+++ b/js/src/modals/UpgradeParity/store.js
@@ -27,7 +27,7 @@ const STEP_UPDATING = 1;
const STEP_COMPLETED = 2;
const STEP_ERROR = 2;
-const CHECK_INTERVAL = 1 * A_MINUTE;
+let instance = null;
export default class Store {
@observable available = null;
@@ -44,8 +44,6 @@ export default class Store {
this.loadStorage();
this.checkUpgrade();
-
- setInterval(this.checkUpgrade, CHECK_INTERVAL);
}
@computed get isVisible () {
@@ -119,10 +117,10 @@ export default class Store {
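+ // Returns a promise resolving to true when the upgrade check succeeds, false otherwise.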
checkUpgrade = () => {
if (!this._api) {
- return;
+ return Promise.resolve(false);
}
- Promise
+ return Promise
.all([
this._api.parity.upgradeReady(),
this._api.parity.consensusCapability(),
@@ -134,11 +132,23 @@ export default class Store {
}
this.setVersions(available, version, consensusCapability);
+
+ return true;
})
.catch((error) => {
console.warn('checkUpgrade', error);
+
+ return false;
});
}
+
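+ // Shared singleton accessor: creates the store on first use so every consumer reuses one instance.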
+ static get (api) {
+ if (!instance) {
+ instance = new Store(api);
+ }
+
+ return instance;
+ }
}
export {
diff --git a/js/src/redux/providers/status.js b/js/src/redux/providers/status.js
index e419d78f8..8456f2d9a 100644
--- a/js/src/redux/providers/status.js
+++ b/js/src/redux/providers/status.js
@@ -14,29 +14,30 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
-import BalancesProvider from './balances';
-import { statusBlockNumber, statusCollection, statusLogs } from './statusActions';
import { isEqual } from 'lodash';
import { LOG_KEYS, getLogger } from '~/config';
+import UpgradeStore from '~/modals/UpgradeParity/store';
+
+import BalancesProvider from './balances';
+import { statusBlockNumber, statusCollection, statusLogs } from './statusActions';
const log = getLogger(LOG_KEYS.Signer);
let instance = null;
export default class Status {
+ _apiStatus = {};
+ _status = {};
+ _longStatus = {};
+ _minerSettings = {};
+ _timeoutIds = {};
+ _blockNumberSubscriptionId = null;
+ _timestamp = Date.now();
+
constructor (store, api) {
this._api = api;
this._store = store;
-
- this._apiStatus = {};
- this._status = {};
- this._longStatus = {};
- this._minerSettings = {};
-
- this._timeoutIds = {};
- this._blockNumberSubscriptionId = null;
-
- this._timestamp = Date.now();
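+ // Reuse the shared UpgradeParity store so upgrade checks are driven by the long-status poll.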
+ this._upgradeStore = UpgradeStore.get(api);
// On connecting, stop all subscriptions
api.on('connecting', this.stop, this);
@@ -281,10 +282,11 @@ export default class Status {
this._api.parity.netChain(),
this._api.parity.netPort(),
this._api.parity.rpcSettings(),
- this._api.parity.enode()
+ this._api.parity.enode(),
+ this._upgradeStore.checkUpgrade()
])
.then(([
- netPeers, clientVersion, netVersion, defaultExtraData, netChain, netPort, rpcSettings, enode
+ netPeers, clientVersion, netVersion, defaultExtraData, netChain, netPort, rpcSettings, enode, upgradeStatus
]) => {
const isTest =
netVersion === '2' || // morden
diff --git a/js/src/views/Application/application.js b/js/src/views/Application/application.js
index 256ff0212..2b32cf64e 100644
--- a/js/src/views/Application/application.js
+++ b/js/src/views/Application/application.js
@@ -51,7 +51,7 @@ class Application extends Component {
}
store = new Store(this.context.api);
- upgradeStore = new UpgradeStore(this.context.api);
+ upgradeStore = UpgradeStore.get(this.context.api);
render () {
const [root] = (window.location.hash || '').replace('#/', '').split('/');
@@ -65,7 +65,11 @@ class Application extends Component {
return (
- { isMinimized ? this.renderMinimized() : this.renderApp() }
+ {
+ isMinimized
+ ? this.renderMinimized()
+ : this.renderApp()
+ }
diff --git a/parity/main.rs b/parity/main.rs
index 7d2be8fb0..e0608eb1b 100644
--- a/parity/main.rs
+++ b/parity/main.rs
@@ -206,12 +206,27 @@ fn latest_exe_path() -> Option<PathBuf> {
.and_then(|mut f| { let mut exe = String::new(); f.read_to_string(&mut exe).ok().map(|_| updates_path(&exe)) })
}
+#[cfg(windows)]
+fn global_cleanup() {
+ extern "system" { pub fn WSACleanup() -> i32; }
+ // We need to clean up all sockets before spawning another Parity process. This makes sure everything is cleaned up.
+ // The loop is required because of the internal reference counter in the winsock dll. We don't know how many of the
+ // crates we use initialize it. There are at least two now.
+ for _ in 0..10 {
+ unsafe { WSACleanup(); }
+ }
+}
+
+#[cfg(not(windows))]
+fn global_cleanup() {}
+
// Starts ~/.parity-updates/parity and returns the code it exits with.
fn run_parity() -> Option<i32> {
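+ // Tear down our Winsock state before spawning the child process (a no-op on non-Windows platforms).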
+ global_cleanup();
use ::std::ffi::OsString;
let prefix = vec![OsString::from("--can-restart"), OsString::from("--force-direct")];
latest_exe_path().and_then(|exe| process::Command::new(exe)
- .args(&(env::args_os().chain(prefix.into_iter()).collect::<Vec<_>>()))
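+ // Skip argv[0] (our own executable path) so it is not passed on to the child as an argument.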
+ .args(&(env::args_os().skip(1).chain(prefix.into_iter()).collect::<Vec<_>>()))
.status()
.map(|es| es.code().unwrap_or(128))
.ok()
@@ -266,17 +281,23 @@ fn main() {
let exe = std::env::current_exe().ok();
let development = exe.as_ref().and_then(|p| p.parent().and_then(|p| p.parent()).and_then(|p| p.file_name()).map(|n| n == "target")).unwrap_or(false);
let same_name = exe.as_ref().map(|p| p.file_stem().map_or(false, |s| s == "parity") && p.extension().map_or(true, |x| x == "exe")).unwrap_or(false);
- let latest_exe = latest_exe_path();
- let have_update = latest_exe.as_ref().map_or(false, |p| p.exists());
- let is_non_updated_current = exe.map_or(false, |exe| latest_exe.as_ref().map_or(false, |lexe| exe.canonicalize().ok() != lexe.canonicalize().ok()));
- trace_main!("Starting up {} (force-direct: {}, development: {}, same-name: {}, have-update: {}, non-updated-current: {})", std::env::current_exe().map(|x| format!("{}", x.display())).unwrap_or("".to_owned()), force_direct, development, same_name, have_update, is_non_updated_current);
- if !force_direct && !development && same_name && have_update && is_non_updated_current {
+ trace_main!("Starting up {} (force-direct: {}, development: {}, same-name: {})", std::env::current_exe().map(|x| format!("{}", x.display())).unwrap_or("".to_owned()), force_direct, development, same_name);
+ if !force_direct && !development && same_name {
// looks like we're not running ~/.parity-updates/parity when the user is expecting otherwise.
// Everything runs inside a loop, so we'll be able to restart from the child into a new version seamlessly.
loop {
// If we fail to run the updated parity then fallback to local version.
- trace_main!("Attempting to run latest update ({})...", latest_exe.as_ref().expect("guarded by have_update; latest_exe must exist for have_update; qed").display());
- let exit_code = run_parity().unwrap_or_else(|| { trace_main!("Falling back to local..."); main_direct(true) });
+ let latest_exe = latest_exe_path();
+ let have_update = latest_exe.as_ref().map_or(false, |p| p.exists());
+ let is_non_updated_current = exe.as_ref().map_or(false, |exe| latest_exe.as_ref().map_or(false, |lexe| exe.canonicalize().ok() != lexe.canonicalize().ok()));
+ trace_main!("Starting... (have-update: {}, non-updated-current: {})", have_update, is_non_updated_current);
+ let exit_code = if have_update && is_non_updated_current {
+ trace_main!("Attempting to run latest update ({})...", latest_exe.as_ref().expect("guarded by have_update; latest_exe must exist for have_update; qed").display());
+ run_parity().unwrap_or_else(|| { trace_main!("Falling back to local..."); main_direct(true) })
+ } else {
+ trace_main!("No latest update. Attempting to direct...");
+ main_direct(true)
+ };
trace_main!("Latest exited with {}", exit_code);
if exit_code != PLEASE_RESTART_EXIT_CODE {
trace_main!("Quitting...");
diff --git a/updater/src/updater.rs b/updater/src/updater.rs
index 4406451ef..f25c809e6 100644
--- a/updater/src/updater.rs
+++ b/updater/src/updater.rs
@@ -273,6 +273,7 @@ impl Updater {
if s.fetching.is_none() {
info!(target: "updater", "Attempting to get parity binary {}", b);
s.fetching = Some(latest.track.clone());
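+ // Release the state lock before starting the fetch; the fetch callback may re-enter `fetch_done` and take it again.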
+ drop(s);
let weak_self = self.weak_self.lock().clone();
let f = move |r: Result<PathBuf, fetch::Error>| if let Some(this) = weak_self.upgrade() { this.fetch_done(r) };
self.fetcher.lock().as_ref().expect("Created on `new`; qed").fetch(b, Box::new(f));
@@ -311,7 +312,7 @@ impl Updater {
impl ChainNotify for Updater {
fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
match (self.client.upgrade(), self.sync.upgrade()) {
- (Some(ref c), Some(ref s)) if s.status().is_syncing(c.queue_info()) => self.poll(),
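+			// Only poll for updates once the client has finished syncing.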
+ (Some(ref c), Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(),
_ => {},
}
}
diff --git a/util/network/src/host.rs b/util/network/src/host.rs
index 2f236a5f7..05f0e83a4 100644
--- a/util/network/src/host.rs
+++ b/util/network/src/host.rs
@@ -683,8 +683,7 @@ impl Host {
#[cfg_attr(feature="dev", allow(single_match))]
fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
- if self.have_session(id)
- {
+ if self.have_session(id) {
trace!(target: "network", "Aborted connect. Node already connected.");
return;
}
@@ -788,102 +787,119 @@ impl Host {
let mut packet_data: Vec<(ProtocolId, PacketId, Vec<u8>)> = Vec::new();
let mut kill = false;
let session = { self.sessions.read().get(token).cloned() };
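+ // Records the node id if this read takes the session to Ready; used for the duplicate check below.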
+ let mut ready_id = None;
if let Some(session) = session.clone() {
- let mut s = session.lock();
- loop {
- let session_result = s.readable(io, &self.info.read());
- match session_result {
- Err(e) => {
- trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
- if let NetworkError::Disconnect(DisconnectReason::IncompatibleProtocol) = e {
- if let Some(id) = s.id() {
- if !self.reserved_nodes.read().contains(id) {
- self.nodes.write().mark_as_useless(id);
+ {
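+ // Extra scope so the session lock is released before the ready/duplicate handling below re-takes it.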
+ let mut s = session.lock();
+ loop {
+ let session_result = s.readable(io, &self.info.read());
+ match session_result {
+ Err(e) => {
+ trace!(target: "network", "Session read error: {}:{:?} ({:?}) {:?}", token, s.id(), s.remote_addr(), e);
+ if let NetworkError::Disconnect(DisconnectReason::IncompatibleProtocol) = e {
+ if let Some(id) = s.id() {
+ if !self.reserved_nodes.read().contains(id) {
+ self.nodes.write().mark_as_useless(id);
+ }
}
}
- }
- kill = true;
- break;
- },
- Ok(SessionData::Ready) => {
- self.num_sessions.fetch_add(1, AtomicOrdering::SeqCst);
- let session_count = self.session_count();
- let (min_peers, max_peers, reserved_only) = {
- let info = self.info.read();
- let mut max_peers = info.config.max_peers;
- for cap in s.info.capabilities.iter() {
- if let Some(num) = info.config.reserved_protocols.get(&cap.protocol) {
- max_peers += *num;
- break;
+ kill = true;
+ break;
+ },
+ Ok(SessionData::Ready) => {
+ self.num_sessions.fetch_add(1, AtomicOrdering::SeqCst);
+ let session_count = self.session_count();
+ let (min_peers, max_peers, reserved_only) = {
+ let info = self.info.read();
+ let mut max_peers = info.config.max_peers;
+ for cap in s.info.capabilities.iter() {
+ if let Some(num) = info.config.reserved_protocols.get(&cap.protocol) {
+ max_peers += *num;
+ break;
+ }
+ }
+ (info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny)
+ };
+
+ let id = s.id().expect("Ready session always has id").clone();
+
+ // Check for the session limit. session_count accounts for the new session.
+ if reserved_only ||
+ (s.info.originated && session_count > min_peers) ||
+ (!s.info.originated && session_count > max_peers) {
+ // only proceed if the connecting peer is reserved.
+ if !self.reserved_nodes.read().contains(&id) {
+ s.disconnect(io, DisconnectReason::TooManyPeers);
+ return;
}
}
- (info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny)
- };
+ ready_id = Some(id);
- // Check for the session limit. session_counts accounts for the new session.
- if reserved_only ||
- (s.info.originated && session_count > min_peers) ||
- (!s.info.originated && session_count > max_peers) {
- // only proceed if the connecting peer is reserved.
- if !self.reserved_nodes.read().contains(s.id().expect("Ready session always has id")) {
- s.disconnect(io, DisconnectReason::TooManyPeers);
- return;
- }
- }
-
- // Add it to the node table
- if !s.info.originated {
- if let Ok(address) = s.remote_addr() {
- let entry = NodeEntry { id: s.id().expect("Ready session always has id").clone(), endpoint: NodeEndpoint { address: address, udp_port: address.port() } };
- self.nodes.write().add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
- let mut discovery = self.discovery.lock();
- if let Some(ref mut discovery) = *discovery {
- discovery.add_node(entry);
+ // Add it to the node table
+ if !s.info.originated {
+ if let Ok(address) = s.remote_addr() {
+ let entry = NodeEntry { id: id, endpoint: NodeEndpoint { address: address, udp_port: address.port() } };
+ self.nodes.write().add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
+ let mut discovery = self.discovery.lock();
+ if let Some(ref mut discovery) = *discovery {
+ discovery.add_node(entry);
+ }
}
}
- }
- for (p, _) in self.handlers.read().iter() {
- if s.have_capability(*p) {
- ready_data.push(*p);
+ for (p, _) in self.handlers.read().iter() {
+ if s.have_capability(*p) {
+ ready_data.push(*p);
+ }
}
- }
- },
- Ok(SessionData::Packet {
- data,
- protocol,
- packet_id,
- }) => {
- match self.handlers.read().get(&protocol) {
- None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) },
- Some(_) => packet_data.push((protocol, packet_id, data)),
- }
- },
- Ok(SessionData::Continue) => (),
- Ok(SessionData::None) => break,
- }
- }
- }
- if kill {
- self.kill_connection(token, io, true);
- }
- let handlers = self.handlers.read();
- for p in ready_data {
- self.stats.inc_sessions();
- let reserved = self.reserved_nodes.read();
- if let Some(h) = handlers.get(&p).clone() {
- h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token);
-
- // accumulate pending packets.
- if let Some(session) = session.as_ref() {
- let mut session = session.lock();
- packet_data.extend(session.mark_connected(p));
+ },
+ Ok(SessionData::Packet {
+ data,
+ protocol,
+ packet_id,
+ }) => {
+ match self.handlers.read().get(&protocol) {
+ None => { warn!(target: "network", "No handler found for protocol: {:?}", protocol) },
+ Some(_) => packet_data.push((protocol, packet_id, data)),
+ }
+ },
+ Ok(SessionData::Continue) => (),
+ Ok(SessionData::None) => break,
+ }
}
}
- }
- for (p, packet_id, data) in packet_data {
- let reserved = self.reserved_nodes.read();
- if let Some(h) = handlers.get(&p).clone() {
- h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
+
+ if kill {
+ self.kill_connection(token, io, true);
+ }
+
+ let handlers = self.handlers.read();
+ if !ready_data.is_empty() {
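+ // Another live session with the same node id means the peer is connected twice (e.g. a simultaneous dial); reject this one.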
+ let duplicate = self.sessions.read().iter().any(|e| {
+ let session = e.lock();
+ session.token() != token && session.info.id == ready_id
+ });
+ if duplicate {
+ trace!(target: "network", "Rejected duplicate connection: {}", token);
+ session.lock().disconnect(io, DisconnectReason::DuplicatePeer);
+ return;
+ }
+ for p in ready_data {
+ self.stats.inc_sessions();
+ let reserved = self.reserved_nodes.read();
+ if let Some(h) = handlers.get(&p).clone() {
+ h.connected(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token);
+ // accumulate pending packets.
+ let mut session = session.lock();
+ packet_data.extend(session.mark_connected(p));
+ }
+ }
+ }
+
+ for (p, packet_id, data) in packet_data {
+ let reserved = self.reserved_nodes.read();
+ if let Some(h) = handlers.get(&p).clone() {
+ h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
+ }
}
}
}