Backporting to beta (#4203)
* Minor typo to ensure it updates only when synced. (#4188)
* Updater fixes (#4196)
* Minor typo to ensure it updates only when synced.
* Fix deadlock.
* Skip unneeded arg in making list.
* Allow auto-restart even when not running an update.
* Fix trace.
* Update update info on each loop.
* Fix build.
* Shutdown all sockets
* Remove superfluous use.
* Poll for upgrades as part of global status (long) (#4197)
* Poll for upgrades as part of global status (long)
* Fix path
* Prevent duplicate incoming connections (#4180)
Parent: f20db41169
Commit: cf6d870b09
@@ -27,7 +27,7 @@ const STEP_UPDATING = 1;
 const STEP_COMPLETED = 2;
 const STEP_ERROR = 2;
 
-const CHECK_INTERVAL = 1 * A_MINUTE;
+let instance = null;
 
 export default class Store {
   @observable available = null;
@@ -44,8 +44,6 @@ export default class Store {
 
     this.loadStorage();
     this.checkUpgrade();
-
-    setInterval(this.checkUpgrade, CHECK_INTERVAL);
   }
 
   @computed get isVisible () {
@@ -119,10 +117,10 @@ export default class Store {
 
   checkUpgrade = () => {
     if (!this._api) {
-      return;
+      return Promise.resolve(false);
     }
 
-    Promise
+    return Promise
       .all([
         this._api.parity.upgradeReady(),
         this._api.parity.consensusCapability(),
@@ -134,11 +132,23 @@
         }
 
         this.setVersions(available, version, consensusCapability);
 
+        return true;
       })
       .catch((error) => {
        console.warn('checkUpgrade', error);
 
+        return false;
       });
   }
 
+  static get (api) {
+    if (!instance) {
+      instance = new Store(api);
+    }
+
+    return instance;
+  }
 }
 
 export {
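The `static get (api)` added above memoizes a single Store per page, so the ParityBar and the status provider (next hunks) share one upgrade store instead of each polling on their own. For readers coming from the Rust half of this commit, a rough Rust analogue of the same memoized-singleton idea — not part of the commit; `OnceLock` and the names below are purely illustrative — looks like this:

use std::sync::OnceLock;

struct Store {
    available: Option<String>, // stand-in for the observable fields
}

impl Store {
    fn new() -> Store {
        Store { available: None }
    }

    // Analogue of `static get (api)`: create on first call, then hand the
    // same shared instance to every caller.
    fn get() -> &'static Store {
        static INSTANCE: OnceLock<Store> = OnceLock::new();
        INSTANCE.get_or_init(Store::new)
    }
}

fn main() {
    // Both call sites observe the same instance, as parityBar.js and
    // status.js now do via UpgradeStore.get(api).
    assert!(std::ptr::eq(Store::get(), Store::get()));
}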
@@ -14,29 +14,30 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-import BalancesProvider from './balances';
-import { statusBlockNumber, statusCollection, statusLogs } from './statusActions';
 import { isEqual } from 'lodash';
 
 import { LOG_KEYS, getLogger } from '~/config';
+import UpgradeStore from '~/modals/UpgradeParity/store';
+
+import BalancesProvider from './balances';
+import { statusBlockNumber, statusCollection, statusLogs } from './statusActions';
 
 const log = getLogger(LOG_KEYS.Signer);
 let instance = null;
 
 export default class Status {
+  _apiStatus = {};
+  _status = {};
+  _longStatus = {};
+  _minerSettings = {};
+  _timeoutIds = {};
+  _blockNumberSubscriptionId = null;
+  _timestamp = Date.now();
+
   constructor (store, api) {
     this._api = api;
     this._store = store;
-    this._apiStatus = {};
-    this._status = {};
-    this._longStatus = {};
-    this._minerSettings = {};
-
-    this._timeoutIds = {};
-    this._blockNumberSubscriptionId = null;
-
-    this._timestamp = Date.now();
+    this._upgradeStore = UpgradeStore.get(api);
 
     // On connecting, stop all subscriptions
     api.on('connecting', this.stop, this);
@@ -281,10 +282,11 @@ export default class Status {
       this._api.parity.netChain(),
       this._api.parity.netPort(),
       this._api.parity.rpcSettings(),
-      this._api.parity.enode()
+      this._api.parity.enode(),
+      this._upgradeStore.checkUpgrade()
     ])
     .then(([
-      netPeers, clientVersion, netVersion, defaultExtraData, netChain, netPort, rpcSettings, enode
+      netPeers, clientVersion, netVersion, defaultExtraData, netChain, netPort, rpcSettings, enode, upgradeStatus
     ]) => {
       const isTest =
         netVersion === '2' || // morden
@@ -51,7 +51,7 @@ class Application extends Component {
   }
 
   store = new Store(this.context.api);
-  upgradeStore = new UpgradeStore(this.context.api);
+  upgradeStore = UpgradeStore.get(this.context.api);
 
   render () {
     const [root] = (window.location.hash || '').replace('#/', '').split('/');
@@ -65,7 +65,11 @@ class Application extends Component {
 
     return (
       <div>
-        { isMinimized ? this.renderMinimized() : this.renderApp() }
+        {
+          isMinimized
+            ? this.renderMinimized()
+            : this.renderApp()
+        }
         <Connection />
         <ParityBar dapp={ isMinimized } />
       </div>
@@ -206,12 +206,27 @@ fn latest_exe_path() -> Option<PathBuf> {
 		.and_then(|mut f| { let mut exe = String::new(); f.read_to_string(&mut exe).ok().map(|_| updates_path(&exe)) })
 }
 
+#[cfg(windows)]
+fn global_cleanup() {
+	extern "system" { pub fn WSACleanup() -> i32; }
+	// We need to clean up all sockets before spawning another Parity process. This makes sure everything is cleaned up.
+	// The loop is required because of the internal reference counter in the winsock dll. We don't know how many of the
+	// crates we use initialize it. There are at least 2 now.
+	for _ in 0..10 {
+		unsafe { WSACleanup(); }
+	}
+}
+
+#[cfg(not(windows))]
+fn global_cleanup() {}
+
 // Starts ~/.parity-updates/parity and returns the code it exits with.
 fn run_parity() -> Option<i32> {
+	global_cleanup();
 	use ::std::ffi::OsString;
 	let prefix = vec![OsString::from("--can-restart"), OsString::from("--force-direct")];
 	latest_exe_path().and_then(|exe| process::Command::new(exe)
-		.args(&(env::args_os().chain(prefix.into_iter()).collect::<Vec<_>>()))
+		.args(&(env::args_os().skip(1).chain(prefix.into_iter()).collect::<Vec<_>>()))
 		.status()
 		.map(|es| es.code().unwrap_or(128))
 		.ok()
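The `.skip(1)` added above is the "Skip unneeded arg in making list" fix from the commit message: `env::args_os()` yields the program's own path as its first element, so forwarding it unfiltered would hand the child its parent's argv[0] as an ordinary argument. A minimal sketch of the corrected forwarding — the child path and function name are placeholders, not the updater's real lookup:

use std::env;
use std::process;

// Spawn `child` with our own command-line arguments, minus argv[0].
// Without .skip(1), the child would receive our executable path as a
// regular argument and likely fail to parse its options.
fn forward_args(child: &str) -> Option<i32> {
    process::Command::new(child)
        .args(env::args_os().skip(1))
        .status()
        .ok()
        .map(|es| es.code().unwrap_or(128))
}

fn main() {
    // Placeholder binary; latest_exe_path() plays this role in the diff.
    if let Some(code) = forward_args("/bin/echo") {
        println!("child exited with {}", code);
    }
}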
@@ -266,17 +281,23 @@ fn main() {
 	let exe = std::env::current_exe().ok();
 	let development = exe.as_ref().and_then(|p| p.parent().and_then(|p| p.parent()).and_then(|p| p.file_name()).map(|n| n == "target")).unwrap_or(false);
 	let same_name = exe.as_ref().map(|p| p.file_stem().map_or(false, |s| s == "parity") && p.extension().map_or(true, |x| x == "exe")).unwrap_or(false);
-	let latest_exe = latest_exe_path();
-	let have_update = latest_exe.as_ref().map_or(false, |p| p.exists());
-	let is_non_updated_current = exe.map_or(false, |exe| latest_exe.as_ref().map_or(false, |lexe| exe.canonicalize().ok() != lexe.canonicalize().ok()));
-	trace_main!("Starting up {} (force-direct: {}, development: {}, same-name: {}, have-update: {}, non-updated-current: {})", std::env::current_exe().map(|x| format!("{}", x.display())).unwrap_or("<unknown>".to_owned()), force_direct, development, same_name, have_update, is_non_updated_current);
-	if !force_direct && !development && same_name && have_update && is_non_updated_current {
+	trace_main!("Starting up {} (force-direct: {}, development: {}, same-name: {})", std::env::current_exe().map(|x| format!("{}", x.display())).unwrap_or("<unknown>".to_owned()), force_direct, development, same_name);
+	if !force_direct && !development && same_name {
 		// looks like we're not running ~/.parity-updates/parity when the user is expecting otherwise.
 		// Everything run inside a loop, so we'll be able to restart from the child into a new version seamlessly.
 		loop {
 			// If we fail to run the updated parity then fallback to local version.
+			let latest_exe = latest_exe_path();
+			let have_update = latest_exe.as_ref().map_or(false, |p| p.exists());
+			let is_non_updated_current = exe.as_ref().map_or(false, |exe| latest_exe.as_ref().map_or(false, |lexe| exe.canonicalize().ok() != lexe.canonicalize().ok()));
+			trace_main!("Starting... (have-update: {}, non-updated-current: {})", have_update, is_non_updated_current);
+			let exit_code = if have_update && is_non_updated_current {
 			trace_main!("Attempting to run latest update ({})...", latest_exe.as_ref().expect("guarded by have_update; latest_exe must exist for have_update; qed").display());
-			let exit_code = run_parity().unwrap_or_else(|| { trace_main!("Falling back to local..."); main_direct(true) });
+			run_parity().unwrap_or_else(|| { trace_main!("Falling back to local..."); main_direct(true) })
+			} else {
+				trace_main!("No latest update. Attempting to direct...");
+				main_direct(true)
+			};
 			trace_main!("Latest exited with {}", exit_code);
 			if exit_code != PLEASE_RESTART_EXIT_CODE {
 				trace_main!("Quitting...");
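Moving `latest_exe_path()` and the `have_update` / `is_non_updated_current` checks inside the `loop` is what makes "Update update info on each loop" and "Allow auto-restart even when not running an update" work: the supervisor now re-evaluates, on every restart, whether an update arrived while the child was running. A stripped-down sketch of that control flow, with the path and helper functions as stand-ins (the restart code matches Parity's, but treat it as illustrative):

use std::path::Path;
use std::process;

const PLEASE_RESTART_EXIT_CODE: i32 = 69;

// Stand-ins for launching the updated binary and the local one.
fn run_updated() -> Option<i32> { None } // None = failed to launch
fn run_local() -> i32 { 0 }

fn main() {
    loop {
        // Re-checked on every iteration: the previous child may have
        // downloaded an update before asking to be restarted.
        let have_update = Path::new("/tmp/parity-updates/parity").exists();

        let exit_code = if have_update {
            // Fall back to the local binary if the update won't start.
            run_updated().unwrap_or_else(run_local)
        } else {
            run_local()
        };

        if exit_code != PLEASE_RESTART_EXIT_CODE {
            process::exit(exit_code);
        }
    }
}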
@@ -273,6 +273,7 @@ impl Updater {
 		if s.fetching.is_none() {
 			info!(target: "updater", "Attempting to get parity binary {}", b);
 			s.fetching = Some(latest.track.clone());
+			drop(s);
 			let weak_self = self.weak_self.lock().clone();
 			let f = move |r: Result<PathBuf, fetch::Error>| if let Some(this) = weak_self.upgrade() { this.fetch_done(r) };
 			self.fetcher.lock().as_ref().expect("Created on `new`; qed").fetch(b, Box::new(f));
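The added `drop(s)` is the "Fix deadlock" item: `s` is the guard on the updater's state lock, and `fetch` may invoke `fetch_done` on the same thread, which takes that lock again. Releasing the guard before calling out breaks the cycle. A self-contained illustration of the pattern, with types simplified and names invented for the sketch (this is not the updater's real API):

use std::sync::Mutex;

struct Updater {
    state: Mutex<Option<String>>, // stand-in for the updater's state
}

impl Updater {
    fn begin_fetch(&self) {
        let mut s = self.state.lock().unwrap();
        *s = Some("fetching".to_owned());
        // Release the guard before calling code that re-locks `state`;
        // a std Mutex is not reentrant, so keeping `s` alive here would
        // deadlock the moment fetch_done() runs on this thread.
        drop(s);
        self.fetch_done();
    }

    fn fetch_done(&self) {
        let mut s = self.state.lock().unwrap();
        *s = None;
    }
}

fn main() {
    let u = Updater { state: Mutex::new(None) };
    u.begin_fetch(); // completes; without drop(s) it would hang
}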
@@ -311,7 +312,7 @@ impl Updater {
 impl ChainNotify for Updater {
 	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
 		match (self.client.upgrade(), self.sync.upgrade()) {
-			(Some(ref c), Some(ref s)) if s.status().is_syncing(c.queue_info()) => self.poll(),
+			(Some(ref c), Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(),
 			_ => {},
 		}
 	}
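The one-character change above inverts a bad guard: the updater was polling for new releases only while the node was still syncing. With the `!`, it polls once the chain is idle, which is the "Minor typo to ensure it updates only when synced" fix. A boolean-level sketch of the corrected guard (all real types reduced to a flag):

struct Updater;

impl Updater {
    fn poll(&self) {
        println!("checking for a new release");
    }

    // Reduced from new_blocks(): poll only when the client is NOT syncing.
    fn on_new_blocks(&self, is_syncing: bool) {
        if !is_syncing {
            self.poll();
        }
    }
}

fn main() {
    let u = Updater;
    u.on_new_blocks(true);  // still syncing: skip the poll
    u.on_new_blocks(false); // synced: poll for upgrades
}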
@@ -683,8 +683,7 @@ impl Host {
 
 	#[cfg_attr(feature="dev", allow(single_match))]
 	fn connect_peer(&self, id: &NodeId, io: &IoContext<NetworkIoMessage>) {
-		if self.have_session(id)
-		{
+		if self.have_session(id) {
 			trace!(target: "network", "Aborted connect. Node already connected.");
 			return;
 		}
@@ -788,7 +787,9 @@ impl Host {
 		let mut packet_data: Vec<(ProtocolId, PacketId, Vec<u8>)> = Vec::new();
 		let mut kill = false;
 		let session = { self.sessions.read().get(token).cloned() };
+		let mut ready_id = None;
 		if let Some(session) = session.clone() {
+			{
 			let mut s = session.lock();
 			loop {
 				let session_result = s.readable(io, &self.info.read());
@@ -820,21 +821,24 @@ impl Host {
 					(info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny)
 				};
 
+				let id = s.id().expect("Ready session always has id").clone();
+
 				// Check for the session limit. session_counts accounts for the new session.
 				if reserved_only ||
 					(s.info.originated && session_count > min_peers) ||
 					(!s.info.originated && session_count > max_peers) {
 					// only proceed if the connecting peer is reserved.
-					if !self.reserved_nodes.read().contains(s.id().expect("Ready session always has id")) {
+					if !self.reserved_nodes.read().contains(&id) {
 						s.disconnect(io, DisconnectReason::TooManyPeers);
 						return;
 					}
 				}
+				ready_id = Some(id);
 
 				// Add it to the node table
 				if !s.info.originated {
 					if let Ok(address) = s.remote_addr() {
-						let entry = NodeEntry { id: s.id().expect("Ready session always has id").clone(), endpoint: NodeEndpoint { address: address, udp_port: address.port() } };
+						let entry = NodeEntry { id: id, endpoint: NodeEndpoint { address: address, udp_port: address.port() } };
 						self.nodes.write().add_node(Node::new(entry.id.clone(), entry.endpoint.clone()));
 						let mut discovery = self.discovery.lock();
 						if let Some(ref mut discovery) = *discovery {
@@ -863,27 +867,39 @@ impl Host {
 					}
 				}
 			}
 
 		if kill {
 			self.kill_connection(token, io, true);
 		}
 
 		let handlers = self.handlers.read();
+		if !ready_data.is_empty() {
+			let duplicate = self.sessions.read().iter().any(|e| {
+				let session = e.lock();
+				session.token() != token && session.info.id == ready_id
+			});
+			if duplicate {
+				trace!(target: "network", "Rejected duplicate connection: {}", token);
+				session.lock().disconnect(io, DisconnectReason::DuplicatePeer);
+				return;
+			}
 			for p in ready_data {
 				self.stats.inc_sessions();
 				let reserved = self.reserved_nodes.read();
 				if let Some(h) = handlers.get(&p).clone() {
-					h.connected(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token);
+					h.connected(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token);
 
 					// accumulate pending packets.
-					if let Some(session) = session.as_ref() {
 					let mut session = session.lock();
 					packet_data.extend(session.mark_connected(p));
 				}
 			}
 		}
 
 		for (p, packet_id, data) in packet_data {
 			let reserved = self.reserved_nodes.read();
 			if let Some(h) = handlers.get(&p).clone() {
-				h.read(&NetworkContext::new(io, p, session.clone(), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
+				h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data[1..]);
+			}
 		}
 	}
 }
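The new `ready_data` block implements "Prevent duplicate incoming connections (#4180)": once a session reaches the ready state, its node id (`ready_id`) is compared against every other live session, and the newcomer is disconnected with `DuplicatePeer` if that id is already present. The table scan reduces to the predicate below; the session table is simplified here to (token, node-id) pairs, and real node ids are H512 public keys rather than integers:

// token identifies the connection slot; id is the remote node's identity,
// known only once the handshake has made the session "ready".
fn is_duplicate(sessions: &[(usize, Option<u64>)], token: usize, ready_id: Option<u64>) -> bool {
    ready_id.is_some()
        && sessions
            .iter()
            .any(|&(t, id)| t != token && id == ready_id)
}

fn main() {
    let sessions = [(1, Some(42)), (2, Some(42)), (3, None)];
    // Node 42 is already connected on token 1, so token 2 is a duplicate.
    assert!(is_duplicate(&sessions, 2, Some(42)));
    // A not-yet-ready session (no id) never counts as a duplicate here.
    assert!(!is_duplicate(&sessions, 3, None));
    println!("duplicate-connection checks hold");
}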