// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use std::sync::{Arc, Weak};
use std::fs;
use std::io::Write;
use std::path::{PathBuf};
use target_info::Target;
use util::misc;
use ipc_common_types::{VersionInfo, ReleaseTrack};
use util::path::restrict_permissions_owner;
use util::{Address, H160, H256, FixedHash, Mutex, Bytes};
use ethsync::{SyncProvider};
use ethcore::client::{BlockId, BlockChainClient, ChainNotify};
use hash_fetch::{self as fetch, HashFetch};
use hash_fetch::fetch::Client as FetchService;
use operations::Operations;
use parity_reactor::Remote;
use service::{Service};
use types::all::{ReleaseInfo, OperationsInfo, CapState};

/// Filter for releases.
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum UpdateFilter {
	/// All releases following the same track.
	All,
	/// As with `All`, but only those which are known to be critical.
	Critical,
	/// None.
	None,
}

/// The policy for auto-updating.
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct UpdatePolicy {
	/// Download potential updates.
	pub enable_downloading: bool,
	/// Disable client if we know we're incapable of syncing.
	pub require_consensus: bool,
	/// Which of those downloaded should be automatically installed.
	pub filter: UpdateFilter,
	/// Which track we should be following.
	pub track: ReleaseTrack,
	/// Path for the updates to go.
	pub path: String,
}

impl Default for UpdatePolicy {
	fn default() -> Self {
		UpdatePolicy {
			enable_downloading: false,
			require_consensus: true,
			filter: UpdateFilter::None,
			track: ReleaseTrack::Unknown,
			path: Default::default(),
		}
	}
}

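// Illustrative sketch (not part of this module): how a node's startup code
// might configure the policy and construct the updater. The `client`, `sync`,
// `fetch_service` and `remote` handles are assumed to come from the caller's
// own setup; names here are hypothetical.
//
//     let policy = UpdatePolicy {
//         enable_downloading: true,
//         filter: UpdateFilter::Critical,
//         path: "/path/to/updates".into(),
//         ..Default::default()
//     };
//     let updater = Updater::new(client, sync, policy, fetch_service, remote);
//     updater.set_exit_handler(|| { /* signal the launcher to restart us */ });
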
#[derive(Debug, Default)]
struct UpdaterState {
	/// Latest information gathered from the `Operations` contract, if any.
	latest: Option<OperationsInfo>,

	/// Release currently being downloaded, if any.
	fetching: Option<ReleaseInfo>,
	/// Release downloaded and ready to be installed.
	ready: Option<ReleaseInfo>,
	/// Release that has been installed (recorded in the `latest` marker file).
	installed: Option<ReleaseInfo>,

	/// Whether we can keep syncing relative to the latest known fork.
	capability: CapState,

	/// Set when a fatal error occurred while handling a fetched update.
	disabled: bool,
}

/// Service for checking for updates and determining whether we can achieve consensus.
pub struct Updater {
	// Useful environmental stuff.
	update_policy: UpdatePolicy,
	weak_self: Mutex<Weak<Updater>>,
	client: Weak<BlockChainClient>,
	sync: Weak<SyncProvider>,
	fetcher: Mutex<Option<fetch::Client>>,
	operations: Mutex<Option<Operations>>,
	exit_handler: Mutex<Option<Box<Fn() + 'static + Send>>>,

	// Our version info (static)
	this: VersionInfo,

	// All the other info - this changes so leave it behind a Mutex.
	state: Mutex<UpdaterState>,
}

const CLIENT_ID: &'static str = "parity";

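// The platform string used to key per-platform binary checksums in the
// `Operations` contract (see `collect_release_info`); other targets fall back
// to the generic `util::misc::platform()` value.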
fn platform() -> String {
	if cfg!(target_os = "macos") {
		"x86_64-apple-darwin".into()
	} else if cfg!(windows) {
		"x86_64-pc-windows-msvc".into()
	} else if cfg!(target_os = "linux") {
		format!("{}-unknown-linux-gnu", Target::arch())
	} else {
		misc::platform()
	}
}

impl Updater {
	pub fn new(client: Weak<BlockChainClient>, sync: Weak<SyncProvider>, update_policy: UpdatePolicy, fetch: FetchService, remote: Remote) -> Arc<Self> {
		let r = Arc::new(Updater {
			update_policy: update_policy,
			weak_self: Mutex::new(Default::default()),
			client: client.clone(),
			sync: sync.clone(),
			fetcher: Mutex::new(None),
			operations: Mutex::new(None),
			exit_handler: Mutex::new(None),
			this: VersionInfo::this(),
			state: Mutex::new(Default::default()),
		});
		*r.fetcher.lock() = Some(fetch::Client::with_fetch(r.clone(), fetch, remote));
		*r.weak_self.lock() = Arc::downgrade(&r);
		r.poll();
		r
	}

	/// Set a closure to call when we want to restart the client
	pub fn set_exit_handler<F>(&self, f: F) where F: Fn() + 'static + Send {
		*self.exit_handler.lock() = Some(Box::new(f));
	}

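	// Look up a single release in the `Operations` contract: its fork block,
	// track, semver and criticality, plus the binary checksum for this
	// platform. A zero checksum means no binary was published for us.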
	fn collect_release_info(operations: &Operations, release_id: &H256) -> Result<ReleaseInfo, String> {
		let (fork, track, semver, is_critical) = operations.release(CLIENT_ID, release_id)?;
		let latest_binary = operations.checksum(CLIENT_ID, release_id, &platform())?;
		Ok(ReleaseInfo {
			version: VersionInfo::from_raw(semver, track, release_id.clone().into()),
			is_critical: is_critical,
			fork: fork as u64,
			binary: if latest_binary.is_zero() { None } else { Some(latest_binary) },
		})
	}

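	// The track we follow: the one from the update policy, or this build's own
	// track if the policy leaves it as `Unknown`.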
	fn track(&self) -> ReleaseTrack {
		match self.update_policy.track {
			ReleaseTrack::Unknown => self.this.track,
			x => x,
		}
	}

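	// Gather the full picture from the `Operations` contract: the latest fork,
	// the fork our own release belongs to, the newest release in our track and,
	// via `minor`, a release resolved onto our own track by stepping
	// Nightly -> Beta -> Stable where necessary.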
	fn collect_latest(&self) -> Result<OperationsInfo, String> {
		if let Some(ref operations) = *self.operations.lock() {
			let hh: H256 = self.this.hash.into();
			trace!(target: "updater", "Looking up this_fork for our release: {}/{:?}", CLIENT_ID, hh);
			let this_fork = operations.release(CLIENT_ID, &self.this.hash.into()).ok()
				.and_then(|(fork, track, _, _)| {
					trace!(target: "updater", "Operations returned fork={}, track={}", fork as u64, track);
					if track > 0 {Some(fork as u64)} else {None}
				});

			if self.track() == ReleaseTrack::Unknown {
				return Err(format!("Current executable ({}) is unreleased.", H160::from(self.this.hash)));
			}

			let latest_in_track = operations.latest_in_track(CLIENT_ID, self.track().into())?;
			let in_track = Self::collect_release_info(operations, &latest_in_track)?;
			let mut in_minor = Some(in_track.clone());
			const PROOF: &'static str = "in_minor initialised and assigned with Some; loop breaks if None assigned; qed";
			while in_minor.as_ref().expect(PROOF).version.track != self.track() {
				let track = match in_minor.as_ref().expect(PROOF).version.track {
					ReleaseTrack::Beta => ReleaseTrack::Stable,
					ReleaseTrack::Nightly => ReleaseTrack::Beta,
					_ => { in_minor = None; break; }
				};
				in_minor = Some(Self::collect_release_info(operations, &operations.latest_in_track(CLIENT_ID, track.into())?)?);
			}

			Ok(OperationsInfo {
				fork: operations.latest_fork()? as u64,
				this_fork: this_fork,
				track: in_track,
				minor: in_minor,
			})
		} else {
			Err("Operations not available".into())
		}
	}

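	// Name of a release binary inside the updates directory:
	// `parity-<major>.<minor>.<patch>-<build hash>`.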
	fn update_file_name(v: &VersionInfo) -> String {
		format!("parity-{}.{}.{}-{:?}", v.version.major, v.version.minor, v.version.patch, v.hash)
	}

	fn updates_path(&self, name: &str) -> PathBuf {
		let mut dest = PathBuf::from(self.update_policy.path.clone());
		dest.push(name);
		dest
	}

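	// Callback invoked by the hash-fetch client once a download finishes. Errors
	// carry a `(message, fatal)` pair; a fatal error (e.g. being unable to write
	// into the updates directory) is recorded in `state.disabled`. Depending on
	// the policy filter, a successful fetch may immediately trigger
	// `execute_upgrade`.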
	fn fetch_done(&self, result: Result<PathBuf, fetch::Error>) {
		(|| -> Result<(), (String, bool)> {
			let auto = {
				let mut s = self.state.lock();
				let fetched = s.fetching.take().unwrap();
				let b = result.map_err(|e| (format!("Unable to fetch update ({}): {:?}", fetched.version, e), false))?;
				info!(target: "updater", "Fetched latest version ({}) OK to {}", fetched.version, b.display());
				let dest = self.updates_path(&Self::update_file_name(&fetched.version));
				fs::create_dir_all(dest.parent().expect("at least one thing pushed; qed")).map_err(|e| (format!("Unable to create updates path: {:?}", e), true))?;
				fs::copy(&b, &dest).map_err(|e| (format!("Unable to copy update: {:?}", e), true))?;
				restrict_permissions_owner(&dest, false, true).map_err(|e| (format!("Unable to update permissions: {}", e), true))?;
				info!(target: "updater", "Installed updated binary to {}", dest.display());
				let auto = match self.update_policy.filter {
					UpdateFilter::All => true,
					UpdateFilter::Critical if fetched.is_critical /* TODO: or is on a bad fork */ => true,
					_ => false,
				};
				s.ready = Some(fetched);
				auto
			};
			if auto {
				// will lock self.state, so ensure it's outside of previous block.
				self.execute_upgrade();
			}
			Ok(())
		})().unwrap_or_else(|(e, fatal)| { self.state.lock().disabled = fatal; warn!("{}", e); });
	}

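	// One polling pass: bail unless the chain state is known-good, lazily bind
	// the `Operations` contract from the registry, look up the latest release,
	// kick off a download if the policy allows it, and recompute our fork
	// capability.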
	fn poll(&self) {
		trace!(target: "updater", "Current release is {} ({:?})", self.this, self.this.hash);

		// We rely on a secure state. Bail if we're unsure about it.
		if self.client.upgrade().map_or(true, |s| !s.chain_info().security_level().is_full()) {
			return;
		}

		if self.operations.lock().is_none() {
			if let Some(ops_addr) = self.client.upgrade().and_then(|c| c.registry_address("operations".into())) {
				trace!(target: "updater", "Found operations at {}", ops_addr);
				let client = self.client.clone();
				*self.operations.lock() = Some(Operations::new(ops_addr, move |a, d| client.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(a, d))));
			} else {
				// No Operations contract - bail.
				return;
			}
		}

		let current_number = self.client.upgrade().map_or(0, |c| c.block_number(BlockId::Latest).unwrap_or(0));

		let mut capability = CapState::Unknown;
		let latest = self.collect_latest().ok();
		if let Some(ref latest) = latest {
			trace!(target: "updater", "Latest release in our track is v{} it is {}critical ({} binary is {})",
				latest.track.version,
				if latest.track.is_critical {""} else {"non-"},
				&platform(),
				if let Some(ref b) = latest.track.binary {
					format!("{}", b)
				} else {
					"unreleased".into()
				}
			);
			let mut s = self.state.lock();
			let running_later = latest.track.version.version < self.version_info().version;
			let running_latest = latest.track.version.hash == self.version_info().hash;
			let already_have_latest = s.installed.as_ref().or(s.ready.as_ref()).map_or(false, |t| *t == latest.track);
			if self.update_policy.enable_downloading && !running_later && !running_latest && !already_have_latest {
				if let Some(b) = latest.track.binary {
					if s.fetching.is_none() {
						info!(target: "updater", "Attempting to get parity binary {}", b);
						s.fetching = Some(latest.track.clone());
						drop(s);
						let weak_self = self.weak_self.lock().clone();
						let f = move |r: Result<PathBuf, fetch::Error>| if let Some(this) = weak_self.upgrade() { this.fetch_done(r) };
						self.fetcher.lock().as_ref().expect("Created on `new`; qed").fetch(b, Box::new(f));
					}
				}
			}
			trace!(target: "updater", "Fork: this/current/latest/latest-known: {}/#{}/#{}/#{}", match latest.this_fork { Some(f) => format!("#{}", f), None => "unknown".into(), }, current_number, latest.track.fork, latest.fork);

			if let Some(this_fork) = latest.this_fork {
				if this_fork < latest.fork {
					// We're behind the latest fork. Now is the time to be upgrading; perhaps we're too late...
					if let Some(c) = self.client.upgrade() {
						let current_number = c.block_number(BlockId::Latest).unwrap_or(0);
						if current_number >= latest.fork - 1 {
							// We're at (or past) the last block we can import. Disable the client.
							if self.update_policy.require_consensus {
								c.disable();
							}
							capability = CapState::IncapableSince(latest.fork);
						} else {
							capability = CapState::CapableUntil(latest.fork);
						}
					}
				} else {
					capability = CapState::Capable;
				}
			}
		}

		let mut s = self.state.lock();
		s.latest = latest;
		s.capability = capability;
	}
}

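// Polling is driven by block imports: re-check for updates whenever new blocks
// arrive, but not while we are still syncing.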
impl ChainNotify for Updater {
	fn new_blocks(&self, _imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
		match (self.client.upgrade(), self.sync.upgrade()) {
			(Some(ref c), Some(ref s)) if !s.status().is_syncing(c.queue_info()) => self.poll(),
			_ => {},
		}
	}
}

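// Lets the hash-fetch URL-hint resolver read the registrar address and make
// contract calls through this node's client.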
impl fetch::urlhint::ContractClient for Updater {
	fn registrar(&self) -> Result<Address, String> {
		self.client.upgrade().ok_or_else(|| "Client not available".to_owned())?
			.registrar_address()
			.ok_or_else(|| "Registrar not available".into())
	}

	fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String> {
		self.client.upgrade().ok_or_else(|| "Client not available".to_owned())?
			.call_contract(address, data)
	}
}

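// The externally visible updater API: current capability, any release that is
// ready to install, and the trigger that actually installs it.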
impl Service for Updater {
	fn capability(&self) -> CapState {
		self.state.lock().capability
	}

	fn upgrade_ready(&self) -> Option<ReleaseInfo> {
		self.state.lock().ready.clone()
	}

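	// Record the pending release as installed by writing its file name into the
	// `latest` marker in the updates directory, then invoke the exit handler so
	// the client can be restarted into the new binary.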
	fn execute_upgrade(&self) -> bool {
		(|| -> Result<bool, String> {
			let mut s = self.state.lock();
			if let Some(r) = s.ready.take() {
				let p = Self::update_file_name(&r.version);
				let n = self.updates_path("latest");
				// TODO: creating then writing is a bit fragile. would be nice to make it atomic.
				match fs::File::create(&n).and_then(|mut f| f.write_all(p.as_bytes())) {
					Ok(_) => {
						info!(target: "updater", "Completed upgrade to {}", &r.version);
						s.installed = Some(r);
						if let Some(ref h) = *self.exit_handler.lock() {
							(*h)();
						}
						Ok(true)
					}
					Err(e) => {
						s.ready = Some(r);
						Err(format!("Unable to create soft-link for update {:?}", e))
					}
				}
			} else {
				warn!(target: "updater", "Execute upgrade called when no upgrade ready.");
				Ok(false)
			}
		})().unwrap_or_else(|e| { warn!("{}", e); false })
	}

	fn version_info(&self) -> VersionInfo { self.this.clone() }

	fn info(&self) -> Option<OperationsInfo> { self.state.lock().latest.clone() }
}