Fetch the update binary and place it in the updates path (`~/.parity-updates`).

This commit is contained in:
Gav Wood 2016-12-09 19:02:42 +01:00
parent 8903384840
commit c2b6be95c8
No known key found for this signature in database
GPG Key ID: C49C1ACA1CC9B252
7 changed files with 82 additions and 41 deletions

View File

@ -20,7 +20,7 @@
use ethcore::blockchain_info::BlockChainInfo;
use ethcore::client::{BlockChainClient, ProvingBlockChainClient};
use ethcore::transaction::SignedTransaction;
use ethcore::ids::BlockID;
use ethcore::ids::BlockId;
use util::{Bytes, H256};
@ -96,7 +96,7 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
let best_num = self.chain_info().best_block_number;
let start_num = req.block_num;
match self.block_hash(BlockID::Number(req.block_num)) {
match self.block_hash(BlockId::Number(req.block_num)) {
Some(hash) if hash == req.block_hash => {}
_=> {
trace!(target: "les_provider", "unknown/non-canonical start block in header request: {:?}", (req.block_num, req.block_hash));
@ -108,7 +108,7 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
.map(|x: u64| x.saturating_mul(req.skip))
.take_while(|x| if req.reverse { x < &start_num } else { best_num - start_num < *x })
.map(|x| if req.reverse { start_num - x } else { start_num + x })
.map(|x| self.block_header(BlockID::Number(x)))
.map(|x| self.block_header(BlockId::Number(x)))
.take_while(|x| x.is_some())
.flat_map(|x| x)
.collect()
@ -116,7 +116,7 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
fn block_bodies(&self, req: request::Bodies) -> Vec<Bytes> {
req.block_hashes.into_iter()
.map(|hash| self.block_body(BlockID::Hash(hash)))
.map(|hash| self.block_body(BlockId::Hash(hash)))
.map(|body| body.unwrap_or_else(|| ::rlp::EMPTY_LIST_RLP.to_vec()))
.collect()
}
@ -135,8 +135,8 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
for request in req.requests {
let proof = match request.key2 {
Some(key2) => self.prove_storage(request.key1, key2, request.from_level, BlockID::Hash(request.block)),
None => self.prove_account(request.key1, request.from_level, BlockID::Hash(request.block)),
Some(key2) => self.prove_storage(request.key1, key2, request.from_level, BlockId::Hash(request.block)),
None => self.prove_account(request.key1, request.from_level, BlockId::Hash(request.block)),
};
let mut stream = RlpStream::new_list(proof.len());
@ -153,7 +153,7 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
fn contract_code(&self, req: request::ContractCodes) -> Vec<Bytes> {
req.code_requests.into_iter()
.map(|req| {
self.code_by_hash(req.account_key, BlockID::Hash(req.block_hash))
self.code_by_hash(req.account_key, BlockId::Hash(req.block_hash))
})
.collect()
}

View File

@ -1445,19 +1445,19 @@ impl MayPanic for Client {
}
impl ProvingBlockChainClient for Client {
fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec<Bytes> {
fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec<Bytes> {
self.state_at(id)
.and_then(move |state| state.prove_storage(key1, key2, from_level).ok())
.unwrap_or_else(Vec::new)
}
fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec<Bytes> {
fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec<Bytes> {
self.state_at(id)
.and_then(move |state| state.prove_account(key1, from_level).ok())
.unwrap_or_else(Vec::new)
}
fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes {
fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes {
self.state_at(id)
.and_then(move |state| state.code_by_address_hash(account_key).ok())
.and_then(|x| x)

View File

@ -71,8 +71,6 @@ impl FromStr for DatabaseCompactionProfile {
pub enum UpdateFilter {
/// All releases following the same track.
All,
/// Only those of the same minor version potentially changing tracks.
Patch,
/// As with `All`, but only those which are known to be critical.
Critical,
/// None.

View File

@ -285,15 +285,15 @@ pub trait ProvingBlockChainClient: BlockChainClient {
/// Returns a vector of raw trie nodes (in order from the root) proving the storage query.
/// Nodes after `from_level` may be omitted.
/// An empty vector indicates unservable query.
fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockID) -> Vec<Bytes>;
fn prove_storage(&self, key1: H256, key2: H256, from_level: u32, id: BlockId) -> Vec<Bytes>;
/// Prove account existence at a specific block id.
/// The key is the keccak hash of the account's address.
/// Returns a vector of raw trie nodes (in order from the root) proving the query.
/// Nodes after `from_level` may be omitted.
/// An empty vector indicates unservable query.
fn prove_account(&self, key1: H256, from_level: u32, id: BlockID) -> Vec<Bytes>;
fn prove_account(&self, key1: H256, from_level: u32, id: BlockId) -> Vec<Bytes>;
/// Get code by address hash.
fn code_by_hash(&self, account_key: H256, id: BlockID) -> Bytes;
fn code_by_hash(&self, account_key: H256, id: BlockId) -> Bytes;
}

View File

@ -15,11 +15,12 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::{Weak};
use std::{fs, env};
use std::path::PathBuf;
use util::misc::{VersionInfo, ReleaseTrack, platform};
use util::misc::{VersionInfo, ReleaseTrack/*, platform*/};
use util::{Address, H160, H256, FixedHash, Mutex};
use client::operations::Operations;
use client::{Client, UpdatePolicy, BlockId};
use client::{Client, UpdatePolicy, UpdateFilter, BlockId};
use fetch::HashFetch;
use fetch;
@ -44,7 +45,7 @@ pub struct Updater {
fetch: Weak<HashFetch>,
operations: Operations,
update_policy: UpdatePolicy,
fetching: Mutex<bool>,
fetching: Mutex<Option<ReleaseInfo>>,
// These don't change
pub this: VersionInfo,
@ -52,10 +53,16 @@ pub struct Updater {
// This does change
pub latest: Option<OperationsInfo>,
pub ready: Option<ReleaseInfo>,
}
const CLIENT_ID: &'static str = "parity";
fn platform() -> String {
"test".to_owned()
}
impl Updater {
pub fn new(client: Weak<Client>, fetch: Weak<fetch::Client>, operations: Address, update_policy: UpdatePolicy) -> Self {
let mut u = Updater {
@ -63,10 +70,11 @@ impl Updater {
fetch: fetch.clone(),
operations: Operations::new(operations, move |a, d| client.upgrade().ok_or("No client!".into()).and_then(|c| c.call_contract(a, d))),
update_policy: update_policy,
fetching: Mutex::new(false),
fetching: Mutex::new(None),
this: VersionInfo::this(),
this_fork: None,
latest: None,
ready: None,
};
u.this_fork = u.operations.release(CLIENT_ID, &u.this.hash.into()).ok()
@ -95,16 +103,26 @@ impl Updater {
/// The release which is ready to be upgraded to, if any. If this returns `Some`, then
/// `execute_upgrade` may be called.
pub fn upgrade_ready(&self) -> Option<VersionInfo> {
unimplemented!()
pub fn upgrade_ready(&self) -> Option<ReleaseInfo> {
self.ready.clone()
}
/// Actually upgrades the client. Assumes that the binary has been downloaded.
/// @returns `true` on success.
pub fn execute_upgrade(&mut self) -> bool {
// TODO: link ~/.parity-updates/parity to self.ready
// TODO: restart parity.
unimplemented!()
}
/// Returns true iff the current version is capable of forming consensus.
pub fn consensus_capable(&self) -> bool {
/* if let Some(ref latest) = self.latest {
*/ unimplemented!()
}
/// Our version info.
pub fn version_info(&self) -> &VersionInfo { &self.this }
@ -147,12 +165,37 @@ impl Updater {
})
}
fn fetch_done(&self, _r: Result<PathBuf, fetch::Error>) {
fn fetch_done(&mut self, _r: Result<PathBuf, fetch::Error>) {
let fetched = self.fetching.lock().take().unwrap();
match _r {
Ok(b) => info!("Fetched latest version OK: {}", b.display()),
Err(e) => warn!("Unable to fetch latest version: {:?}", e),
Ok(b) => {
info!("Fetched latest version ({}) OK to {}", fetched.version, b.display());
let mut dest = PathBuf::from(env::home_dir().unwrap().to_str().expect("env filesystem paths really should be valid; qed"));
dest.push(".parity-updates");
match fs::create_dir_all(&dest) {
Ok(_) => {
dest.push(format!("parity-{}-{:?}", fetched.version, fetched.version.hash));
match fs::copy(&b, &dest) {
Ok(_) => {
info!("Copied file to {}", dest.display());
let auto = match self.update_policy.filter {
UpdateFilter::All => true,
UpdateFilter::Critical if fetched.is_critical /* TODO: or is on a bad fork */ => true,
_ => false,
};
self.ready = Some(fetched);
if auto {
self.execute_upgrade();
}
},
Err(e) => warn!("Unable to copy update: {:?}", e),
}
},
Err(e) => warn!("Unable to create updates path: {:?}", e),
}
},
Err(e) => warn!("Unable to fetch update ({}): {:?}", fetched.version, e),
}
*self.fetching.lock() = false;
}
pub fn tick(&mut self) {
@ -168,19 +211,21 @@ impl Updater {
platform(),
if let Some(ref b) = latest.track.binary {
format!("{}", b)
} else {
"unreleased".into()
}
} else {
"unreleased".into()
}
);
if let Some(b) = latest.track.binary {
let mut fetching = self.fetching.lock();
if !*fetching {
info!("Attempting to get parity binary {}", b);
let c = self.client.clone();
let f = move |r: Result<PathBuf, fetch::Error>| if let Some(c) = c.upgrade() { c.updater().as_ref().expect("updater exists; updater only owned by client; qed").fetch_done(r); };
if let Some(fetch) = self.fetch.clone().upgrade() {
fetch.fetch(b, Box::new(f)).ok();
*fetching = true;
if self.update_policy.enable_downloading && latest.track.version.hash != self.version_info().hash && self.ready.as_ref().map_or(true, |t| *t != latest.track) {
if let Some(b) = latest.track.binary {
let mut fetching = self.fetching.lock();
if fetching.is_none() {
info!("Attempting to get parity binary {}", b);
let c = self.client.clone();
let f = move |r: Result<PathBuf, fetch::Error>| if let Some(c) = c.upgrade() { c.updater().as_mut().expect("updater exists; updater only owned by client; qed").fetch_done(r); };
if let Some(fetch) = self.fetch.clone().upgrade() {
fetch.fetch(b, Box::new(f)).ok();
*fetching = Some(latest.track.clone());
}
}
}
}

View File

@ -33,7 +33,6 @@ Operating Options:
--auto-update TRACK Set a release track to automatically update and
install.
all - All updates in the current release track.
patch - All updates of the current minor version.
critical - Only consensus/security updates.
none - No updates will be auto-installed.
(default: {flag_auto_update}).

View File

@ -624,7 +624,6 @@ impl Configuration {
filter: match self.args.flag_auto_update.as_ref() {
"none" => UpdateFilter::None,
"critical" => UpdateFilter::Critical,
"patch" => UpdateFilter::Patch,
"all" => UpdateFilter::All,
_ => return Err("Invalid value for `--auto-update`. See `--help` for more information.".into()),
},
@ -985,13 +984,13 @@ mod tests {
// when
let conf0 = parse(&["parity"]);
let conf1 = parse(&["parity", "--auto-update", "all"]);
let conf2 = parse(&["parity", "--no-download", "--auto-update=patch"]);
let conf2 = parse(&["parity", "--no-download", "--auto-update=all"]);
let conf3 = parse(&["parity", "--auto-update=xxx"]);
// then
assert_eq!(conf0.update_policy().unwrap(), UpdatePolicy{enable_downloading: true, filter: UpdateFilter::Critical});
assert_eq!(conf1.update_policy().unwrap(), UpdatePolicy{enable_downloading: true, filter: UpdateFilter::All});
assert_eq!(conf2.update_policy().unwrap(), UpdatePolicy{enable_downloading: false, filter: UpdateFilter::Patch});
assert_eq!(conf2.update_policy().unwrap(), UpdatePolicy{enable_downloading: false, filter: UpdateFilter::All});
assert!(conf3.update_policy().is_err());
}