Merge remote-tracking branch 'parity/split-internal-seal' into auth-round

keorn committed 2016-09-12 11:36:16 +02:00
commit d98f69cf58
37 changed files with 775 additions and 335 deletions


@ -225,17 +225,20 @@ windows:
name: "${CI_BUILD_NAME}_parity"
test-linux:
stage: test
image: ethcore/rust:stable
before_script:
- git submodule update --init --recursive
script:
- ./test.sh --verbose
tags:
- rust
- rust-stable
- rust-test
dependencies:
- linux-stable
deploy-binaries:
stage: deploy
only:
- master
- beta
- tags
- stable
script:
- ./deploy.sh

Cargo.lock (generated)

@ -390,6 +390,7 @@ name = "ethcore-ipc-nano"
version = "1.4.0"
dependencies = [
"ethcore-ipc 1.4.0",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)",
]


@ -39,7 +39,7 @@ clippy = { version = "0.0.85", optional = true}
serde_codegen = { version = "0.8", optional = true }
[features]
default = ["serde_codegen", "extra-dapps", "https-fetch/ca-github-only"]
default = ["serde_codegen", "extra-dapps"]
extra-dapps = ["parity-dapps-wallet"]
nightly = ["serde_macros"]
dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"]


@ -38,65 +38,65 @@ use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator};
use endpoint::{Endpoint, EndpointPath, Handler};
use apps::cache::{ContentCache, ContentStatus};
use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest};
use apps::urlhint::{URLHintContract, URLHint};
use apps::urlhint::{URLHintContract, URLHint, URLHintResult};
const MAX_CACHED_DAPPS: usize = 10;
pub struct AppFetcher<R: URLHint = URLHintContract> {
pub struct ContentFetcher<R: URLHint = URLHintContract> {
dapps_path: PathBuf,
resolver: R,
cache: Arc<Mutex<ContentCache>>,
sync: Arc<SyncStatus>,
dapps: Arc<Mutex<ContentCache>>,
}
impl<R: URLHint> Drop for AppFetcher<R> {
impl<R: URLHint> Drop for ContentFetcher<R> {
fn drop(&mut self) {
// Clear cache path
let _ = fs::remove_dir_all(&self.dapps_path);
}
}
impl<R: URLHint> AppFetcher<R> {
impl<R: URLHint> ContentFetcher<R> {
pub fn new(resolver: R, sync_status: Arc<SyncStatus>) -> Self {
let mut dapps_path = env::temp_dir();
dapps_path.push(random_filename());
AppFetcher {
ContentFetcher {
dapps_path: dapps_path,
resolver: resolver,
sync: sync_status,
dapps: Arc::new(Mutex::new(ContentCache::default())),
cache: Arc::new(Mutex::new(ContentCache::default())),
}
}
#[cfg(test)]
fn set_status(&self, app_id: &str, status: ContentStatus) {
self.dapps.lock().insert(app_id.to_owned(), status);
fn set_status(&self, content_id: &str, status: ContentStatus) {
self.cache.lock().insert(content_id.to_owned(), status);
}
pub fn contains(&self, app_id: &str) -> bool {
let mut dapps = self.dapps.lock();
pub fn contains(&self, content_id: &str) -> bool {
let mut cache = self.cache.lock();
// Check if we already have the app
if dapps.get(app_id).is_some() {
if cache.get(content_id).is_some() {
return true;
}
// fallback to resolver
if let Ok(app_id) = app_id.from_hex() {
if let Ok(content_id) = content_id.from_hex() {
// if content_id is valid, but we are syncing, always return true.
if self.sync.is_major_syncing() {
return true;
}
// else try to resolve the content_id
self.resolver.resolve(app_id).is_some()
self.resolver.resolve(content_id).is_some()
} else {
false
}
}
pub fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box<Handler> {
let mut dapps = self.dapps.lock();
let app_id = path.app_id.clone();
let mut cache = self.cache.lock();
let content_id = path.app_id.clone();
if self.sync.is_major_syncing() {
return Box::new(ContentHandler::error(
@ -108,7 +108,7 @@ impl<R: URLHint> AppFetcher<R> {
}
let (new_status, handler) = {
let status = dapps.get(&app_id);
let status = cache.get(&content_id);
match status {
// Just serve the dapp
Some(&mut ContentStatus::Ready(ref endpoint)) => {
@ -125,24 +125,40 @@ impl<R: URLHint> AppFetcher<R> {
},
// We need to start fetching app
None => {
let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true.");
let app = self.resolver.resolve(app_hex);
if let Some(app) = app {
let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true.");
let content = self.resolver.resolve(content_hex);
let abort = Arc::new(AtomicBool::new(false));
(Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new(
app,
match content {
Some(URLHintResult::Dapp(dapp)) => (
Some(ContentStatus::Fetching(abort.clone())),
Box::new(ContentFetcherHandler::new(
dapp.url(),
abort,
control,
path.using_dapps_domains,
DappInstaller {
dapp_id: app_id.clone(),
id: content_id.clone(),
dapps_path: self.dapps_path.clone(),
dapps: self.dapps.clone(),
cache: self.cache.clone(),
})) as Box<Handler>
),
Some(URLHintResult::Content(content)) => (
Some(ContentStatus::Fetching(abort.clone())),
Box::new(ContentFetcherHandler::new(
content.url,
abort,
control,
path.using_dapps_domains,
ContentInstaller {
id: content_id.clone(),
mime: content.mime,
content_path: self.dapps_path.clone(),
cache: self.cache.clone(),
}
)) as Box<Handler>)
} else {
)) as Box<Handler>,
),
None => {
// This may happen when sync status changes in between
// `contains` and `to_handler`
(None, Box::new(ContentHandler::error(
@ -151,14 +167,15 @@ impl<R: URLHint> AppFetcher<R> {
"Requested resource was not found.",
None
)) as Box<Handler>)
},
}
},
}
};
if let Some(status) = new_status {
dapps.clear_garbage(MAX_CACHED_DAPPS);
dapps.insert(app_id, status);
cache.clear_garbage(MAX_CACHED_DAPPS);
cache.insert(content_id, status);
}
handler
@ -169,7 +186,7 @@ impl<R: URLHint> AppFetcher<R> {
pub enum ValidationError {
Io(io::Error),
Zip(zip::result::ZipError),
InvalidDappId,
InvalidContentId,
ManifestNotFound,
ManifestSerialization(String),
HashMismatch { expected: H256, got: H256, },
@ -180,7 +197,7 @@ impl fmt::Display for ValidationError {
match *self {
ValidationError::Io(ref io) => write!(f, "Unexpected IO error occurred: {:?}", io),
ValidationError::Zip(ref zip) => write!(f, "Unable to read ZIP archive: {:?}", zip),
ValidationError::InvalidDappId => write!(f, "Dapp ID is invalid. It should be 32 bytes hash of content."),
ValidationError::InvalidContentId => write!(f, "ID is invalid. It should be a 256-bit keccak hash of the content."),
ValidationError::ManifestNotFound => write!(f, "Downloaded Dapp bundle did not contain a valid manifest.json file."),
ValidationError::ManifestSerialization(ref err) => {
write!(f, "There was an error during Dapp Manifest serialization: {:?}", err)
@ -204,10 +221,55 @@ impl From<zip::result::ZipError> for ValidationError {
}
}
struct ContentInstaller {
id: String,
mime: String,
content_path: PathBuf,
cache: Arc<Mutex<ContentCache>>,
}
impl ContentValidator for ContentInstaller {
type Error = ValidationError;
type Result = PathBuf;
fn validate_and_install(&self, path: PathBuf) -> Result<(String, PathBuf), ValidationError> {
// Create dir
try!(fs::create_dir_all(&self.content_path));
// And prepare path for a file
let filename = path.file_name().expect("We always fetch a file.");
let mut content_path = self.content_path.clone();
content_path.push(&filename);
if content_path.exists() {
try!(fs::remove_dir_all(&content_path))
}
try!(fs::copy(&path, &content_path));
Ok((self.id.clone(), content_path))
}
fn done(&self, result: Option<&PathBuf>) {
let mut cache = self.cache.lock();
match result {
Some(result) => {
let page = LocalPageEndpoint::single_file(result.clone(), self.mime.clone());
cache.insert(self.id.clone(), ContentStatus::Ready(page));
},
// In case of error
None => {
cache.remove(&self.id);
},
}
}
}
struct DappInstaller {
dapp_id: String,
id: String,
dapps_path: PathBuf,
dapps: Arc<Mutex<ContentCache>>,
cache: Arc<Mutex<ContentCache>>,
}
impl DappInstaller {
@ -244,15 +306,16 @@ impl DappInstaller {
impl ContentValidator for DappInstaller {
type Error = ValidationError;
type Result = Manifest;
fn validate_and_install(&self, app_path: PathBuf) -> Result<Manifest, ValidationError> {
fn validate_and_install(&self, app_path: PathBuf) -> Result<(String, Manifest), ValidationError> {
trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path);
let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path)));
let hash = try!(sha3(&mut file_reader));
let dapp_id = try!(self.dapp_id.as_str().parse().map_err(|_| ValidationError::InvalidDappId));
if dapp_id != hash {
let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId));
if id != hash {
return Err(ValidationError::HashMismatch {
expected: dapp_id,
expected: id,
got: hash,
});
}
@ -262,7 +325,7 @@ impl ContentValidator for DappInstaller {
// First find manifest file
let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip));
// Overwrite id to match hash
manifest.id = self.dapp_id.clone();
manifest.id = self.id.clone();
let target = self.dapp_target_path(&manifest);
@ -300,20 +363,20 @@ impl ContentValidator for DappInstaller {
try!(manifest_file.write_all(manifest_str.as_bytes()));
// Return modified app manifest
Ok(manifest)
Ok((manifest.id.clone(), manifest))
}
fn done(&self, manifest: Option<&Manifest>) {
let mut dapps = self.dapps.lock();
let mut cache = self.cache.lock();
match manifest {
Some(manifest) => {
let path = self.dapp_target_path(manifest);
let app = LocalPageEndpoint::new(path, manifest.clone().into());
dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app));
cache.insert(self.id.clone(), ContentStatus::Ready(app));
},
// In case of error
None => {
dapps.remove(&self.dapp_id);
cache.remove(&self.id);
},
}
}
@ -327,12 +390,12 @@ mod tests {
use endpoint::EndpointInfo;
use page::LocalPageEndpoint;
use apps::cache::ContentStatus;
use apps::urlhint::{GithubApp, URLHint};
use super::AppFetcher;
use apps::urlhint::{URLHint, URLHintResult};
use super::ContentFetcher;
struct FakeResolver;
impl URLHint for FakeResolver {
fn resolve(&self, _app_id: Bytes) -> Option<GithubApp> {
fn resolve(&self, _id: Bytes) -> Option<URLHintResult> {
None
}
}
@ -341,7 +404,7 @@ mod tests {
fn should_true_if_contains_the_app() {
// given
let path = env::temp_dir();
let fetcher = AppFetcher::new(FakeResolver, Arc::new(|| false));
let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false));
let handler = LocalPageEndpoint::new(path, EndpointInfo {
name: "fake".into(),
description: "".into(),


@ -17,6 +17,7 @@
use std::fmt;
use std::sync::Arc;
use rustc_serialize::hex::ToHex;
use mime_guess;
use ethabi::{Interface, Contract, Token};
use util::{Address, Bytes, Hashable};
@ -52,6 +53,13 @@ impl GithubApp {
}
}
#[derive(Debug, PartialEq)]
pub struct Content {
pub url: String,
pub mime: String,
pub owner: Address,
}
/// RAW Contract interface.
/// Should execute transaction using current blockchain state.
pub trait ContractClient: Send + Sync {
@ -61,10 +69,19 @@ pub trait ContractClient: Send + Sync {
fn call(&self, address: Address, data: Bytes) -> Result<Bytes, String>;
}
/// Result of resolving id to URL
#[derive(Debug, PartialEq)]
pub enum URLHintResult {
/// Dapp
Dapp(GithubApp),
/// Content
Content(Content),
}
/// URLHint Contract interface
pub trait URLHint {
/// Resolves given id to registrar entry.
fn resolve(&self, app_id: Bytes) -> Option<GithubApp>;
fn resolve(&self, id: Bytes) -> Option<URLHintResult>;
}
pub struct URLHintContract {
@ -110,10 +127,10 @@ impl URLHintContract {
}
}
fn encode_urlhint_call(&self, app_id: Bytes) -> Option<Bytes> {
fn encode_urlhint_call(&self, id: Bytes) -> Option<Bytes> {
let call = self.urlhint
.function("entries".into())
.and_then(|f| f.encode_call(vec![Token::FixedBytes(app_id)]));
.and_then(|f| f.encode_call(vec![Token::FixedBytes(id)]));
match call {
Ok(res) => {
@ -126,7 +143,7 @@ impl URLHintContract {
}
}
fn decode_urlhint_output(&self, output: Bytes) -> Option<GithubApp> {
fn decode_urlhint_output(&self, output: Bytes) -> Option<URLHintResult> {
trace!(target: "dapps", "Output: {:?}", output.to_hex());
let output = self.urlhint
.function("entries".into())
@ -149,6 +166,17 @@ impl URLHintContract {
if owner == Address::default() {
return None;
}
let commit = GithubApp::commit(&commit);
if commit == Some(Default::default()) {
let mime = guess_mime_type(&account_slash_repo).unwrap_or("application/octet-stream".into());
return Some(URLHintResult::Content(Content {
url: account_slash_repo,
mime: mime,
owner: owner,
}));
}
let (account, repo) = {
let mut it = account_slash_repo.split('/');
match (it.next(), it.next()) {
@ -157,12 +185,12 @@ impl URLHintContract {
}
};
GithubApp::commit(&commit).map(|commit| GithubApp {
commit.map(|commit| URLHintResult::Dapp(GithubApp {
account: account,
repo: repo,
commit: commit,
owner: owner,
})
}))
},
e => {
warn!(target: "dapps", "Invalid contract output parameters: {:?}", e);
@ -177,10 +205,10 @@ impl URLHintContract {
}
impl URLHint for URLHintContract {
fn resolve(&self, app_id: Bytes) -> Option<GithubApp> {
fn resolve(&self, id: Bytes) -> Option<URLHintResult> {
self.urlhint_address().and_then(|address| {
// Prepare contract call
self.encode_urlhint_call(app_id)
self.encode_urlhint_call(id)
.and_then(|data| {
let call = self.client.call(address, data);
if let Err(ref e) = call {
@ -193,6 +221,34 @@ impl URLHint for URLHintContract {
}
}
fn guess_mime_type(url: &str) -> Option<String> {
const CONTENT_TYPE: &'static str = "content-type=";
let mut it = url.split('#');
// skip url
let url = it.next();
// get meta headers
let metas = it.next();
if let Some(metas) = metas {
for meta in metas.split('&') {
let meta = meta.to_lowercase();
if meta.starts_with(CONTENT_TYPE) {
return Some(meta[CONTENT_TYPE.len()..].to_owned());
}
}
}
url.and_then(|url| {
url.split('.').last()
}).and_then(|extension| {
mime_guess::get_mime_type_str(extension).map(Into::into)
})
}
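A note on the convention above, not part of this commit: everything after `#` in the URL is treated as a set of `&`-separated metas, and an explicit `content-type=` meta takes precedence over extension-based guessing. A hedged sketch of the expected outcomes (URLs are illustrative; the behaviour matches the tests at the end of this file):

// fragment meta overrides the `.jpg` extension
assert_eq!(guess_mime_type("https://example.com/logo.jpg#content-type=image/png"), Some("image/png".into()));
// no meta given, so fall back to mime_guess on the extension
assert_eq!(guess_mime_type("https://example.com/logo.png"), Some("image/png".into()));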
#[cfg(test)]
pub fn test_guess_mime_type(url: &str) -> Option<String> {
guess_mime_type(url)
}
fn as_string<T: fmt::Debug>(e: T) -> String {
format!("{:?}", e)
}
@ -201,7 +257,7 @@ fn as_string<T: fmt::Debug>(e: T) -> String {
mod tests {
use std::sync::Arc;
use std::str::FromStr;
use rustc_serialize::hex::{ToHex, FromHex};
use rustc_serialize::hex::FromHex;
use super::*;
use util::{Bytes, Address, Mutex, ToPretty};
@ -279,12 +335,33 @@ mod tests {
let res = urlhint.resolve("test".bytes().collect());
// then
assert_eq!(res, Some(GithubApp {
assert_eq!(res, Some(URLHintResult::Dapp(GithubApp {
account: "ethcore".into(),
repo: "dao.claim".into(),
commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(),
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
}))
})))
}
#[test]
fn should_decode_urlhint_content_output() {
// given
let mut registrar = FakeRegistrar::new();
registrar.responses = Mutex::new(vec![
Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()),
Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003d68747470733a2f2f657468636f72652e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e67000000".from_hex().unwrap()),
]);
let urlhint = URLHintContract::new(Arc::new(registrar));
// when
let res = urlhint.resolve("test".bytes().collect());
// then
assert_eq!(res, Some(URLHintResult::Content(Content {
url: "https://ethcore.io/assets/images/ethcore-black-horizontal.png".into(),
mime: "image/png".into(),
owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(),
})))
}
#[test]
@ -303,4 +380,20 @@ mod tests {
// then
assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned());
}
#[test]
fn should_guess_mime_type_from_url() {
let url1 = "https://ethcore.io/parity";
let url2 = "https://ethcore.io/parity#content-type=image/png";
let url3 = "https://ethcore.io/parity#something&content-type=image/png";
let url4 = "https://ethcore.io/parity.png#content-type=image/jpeg";
let url5 = "https://ethcore.io/parity.png";
assert_eq!(test_guess_mime_type(url1), None);
assert_eq!(test_guess_mime_type(url2), Some("image/png".into()));
assert_eq!(test_guess_mime_type(url3), Some("image/png".into()));
assert_eq!(test_guess_mime_type(url4), Some("image/jpeg".into()));
assert_eq!(test_guess_mime_type(url5), Some("image/png".into()));
}
}


@ -63,7 +63,7 @@ impl Client {
self.https_client.close();
}
pub fn request(&mut self, url: String, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> {
pub fn request(&mut self, url: &str, abort: Arc<AtomicBool>, on_done: Box<Fn() + Send>) -> Result<mpsc::Receiver<FetchResult>, FetchError> {
let is_https = url.starts_with("https://");
let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl));
trace!(target: "dapps", "Fetching from: {:?}", url);


@ -16,7 +16,7 @@
//! Hyper Server Handler that fetches a file during a request (proxy).
use std::fmt;
use std::{fs, fmt};
use std::path::PathBuf;
use std::sync::{mpsc, Arc};
use std::sync::atomic::AtomicBool;
@ -29,51 +29,50 @@ use hyper::status::StatusCode;
use handlers::ContentHandler;
use handlers::client::{Client, FetchResult};
use apps::redirection_address;
use apps::urlhint::GithubApp;
use apps::manifest::Manifest;
const FETCH_TIMEOUT: u64 = 30;
enum FetchState {
NotStarted(GithubApp),
enum FetchState<T: fmt::Debug> {
NotStarted(String),
Error(ContentHandler),
InProgress {
deadline: Instant,
receiver: mpsc::Receiver<FetchResult>,
},
Done(Manifest),
Done((String, T)),
}
pub trait ContentValidator {
type Error: fmt::Debug + fmt::Display;
type Result: fmt::Debug;
fn validate_and_install(&self, app: PathBuf) -> Result<Manifest, Self::Error>;
fn done(&self, Option<&Manifest>);
fn validate_and_install(&self, app: PathBuf) -> Result<(String, Self::Result), Self::Error>;
fn done(&self, Option<&Self::Result>);
}
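A hedged sketch of the reworked contract, not part of this commit: `validate_and_install` now returns the content id alongside the installer-specific result, so the generic handler can redirect by id without knowing the concrete installer. Assuming the surrounding types (`ValidationError` from the fetcher module), a minimal no-op installer could look like:

struct NoopInstaller { id: String }

impl ContentValidator for NoopInstaller {
    type Error = ValidationError;
    type Result = PathBuf;

    fn validate_and_install(&self, path: PathBuf) -> Result<(String, PathBuf), ValidationError> {
        // accept the fetched file as-is and report it under our id
        Ok((self.id.clone(), path))
    }

    fn done(&self, _result: Option<&PathBuf>) {
        // nothing to cache in this sketch
    }
}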
pub struct ContentFetcherHandler<H: ContentValidator> {
abort: Arc<AtomicBool>,
control: Option<Control>,
status: FetchState,
status: FetchState<H::Result>,
client: Option<Client>,
using_dapps_domains: bool,
dapp: H,
installer: H,
}
impl<H: ContentValidator> Drop for ContentFetcherHandler<H> {
fn drop(&mut self) {
let manifest = match self.status {
FetchState::Done(ref manifest) => Some(manifest),
let result = match self.status {
FetchState::Done((_, ref result)) => Some(result),
_ => None,
};
self.dapp.done(manifest);
self.installer.done(result);
}
}
impl<H: ContentValidator> ContentFetcherHandler<H> {
pub fn new(
app: GithubApp,
url: String,
abort: Arc<AtomicBool>,
control: Control,
using_dapps_domains: bool,
@ -84,9 +83,9 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
abort: abort,
control: Some(control),
client: Some(client),
status: FetchState::NotStarted(app),
status: FetchState::NotStarted(url),
using_dapps_domains: using_dapps_domains,
dapp: handler,
installer: handler,
}
}
@ -97,8 +96,8 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
}
fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc<AtomicBool>, control: Control) -> Result<mpsc::Receiver<FetchResult>, String> {
client.request(app.url(), abort, Box::new(move || {
fn fetch_content(client: &mut Client, url: &str, abort: Arc<AtomicBool>, control: Control) -> Result<mpsc::Receiver<FetchResult>, String> {
client.request(url, abort, Box::new(move || {
trace!(target: "dapps", "Fetching finished.");
// Ignoring control errors
let _ = control.ready(Next::read());
@ -108,14 +107,14 @@ impl<H: ContentValidator> ContentFetcherHandler<H> {
impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<H> {
fn on_request(&mut self, request: server::Request<HttpStream>) -> Next {
let status = if let FetchState::NotStarted(ref app) = self.status {
let status = if let FetchState::NotStarted(ref url) = self.status {
Some(match *request.method() {
// Start fetching content
Method::Get => {
trace!(target: "dapps", "Fetching dapp: {:?}", app);
trace!(target: "dapps", "Fetching content from: {:?}", url);
let control = self.control.take().expect("on_request is called only once, thus control is always Some");
let client = self.client.as_mut().expect("on_request is called before client is closed.");
let fetch = Self::fetch_app(client, app, self.abort.clone(), control);
let fetch = Self::fetch_content(client, url, self.abort.clone(), control);
match fetch {
Ok(receiver) => FetchState::InProgress {
deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT),
@ -154,7 +153,7 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
let timeout = ContentHandler::error(
StatusCode::GatewayTimeout,
"Download Timeout",
&format!("Could not fetch dapp bundle within {} seconds.", FETCH_TIMEOUT),
&format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT),
None
);
Self::close_client(&mut self.client);
@ -166,32 +165,31 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
match rec {
// Unpack and validate
Ok(Ok(path)) => {
trace!(target: "dapps", "Fetching dapp finished. Starting validation.");
trace!(target: "dapps", "Fetching content finished. Starting validation ({:?})", path);
Self::close_client(&mut self.client);
// Unpack and verify
let state = match self.dapp.validate_and_install(path.clone()) {
let state = match self.installer.validate_and_install(path.clone()) {
Err(e) => {
trace!(target: "dapps", "Error while validating dapp: {:?}", e);
trace!(target: "dapps", "Error while validating content: {:?}", e);
FetchState::Error(ContentHandler::error(
StatusCode::BadGateway,
"Invalid Dapp",
"Downloaded bundle does not contain a valid dapp.",
"Downloaded bundle does not contain valid content.",
Some(&format!("{:?}", e))
))
},
Ok(manifest) => FetchState::Done(manifest)
Ok(result) => FetchState::Done(result)
};
// Remove temporary zip file
// TODO [todr] Uncomment me
// let _ = fs::remove_file(path);
let _ = fs::remove_file(path);
(Some(state), Next::write())
},
Ok(Err(e)) => {
warn!(target: "dapps", "Unable to fetch new dapp: {:?}", e);
warn!(target: "dapps", "Unable to fetch content: {:?}", e);
let error = ContentHandler::error(
StatusCode::BadGateway,
"Download Error",
"There was an error when fetching the dapp.",
"There was an error when fetching the content.",
Some(&format!("{:?}", e)),
);
(Some(FetchState::Error(error)), Next::write())
@ -213,10 +211,10 @@ impl<H: ContentValidator> server::Handler<HttpStream> for ContentFetcherHandler<
fn on_response(&mut self, res: &mut server::Response) -> Next {
match self.status {
FetchState::Done(ref manifest) => {
trace!(target: "dapps", "Fetching dapp finished. Redirecting to {}", manifest.id);
FetchState::Done((ref id, _)) => {
trace!(target: "dapps", "Fetching content finished. Redirecting to {}", id);
res.set_status(StatusCode::Found);
res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, &manifest.id)));
res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, id)));
Next::write()
},
FetchState::Error(ref mut handler) => handler.on_response(res),


@ -191,7 +191,7 @@ impl Server {
) -> Result<Server, ServerError> {
let panic_handler = Arc::new(Mutex::new(None));
let authorization = Arc::new(authorization);
let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status));
let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status));
let endpoints = Arc::new(apps::all_endpoints(dapps_path));
let special = Arc::new({
let mut special = HashMap::new();
@ -206,7 +206,7 @@ impl Server {
.handle(move |ctrl| router::Router::new(
ctrl,
apps::main_page(),
apps_fetcher.clone(),
content_fetcher.clone(),
endpoints.clone(),
special.clone(),
authorization.clone(),


@ -17,20 +17,30 @@
use mime_guess;
use std::io::{Seek, Read, SeekFrom};
use std::fs;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use page::handler;
use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler};
pub struct LocalPageEndpoint {
path: PathBuf,
info: EndpointInfo,
mime: Option<String>,
info: Option<EndpointInfo>,
}
impl LocalPageEndpoint {
pub fn new(path: PathBuf, info: EndpointInfo) -> Self {
LocalPageEndpoint {
path: path,
info: info,
mime: None,
info: Some(info),
}
}
pub fn single_file(path: PathBuf, mime: String) -> Self {
LocalPageEndpoint {
path: path,
mime: Some(mime),
info: None,
}
}
@ -41,32 +51,47 @@ impl LocalPageEndpoint {
impl Endpoint for LocalPageEndpoint {
fn info(&self) -> Option<&EndpointInfo> {
Some(&self.info)
self.info.as_ref()
}
fn to_handler(&self, path: EndpointPath) -> Box<Handler> {
if let Some(ref mime) = self.mime {
Box::new(handler::PageHandler {
app: LocalDapp::new(self.path.clone()),
app: LocalSingleFile { path: self.path.clone(), mime: mime.clone() },
prefix: None,
path: path,
file: Default::default(),
safe_to_embed: false,
})
} else {
Box::new(handler::PageHandler {
app: LocalDapp { path: self.path.clone() },
prefix: None,
path: path,
file: Default::default(),
safe_to_embed: false,
})
}
}
}
struct LocalSingleFile {
path: PathBuf,
mime: String,
}
impl handler::Dapp for LocalSingleFile {
type DappFile = LocalFile;
fn file(&self, _path: &str) -> Option<Self::DappFile> {
LocalFile::from_path(&self.path, Some(&self.mime))
}
}
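A hedged usage sketch, not part of this commit: a single-file endpoint carries a mime type but no dapp manifest, which is why `info()` now returns `self.info.as_ref()`. The path below is illustrative:

// Serve one fetched file under a fixed mime type.
let endpoint = LocalPageEndpoint::single_file(PathBuf::from("/tmp/content/cafebabe"), "image/png".to_owned());
// Single files have no manifest-backed EndpointInfo.
assert!(endpoint.info().is_none());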
struct LocalDapp {
path: PathBuf,
}
impl LocalDapp {
fn new(path: PathBuf) -> Self {
LocalDapp {
path: path
}
}
}
impl handler::Dapp for LocalDapp {
type DappFile = LocalFile;
@ -75,18 +100,7 @@ impl handler::Dapp for LocalDapp {
for part in file_path.split('/') {
path.push(part);
}
// Check if file exists
fs::File::open(path.clone()).ok().map(|file| {
let content_type = mime_guess::guess_mime_type(path);
let len = file.metadata().ok().map_or(0, |meta| meta.len());
LocalFile {
content_type: content_type.to_string(),
buffer: [0; 4096],
file: file,
pos: 0,
len: len,
}
})
LocalFile::from_path(&path, None)
}
}
@ -98,6 +112,24 @@ struct LocalFile {
pos: u64,
}
impl LocalFile {
fn from_path<P: AsRef<Path>>(path: P, mime: Option<&str>) -> Option<Self> {
// Check if file exists
fs::File::open(&path).ok().map(|file| {
let content_type = mime.map(|mime| mime.to_owned())
.unwrap_or_else(|| mime_guess::guess_mime_type(path).to_string());
let len = file.metadata().ok().map_or(0, |meta| meta.len());
LocalFile {
content_type: content_type,
buffer: [0; 4096],
file: file,
pos: 0,
len: len,
}
})
}
}
impl handler::DappFile for LocalFile {
fn content_type(&self) -> &str {
&self.content_type


@ -27,7 +27,7 @@ use url::{Url, Host};
use hyper::{self, server, Next, Encoder, Decoder, Control, StatusCode};
use hyper::net::HttpStream;
use apps;
use apps::fetcher::AppFetcher;
use apps::fetcher::ContentFetcher;
use endpoint::{Endpoint, Endpoints, EndpointPath};
use handlers::{Redirection, extract_url, ContentHandler};
use self::auth::{Authorization, Authorized};
@ -45,7 +45,7 @@ pub struct Router<A: Authorization + 'static> {
control: Option<Control>,
main_page: &'static str,
endpoints: Arc<Endpoints>,
fetch: Arc<AppFetcher>,
fetch: Arc<ContentFetcher>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>,
allowed_hosts: Option<Vec<String>>,
@ -136,7 +136,7 @@ impl<A: Authorization> Router<A> {
pub fn new(
control: Control,
main_page: &'static str,
app_fetcher: Arc<AppFetcher>,
content_fetcher: Arc<ContentFetcher>,
endpoints: Arc<Endpoints>,
special: Arc<HashMap<SpecialEndpoint, Box<Endpoint>>>,
authorization: Arc<A>,
@ -148,7 +148,7 @@ impl<A: Authorization> Router<A> {
control: Some(control),
main_page: main_page,
endpoints: endpoints,
fetch: app_fetcher,
fetch: content_fetcher,
special: special,
authorization: authorization,
allowed_hosts: allowed_hosts,


@ -17,7 +17,7 @@
use std::env;
use std::str;
use std::sync::Arc;
use rustc_serialize::hex::{ToHex, FromHex};
use rustc_serialize::hex::FromHex;
use ServerBuilder;
use Server;


@ -460,7 +460,7 @@ mod client_tests {
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
client.close().unwrap();
@ -477,7 +477,7 @@ mod client_tests {
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
client.put("xxx".as_bytes(), "1".as_bytes()).unwrap();
@ -498,7 +498,7 @@ mod client_tests {
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
assert!(client.get("xxx".as_bytes()).unwrap().is_none());
@ -516,7 +516,7 @@ mod client_tests {
crossbeam::scope(move |scope| {
let stop = Arc::new(AtomicBool::new(false));
run_worker(scope, stop.clone(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
let transaction = DBTransaction::new();
@ -541,7 +541,7 @@ mod client_tests {
let stop = StopGuard::new();
run_worker(&scope, stop.share(), url);
let client = nanoipc::init_client::<DatabaseClient<_>>(url).unwrap();
let client = nanoipc::generic_client::<DatabaseClient<_>>(url).unwrap();
client.open_default(path.as_str().to_owned()).unwrap();
let mut batch = Vec::new();


@ -66,13 +66,13 @@ pub fn extras_service_url(db_path: &str) -> Result<String, ::std::io::Error> {
pub fn blocks_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(blocks_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url));
let client = try!(nanoipc::generic_client::<DatabaseClient<_>>(&url));
Ok(client)
}
pub fn extras_client(db_path: &str) -> Result<DatabaseConnection, ServiceError> {
let url = try!(extras_service_url(db_path));
let client = try!(nanoipc::init_client::<DatabaseClient<_>>(&url));
let client = try!(nanoipc::generic_client::<DatabaseClient<_>>(&url));
Ok(client)
}


@ -23,15 +23,9 @@ RUN rustup target add aarch64-unknown-linux-gnu
# show backtraces
ENV RUST_BACKTRACE 1
# set compilers
ENV CXX aarch64-linux-gnu-g++
ENV CC aarch64-linux-gnu-gcc
# show tools
RUN rustc -vV && \
cargo -V && \
gcc -v &&\
g++ -v
cargo -V
# build parity
RUN git clone https://github.com/ethcore/parity && \


@ -23,15 +23,10 @@ RUN rustup target add armv7-unknown-linux-gnueabihf
# show backtraces
ENV RUST_BACKTRACE 1
# set compilers
ENV CXX arm-linux-gnueabihf-g++
ENV CC arm-linux-gnueabihf-gcc
# show tools
RUN rustc -vV && \
cargo -V && \
gcc -v &&\
g++ -v
cargo -V
# build parity
RUN git clone https://github.com/ethcore/parity && \


@ -673,6 +673,8 @@ impl Client {
impl snapshot::DatabaseRestore for Client {
/// Restart the client with a new backend
fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> {
trace!(target: "snapshot", "Replacing client database with {:?}", new_db);
let _import_lock = self.import_lock.lock();
let mut state_db = self.state_db.write();
let mut chain = self.chain.write();


@ -99,6 +99,8 @@ impl Engine for BasicAuthority {
/// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current).
fn on_close_block(&self, _block: &mut ExecutedBlock) {}
fn seals_internally(&self) -> bool { true }
/// Attempt to seal the block internally.
///
/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will


@ -58,6 +58,8 @@ impl Engine for InstantSeal {
Schedule::new_homestead()
}
fn seals_internally(&self) -> bool { true }
fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option<Vec<Bytes>> {
Some(Vec::new())
}
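A hedged sketch of the caller's side of the split contract, not part of this commit (`engine`, `block`, and `accounts` are assumed bindings): an engine that reports `seals_internally()` is expected to produce a seal from `generate_seal`, and may return `None` when it cannot seal at the moment.

if engine.seals_internally() {
    match engine.generate_seal(block.block(), accounts) {
        // lock the block, try_seal with this seal, then import it
        Some(seal) => { /* ... */ },
        // engine declined for now; leave the block pending
        None => { /* ... */ },
    }
}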


@ -73,6 +73,8 @@ pub trait Engine : Sync + Send {
/// Block transformation functions, after the transactions.
fn on_close_block(&self, _block: &mut ExecutedBlock) {}
/// If true, generate_seal has to be implemented.
fn seals_internally(&self) -> bool { false }
/// Attempt to seal the block internally.
///
/// If `Some` is returned, then you get a valid seal.


@ -24,7 +24,7 @@ use views::{BlockView, HeaderView};
use state::State;
use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics};
use executive::contract_address;
use block::{ClosedBlock, IsBlock, Block};
use block::{ClosedBlock, SealedBlock, IsBlock, Block};
use error::*;
use transaction::{Action, SignedTransaction};
use receipt::{Receipt, RichReceipt};
@ -243,19 +243,15 @@ impl Miner {
}
/// Prepares new block for sealing including top transactions from queue.
#[cfg_attr(feature="dev", allow(match_same_arms))]
#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
fn prepare_sealing(&self, chain: &MiningBlockChainClient) {
trace!(target: "miner", "prepare_sealing: entering");
fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option<H256>) {
{
trace!(target: "miner", "recalibrating...");
trace!(target: "miner", "prepare_block: recalibrating...");
let txq = self.transaction_queue.clone();
self.gas_pricer.lock().recalibrate(move |price| {
trace!(target: "miner", "Got gas price! {}", price);
trace!(target: "miner", "prepare_block: Got gas price! {}", price);
txq.lock().set_minimal_gas_price(price);
});
trace!(target: "miner", "done recalibration.");
trace!(target: "miner", "prepare_block: done recalibration.");
}
let (transactions, mut open_block, original_work_hash) = {
@ -273,13 +269,13 @@ impl Miner {
*/
let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) {
Some(old_block) => {
trace!(target: "miner", "Already have previous work; updating and returning");
trace!(target: "miner", "prepare_block: Already have previous work; updating and returning");
// add transactions to old_block
old_block.reopen(&*self.engine)
}
None => {
// block not found - create it.
trace!(target: "miner", "No existing work - making new block");
trace!(target: "miner", "prepare_block: No existing work - making new block");
chain.prepare_open_block(
self.author(),
(self.gas_floor_target(), self.gas_ceil_target()),
@ -334,37 +330,49 @@ impl Miner {
queue.remove_invalid(&hash, &fetch_account);
}
}
(block, original_work_hash)
}
if !block.transactions().is_empty() {
trace!(target: "miner", "prepare_sealing: block has transaction - attempting internal seal.");
// block with transactions - see if we can seal immediately.
/// Attempts to seal the block internally, returning `Ok(sealed)` on success;
/// `Err(Some(block))` is returned when sealing was unsuccessful, while `Err(None)` indicates a misspecified engine.
fn seal_block_internally(&self, block: ClosedBlock) -> Result<SealedBlock, Option<ClosedBlock>> {
trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal.");
let s = self.engine.generate_seal(block.block(), match self.accounts {
Some(ref x) => Some(&**x),
None => None,
});
if let Some(seal) = s {
trace!(target: "miner", "prepare_sealing: managed internal seal. importing...");
if let Ok(sealed) = block.lock().try_seal(&*self.engine, seal) {
if let Ok(_) = chain.import_block(sealed.rlp_bytes()) {
trace!(target: "miner", "prepare_sealing: sealed internally and imported. leaving.");
} else {
warn!("prepare_sealing: ERROR: could not import internally sealed block. WTF?");
}
} else {
trace!(target: "miner", "seal_block_internally: managed internal seal. importing...");
block.lock().try_seal(&*self.engine, seal).or_else(|_| {
warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?");
}
return;
Err(None)
})
} else {
trace!(target: "miner", "prepare_sealing: unable to generate seal internally");
trace!(target: "miner", "seal_block_internally: unable to generate seal internally");
Err(Some(block))
}
}
/// Uses engine to seal the block and then imports it to chain.
fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool {
if !block.transactions().is_empty() {
if let Ok(sealed) = self.seal_block_internally(block) {
if chain.import_block(sealed.rlp_bytes()).is_ok() {
return true
}
}
}
false
}
/// Prepares work which has to be done to seal.
fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option<H256>) {
let (work, is_new) = {
let mut sealing_work = self.sealing_work.lock();
let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash());
trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash());
trace!(target: "miner", "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash());
let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) {
trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
trace!(target: "miner", "prepare_work: Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash());
let pow_hash = block.block().fields().header.hash();
let number = block.block().fields().header.number();
let difficulty = *block.block().fields().header.difficulty();
@ -378,7 +386,7 @@ impl Miner {
} else {
(None, false)
};
trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash()));
trace!(target: "miner", "prepare_work: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash()));
(work, is_new)
};
if is_new {
@ -393,12 +401,12 @@ impl Miner {
}
/// Returns true if we had to prepare new pending block
fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "enable_and_prepare_sealing: entering");
fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool {
trace!(target: "miner", "prepare_work_sealing: entering");
let prepare_new = {
let mut sealing_work = self.sealing_work.lock();
let have_work = sealing_work.queue.peek_last_ref().is_some();
trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work);
trace!(target: "miner", "prepare_work_sealing: have_work={}", have_work);
if !have_work {
sealing_work.enabled = true;
true
@ -411,12 +419,13 @@ impl Miner {
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.prepare_sealing(chain);
let (block, original_work_hash) = self.prepare_block(chain);
self.prepare_work(block, original_work_hash);
}
let mut sealing_block_last_request = self.sealing_block_last_request.lock();
let best_number = chain.chain_info().best_block_number;
if *sealing_block_last_request != best_number {
trace!(target: "miner", "enable_and_prepare_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number);
*sealing_block_last_request = best_number;
}
@ -635,7 +644,7 @@ impl MinerService for Miner {
trace!(target: "own_tx", "Importing transaction: {:?}", transaction);
let imported = {
// Be sure to release the lock before we call enable_and_prepare_sealing
// Be sure to release the lock before we call prepare_work_sealing
let mut transaction_queue = self.transaction_queue.lock();
let import = self.add_transactions_to_queue(
chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue
@ -662,7 +671,7 @@ impl MinerService for Miner {
if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() {
// Make sure to do it after transaction is imported and lock is dropped.
// We need to create pending block and enable sealing
let prepared = self.enable_and_prepare_sealing(chain);
let prepared = self.prepare_work_sealing(chain);
// If new block has not been prepared (means we already had one)
// we need to update sealing
if !prepared {
@ -804,7 +813,15 @@ impl MinerService for Miner {
// | NOTE Code below requires transaction_queue and sealing_work locks. |
// | Make sure to release the locks before calling that method. |
// --------------------------------------------------------------------------
self.prepare_sealing(chain);
trace!(target: "miner", "update_sealing: preparing a block");
let (block, original_work_hash) = self.prepare_block(chain);
if self.engine.seals_internally() {
trace!(target: "miner", "update_sealing: engine indicates internal sealing");
self.seal_and_import_block_internally(chain, block);
} else {
trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work");
self.prepare_work(block, original_work_hash);
}
}
}
@ -814,7 +831,7 @@ impl MinerService for Miner {
fn map_sealing_work<F, T>(&self, chain: &MiningBlockChainClient, f: F) -> Option<T> where F: FnOnce(&ClosedBlock) -> T {
trace!(target: "miner", "map_sealing_work: entering");
self.enable_and_prepare_sealing(chain);
self.prepare_work_sealing(chain);
trace!(target: "miner", "map_sealing_work: sealing prepared");
let mut sealing_work = self.sealing_work.lock();
let ret = sealing_work.queue.use_last_ref();
@ -1002,7 +1019,7 @@ mod tests {
assert_eq!(miner.pending_transactions_hashes().len(), 1);
assert_eq!(miner.pending_receipts().len(), 1);
// This method will let us know if pending block was created (before calling that method)
assert_eq!(miner.enable_and_prepare_sealing(&client), false);
assert_eq!(miner.prepare_work_sealing(&client), false);
}
#[test]
@ -1032,6 +1049,6 @@ mod tests {
assert_eq!(miner.pending_transactions().len(), 0);
assert_eq!(miner.pending_receipts().len(), 0);
// This method will let us know if pending block was created (before calling that method)
assert_eq!(miner.enable_and_prepare_sealing(&client), true);
assert_eq!(miner.prepare_work_sealing(&client), true);
}
}


@ -94,7 +94,6 @@ impl ClientService {
pruning: pruning,
channel: io_service.channel(),
snapshot_root: snapshot_path.into(),
client_db: client_path.into(),
db_restore: client.clone(),
};
let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params)));
@ -187,7 +186,7 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); }
ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); }
ClientIoMessage::BeginRestoration(ref manifest) => {
if let Err(e) = self.snapshot.init_restore(manifest.clone()) {
if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) {
warn!("Failed to initialize snapshot restoration: {}", e);
}
}


@ -35,7 +35,7 @@ use service::ClientIoMessage;
use io::IoChannel;
use util::{Bytes, H256, Mutex, RwLock, UtilError};
use util::{Bytes, H256, Mutex, RwLock, RwLockReadGuard, UtilError};
use util::journaldb::Algorithm;
use util::kvdb::{Database, DatabaseConfig};
use util::snappy;
@ -70,7 +70,7 @@ struct Restoration {
block_chunks_left: HashSet<H256>,
state: StateRebuilder,
blocks: BlockRebuilder,
writer: LooseWriter,
writer: Option<LooseWriter>,
snappy_buffer: Bytes,
final_state_root: H256,
guard: Guard,
@ -80,8 +80,8 @@ struct RestorationParams<'a> {
manifest: ManifestData, // manifest to base restoration on.
pruning: Algorithm, // pruning algorithm for the database.
db_path: PathBuf, // database path
db_config: &'a DatabaseConfig,
writer: LooseWriter, // writer for recovered snapshot.
db_config: &'a DatabaseConfig, // configuration for the database.
writer: Option<LooseWriter>, // writer for recovered snapshot.
genesis: &'a [u8], // genesis block of the chain.
guard: Guard, // guard for the restoration directory.
}
@ -120,7 +120,10 @@ impl Restoration {
let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
try!(self.state.feed(&self.snappy_buffer[..len]));
try!(self.writer.write_state_chunk(hash, chunk));
if let Some(ref mut writer) = self.writer.as_mut() {
try!(writer.write_state_chunk(hash, chunk));
}
}
Ok(())
@ -132,7 +135,9 @@ impl Restoration {
let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer));
try!(self.blocks.feed(&self.snappy_buffer[..len], engine));
try!(self.writer.write_block_chunk(hash, chunk));
if let Some(ref mut writer) = self.writer.as_mut() {
try!(writer.write_block_chunk(hash, chunk));
}
}
Ok(())
@ -157,7 +162,9 @@ impl Restoration {
// connect out-of-order chunks.
self.blocks.glue_chunks();
try!(self.writer.finish(self.manifest));
if let Some(writer) = self.writer {
try!(writer.finish(self.manifest));
}
self.guard.disarm();
Ok(())
@ -187,9 +194,6 @@ pub struct ServiceParams {
/// The directory to put snapshots in.
/// Usually "<chain hash>/snapshot"
pub snapshot_root: PathBuf,
/// The client's database directory.
/// Usually "<chain hash>/<pruning>/db".
pub client_db: PathBuf,
/// A handle for database restoration.
pub db_restore: Arc<DatabaseRestore>,
}
@ -198,7 +202,6 @@ pub struct ServiceParams {
/// This controls taking snapshots and restoring from them.
pub struct Service {
restoration: Mutex<Option<Restoration>>,
client_db: PathBuf,
snapshot_root: PathBuf,
db_config: DatabaseConfig,
io_channel: Channel,
@ -219,7 +222,6 @@ impl Service {
pub fn new(params: ServiceParams) -> Result<Self, Error> {
let mut service = Service {
restoration: Mutex::new(None),
client_db: params.client_db,
snapshot_root: params.snapshot_root,
db_config: params.db_config,
io_channel: params.channel,
@ -301,11 +303,15 @@ impl Service {
fn replace_client_db(&self) -> Result<(), Error> {
let our_db = self.restoration_db();
trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db);
try!(self.db_restore.restore_db(our_db.to_str().unwrap()));
try!(self.db_restore.restore_db(&*our_db.to_string_lossy()));
Ok(())
}
/// Get a reference to the snapshot reader.
pub fn reader(&self) -> RwLockReadGuard<Option<LooseReader>> {
self.reader.read()
}
/// Tick the snapshot service. This will log any active snapshot
/// being taken.
pub fn tick(&self) {
@ -357,11 +363,15 @@ impl Service {
}
/// Initialize the restoration synchronously.
pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> {
/// The recover flag indicates whether to recover the restored snapshot.
pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> {
let rest_dir = self.restoration_dir();
let mut res = self.restoration.lock();
self.state_chunks.store(0, Ordering::SeqCst);
self.block_chunks.store(0, Ordering::SeqCst);
// tear down existing restoration.
*res = None;
@ -376,7 +386,10 @@ impl Service {
try!(fs::create_dir_all(&rest_dir));
// make new restoration.
let writer = try!(LooseWriter::new(self.temp_recovery_dir()));
let writer = match recover {
true => Some(try!(LooseWriter::new(self.temp_recovery_dir()))),
false => None
};
let params = RestorationParams {
manifest: manifest,
@ -391,8 +404,8 @@ impl Service {
*res = Some(try!(Restoration::new(params)));
*self.status.lock() = RestorationStatus::Ongoing {
state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32,
block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32,
state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32,
block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32,
};
Ok(())
}
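Hedged call sketches for the new signature, not part of this commit: the `recover` flag decides whether restored chunks are also written through a `LooseWriter` and later promoted into the snapshot directory.

// restore and keep a recovered snapshot copy (as the client service now does):
try!(service.init_restore(manifest.clone(), true));
// restore only to rebuild the database, keeping no snapshot copy:
try!(service.init_restore(manifest, false));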
@ -403,13 +416,13 @@ impl Service {
fn finalize_restoration(&self, rest: &mut Option<Restoration>) -> Result<(), Error> {
trace!(target: "snapshot", "finalizing restoration");
self.state_chunks.store(0, Ordering::SeqCst);
self.block_chunks.store(0, Ordering::SeqCst);
let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some());
// destroy the restoration before replacing databases and snapshot.
try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(())));
try!(self.replace_client_db());
if recover {
let mut reader = self.reader.write();
*reader = None; // destroy the old reader if it existed.
@ -428,10 +441,10 @@ impl Service {
trace!(target: "snapshot", "copying restored snapshot files over");
try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir));
let _ = fs::remove_dir_all(self.restoration_dir());
*reader = Some(try!(LooseReader::new(snapshot_dir)));
}
let _ = fs::remove_dir_all(self.restoration_dir());
*self.status.lock() = RestorationStatus::Inactive;
Ok(())
@ -512,7 +525,13 @@ impl SnapshotService for Service {
}
fn status(&self) -> RestorationStatus {
*self.status.lock()
let mut cur_status = self.status.lock();
if let RestorationStatus::Ongoing { ref mut state_chunks_done, ref mut block_chunks_done } = *cur_status {
*state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32;
*block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32;
}
cur_status.clone()
}
fn begin_restore(&self, manifest: ManifestData) {
@ -523,12 +542,6 @@ impl SnapshotService for Service {
fn abort_restore(&self) {
*self.restoration.lock() = None;
*self.status.lock() = RestorationStatus::Inactive;
if let Err(e) = fs::remove_dir_all(&self.restoration_dir()) {
match e.kind() {
ErrorKind::NotFound => {},
_ => warn!("encountered error {} while deleting snapshot restoration dir.", e),
}
}
}
fn restore_state_chunk(&self, hash: H256, chunk: Bytes) {
@ -585,7 +598,6 @@ mod tests {
pruning: Algorithm::Archive,
channel: service.channel(),
snapshot_root: dir,
client_db: client_db,
db_restore: Arc::new(NoopDBRestore),
};


@ -18,6 +18,7 @@
mod blocks;
mod state;
mod service;
pub mod helpers;


@ -0,0 +1,143 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tests for the snapshot service.
use std::sync::Arc;
use client::{BlockChainClient, Client};
use ids::BlockID;
use snapshot::service::{Service, ServiceParams};
use snapshot::{self, ManifestData, SnapshotService};
use spec::Spec;
use tests::helpers::generate_dummy_client_with_spec_and_data;
use devtools::RandomTempPath;
use io::IoChannel;
use util::kvdb::DatabaseConfig;
struct NoopDBRestore;
impl snapshot::DatabaseRestore for NoopDBRestore {
fn restore_db(&self, _new_db: &str) -> Result<(), ::error::Error> {
Ok(())
}
}
#[test]
fn restored_is_equivalent() {
const NUM_BLOCKS: u32 = 400;
const TX_PER: usize = 5;
let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices);
let path = RandomTempPath::create_dir();
let mut path = path.as_path().clone();
let mut client_db = path.clone();
client_db.push("client_db");
path.push("snapshot");
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
let spec = Spec::new_null();
let client2 = Client::new(
Default::default(),
&spec,
&client_db,
Arc::new(::miner::Miner::with_spec(&spec)),
IoChannel::disconnected(),
&db_config,
).unwrap();
let service_params = ServiceParams {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: db_config,
pruning: ::util::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path,
db_restore: client2.clone(),
};
let service = Service::new(service_params).unwrap();
service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap();
let manifest = service.manifest().unwrap();
service.init_restore(manifest.clone(), true).unwrap();
assert!(service.init_restore(manifest.clone(), true).is_ok());
for hash in manifest.state_hashes {
let chunk = service.chunk(hash).unwrap();
service.feed_state_chunk(hash, &chunk);
}
for hash in manifest.block_hashes {
let chunk = service.chunk(hash).unwrap();
service.feed_block_chunk(hash, &chunk);
}
assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive);
for x in 0..NUM_BLOCKS {
let block1 = client.block(BlockID::Number(x as u64)).unwrap();
let block2 = client2.block(BlockID::Number(x as u64)).unwrap();
assert_eq!(block1, block2);
}
}
#[test]
fn guards_delete_folders() {
let spec = Spec::new_null();
let path = RandomTempPath::create_dir();
let mut path = path.as_path().clone();
let service_params = ServiceParams {
engine: spec.engine.clone(),
genesis_block: spec.genesis_block(),
db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS),
pruning: ::util::journaldb::Algorithm::Archive,
channel: IoChannel::disconnected(),
snapshot_root: path.clone(),
db_restore: Arc::new(NoopDBRestore),
};
let service = Service::new(service_params).unwrap();
path.push("restoration");
let manifest = ManifestData {
state_hashes: vec![],
block_hashes: vec![],
block_number: 0,
block_hash: Default::default(),
state_root: Default::default(),
};
service.init_restore(manifest.clone(), true).unwrap();
assert!(path.exists());
service.abort_restore();
assert!(!path.exists());
service.init_restore(manifest.clone(), true).unwrap();
assert!(path.exists());
drop(service);
assert!(!path.exists());
}


@ -56,7 +56,7 @@ fn can_handshake() {
let stop_guard = StopGuard::new();
let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc";
run_test_worker(scope, stop_guard.share(), socket_path);
let remote_client = nanoipc::init_client::<RemoteClient<_>>(socket_path).unwrap();
let remote_client = nanoipc::generic_client::<RemoteClient<_>>(socket_path).unwrap();
assert!(remote_client.handshake().is_ok());
})
@ -68,7 +68,7 @@ fn can_query_block() {
let stop_guard = StopGuard::new();
let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc";
run_test_worker(scope, stop_guard.share(), socket_path);
let remote_client = nanoipc::init_client::<RemoteClient<_>>(socket_path).unwrap();
let remote_client = nanoipc::generic_client::<RemoteClient<_>>(socket_path).unwrap();
let non_existant_block = remote_client.block_header(BlockID::Number(999));


@ -240,7 +240,7 @@ mod tests {
::std::thread::spawn(move || {
while !hypervisor_ready.load(Ordering::Relaxed) { }
let client = nanoipc::init_client::<HypervisorServiceClient<_>>(url).unwrap();
let client = nanoipc::fast_client::<HypervisorServiceClient<_>>(url).unwrap();
client.handshake().unwrap();
client.module_ready(test_module_id);
});


@ -110,7 +110,7 @@ impl HypervisorService {
let modules = self.modules.read().unwrap();
modules.get(&module_id).map(|module| {
trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url);
let client = nanoipc::init_client::<ControlServiceClient<_>>(&module.control_url).unwrap();
let client = nanoipc::fast_client::<ControlServiceClient<_>>(&module.control_url).unwrap();
client.shutdown();
trace!(target: "hypervisor", "Sent shutdown to {}", module_id);
});


@ -10,4 +10,4 @@ license = "GPL-3.0"
ethcore-ipc = { path = "../rpc" }
nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" }
log = "0.3"
lazy_static = "0.2"


@ -19,6 +19,7 @@
extern crate ethcore_ipc as ipc;
extern crate nanomsg;
#[macro_use] extern crate log;
#[macro_use] extern crate lazy_static;
pub use ipc::{WithSocket, IpcInterface, IpcConfig};
pub use nanomsg::Socket as NanoSocket;
@ -28,7 +29,8 @@ use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut}
use std::ops::Deref;
const POLL_TIMEOUT: isize = 200;
const CLIENT_CONNECTION_TIMEOUT: isize = 120000;
const DEFAULT_CONNECTION_TIMEOUT: isize = 30000;
const DEBUG_CONNECTION_TIMEOUT: isize = 5000;
/// Generic worker to handle service (bound) sockets
pub struct Worker<S: ?Sized> where S: IpcInterface {
@ -68,7 +70,7 @@ pub fn init_duplex_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, Sock
SocketError::DuplexLink
}));
socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap();
socket.set_receive_timeout(DEFAULT_CONNECTION_TIMEOUT).unwrap();
let endpoint = try!(socket.connect(socket_addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
@ -84,26 +86,58 @@ pub fn init_duplex_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, Sock
/// Spawns client <`S`> over the specified address:
/// creates a socket and connects an endpoint to it
/// for request-reply connections to the service
pub fn init_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
pub fn client<S>(socket_addr: &str, receive_timeout: Option<isize>) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| {
warn!(target: "ipc", "Failed to create ipc socket: {:?}", e);
SocketError::RequestLink
}));
socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap();
if let Some(timeout) = receive_timeout {
socket.set_receive_timeout(timeout).unwrap();
}
let endpoint = try!(socket.connect(socket_addr).map_err(|e| {
warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e);
SocketError::RequestLink
}));
trace!(target: "ipc", "Created cleint for {}", socket_addr);
trace!(target: "ipc", "Created client for {}", socket_addr);
Ok(GuardedSocket {
client: Arc::new(S::init(socket)),
_endpoint: endpoint,
})
}
lazy_static! {
/// Set PARITY_IPC_DEBUG=1 for fail-fast diagnosis of connectivity problems
pub static ref DEBUG_FLAG: bool = {
use std::env;
if let Ok(debug) = env::var("PARITY_IPC_DEBUG") {
debug == "1" || debug.to_uppercase() == "TRUE"
}
else { false }
};
}
/// Client with no default timeout on operations
pub fn generic_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
if *DEBUG_FLAG {
client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT))
} else {
client(socket_addr, None)
}
}
/// Client over interface that is supposed to give quick almost non-blocking responses
pub fn fast_client<S>(socket_addr: &str) -> Result<GuardedSocket<S>, SocketError> where S: WithSocket<Socket> {
if *DEBUG_FLAG {
client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT))
} else {
client(socket_addr, Some(DEFAULT_CONNECTION_TIMEOUT))
}
}
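
Taken together: `client` is the primitive, `generic_client` applies no receive timeout (calls may block indefinitely), and `fast_client` caps waits at the 30-second default; with PARITY_IPC_DEBUG=1 (or =true) both are capped at 5 seconds so connectivity problems fail fast. A hedged usage sketch with illustrative socket paths:

// Long-lived query channel (e.g. the sync client): no receive timeout.
let sync = nanoipc::generic_client::<SyncClient<_>>("ipc:///tmp/example-sync.ipc").unwrap();

// Control-plane call expected to answer promptly: bounded timeout.
let control = nanoipc::fast_client::<ControlServiceClient<_>>("ipc:///tmp/example-control.ipc").unwrap();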
/// Error occurred while establishing socket or endpoint
#[derive(Debug)]
pub enum SocketError {

View File

@ -31,6 +31,7 @@ use ethcore::error::ImportError;
use ethcore::miner::Miner;
use cache::CacheConfig;
use informant::Informant;
use io_handler::ImportIoHandler;
use params::{SpecType, Pruning};
use helpers::{to_client_config, execute_upgrades};
use dir::Directories;
@ -170,6 +171,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color);
try!(service.register_io_handler(Arc::new(ImportIoHandler {
info: Arc::new(informant),
})).map_err(|_| "Unable to register informant handler".to_owned()));
let do_import = |bytes| {
while client.queue_info().is_full() { sleep(Duration::from_secs(1)); }
match client.import_block(bytes) {
@ -181,7 +186,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
},
Ok(_) => {},
}
informant.tick();
Ok(())
};
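
The import loop applies simple backpressure: sleep while the verification queue is full, then hand the next block to the client; the informant now ticks from its own IO timer rather than once per imported block. The throttling shape as a standalone sketch, with a hypothetical `queue_full` predicate:

use std::thread::sleep;
use std::time::Duration;

fn import_all<F, G>(queue_full: F, mut import_one: G, blocks: usize)
	where F: Fn() -> bool, G: FnMut()
{
	for _ in 0..blocks {
		// Wait for the verifier to drain before queueing more work.
		while queue_full() { sleep(Duration::from_secs(1)); }
		import_one();
	}
}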
@ -266,10 +270,10 @@ fn execute_export(cmd: ExportBlockchain) -> Result<String, String> {
};
let from = try!(client.block_number(cmd.from_block).ok_or("From block could not be found"));
let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found"));
let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found"));
for i in from..(to + 1) {
let b = client.block(BlockID::Number(i)).unwrap();
let b = try!(client.block(BlockID::Number(i)).ok_or("Error exporting incomplete chain"));
match format {
DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); }
DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); }

View File

@ -63,7 +63,7 @@ pub fn payload<B: ipc::BinaryConvertable>() -> Result<B, BootError> {
}
pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket<HypervisorServiceClient<NanoSocket>>{
let hypervisor_client = nanoipc::init_client::<HypervisorServiceClient<_>>(hv_url).unwrap();
let hypervisor_client = nanoipc::fast_client::<HypervisorServiceClient<_>>(hv_url).unwrap();
hypervisor_client.handshake().unwrap();
hypervisor_client.module_ready(module_id, control_url.to_owned());
@ -73,7 +73,7 @@ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> Guar
pub fn dependency<C: WithSocket<NanoSocket>>(url: &str)
-> Result<GuardedSocket<C>, BootError>
{
nanoipc::init_client::<C>(url).map_err(|socket_err| BootError::DependencyConnect(socket_err))
nanoipc::generic_client::<C>(url).map_err(|socket_err| BootError::DependencyConnect(socket_err))
}
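
Dependencies are wired with `generic_client`, so a module waits indefinitely for a sibling that is still starting instead of failing on a timeout. A hedged usage sketch, inside a function returning `Result<_, BootError>` (the socket path is illustrative):

// Connect to another module over IPC; a failure surfaces as BootError::DependencyConnect.
let chain = try!(dependency::<ChainNotifyClient<_>>("ipc:///tmp/example-chain.ipc"));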
pub fn main_thread() -> Arc<AtomicBool> {

View File

@ -33,7 +33,7 @@ Usage:
parity export [ <file> ] [options]
parity signer new-token [options]
parity snapshot <file> [options]
parity restore <file> [options]
parity restore [ <file> ] [options]
Operating Options:
--mode MODE Set the operating mode. MODE can be one of:

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use ethcore::client::Client;
use ethcore::service::ClientIoMessage;
use ethsync::{SyncProvider, ManageNetwork};
@ -31,6 +32,7 @@ pub struct ClientIoHandler {
pub net: Arc<ManageNetwork>,
pub accounts: Arc<AccountProvider>,
pub info: Arc<Informant>,
pub shutdown: Arc<AtomicBool>
}
impl IoHandler<ClientIoMessage> for ClientIoHandler {
@ -39,8 +41,24 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
}
fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
if let INFO_TIMER = timer {
if timer == INFO_TIMER && !self.shutdown.load(Ordering::SeqCst) {
self.info.tick();
}
}
}
pub struct ImportIoHandler {
pub info: Arc<Informant>,
}
impl IoHandler<ClientIoMessage> for ImportIoHandler {
fn initialize(&self, io: &IoContext<ClientIoMessage>) {
io.register_timer(INFO_TIMER, 5000).expect("Error registering timer");
}
fn timeout(&self, _io: &IoContext<ClientIoMessage>, timer: TimerToken) {
if let INFO_TIMER = timer {
self.info.tick()
}
}
}
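
Two handlers, one pattern: both register the 5-second INFO_TIMER, but `ClientIoHandler` additionally gates the tick on an atomic `shutdown` flag so a timer that fires during teardown becomes a no-op, while the import handler has no such race to guard against. The gating idea as a minimal standalone sketch (not the actual `IoHandler` trait):

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

struct Ticker {
	shutdown: Arc<AtomicBool>,
}

impl Ticker {
	fn on_timer(&self) {
		// Once shutdown is requested, periodic work is skipped entirely.
		if !self.shutdown.load(Ordering::SeqCst) {
			// ... print sync progress, etc. ...
		}
	}
}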

View File

@ -71,7 +71,7 @@ mod ipc_deps {
pub use ethsync::{SyncClient, NetworkManagerClient, ServiceConfiguration};
pub use ethcore::client::ChainNotifyClient;
pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL};
pub use nanoipc::{GuardedSocket, NanoSocket, init_client};
pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client};
pub use ipc::IpcSocket;
pub use ipc::binary::serialize;
}
@ -134,11 +134,11 @@ pub fn sync
hypervisor.start();
hypervisor.wait_for_startup();
let sync_client = init_client::<SyncClient<_>>(
let sync_client = generic_client::<SyncClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap();
let notify_client = init_client::<ChainNotifyClient<_>>(
let notify_client = generic_client::<ChainNotifyClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap();
let manage_client = init_client::<NetworkManagerClient<_>>(
let manage_client = generic_client::<NetworkManagerClient<_>>(
&service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap();
*hypervisor_ref = Some(hypervisor);

View File

@ -257,8 +257,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
sync: sync_provider.clone(),
net: manage_network.clone(),
accounts: account_provider.clone(),
shutdown: Default::default(),
});
service.register_io_handler(io_handler).expect("Error registering IO handler");
service.register_io_handler(io_handler.clone()).expect("Error registering IO handler");
// the watcher must be kept alive.
let _watcher = match cmd.no_periodic_snapshot {
@ -289,6 +290,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> {
// Handle exit
wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server);
// make sure the timer does not spawn requests while shutdown is in progress
io_handler.shutdown.store(true, ::std::sync::atomic::Ordering::SeqCst);
// only the Arc is dropped here, so the remaining references can be released in their own time
drop(io_handler);
// hypervisor should be shut down first while everything still works and can be
// terminated gracefully
drop(hypervisor);
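
This is why the registration line changed to `io_handler.clone()`: `execute` keeps its own handle to flip the flag after `wait_for_exit` returns, and the final `drop` merely releases that one reference. The ownership pattern in isolation (hypothetical `register` standing in for `register_io_handler`):

use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

fn register(_handler: Arc<AtomicBool>) {
	// The service would stash its own clone of the Arc here.
}

fn main() {
	let shutdown = Arc::new(AtomicBool::new(false));
	register(shutdown.clone()); // service holds one reference
	// ... main loop runs, then exits ...
	shutdown.store(true, Ordering::SeqCst); // our clone signals the timer
	drop(shutdown); // releases only this reference; the service drops its own later
}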

View File

@ -21,8 +21,9 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use ethcore_logger::{setup_log, Config as LogConfig};
use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService};
use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService as SS};
use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter};
use ethcore::snapshot::service::Service as SnapshotService;
use ethcore::service::ClientService;
use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType};
use ethcore::miner::Miner;
@ -62,6 +63,60 @@ pub struct SnapshotCommand {
pub block_at: BlockID,
}
// helper for reading chunks from an arbitrary reader and feeding them into the
// service.
fn restore_using<R: SnapshotReader>(snapshot: Arc<SnapshotService>, reader: &R, recover: bool) -> Result<(), String> {
let manifest = reader.manifest();
info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash);
try!(snapshot.init_restore(manifest.clone(), recover).map_err(|e| {
format!("Failed to begin restoration: {}", e)
}));
let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
let informant_handle = snapshot.clone();
::std::thread::spawn(move || {
while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() {
info!("Processed {}/{} state chunks and {}/{} block chunks.",
state_chunks_done, num_state, block_chunks_done, num_blocks);
::std::thread::sleep(Duration::from_secs(5));
}
});
info!("Restoring state");
for &state_hash in &manifest.state_hashes {
if snapshot.status() == RestorationStatus::Failed {
return Err("Restoration failed".into());
}
let chunk = try!(reader.chunk(state_hash)
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e)));
snapshot.feed_state_chunk(state_hash, &chunk);
}
info!("Restoring blocks");
for &block_hash in &manifest.block_hashes {
if snapshot.status() == RestorationStatus::Failed {
return Err("Restoration failed".into());
}
let chunk = try!(reader.chunk(block_hash)
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e)));
snapshot.feed_block_chunk(block_hash, &chunk);
}
match snapshot.status() {
RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
RestorationStatus::Inactive => {
info!("Restoration complete.");
Ok(())
}
}
}
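
With the chunk-feeding logic factored out, the file-based restore path reduces to opening a `PackedReader` and delegating. A sketch of a caller, using only the signatures visible in this hunk:

let reader = try!(PackedReader::new(Path::new(&file))
	.map_err(|e| format!("Couldn't open snapshot file: {}", e))
	.and_then(|x| x.ok_or("Snapshot file has invalid format.".into())));

// recover = true: restore into a fresh directory from the file's chunks.
try!(restore_using(snapshot.clone(), &reader, true));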
impl SnapshotCommand {
// shared portion of snapshot commands: start the client service
fn start_service(self) -> Result<(ClientService, Arc<PanicHandler>), String> {
@ -106,70 +161,36 @@ impl SnapshotCommand {
/// restore from a snapshot
pub fn restore(self) -> Result<(), String> {
let file = try!(self.file_path.clone().ok_or("No file path provided.".to_owned()));
let file = self.file_path.clone();
let (service, _panic_handler) = try!(self.start_service());
warn!("Snapshot restoration is experimental and the format may be subject to change.");
warn!("On encountering an unexpected error, please ensure that you have a recent snapshot.");
let snapshot = service.snapshot_service();
if let Some(file) = file {
info!("Attempting to restore from snapshot at '{}'", file);
let reader = PackedReader::new(Path::new(&file))
.map_err(|e| format!("Couldn't open snapshot file: {}", e))
.and_then(|x| x.ok_or("Snapshot file has invalid format.".into()));
let reader = try!(reader);
let manifest = reader.manifest();
try!(restore_using(snapshot, &reader, true));
} else {
info!("Attempting to restore from local snapshot.");
// drop the client so we don't restore while it has open DB handles.
drop(service);
try!(snapshot.init_restore(manifest.clone()).map_err(|e| {
format!("Failed to begin restoration: {}", e)
}));
let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len());
let informant_handle = snapshot.clone();
::std::thread::spawn(move || {
while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() {
info!("Processed {}/{} state chunks and {}/{} block chunks.",
state_chunks_done, num_state, block_chunks_done, num_blocks);
::std::thread::sleep(Duration::from_secs(5));
// attempting restoration with recovery will lead to deadlock
// as we currently hold a read lock on the service's reader.
match *snapshot.reader() {
Some(ref reader) => try!(restore_using(snapshot.clone(), reader, false)),
None => return Err("No local snapshot found.".into()),
}
});
info!("Restoring state");
for &state_hash in &manifest.state_hashes {
if snapshot.status() == RestorationStatus::Failed {
return Err("Restoration failed".into());
}
let chunk = try!(reader.chunk(state_hash)
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e)));
snapshot.feed_state_chunk(state_hash, &chunk);
}
info!("Restoring blocks");
for &block_hash in &manifest.block_hashes {
if snapshot.status() == RestorationStatus::Failed {
return Err("Restoration failed".into());
}
let chunk = try!(reader.chunk(block_hash)
.map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e)));
snapshot.feed_block_chunk(block_hash, &chunk);
}
match snapshot.status() {
RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
RestorationStatus::Inactive => {
info!("Restoration complete.");
Ok(())
}
}
}
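
The deadlock comment deserves a gloss: `snapshot.reader()` returns a read guard, and a recovery restore would need write access to the same lock from the same thread, which can never be granted while that guard lives. The hazard in its general form:

use std::sync::RwLock;

fn main() {
	let lock = RwLock::new(0u32);
	let _read = lock.read().unwrap();
	// Uncommenting the next line would block this thread forever:
	// the write lock cannot be granted while _read is alive.
	// let _write = lock.write().unwrap();
}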
/// Take a snapshot from the head of the chain.
pub fn take_snapshot(self) -> Result<(), String> {

View File

@ -458,8 +458,6 @@ impl Database {
let mut backup_db = PathBuf::from(&self.path);
backup_db.pop();
backup_db.push("backup_db");
println!("Path at {:?}", self.path);
println!("Backup at {:?}", backup_db);
let existed = match fs::rename(&self.path, &backup_db) {
Ok(_) => true,