diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 59c709747..bf7f51d71 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -225,17 +225,20 @@ windows: name: "${CI_BUILD_NAME}_parity" test-linux: stage: test - image: ethcore/rust:stable before_script: - git submodule update --init --recursive script: - ./test.sh --verbose tags: - - rust - - rust-stable + - rust-test dependencies: - linux-stable deploy-binaries: stage: deploy + only: + - master + - beta + - tags + - stable script: - ./deploy.sh diff --git a/Cargo.lock b/Cargo.lock index 2ebf9b854..13fbcf9ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -390,6 +390,7 @@ name = "ethcore-ipc-nano" version = "1.4.0" dependencies = [ "ethcore-ipc 1.4.0", + "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)", ] @@ -569,6 +570,7 @@ name = "ethkey" version = "0.2.0" dependencies = [ "bigint 0.1.0", + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", @@ -580,6 +582,7 @@ dependencies = [ name = "ethstore" version = "0.1.0" dependencies = [ + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 531f7da1b..742d59bf2 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -39,7 +39,7 @@ clippy = { version = "0.0.85", optional = true} serde_codegen = { version = "0.8", optional = true } [features] -default = ["serde_codegen", "extra-dapps", "https-fetch/ca-github-only"] +default = ["serde_codegen", "extra-dapps"] extra-dapps = ["parity-dapps-wallet"] nightly = ["serde_macros"] dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"] diff --git a/dapps/src/apps/cache.rs b/dapps/src/apps/cache.rs index b5acbcb15..be9521cf9 100644 --- a/dapps/src/apps/cache.rs +++ b/dapps/src/apps/cache.rs @@ -18,13 +18,13 @@ use std::fs; use std::sync::{Arc}; -use std::sync::atomic::{AtomicBool, Ordering}; use linked_hash_map::LinkedHashMap; use page::LocalPageEndpoint; +use handlers::FetchControl; pub enum ContentStatus { - Fetching(Arc), + Fetching(Arc), Ready(LocalPageEndpoint), } @@ -57,10 +57,10 @@ impl ContentCache { while len > expected_size { let entry = self.cache.pop_front().unwrap(); match entry.1 { - ContentStatus::Fetching(ref abort) => { + ContentStatus::Fetching(ref fetch) => { trace!(target: "dapps", "Aborting {} because of limit.", entry.0); // Mark as aborted - abort.store(true, Ordering::SeqCst); + fetch.abort() }, ContentStatus::Ready(ref endpoint) => { trace!(target: "dapps", "Removing {} because of limit.", entry.0); diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 502fbe4aa..8702e4706 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -23,7 +23,6 @@ use std::{fs, env, fmt}; use std::io::{self, Read, Write}; use std::path::PathBuf; use std::sync::Arc; -use std::sync::atomic::{AtomicBool}; use rustc_serialize::hex::FromHex; use hyper; @@ -38,65 +37,67 @@ use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator}; use endpoint::{Endpoint, EndpointPath, Handler}; use apps::cache::{ContentCache, ContentStatus}; use 
apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; -use apps::urlhint::{URLHintContract, URLHint}; +use apps::urlhint::{URLHintContract, URLHint, URLHintResult}; const MAX_CACHED_DAPPS: usize = 10; -pub struct AppFetcher { +pub struct ContentFetcher { dapps_path: PathBuf, resolver: R, + cache: Arc>, sync: Arc, - dapps: Arc>, } -impl Drop for AppFetcher { +impl Drop for ContentFetcher { fn drop(&mut self) { // Clear cache path let _ = fs::remove_dir_all(&self.dapps_path); } } -impl AppFetcher { +impl ContentFetcher { pub fn new(resolver: R, sync_status: Arc) -> Self { let mut dapps_path = env::temp_dir(); dapps_path.push(random_filename()); - AppFetcher { + ContentFetcher { dapps_path: dapps_path, resolver: resolver, sync: sync_status, - dapps: Arc::new(Mutex::new(ContentCache::default())), + cache: Arc::new(Mutex::new(ContentCache::default())), } } #[cfg(test)] - fn set_status(&self, app_id: &str, status: ContentStatus) { - self.dapps.lock().insert(app_id.to_owned(), status); + fn set_status(&self, content_id: &str, status: ContentStatus) { + self.cache.lock().insert(content_id.to_owned(), status); } - pub fn contains(&self, app_id: &str) -> bool { - let mut dapps = self.dapps.lock(); - // Check if we already have the app - if dapps.get(app_id).is_some() { - return true; + pub fn contains(&self, content_id: &str) -> bool { + { + let mut cache = self.cache.lock(); + // Check if we already have the app + if cache.get(content_id).is_some() { + return true; + } } // fallback to resolver - if let Ok(app_id) = app_id.from_hex() { + if let Ok(content_id) = content_id.from_hex() { // if app_id is valid, but we are syncing always return true. if self.sync.is_major_syncing() { return true; } // else try to resolve the app_id - self.resolver.resolve(app_id).is_some() + self.resolver.resolve(content_id).is_some() } else { false } } pub fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box { - let mut dapps = self.dapps.lock(); - let app_id = path.app_id.clone(); + let mut cache = self.cache.lock(); + let content_id = path.app_id.clone(); if self.sync.is_major_syncing() { return Box::new(ContentHandler::error( @@ -108,57 +109,85 @@ impl AppFetcher { } let (new_status, handler) = { - let status = dapps.get(&app_id); + let status = cache.get(&content_id); match status { // Just server dapp Some(&mut ContentStatus::Ready(ref endpoint)) => { (None, endpoint.to_async_handler(path, control)) }, // App is already being fetched - Some(&mut ContentStatus::Fetching(_)) => { - (None, Box::new(ContentHandler::error_with_refresh( - StatusCode::ServiceUnavailable, - "Download In Progress", - "This dapp is already being downloaded. Please wait...", - None, - )) as Box) + Some(&mut ContentStatus::Fetching(ref fetch_control)) => { + trace!(target: "dapps", "Content fetching in progress. Waiting..."); + (None, fetch_control.to_handler(control)) }, // We need to start fetching app None => { - let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true."); - let app = self.resolver.resolve(app_hex); + trace!(target: "dapps", "Content unavailable. 
Fetching..."); + let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true."); + let content = self.resolver.resolve(content_hex); - if let Some(app) = app { - let abort = Arc::new(AtomicBool::new(false)); + let cache = self.cache.clone(); + let on_done = move |id: String, result: Option| { + let mut cache = cache.lock(); + match result { + Some(endpoint) => { + cache.insert(id, ContentStatus::Ready(endpoint)); + }, + // In case of error + None => { + cache.remove(&id); + }, + } + }; - (Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new( - app, - abort, - control, - path.using_dapps_domains, - DappInstaller { - dapp_id: app_id.clone(), - dapps_path: self.dapps_path.clone(), - dapps: self.dapps.clone(), - } - )) as Box) - } else { - // This may happen when sync status changes in between - // `contains` and `to_handler` - (None, Box::new(ContentHandler::error( - StatusCode::NotFound, - "Resource Not Found", - "Requested resource was not found.", - None - )) as Box) + match content { + Some(URLHintResult::Dapp(dapp)) => { + let (handler, fetch_control) = ContentFetcherHandler::new( + dapp.url(), + control, + path.using_dapps_domains, + DappInstaller { + id: content_id.clone(), + dapps_path: self.dapps_path.clone(), + on_done: Box::new(on_done), + } + ); + + (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) + }, + Some(URLHintResult::Content(content)) => { + let (handler, fetch_control) = ContentFetcherHandler::new( + content.url, + control, + path.using_dapps_domains, + ContentInstaller { + id: content_id.clone(), + mime: content.mime, + content_path: self.dapps_path.clone(), + on_done: Box::new(on_done), + } + ); + + (Some(ContentStatus::Fetching(fetch_control)), Box::new(handler) as Box) + }, + None => { + // This may happen when sync status changes in between + // `contains` and `to_handler` + (None, Box::new(ContentHandler::error( + StatusCode::NotFound, + "Resource Not Found", + "Requested resource was not found.", + None + )) as Box) + }, } }, } }; if let Some(status) = new_status { - dapps.clear_garbage(MAX_CACHED_DAPPS); - dapps.insert(app_id, status); + cache.clear_garbage(MAX_CACHED_DAPPS); + cache.insert(content_id, status); } handler @@ -169,7 +198,7 @@ impl AppFetcher { pub enum ValidationError { Io(io::Error), Zip(zip::result::ZipError), - InvalidDappId, + InvalidContentId, ManifestNotFound, ManifestSerialization(String), HashMismatch { expected: H256, got: H256, }, @@ -180,7 +209,7 @@ impl fmt::Display for ValidationError { match *self { ValidationError::Io(ref io) => write!(f, "Unexpected IO error occured: {:?}", io), ValidationError::Zip(ref zip) => write!(f, "Unable to read ZIP archive: {:?}", zip), - ValidationError::InvalidDappId => write!(f, "Dapp ID is invalid. It should be 32 bytes hash of content."), + ValidationError::InvalidContentId => write!(f, "ID is invalid. 
It should be 256 bits keccak hash of content."), ValidationError::ManifestNotFound => write!(f, "Downloaded Dapp bundle did not contain valid manifest.json file."), ValidationError::ManifestSerialization(ref err) => { write!(f, "There was an error during Dapp Manifest serialization: {:?}", err) @@ -204,10 +233,44 @@ impl From for ValidationError { } } +struct ContentInstaller { + id: String, + mime: String, + content_path: PathBuf, + on_done: Box) + Send>, +} + +impl ContentValidator for ContentInstaller { + type Error = ValidationError; + + fn validate_and_install(&self, path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { + // Create dir + try!(fs::create_dir_all(&self.content_path)); + + // And prepare path for a file + let filename = path.file_name().expect("We always fetch a file."); + let mut content_path = self.content_path.clone(); + content_path.push(&filename); + + if content_path.exists() { + try!(fs::remove_dir_all(&content_path)) + } + + try!(fs::copy(&path, &content_path)); + + Ok((self.id.clone(), LocalPageEndpoint::single_file(content_path, self.mime.clone()))) + } + + fn done(&self, endpoint: Option) { + (self.on_done)(self.id.clone(), endpoint) + } +} + + struct DappInstaller { - dapp_id: String, + id: String, dapps_path: PathBuf, - dapps: Arc>, + on_done: Box) + Send>, } impl DappInstaller { @@ -245,14 +308,14 @@ impl DappInstaller { impl ContentValidator for DappInstaller { type Error = ValidationError; - fn validate_and_install(&self, app_path: PathBuf) -> Result { + fn validate_and_install(&self, app_path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path); let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path))); let hash = try!(sha3(&mut file_reader)); - let dapp_id = try!(self.dapp_id.as_str().parse().map_err(|_| ValidationError::InvalidDappId)); - if dapp_id != hash { + let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); + if id != hash { return Err(ValidationError::HashMismatch { - expected: dapp_id, + expected: id, got: hash, }); } @@ -262,7 +325,7 @@ impl ContentValidator for DappInstaller { // First find manifest file let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip)); // Overwrite id to match hash - manifest.id = self.dapp_id.clone(); + manifest.id = self.id.clone(); let target = self.dapp_target_path(&manifest); @@ -299,23 +362,15 @@ impl ContentValidator for DappInstaller { let mut manifest_file = try!(fs::File::create(manifest_path)); try!(manifest_file.write_all(manifest_str.as_bytes())); + // Create endpoint + let app = LocalPageEndpoint::new(target, manifest.clone().into()); + // Return modified app manifest - Ok(manifest) + Ok((manifest.id.clone(), app)) } - fn done(&self, manifest: Option<&Manifest>) { - let mut dapps = self.dapps.lock(); - match manifest { - Some(manifest) => { - let path = self.dapp_target_path(manifest); - let app = LocalPageEndpoint::new(path, manifest.clone().into()); - dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app)); - }, - // In case of error - None => { - dapps.remove(&self.dapp_id); - }, - } + fn done(&self, endpoint: Option) { + (self.on_done)(self.id.clone(), endpoint) } } @@ -327,12 +382,12 @@ mod tests { use endpoint::EndpointInfo; use page::LocalPageEndpoint; use apps::cache::ContentStatus; - use apps::urlhint::{GithubApp, URLHint}; - use super::AppFetcher; + use apps::urlhint::{URLHint, URLHintResult}; + use super::ContentFetcher; 
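Both installers above report results through a boxed `on_done` callback instead of holding the dapps cache themselves; the fetcher builds that closure so a successful install marks the entry `Ready` while a failure evicts it. Below is a minimal, self-contained sketch of that callback pattern — the `Endpoint`, `Installer`, and plain `HashMap` cache are illustrative stand-ins, not the actual `LocalPageEndpoint`/`ContentCache` types from this diff:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

// Illustrative stand-in for LocalPageEndpoint.
struct Endpoint {
    path: String,
}

// An installer only knows its content id and how to report the outcome.
struct Installer {
    id: String,
    on_done: Box<dyn Fn(String, Option<Endpoint>) + Send>,
}

impl Installer {
    fn done(&self, endpoint: Option<Endpoint>) {
        (self.on_done)(self.id.clone(), endpoint)
    }
}

fn main() {
    // Shared cache, analogous to ContentFetcher's Arc<Mutex<ContentCache>>.
    let cache: Arc<Mutex<HashMap<String, Endpoint>>> = Arc::new(Mutex::new(HashMap::new()));

    let cache_for_cb = cache.clone();
    let installer = Installer {
        id: "abcdef".to_owned(),
        on_done: Box::new(move |id, result| {
            let mut cache = cache_for_cb.lock().unwrap();
            match result {
                // Successful install: expose the endpoint.
                Some(endpoint) => { cache.insert(id, endpoint); },
                // Fetch or validation failed: evict the placeholder entry.
                None => { cache.remove(&id); },
            }
        }),
    };

    installer.done(Some(Endpoint { path: "/tmp/content".to_owned() }));
    assert!(cache.lock().unwrap().contains_key("abcdef"));
}
```

Funnelling every cache update through one closure keeps `ContentInstaller` and `DappInstaller` oblivious to how the fetcher stores its entries.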
struct FakeResolver; impl URLHint for FakeResolver { - fn resolve(&self, _app_id: Bytes) -> Option { + fn resolve(&self, _id: Bytes) -> Option { None } } @@ -341,7 +396,7 @@ mod tests { fn should_true_if_contains_the_app() { // given let path = env::temp_dir(); - let fetcher = AppFetcher::new(FakeResolver, Arc::new(|| false)); + let fetcher = ContentFetcher::new(FakeResolver, Arc::new(|| false)); let handler = LocalPageEndpoint::new(path, EndpointInfo { name: "fake".into(), description: "".into(), diff --git a/dapps/src/apps/urlhint.rs b/dapps/src/apps/urlhint.rs index f57e5e0d7..2b86c0777 100644 --- a/dapps/src/apps/urlhint.rs +++ b/dapps/src/apps/urlhint.rs @@ -17,6 +17,7 @@ use std::fmt; use std::sync::Arc; use rustc_serialize::hex::ToHex; +use mime_guess; use ethabi::{Interface, Contract, Token}; use util::{Address, Bytes, Hashable}; @@ -52,6 +53,13 @@ impl GithubApp { } } +#[derive(Debug, PartialEq)] +pub struct Content { + pub url: String, + pub mime: String, + pub owner: Address, +} + /// RAW Contract interface. /// Should execute transaction using current blockchain state. pub trait ContractClient: Send + Sync { @@ -61,10 +69,19 @@ pub trait ContractClient: Send + Sync { fn call(&self, address: Address, data: Bytes) -> Result; } +/// Result of resolving id to URL +#[derive(Debug, PartialEq)] +pub enum URLHintResult { + /// Dapp + Dapp(GithubApp), + /// Content + Content(Content), +} + /// URLHint Contract interface pub trait URLHint { /// Resolves given id to registrar entry. - fn resolve(&self, app_id: Bytes) -> Option; + fn resolve(&self, id: Bytes) -> Option; } pub struct URLHintContract { @@ -110,10 +127,10 @@ impl URLHintContract { } } - fn encode_urlhint_call(&self, app_id: Bytes) -> Option { + fn encode_urlhint_call(&self, id: Bytes) -> Option { let call = self.urlhint .function("entries".into()) - .and_then(|f| f.encode_call(vec![Token::FixedBytes(app_id)])); + .and_then(|f| f.encode_call(vec![Token::FixedBytes(id)])); match call { Ok(res) => { @@ -126,7 +143,7 @@ impl URLHintContract { } } - fn decode_urlhint_output(&self, output: Bytes) -> Option { + fn decode_urlhint_output(&self, output: Bytes) -> Option { trace!(target: "dapps", "Output: {:?}", output.to_hex()); let output = self.urlhint .function("entries".into()) @@ -149,6 +166,17 @@ impl URLHintContract { if owner == Address::default() { return None; } + + let commit = GithubApp::commit(&commit); + if commit == Some(Default::default()) { + let mime = guess_mime_type(&account_slash_repo).unwrap_or("application/octet-stream".into()); + return Some(URLHintResult::Content(Content { + url: account_slash_repo, + mime: mime, + owner: owner, + })); + } + let (account, repo) = { let mut it = account_slash_repo.split('/'); match (it.next(), it.next()) { @@ -157,12 +185,12 @@ impl URLHintContract { } }; - GithubApp::commit(&commit).map(|commit| GithubApp { + commit.map(|commit| URLHintResult::Dapp(GithubApp { account: account, repo: repo, commit: commit, owner: owner, - }) + })) }, e => { warn!(target: "dapps", "Invalid contract output parameters: {:?}", e); @@ -177,10 +205,10 @@ impl URLHintContract { } impl URLHint for URLHintContract { - fn resolve(&self, app_id: Bytes) -> Option { + fn resolve(&self, id: Bytes) -> Option { self.urlhint_address().and_then(|address| { // Prepare contract call - self.encode_urlhint_call(app_id) + self.encode_urlhint_call(id) .and_then(|data| { let call = self.client.call(address, data); if let Err(ref e) = call { @@ -193,6 +221,34 @@ impl URLHint for URLHintContract { } } +fn 
guess_mime_type(url: &str) -> Option { + const CONTENT_TYPE: &'static str = "content-type="; + + let mut it = url.split('#'); + // skip url + let url = it.next(); + // get meta headers + let metas = it.next(); + if let Some(metas) = metas { + for meta in metas.split('&') { + let meta = meta.to_lowercase(); + if meta.starts_with(CONTENT_TYPE) { + return Some(meta[CONTENT_TYPE.len()..].to_owned()); + } + } + } + url.and_then(|url| { + url.split('.').last() + }).and_then(|extension| { + mime_guess::get_mime_type_str(extension).map(Into::into) + }) +} + +#[cfg(test)] +pub fn test_guess_mime_type(url: &str) -> Option { + guess_mime_type(url) +} + fn as_string(e: T) -> String { format!("{:?}", e) } @@ -201,7 +257,7 @@ fn as_string(e: T) -> String { mod tests { use std::sync::Arc; use std::str::FromStr; - use rustc_serialize::hex::{ToHex, FromHex}; + use rustc_serialize::hex::FromHex; use super::*; use util::{Bytes, Address, Mutex, ToPretty}; @@ -279,12 +335,33 @@ mod tests { let res = urlhint.resolve("test".bytes().collect()); // then - assert_eq!(res, Some(GithubApp { + assert_eq!(res, Some(URLHintResult::Dapp(GithubApp { account: "ethcore".into(), repo: "dao.claim".into(), commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(), owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), - })) + }))) + } + + #[test] + fn should_decode_urlhint_content_output() { + // given + let mut registrar = FakeRegistrar::new(); + registrar.responses = Mutex::new(vec![ + Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), + Ok("00000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff000000000000000000000000000000000000000000000000000000000000003d68747470733a2f2f657468636f72652e696f2f6173736574732f696d616765732f657468636f72652d626c61636b2d686f72697a6f6e74616c2e706e67000000".from_hex().unwrap()), + ]); + let urlhint = URLHintContract::new(Arc::new(registrar)); + + // when + let res = urlhint.resolve("test".bytes().collect()); + + // then + assert_eq!(res, Some(URLHintResult::Content(Content { + url: "https://ethcore.io/assets/images/ethcore-black-horizontal.png".into(), + mime: "image/png".into(), + owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), + }))) } #[test] @@ -303,4 +380,20 @@ mod tests { // then assert_eq!(url, "https://codeload.github.com/test/xyz/zip/000102030405060708090a0b0c0d0e0f10111213".to_owned()); } + + #[test] + fn should_guess_mime_type_from_url() { + let url1 = "https://ethcore.io/parity"; + let url2 = "https://ethcore.io/parity#content-type=image/png"; + let url3 = "https://ethcore.io/parity#something&content-type=image/png"; + let url4 = "https://ethcore.io/parity.png#content-type=image/jpeg"; + let url5 = "https://ethcore.io/parity.png"; + + + assert_eq!(test_guess_mime_type(url1), None); + assert_eq!(test_guess_mime_type(url2), Some("image/png".into())); + assert_eq!(test_guess_mime_type(url3), Some("image/png".into())); + assert_eq!(test_guess_mime_type(url4), Some("image/jpeg".into())); + assert_eq!(test_guess_mime_type(url5), Some("image/png".into())); + } } diff --git a/dapps/src/error_tpl.html b/dapps/src/error_tpl.html index 6551431a6..c6b4db0e7 100644 --- a/dapps/src/error_tpl.html +++ b/dapps/src/error_tpl.html @@ -3,7 +3,6 @@ - {meta} {title} diff --git a/dapps/src/handlers/client/mod.rs b/dapps/src/handlers/client/mod.rs index 
181f60001..3d8551e8a 100644 --- a/dapps/src/handlers/client/mod.rs +++ b/dapps/src/handlers/client/mod.rs @@ -63,7 +63,7 @@ impl Client { self.https_client.close(); } - pub fn request(&mut self, url: String, abort: Arc, on_done: Box) -> Result, FetchError> { + pub fn request(&mut self, url: &str, abort: Arc, on_done: Box) -> Result, FetchError> { let is_https = url.starts_with("https://"); let url = try!(url.parse().map_err(|_| FetchError::InvalidUrl)); trace!(target: "dapps", "Fetching from: {:?}", url); diff --git a/dapps/src/handlers/content.rs b/dapps/src/handlers/content.rs index f283fbb6a..4dc011475 100644 --- a/dapps/src/handlers/content.rs +++ b/dapps/src/handlers/content.rs @@ -23,6 +23,7 @@ use hyper::status::StatusCode; use util::version; +#[derive(Clone)] pub struct ContentHandler { code: StatusCode, content: String, @@ -57,18 +58,6 @@ impl ContentHandler { Self::html(code, format!( include_str!("../error_tpl.html"), title=title, - meta="", - message=message, - details=details.unwrap_or_else(|| ""), - version=version(), - )) - } - - pub fn error_with_refresh(code: StatusCode, title: &str, message: &str, details: Option<&str>) -> Self { - Self::html(code, format!( - include_str!("../error_tpl.html"), - title=title, - meta="", message=message, details=details.unwrap_or_else(|| ""), version=version(), diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 98242f2b3..c463d3710 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -16,78 +16,160 @@ //! Hyper Server Handler that fetches a file during a request (proxy). -use std::fmt; +use std::{fs, fmt}; use std::path::PathBuf; use std::sync::{mpsc, Arc}; -use std::sync::atomic::AtomicBool; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Instant, Duration}; +use util::Mutex; -use hyper::{header, server, Decoder, Encoder, Next, Method, Control}; +use hyper::{server, Decoder, Encoder, Next, Method, Control}; use hyper::net::HttpStream; use hyper::status::StatusCode; -use handlers::ContentHandler; +use handlers::{ContentHandler, Redirection}; use handlers::client::{Client, FetchResult}; use apps::redirection_address; -use apps::urlhint::GithubApp; -use apps::manifest::Manifest; +use page::LocalPageEndpoint; const FETCH_TIMEOUT: u64 = 30; enum FetchState { - NotStarted(GithubApp), + NotStarted(String), Error(ContentHandler), - InProgress { - deadline: Instant, - receiver: mpsc::Receiver, - }, - Done(Manifest), + InProgress(mpsc::Receiver), + Done(String, LocalPageEndpoint, Redirection), } pub trait ContentValidator { type Error: fmt::Debug + fmt::Display; - fn validate_and_install(&self, app: PathBuf) -> Result; - fn done(&self, Option<&Manifest>); + fn validate_and_install(&self, app: PathBuf) -> Result<(String, LocalPageEndpoint), Self::Error>; + fn done(&self, Option); +} + +pub struct FetchControl { + abort: Arc, + listeners: Mutex)>>, + deadline: Instant, +} + +impl Default for FetchControl { + fn default() -> Self { + FetchControl { + abort: Arc::new(AtomicBool::new(false)), + listeners: Mutex::new(Vec::new()), + deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT), + } + } +} + +impl FetchControl { + fn notify FetchState>(&self, status: F) { + let mut listeners = self.listeners.lock(); + for (control, sender) in listeners.drain(..) 
{ + if let Err(e) = sender.send(status()) { + trace!(target: "dapps", "Waiting listener notification failed: {:?}", e); + } else { + let _ = control.ready(Next::read()); + } + } + } + + fn set_status(&self, status: &FetchState) { + match *status { + FetchState::Error(ref handler) => self.notify(|| FetchState::Error(handler.clone())), + FetchState::Done(ref id, ref endpoint, ref handler) => self.notify(|| FetchState::Done(id.clone(), endpoint.clone(), handler.clone())), + FetchState::NotStarted(_) | FetchState::InProgress(_) => {}, + } + } + + pub fn abort(&self) { + self.abort.store(true, Ordering::SeqCst); + } + + pub fn to_handler(&self, control: Control) -> Box + Send> { + let (tx, rx) = mpsc::channel(); + self.listeners.lock().push((control, tx)); + + Box::new(WaitingHandler { + receiver: rx, + state: None, + }) + } +} + +pub struct WaitingHandler { + receiver: mpsc::Receiver, + state: Option, +} + +impl server::Handler for WaitingHandler { + fn on_request(&mut self, _request: server::Request) -> Next { + Next::wait() + } + + fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { + self.state = self.receiver.try_recv().ok(); + Next::write() + } + + fn on_response(&mut self, res: &mut server::Response) -> Next { + match self.state { + Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response(res), + Some(FetchState::Error(ref mut handler)) => handler.on_response(res), + _ => Next::end(), + } + } + + fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { + match self.state { + Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response_writable(encoder), + Some(FetchState::Error(ref mut handler)) => handler.on_response_writable(encoder), + _ => Next::end(), + } + } } pub struct ContentFetcherHandler { - abort: Arc, + fetch_control: Arc, control: Option, status: FetchState, client: Option, using_dapps_domains: bool, - dapp: H, + installer: H, } impl Drop for ContentFetcherHandler { fn drop(&mut self) { - let manifest = match self.status { - FetchState::Done(ref manifest) => Some(manifest), + let result = match self.status { + FetchState::Done(_, ref result, _) => Some(result.clone()), _ => None, }; - self.dapp.done(manifest); + self.installer.done(result); } } impl ContentFetcherHandler { pub fn new( - app: GithubApp, - abort: Arc, + url: String, control: Control, using_dapps_domains: bool, - handler: H) -> Self { + handler: H) -> (Self, Arc) { + let fetch_control = Arc::new(FetchControl::default()); let client = Client::new(); - ContentFetcherHandler { - abort: abort, + let handler = ContentFetcherHandler { + fetch_control: fetch_control.clone(), control: Some(control), client: Some(client), - status: FetchState::NotStarted(app), + status: FetchState::NotStarted(url), using_dapps_domains: using_dapps_domains, - dapp: handler, - } + installer: handler, + }; + + (handler, fetch_control) } fn close_client(client: &mut Option) { @@ -96,9 +178,8 @@ impl ContentFetcherHandler { .close(); } - - fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc, control: Control) -> Result, String> { - client.request(app.url(), abort, Box::new(move || { + fn fetch_content(client: &mut Client, url: &str, abort: Arc, control: Control) -> Result, String> { + client.request(url, abort, Box::new(move || { trace!(target: "dapps", "Fetching finished."); // Ignoring control errors let _ = control.ready(Next::read()); @@ -108,19 +189,16 @@ impl ContentFetcherHandler { impl server::Handler for ContentFetcherHandler { fn on_request(&mut self, request: 
server::Request) -> Next { - let status = if let FetchState::NotStarted(ref app) = self.status { + let status = if let FetchState::NotStarted(ref url) = self.status { Some(match *request.method() { // Start fetching content Method::Get => { - trace!(target: "dapps", "Fetching dapp: {:?}", app); + trace!(target: "dapps", "Fetching content from: {:?}", url); let control = self.control.take().expect("on_request is called only once, thus control is always Some"); let client = self.client.as_mut().expect("on_request is called before client is closed."); - let fetch = Self::fetch_app(client, app, self.abort.clone(), control); + let fetch = Self::fetch_content(client, url, self.fetch_control.abort.clone(), control); match fetch { - Ok(receiver) => FetchState::InProgress { - deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT), - receiver: receiver, - }, + Ok(receiver) => FetchState::InProgress(receiver), Err(e) => FetchState::Error(ContentHandler::error( StatusCode::BadGateway, "Unable To Start Dapp Download", @@ -140,6 +218,7 @@ impl server::Handler for ContentFetcherHandler< } else { None }; if let Some(status) = status { + self.fetch_control.set_status(&status); self.status = status; } @@ -149,49 +228,51 @@ impl server::Handler for ContentFetcherHandler< fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { let (status, next) = match self.status { // Request may time out - FetchState::InProgress { ref deadline, .. } if *deadline < Instant::now() => { + FetchState::InProgress(_) if self.fetch_control.deadline < Instant::now() => { trace!(target: "dapps", "Fetching dapp failed because of timeout."); let timeout = ContentHandler::error( StatusCode::GatewayTimeout, "Download Timeout", - &format!("Could not fetch dapp bundle within {} seconds.", FETCH_TIMEOUT), + &format!("Could not fetch content within {} seconds.", FETCH_TIMEOUT), None ); Self::close_client(&mut self.client); (Some(FetchState::Error(timeout)), Next::write()) }, - FetchState::InProgress { ref receiver, .. } => { + FetchState::InProgress(ref receiver) => { // Check if there is an answer let rec = receiver.try_recv(); match rec { // Unpack and validate Ok(Ok(path)) => { - trace!(target: "dapps", "Fetching dapp finished. Starting validation."); + trace!(target: "dapps", "Fetching content finished. 
Starting validation ({:?})", path); Self::close_client(&mut self.client); // Unpack and verify - let state = match self.dapp.validate_and_install(path.clone()) { + let state = match self.installer.validate_and_install(path.clone()) { Err(e) => { - trace!(target: "dapps", "Error while validating dapp: {:?}", e); + trace!(target: "dapps", "Error while validating content: {:?}", e); FetchState::Error(ContentHandler::error( StatusCode::BadGateway, "Invalid Dapp", - "Downloaded bundle does not contain a valid dapp.", + "Downloaded bundle does not contain a valid content.", Some(&format!("{:?}", e)) )) }, - Ok(manifest) => FetchState::Done(manifest) + Ok((id, result)) => { + let address = redirection_address(self.using_dapps_domains, &id); + FetchState::Done(id, result, Redirection::new(&address)) + }, }; // Remove temporary zip file - // TODO [todr] Uncomment me - // let _ = fs::remove_file(path); + let _ = fs::remove_file(path); (Some(state), Next::write()) }, Ok(Err(e)) => { - warn!(target: "dapps", "Unable to fetch new dapp: {:?}", e); + warn!(target: "dapps", "Unable to fetch content: {:?}", e); let error = ContentHandler::error( StatusCode::BadGateway, "Download Error", - "There was an error when fetching the dapp.", + "There was an error when fetching the content.", Some(&format!("{:?}", e)), ); (Some(FetchState::Error(error)), Next::write()) @@ -205,6 +286,7 @@ impl server::Handler for ContentFetcherHandler< }; if let Some(status) = status { + self.fetch_control.set_status(&status); self.status = status; } @@ -213,12 +295,7 @@ impl server::Handler for ContentFetcherHandler< fn on_response(&mut self, res: &mut server::Response) -> Next { match self.status { - FetchState::Done(ref manifest) => { - trace!(target: "dapps", "Fetching dapp finished. Redirecting to {}", manifest.id); - res.set_status(StatusCode::Found); - res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, &manifest.id))); - Next::write() - }, + FetchState::Done(_, _, ref mut handler) => handler.on_response(res), FetchState::Error(ref mut handler) => handler.on_response(res), _ => Next::end(), } @@ -226,9 +303,9 @@ impl server::Handler for ContentFetcherHandler< fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { match self.status { + FetchState::Done(_, _, ref mut handler) => handler.on_response_writable(encoder), FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), _ => Next::end(), } } } - diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index 6f6423b58..62b13eaa8 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -27,7 +27,7 @@ pub use self::auth::AuthRequiredHandler; pub use self::echo::EchoHandler; pub use self::content::ContentHandler; pub use self::redirect::Redirection; -pub use self::fetch::{ContentFetcherHandler, ContentValidator}; +pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl}; use url::Url; use hyper::{server, header, net, uri}; diff --git a/dapps/src/handlers/redirect.rs b/dapps/src/handlers/redirect.rs index 8b6158266..e43d32e24 100644 --- a/dapps/src/handlers/redirect.rs +++ b/dapps/src/handlers/redirect.rs @@ -20,15 +20,20 @@ use hyper::{header, server, Decoder, Encoder, Next}; use hyper::net::HttpStream; use hyper::status::StatusCode; +#[derive(Clone)] pub struct Redirection { to_url: String } impl Redirection { - pub fn new(url: &str) -> Box { - Box::new(Redirection { + pub fn new(url: &str) -> Self { + Redirection { to_url: url.to_owned() - }) + } + } + + pub fn 
boxed(url: &str) -> Box { + Box::new(Self::new(url)) } } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 87563a3ae..4dcf53a44 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -191,7 +191,7 @@ impl Server { ) -> Result { let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); - let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status)); + let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(apps::urlhint::URLHintContract::new(registrar), sync_status)); let endpoints = Arc::new(apps::all_endpoints(dapps_path)); let special = Arc::new({ let mut special = HashMap::new(); @@ -206,7 +206,7 @@ impl Server { .handle(move |ctrl| router::Router::new( ctrl, apps::main_page(), - apps_fetcher.clone(), + content_fetcher.clone(), endpoints.clone(), special.clone(), authorization.clone(), diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs index 86d4273d5..e34cc6434 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -17,20 +17,31 @@ use mime_guess; use std::io::{Seek, Read, SeekFrom}; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use page::handler; use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; +#[derive(Debug, Clone)] pub struct LocalPageEndpoint { path: PathBuf, - info: EndpointInfo, + mime: Option, + info: Option, } impl LocalPageEndpoint { pub fn new(path: PathBuf, info: EndpointInfo) -> Self { LocalPageEndpoint { path: path, - info: info, + mime: None, + info: Some(info), + } + } + + pub fn single_file(path: PathBuf, mime: String) -> Self { + LocalPageEndpoint { + path: path, + mime: Some(mime), + info: None, } } @@ -41,17 +52,40 @@ impl LocalPageEndpoint { impl Endpoint for LocalPageEndpoint { fn info(&self) -> Option<&EndpointInfo> { - Some(&self.info) + self.info.as_ref() } fn to_handler(&self, path: EndpointPath) -> Box { - Box::new(handler::PageHandler { - app: LocalDapp::new(self.path.clone()), - prefix: None, - path: path, - file: Default::default(), - safe_to_embed: false, - }) + if let Some(ref mime) = self.mime { + Box::new(handler::PageHandler { + app: LocalSingleFile { path: self.path.clone(), mime: mime.clone() }, + prefix: None, + path: path, + file: Default::default(), + safe_to_embed: false, + }) + } else { + Box::new(handler::PageHandler { + app: LocalDapp { path: self.path.clone() }, + prefix: None, + path: path, + file: Default::default(), + safe_to_embed: false, + }) + } + } +} + +struct LocalSingleFile { + path: PathBuf, + mime: String, +} + +impl handler::Dapp for LocalSingleFile { + type DappFile = LocalFile; + + fn file(&self, _path: &str) -> Option { + LocalFile::from_path(&self.path, Some(&self.mime)) } } @@ -59,14 +93,6 @@ struct LocalDapp { path: PathBuf, } -impl LocalDapp { - fn new(path: PathBuf) -> Self { - LocalDapp { - path: path - } - } -} - impl handler::Dapp for LocalDapp { type DappFile = LocalFile; @@ -75,18 +101,7 @@ impl handler::Dapp for LocalDapp { for part in file_path.split('/') { path.push(part); } - // Check if file exists - fs::File::open(path.clone()).ok().map(|file| { - let content_type = mime_guess::guess_mime_type(path); - let len = file.metadata().ok().map_or(0, |meta| meta.len()); - LocalFile { - content_type: content_type.to_string(), - buffer: [0; 4096], - file: file, - pos: 0, - len: len, - } - }) + LocalFile::from_path(&path, None) } } @@ -98,6 +113,24 @@ struct LocalFile { pos: u64, } +impl LocalFile { + fn from_path>(path: P, mime: Option<&str>) -> 
Option { + // Check if file exists + fs::File::open(&path).ok().map(|file| { + let content_type = mime.map(|mime| mime.to_owned()) + .unwrap_or_else(|| mime_guess::guess_mime_type(path).to_string()); + let len = file.metadata().ok().map_or(0, |meta| meta.len()); + LocalFile { + content_type: content_type, + buffer: [0; 4096], + file: file, + pos: 0, + len: len, + } + }) + } +} + impl handler::DappFile for LocalFile { fn content_type(&self) -> &str { &self.content_type diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index c93456d71..f54d6bf3d 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -27,7 +27,7 @@ use url::{Url, Host}; use hyper::{self, server, Next, Encoder, Decoder, Control, StatusCode}; use hyper::net::HttpStream; use apps; -use apps::fetcher::AppFetcher; +use apps::fetcher::ContentFetcher; use endpoint::{Endpoint, Endpoints, EndpointPath}; use handlers::{Redirection, extract_url, ContentHandler}; use self::auth::{Authorization, Authorized}; @@ -45,7 +45,7 @@ pub struct Router { control: Option, main_page: &'static str, endpoints: Arc, - fetch: Arc, + fetch: Arc, special: Arc>>, authorization: Arc, allowed_hosts: Option>, @@ -104,7 +104,7 @@ impl server::Handler for Router { // Redirect any GET request to home. _ if *req.method() == hyper::method::Method::Get => { let address = apps::redirection_address(false, self.main_page); - Redirection::new(address.as_str()) + Redirection::boxed(address.as_str()) }, // RPC by default _ => { @@ -136,7 +136,7 @@ impl Router { pub fn new( control: Control, main_page: &'static str, - app_fetcher: Arc, + content_fetcher: Arc, endpoints: Arc, special: Arc>>, authorization: Arc, @@ -148,7 +148,7 @@ impl Router { control: Some(control), main_page: main_page, endpoints: endpoints, - fetch: app_fetcher, + fetch: content_fetcher, special: special, authorization: authorization, allowed_hosts: allowed_hosts, diff --git a/dapps/src/tests/helpers.rs b/dapps/src/tests/helpers.rs index 4cd21520c..efbd24a8d 100644 --- a/dapps/src/tests/helpers.rs +++ b/dapps/src/tests/helpers.rs @@ -17,7 +17,7 @@ use std::env; use std::str; use std::sync::Arc; -use rustc_serialize::hex::{ToHex, FromHex}; +use rustc_serialize::hex::FromHex; use ServerBuilder; use Server; diff --git a/db/src/database.rs b/db/src/database.rs index 185618f99..9a52822f6 100644 --- a/db/src/database.rs +++ b/db/src/database.rs @@ -460,7 +460,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); client.close().unwrap(); @@ -477,7 +477,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); client.put("xxx".as_bytes(), "1".as_bytes()).unwrap(); @@ -498,7 +498,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); assert!(client.get("xxx".as_bytes()).unwrap().is_none()); @@ 
-516,7 +516,7 @@ mod client_tests { crossbeam::scope(move |scope| { let stop = Arc::new(AtomicBool::new(false)); run_worker(scope, stop.clone(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); let transaction = DBTransaction::new(); @@ -541,7 +541,7 @@ mod client_tests { let stop = StopGuard::new(); run_worker(&scope, stop.share(), url); - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::generic_client::>(url).unwrap(); client.open_default(path.as_str().to_owned()).unwrap(); let mut batch = Vec::new(); diff --git a/db/src/lib.rs.in b/db/src/lib.rs.in index 4fa43b977..54fccb097 100644 --- a/db/src/lib.rs.in +++ b/db/src/lib.rs.in @@ -66,13 +66,13 @@ pub fn extras_service_url(db_path: &str) -> Result { pub fn blocks_client(db_path: &str) -> Result { let url = try!(blocks_service_url(db_path)); - let client = try!(nanoipc::init_client::>(&url)); + let client = try!(nanoipc::generic_client::>(&url)); Ok(client) } pub fn extras_client(db_path: &str) -> Result { let url = try!(extras_service_url(db_path)); - let client = try!(nanoipc::init_client::>(&url)); + let client = try!(nanoipc::generic_client::>(&url)); Ok(client) } diff --git a/docker/ubuntu-aarch64/Dockerfile b/docker/ubuntu-aarch64/Dockerfile index aae09f71c..1f4159a54 100644 --- a/docker/ubuntu-aarch64/Dockerfile +++ b/docker/ubuntu-aarch64/Dockerfile @@ -23,15 +23,9 @@ RUN rustup target add aarch64-unknown-linux-gnu # show backtraces ENV RUST_BACKTRACE 1 -# set compilers -ENV CXX aarch64-linux-gnu-g++ -ENV CC aarch64-linux-gnu-gcc - # show tools RUN rustc -vV && \ - cargo -V && \ - gcc -v &&\ - g++ -v + cargo -V # build parity RUN git clone https://github.com/ethcore/parity && \ diff --git a/docker/ubuntu-arm/Dockerfile b/docker/ubuntu-arm/Dockerfile index 54a54ad55..6c2fa2852 100644 --- a/docker/ubuntu-arm/Dockerfile +++ b/docker/ubuntu-arm/Dockerfile @@ -23,15 +23,10 @@ RUN rustup target add armv7-unknown-linux-gnueabihf # show backtraces ENV RUST_BACKTRACE 1 -# set compilers -ENV CXX arm-linux-gnueabihf-g++ -ENV CC arm-linux-gnueabihf-gcc # show tools RUN rustc -vV && \ - cargo -V && \ - gcc -v &&\ - g++ -v + cargo -V # build parity RUN git clone https://github.com/ethcore/parity && \ diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index e1d603082..b6333902b 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -673,6 +673,8 @@ impl Client { impl snapshot::DatabaseRestore for Client { /// Restart the client with a new backend fn restore_db(&self, new_db: &str) -> Result<(), EthcoreError> { + trace!(target: "snapshot", "Replacing client database with {:?}", new_db); + let _import_lock = self.import_lock.lock(); let mut state_db = self.state_db.write(); let mut chain = self.chain.write(); diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 18dfeec46..5cb2be9ed 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -99,6 +99,8 @@ impl Engine for BasicAuthority { /// This assumes that all uncles are valid uncles (i.e. of at least one generation before the current). fn on_close_block(&self, _block: &mut ExecutedBlock) {} + fn seals_internally(&self) -> bool { true } + /// Attempt to seal the block internally. 
/// /// This operation is synchronous and may (quite reasonably) not be available, in which `false` will diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 3c95f3465..e88c1d102 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -58,6 +58,8 @@ impl Engine for InstantSeal { Schedule::new_homestead() } + fn seals_internally(&self) -> bool { true } + fn generate_seal(&self, _block: &ExecutedBlock, _accounts: Option<&AccountProvider>) -> Option> { Some(Vec::new()) } @@ -71,18 +73,12 @@ mod tests { use spec::Spec; use block::*; - /// Create a new test chain spec with `BasicAuthority` consensus engine. - fn new_test_instant() -> Spec { - let bytes: &[u8] = include_bytes!("../../res/instant_seal.json"); - Spec::load(bytes).expect("invalid chain spec") - } - #[test] fn instant_can_seal() { let tap = AccountProvider::transient_provider(); let addr = tap.insert_account("".sha3(), "").unwrap(); - let spec = new_test_instant(); + let spec = Spec::new_test_instant(); let engine = &*spec.engine; let genesis_header = spec.genesis_header(); let mut db_result = get_temp_journal_db(); @@ -98,7 +94,7 @@ mod tests { #[test] fn instant_cant_verify() { - let engine = new_test_instant().engine; + let engine = Spec::new_test_instant().engine; let mut header: Header = Header::default(); assert!(engine.verify_block_basic(&header, None).is_ok()); diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 6414ba5e4..0394426ce 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -71,6 +71,8 @@ pub trait Engine : Sync + Send { /// Block transformation functions, after the transactions. fn on_close_block(&self, _block: &mut ExecutedBlock) {} + /// If true, generate_seal has to be implemented. + fn seals_internally(&self) -> bool { false } /// Attempt to seal the block internally. /// /// If `Some` is returned, then you get a valid seal. diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index c9d60f075..152b0e994 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -24,7 +24,7 @@ use views::{BlockView, HeaderView}; use state::State; use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics}; use executive::contract_address; -use block::{ClosedBlock, IsBlock, Block}; +use block::{ClosedBlock, SealedBlock, IsBlock, Block}; use error::*; use transaction::{Action, SignedTransaction}; use receipt::{Receipt, RichReceipt}; @@ -34,6 +34,7 @@ use miner::{MinerService, MinerStatus, TransactionQueue, AccountDetails, Transac use miner::work_notify::WorkPoster; use client::TransactionImportResult; use miner::price_info::PriceInfo; +use header::BlockNumber; /// Different possible definitions for pending transaction set. #[derive(Debug, PartialEq)] @@ -165,6 +166,7 @@ struct SealingWork { } /// Keeps track of transactions using priority queue and holds currently mined block. +/// Handles preparing work for "work sealing" or seals "internally" if Engine does not require work. pub struct Miner { // NOTE [ToDr] When locking always lock in this order! transaction_queue: Arc>, @@ -243,19 +245,15 @@ impl Miner { } /// Prepares new block for sealing including top transactions from queue. 
- #[cfg_attr(feature="dev", allow(match_same_arms))] - #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] - fn prepare_sealing(&self, chain: &MiningBlockChainClient) { - trace!(target: "miner", "prepare_sealing: entering"); - + fn prepare_block(&self, chain: &MiningBlockChainClient) -> (ClosedBlock, Option) { { - trace!(target: "miner", "recalibrating..."); + trace!(target: "miner", "prepare_block: recalibrating..."); let txq = self.transaction_queue.clone(); self.gas_pricer.lock().recalibrate(move |price| { - trace!(target: "miner", "Got gas price! {}", price); + trace!(target: "miner", "prepare_block: Got gas price! {}", price); txq.lock().set_minimal_gas_price(price); }); - trace!(target: "miner", "done recalibration."); + trace!(target: "miner", "prepare_block: done recalibration."); } let (transactions, mut open_block, original_work_hash) = { @@ -273,13 +271,13 @@ impl Miner { */ let open_block = match sealing_work.queue.pop_if(|b| b.block().fields().header.parent_hash() == &best_hash) { Some(old_block) => { - trace!(target: "miner", "Already have previous work; updating and returning"); + trace!(target: "miner", "prepare_block: Already have previous work; updating and returning"); // add transactions to old_block old_block.reopen(&*self.engine) } None => { // block not found - create it. - trace!(target: "miner", "No existing work - making new block"); + trace!(target: "miner", "prepare_block: No existing work - making new block"); chain.prepare_open_block( self.author(), (self.gas_floor_target(), self.gas_ceil_target()), @@ -334,37 +332,79 @@ impl Miner { queue.remove_invalid(&hash, &fetch_account); } } + (block, original_work_hash) + } - if !block.transactions().is_empty() { - trace!(target: "miner", "prepare_sealing: block has transaction - attempting internal seal."); - // block with transactions - see if we can seal immediately. - let s = self.engine.generate_seal(block.block(), match self.accounts { - Some(ref x) => Some(&**x), - None => None, - }); - if let Some(seal) = s { - trace!(target: "miner", "prepare_sealing: managed internal seal. importing..."); - if let Ok(sealed) = block.lock().try_seal(&*self.engine, seal) { - if let Ok(_) = chain.import_block(sealed.rlp_bytes()) { - trace!(target: "miner", "prepare_sealing: sealed internally and imported. leaving."); - } else { - warn!("prepare_sealing: ERROR: could not import internally sealed block. WTF?"); - } - } else { - warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?"); - } - return; + /// Check is reseal is allowed and necessary. 
+ fn requires_reseal(&self, best_block: BlockNumber) -> bool { + let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); + let mut sealing_work = self.sealing_work.lock(); + if sealing_work.enabled { + trace!(target: "miner", "requires_reseal: sealing enabled"); + let last_request = *self.sealing_block_last_request.lock(); + let should_disable_sealing = !self.forced_sealing() + && !has_local_transactions + && best_block > last_request + && best_block - last_request > SEALING_TIMEOUT_IN_BLOCKS; + + trace!(target: "miner", "requires_reseal: should_disable_sealing={}; best_block={}, last_request={}", should_disable_sealing, best_block, last_request); + + if should_disable_sealing { + trace!(target: "miner", "Miner sleeping (current {}, last {})", best_block, last_request); + sealing_work.enabled = false; + sealing_work.queue.reset(); + false } else { - trace!(target: "miner", "prepare_sealing: unable to generate seal internally"); + // sealing enabled and we don't want to sleep. + *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; + true + } + } else { + // sealing is disabled. + false + } + } + + /// Attempts to perform internal sealing (one that does not require work) to return Ok(sealed), + /// Err(Some(block)) returns for unsuccesful sealing while Err(None) indicates misspecified engine. + fn seal_block_internally(&self, block: ClosedBlock) -> Result> { + trace!(target: "miner", "seal_block_internally: block has transaction - attempting internal seal."); + let s = self.engine.generate_seal(block.block(), match self.accounts { + Some(ref x) => Some(&**x), + None => None, + }); + if let Some(seal) = s { + trace!(target: "miner", "seal_block_internally: managed internal seal. importing..."); + block.lock().try_seal(&*self.engine, seal).or_else(|_| { + warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal. WTF?"); + Err(None) + }) + } else { + trace!(target: "miner", "seal_block_internally: unable to generate seal internally"); + Err(Some(block)) + } + } + + /// Uses Engine to seal the block internally and then imports it to chain. + fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool { + if !block.transactions().is_empty() { + if let Ok(sealed) = self.seal_block_internally(block) { + if chain.import_block(sealed.rlp_bytes()).is_ok() { + return true + } } } + false + } + /// Prepares work which has to be done to seal. 
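With `requires_reseal`, `seal_block_internally`, and `seal_and_import_block_internally` factored out, `update_sealing` (shown further below) reduces to: prepare a block, then either seal it internally or publish it as miner work, depending on `Engine::seals_internally()`. A compressed, compilable sketch of that branch follows, using stand-in `Engine`/`Block`/`Miner` types that only mirror the shapes in this diff:

```rust
// Stand-in types; the real Miner works with Engine, ClosedBlock and MiningBlockChainClient.
trait Engine {
    fn seals_internally(&self) -> bool;
    fn generate_seal(&self, _block: &Block) -> Option<Vec<Vec<u8>>>;
}

struct InstantSeal;
impl Engine for InstantSeal {
    fn seals_internally(&self) -> bool { true }
    fn generate_seal(&self, _block: &Block) -> Option<Vec<Vec<u8>>> { Some(Vec::new()) }
}

struct Block;

struct Miner<E: Engine> {
    engine: E,
}

impl<E: Engine> Miner<E> {
    fn prepare_block(&self) -> Block { Block }

    // Mirrors seal_block_internally: Ok on success, Err hands the block back.
    fn seal_block_internally(&self, block: Block) -> Result<Block, Block> {
        match self.engine.generate_seal(&block) {
            Some(_seal) => Ok(block), // the real code calls block.lock().try_seal(engine, seal)
            None => Err(block),
        }
    }

    fn update_sealing(&self) -> &'static str {
        let block = self.prepare_block();
        if self.engine.seals_internally() {
            // InstantSeal / BasicAuthority path: seal now, then import into the chain.
            match self.seal_block_internally(block) {
                Ok(_sealed) => "sealed internally, ready to import",
                Err(_block) => "engine declined to seal",
            }
        } else {
            // Proof-of-work path: hand the block to the work queue for external miners.
            "work prepared for external sealing"
        }
    }
}

fn main() {
    let miner = Miner { engine: InstantSeal };
    assert_eq!(miner.update_sealing(), "sealed internally, ready to import");
}
```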
+ fn prepare_work(&self, block: ClosedBlock, original_work_hash: Option) { let (work, is_new) = { let mut sealing_work = self.sealing_work.lock(); let last_work_hash = sealing_work.queue.peek_last_ref().map(|pb| pb.block().fields().header.hash()); - trace!(target: "miner", "Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); + trace!(target: "miner", "prepare_work: Checking whether we need to reseal: orig={:?} last={:?}, this={:?}", original_work_hash, last_work_hash, block.block().fields().header.hash()); let (work, is_new) = if last_work_hash.map_or(true, |h| h != block.block().fields().header.hash()) { - trace!(target: "miner", "Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); + trace!(target: "miner", "prepare_work: Pushing a new, refreshed or borrowed pending {}...", block.block().fields().header.hash()); let pow_hash = block.block().fields().header.hash(); let number = block.block().fields().header.number(); let difficulty = *block.block().fields().header.difficulty(); @@ -378,7 +418,7 @@ impl Miner { } else { (None, false) }; - trace!(target: "miner", "prepare_sealing: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash())); + trace!(target: "miner", "prepare_work: leaving (last={:?})", sealing_work.queue.peek_last_ref().map(|b| b.block().fields().header.hash())); (work, is_new) }; if is_new { @@ -392,13 +432,13 @@ impl Miner { queue.set_gas_limit(gas_limit); } - /// Returns true if we had to prepare new pending block - fn enable_and_prepare_sealing(&self, chain: &MiningBlockChainClient) -> bool { - trace!(target: "miner", "enable_and_prepare_sealing: entering"); + /// Returns true if we had to prepare new pending block. + fn prepare_work_sealing(&self, chain: &MiningBlockChainClient) -> bool { + trace!(target: "miner", "prepare_work_sealing: entering"); let prepare_new = { let mut sealing_work = self.sealing_work.lock(); let have_work = sealing_work.queue.peek_last_ref().is_some(); - trace!(target: "miner", "enable_and_prepare_sealing: have_work={}", have_work); + trace!(target: "miner", "prepare_work_sealing: have_work={}", have_work); if !have_work { sealing_work.enabled = true; true @@ -411,12 +451,13 @@ impl Miner { // | NOTE Code below requires transaction_queue and sealing_work locks. | // | Make sure to release the locks before calling that method. 
| // -------------------------------------------------------------------------- - self.prepare_sealing(chain); + let (block, original_work_hash) = self.prepare_block(chain); + self.prepare_work(block, original_work_hash); } let mut sealing_block_last_request = self.sealing_block_last_request.lock(); let best_number = chain.chain_info().best_block_number; if *sealing_block_last_request != best_number { - trace!(target: "miner", "enable_and_prepare_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number); + trace!(target: "miner", "prepare_work_sealing: Miner received request (was {}, now {}) - waking up.", *sealing_block_last_request, best_number); *sealing_block_last_request = best_number; } @@ -635,7 +676,7 @@ impl MinerService for Miner { trace!(target: "own_tx", "Importing transaction: {:?}", transaction); let imported = { - // Be sure to release the lock before we call enable_and_prepare_sealing + // Be sure to release the lock before we call prepare_work_sealing let mut transaction_queue = self.transaction_queue.lock(); let import = self.add_transactions_to_queue( chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue @@ -661,11 +702,11 @@ impl MinerService for Miner { // -------------------------------------------------------------------------- if imported.is_ok() && self.options.reseal_on_own_tx && self.tx_reseal_allowed() { // Make sure to do it after transaction is imported and lock is droped. - // We need to create pending block and enable sealing - let prepared = self.enable_and_prepare_sealing(chain); - // If new block has not been prepared (means we already had one) - // we need to update sealing - if !prepared { + // We need to create pending block and enable sealing. + if self.engine.seals_internally() || !self.prepare_work_sealing(chain) { + // If new block has not been prepared (means we already had one) + // or Engine might be able to seal internally, + // we need to update sealing. self.update_sealing(chain); } } @@ -767,44 +808,26 @@ impl MinerService for Miner { self.transaction_queue.lock().last_nonce(address) } + + /// Update sealing if required. + /// Prepare the block and work if the Engine does not seal internally. fn update_sealing(&self, chain: &MiningBlockChainClient) { trace!(target: "miner", "update_sealing"); - let requires_reseal = { - let has_local_transactions = self.transaction_queue.lock().has_local_pending_transactions(); - let mut sealing_work = self.sealing_work.lock(); - if sealing_work.enabled { - trace!(target: "miner", "update_sealing: sealing enabled"); - let current_no = chain.chain_info().best_block_number; - let last_request = *self.sealing_block_last_request.lock(); - let should_disable_sealing = !self.forced_sealing() - && !has_local_transactions - && current_no > last_request - && current_no - last_request > SEALING_TIMEOUT_IN_BLOCKS; - trace!(target: "miner", "update_sealing: should_disable_sealing={}; current_no={}, last_request={}", should_disable_sealing, current_no, last_request); - - if should_disable_sealing { - trace!(target: "miner", "Miner sleeping (current {}, last {})", current_no, last_request); - sealing_work.enabled = false; - sealing_work.queue.reset(); - false - } else { - // sealing enabled and we don't want to sleep. - *self.next_allowed_reseal.lock() = Instant::now() + self.options.reseal_min_period; - true - } - } else { - // sealing is disabled. 
- false - } - }; - - if requires_reseal { + if self.requires_reseal(chain.chain_info().best_block_number) { // -------------------------------------------------------------------------- // | NOTE Code below requires transaction_queue and sealing_work locks. | // | Make sure to release the locks before calling that method. | // -------------------------------------------------------------------------- - self.prepare_sealing(chain); + trace!(target: "miner", "update_sealing: preparing a block"); + let (block, original_work_hash) = self.prepare_block(chain); + if self.engine.seals_internally() { + trace!(target: "miner", "update_sealing: engine indicates internal sealing"); + self.seal_and_import_block_internally(chain, block); + } else { + trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); + self.prepare_work(block, original_work_hash); + } } } @@ -814,7 +837,7 @@ impl MinerService for Miner { fn map_sealing_work(&self, chain: &MiningBlockChainClient, f: F) -> Option where F: FnOnce(&ClosedBlock) -> T { trace!(target: "miner", "map_sealing_work: entering"); - self.enable_and_prepare_sealing(chain); + self.prepare_work_sealing(chain); trace!(target: "miner", "map_sealing_work: sealing prepared"); let mut sealing_work = self.sealing_work.lock(); let ret = sealing_work.queue.use_last_ref(); @@ -917,11 +940,12 @@ mod tests { use super::*; use util::*; use ethkey::{Generator, Random}; - use client::{TestBlockChainClient, EachBlockWith}; - use client::{TransactionImportResult}; - use types::transaction::{Transaction, Action}; + use client::{BlockChainClient, TestBlockChainClient, EachBlockWith, TransactionImportResult}; + use header::BlockNumber; + use types::transaction::{Transaction, SignedTransaction, Action}; use block::*; use spec::Spec; + use tests::helpers::{generate_dummy_client}; #[test] fn should_prepare_block_to_seal() { @@ -975,23 +999,24 @@ mod tests { )).ok().expect("Miner was just created.") } + fn transaction() -> SignedTransaction { + let keypair = Random.generate().unwrap(); + Transaction { + action: Action::Create, + value: U256::zero(), + data: "3331600055".from_hex().unwrap(), + gas: U256::from(100_000), + gas_price: U256::zero(), + nonce: U256::zero(), + }.sign(keypair.secret()) + } + #[test] fn should_make_pending_block_when_importing_own_transaction() { // given let client = TestBlockChainClient::default(); let miner = miner(); - let transaction = { - let keypair = Random.generate().unwrap(); - Transaction { - action: Action::Create, - value: U256::zero(), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero(), - }.sign(keypair.secret()) - }; - + let transaction = transaction(); // when let res = miner.import_own_transaction(&client, transaction); @@ -1002,7 +1027,7 @@ mod tests { assert_eq!(miner.pending_transactions_hashes().len(), 1); assert_eq!(miner.pending_receipts().len(), 1); // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.enable_and_prepare_sealing(&client), false); + assert_eq!(miner.prepare_work_sealing(&client), false); } #[test] @@ -1010,18 +1035,7 @@ mod tests { // given let client = TestBlockChainClient::default(); let miner = miner(); - let transaction = { - let keypair = Random.generate().unwrap(); - Transaction { - action: Action::Create, - value: U256::zero(), - data: "3331600055".from_hex().unwrap(), - gas: U256::from(100_000), - gas_price: U256::zero(), - nonce: U256::zero(), - 
}.sign(keypair.secret()) - }; - + let transaction = transaction(); // when let res = miner.import_external_transactions(&client, vec![transaction]).pop().unwrap(); @@ -1032,6 +1046,31 @@ mod tests { assert_eq!(miner.pending_transactions().len(), 0); assert_eq!(miner.pending_receipts().len(), 0); // This method will let us know if pending block was created (before calling that method) - assert_eq!(miner.enable_and_prepare_sealing(&client), true); + assert_eq!(miner.prepare_work_sealing(&client), true); + } + + #[test] + fn internal_seals_without_work() { + let miner = Miner::with_spec(&Spec::new_test_instant()); + { + let mut sealing_work = miner.sealing_work.lock(); + sealing_work.enabled = true; + } + let c = generate_dummy_client(2); + let client = c.reference().as_ref(); + + assert_eq!(miner.import_external_transactions(client, vec![transaction()]).pop().unwrap().unwrap(), TransactionImportResult::Current); + + miner.update_sealing(client); + client.flush_queue(); + assert!(miner.pending_block().is_none()); + assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber); + + assert_eq!(miner.import_own_transaction(client, transaction()).unwrap(), TransactionImportResult::Current); + + miner.update_sealing(client); + client.flush_queue(); + assert!(miner.pending_block().is_none()); + assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber); } } diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index fb00a8aa8..4b1e8abec 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -81,6 +81,7 @@ //! - It removes all transactions (either from `current` or `future`) with nonce < client nonce //! - It moves matching `future` transactions to `current` +use std::ops::Deref; use std::cmp::Ordering; use std::cmp; use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap}; @@ -215,7 +216,48 @@ impl VerifiedTransaction { } fn sender(&self) -> Address { - self.transaction.sender().unwrap() + self.transaction.sender().expect("Sender is verified in new; qed") + } +} + +#[derive(Debug, Default)] +struct GasPriceQueue { + backing: BTreeMap>, +} + +impl GasPriceQueue { + /// Insert an item into a BTreeMap/HashSet "multimap". + pub fn insert(&mut self, gas_price: U256, hash: H256) -> bool { + self.backing.entry(gas_price).or_insert_with(Default::default).insert(hash) + } + + /// Remove an item from a BTreeMap/HashSet "multimap". + /// Returns true if the item was removed successfully. + pub fn remove(&mut self, gas_price: &U256, hash: &H256) -> bool { + if let Some(mut hashes) = self.backing.get_mut(gas_price) { + let only_one_left = hashes.len() == 1; + if !only_one_left { + // Operation may be ok: only if hash is in gas-price's Set. + return hashes.remove(hash); + } + if hash != hashes.iter().next().expect("We know there is only one element in collection, tested above; qed") { + // Operation failed: hash not the single item in gas-price's Set. + return false; + } + } else { + // Operation failed: gas-price not found in Map. + return false; + } + // Operation maybe ok: only if hash not found in gas-price Set. 
+ self.backing.remove(gas_price).is_some() + } +} + +impl Deref for GasPriceQueue { + type Target=BTreeMap>; + + fn deref(&self) -> &Self::Target { + &self.backing } } @@ -227,7 +269,7 @@ impl VerifiedTransaction { struct TransactionSet { by_priority: BTreeSet, by_address: Table, - by_gas_price: BTreeMap>, + by_gas_price: GasPriceQueue, limit: usize, } @@ -245,12 +287,12 @@ impl TransactionSet { // If transaction was replaced remove it from priority queue if let Some(ref old_order) = by_address_replaced { assert!(self.by_priority.remove(old_order), "hash is in `by_address`; all transactions in `by_address` must be in `by_priority`; qed"); - assert!(Self::remove_item(&mut self.by_gas_price, &old_order.gas_price, &old_order.hash), + assert!(self.by_gas_price.remove(&old_order.gas_price, &old_order.hash), "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); } - Self::insert_item(&mut self.by_gas_price, order_gas_price, order_hash); - debug_assert_eq!(self.by_priority.len(), self.by_address.len()); - debug_assert_eq!(self.by_gas_price.iter().map(|(_, v)| v.len()).fold(0, |a, b| a + b), self.by_address.len()); + self.by_gas_price.insert(order_gas_price, order_hash); + assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); by_address_replaced } @@ -263,6 +305,7 @@ impl TransactionSet { if len <= self.limit { return None; } + let to_drop : Vec<(Address, U256)> = { self.by_priority .iter() @@ -290,13 +333,16 @@ impl TransactionSet { /// Drop transaction from this set (remove from `by_priority` and `by_address`) fn drop(&mut self, sender: &Address, nonce: &U256) -> Option { if let Some(tx_order) = self.by_address.remove(sender, nonce) { - assert!(Self::remove_item(&mut self.by_gas_price, &tx_order.gas_price, &tx_order.hash), + assert!(self.by_gas_price.remove(&tx_order.gas_price, &tx_order.hash), "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); - self.by_priority.remove(&tx_order); + assert!(self.by_priority.remove(&tx_order), + "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_priority`; qed"); assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); return Some(tx_order); } assert_eq!(self.by_priority.len(), self.by_address.len()); + assert_eq!(self.by_gas_price.values().map(|v| v.len()).fold(0, |a, b| a + b), self.by_address.len()); None } @@ -304,7 +350,7 @@ impl TransactionSet { fn clear(&mut self) { self.by_priority.clear(); self.by_address.clear(); - self.by_gas_price.clear(); + self.by_gas_price.backing.clear(); } /// Sets new limit for number of transactions in this `TransactionSet`. @@ -321,32 +367,6 @@ impl TransactionSet { _ => U256::default(), } } - - /// Insert an item into a BTreeMap/HashSet "multimap". - fn insert_item(into: &mut BTreeMap>, gas_price: U256, hash: H256) -> bool { - into.entry(gas_price).or_insert_with(Default::default).insert(hash) - } - - /// Remove an item from a BTreeMap/HashSet "multimap". - /// Returns true if the item was removed successfully. - fn remove_item(from: &mut BTreeMap>, gas_price: &U256, hash: &H256) -> bool { - if let Some(mut hashes) = from.get_mut(gas_price) { - let only_one_left = hashes.len() == 1; - if !only_one_left { - // Operation may be ok: only if hash is in gas-price's Set. 
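The GasPriceQueue introduced above gives the former insert_item/remove_item free functions a home and keeps the BTreeMap-of-HashSets invariant (no empty buckets) in one place. A self-contained sketch of the same multimap bookkeeping, with plain u64 values standing in for U256 gas prices and H256 hashes:

use std::collections::{BTreeMap, HashSet};
use std::ops::Deref;

/// Transaction hashes indexed by gas price; several hashes may share a price.
#[derive(Debug, Default)]
struct GasPriceQueue {
    backing: BTreeMap<u64, HashSet<u64>>,
}

impl GasPriceQueue {
    /// Insert a hash under its gas price; returns false if it was already there.
    fn insert(&mut self, gas_price: u64, hash: u64) -> bool {
        self.backing.entry(gas_price).or_insert_with(HashSet::new).insert(hash)
    }

    /// Remove a hash, dropping the whole gas-price bucket once it empties
    /// so that empty sets never linger in the map.
    fn remove(&mut self, gas_price: &u64, hash: &u64) -> bool {
        if let Some(hashes) = self.backing.get_mut(gas_price) {
            if hashes.len() > 1 {
                return hashes.remove(hash);
            }
            if hashes.iter().next() != Some(hash) {
                return false;
            }
        } else {
            return false;
        }
        // Exactly one matching entry was left: remove the bucket itself.
        self.backing.remove(gas_price).is_some()
    }
}

impl Deref for GasPriceQueue {
    type Target = BTreeMap<u64, HashSet<u64>>;
    fn deref(&self) -> &Self::Target { &self.backing }
}

fn main() {
    let mut queue = GasPriceQueue::default();
    queue.insert(20, 1);
    queue.insert(20, 2);
    queue.insert(30, 3);
    assert!(queue.remove(&20, &1));
    assert!(queue.remove(&20, &2));
    // The bucket for gas price 20 is gone, not left behind as an empty set.
    assert_eq!(queue.len(), 1);
    assert!(!queue.remove(&20, &2));
}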
- return hashes.remove(hash); - } - if hashes.iter().next().unwrap() != hash { - // Operation failed: hash not the single item in gas-price's Set. - return false; - } - } else { - // Operation failed: gas-price not found in Map. - return false; - } - // Operation maybe ok: only if hash not found in gas-price Set. - from.remove(gas_price).is_some() - } } #[derive(Debug)] @@ -588,7 +608,7 @@ impl TransactionQueue { return; } - let transaction = transaction.unwrap(); + let transaction = transaction.expect("None is tested in early-exit condition above; qed"); let sender = transaction.sender(); let nonce = transaction.nonce(); let current_nonce = fetch_account(&sender).nonce; @@ -623,7 +643,7 @@ impl TransactionQueue { None => vec![], }; for k in all_nonces_from_sender { - let order = self.future.drop(sender, &k).unwrap(); + let order = self.future.drop(sender, &k).expect("iterating over a collection that has been retrieved above; qed"); if k >= current_nonce { self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { @@ -644,7 +664,8 @@ impl TransactionQueue { for k in all_nonces_from_sender { // Goes to future or is removed - let order = self.current.drop(sender, &k).unwrap(); + let order = self.current.drop(sender, &k).expect("iterating over a collection that has been retrieved above; + qed"); if k >= current_nonce { self.future.insert(*sender, k, order.update_height(k, current_nonce)); } else { @@ -704,10 +725,11 @@ impl TransactionQueue { if let None = by_nonce { return; } - let mut by_nonce = by_nonce.unwrap(); + let mut by_nonce = by_nonce.expect("None is tested in early-exit condition above; qed"); while let Some(order) = by_nonce.remove(¤t_nonce) { - // remove also from priority and hash + // remove also from priority and gas_price self.future.by_priority.remove(&order); + self.future.by_gas_price.remove(&order.gas_price, &order.hash); // Put to current let order = order.update_height(current_nonce, first_nonce); self.current.insert(address, current_nonce, order); @@ -1395,6 +1417,9 @@ mod test { let stats = txq.status(); assert_eq!(stats.pending, 3); assert_eq!(stats.future, 0); + assert_eq!(txq.future.by_priority.len(), 0); + assert_eq!(txq.future.by_address.len(), 0); + assert_eq!(txq.future.by_gas_price.len(), 0); } #[test] diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index a2b483d40..9fa126cc7 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -94,7 +94,6 @@ impl ClientService { pruning: pruning, channel: io_service.channel(), snapshot_root: snapshot_path.into(), - client_db: client_path.into(), db_restore: client.clone(), }; let snapshot = Arc::new(try!(SnapshotService::new(snapshot_params))); @@ -187,7 +186,7 @@ impl IoHandler for ClientIoHandler { ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); } ClientIoMessage::BeginRestoration(ref manifest) => { - if let Err(e) = self.snapshot.init_restore(manifest.clone()) { + if let Err(e) = self.snapshot.init_restore(manifest.clone(), true) { warn!("Failed to initialize snapshot restoration: {}", e); } } diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 2a186378f..78a065958 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -35,7 +35,7 @@ use service::ClientIoMessage; use io::IoChannel; -use util::{Bytes, H256, Mutex, RwLock, UtilError}; +use util::{Bytes, H256, Mutex, RwLock, 
RwLockReadGuard, UtilError}; use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; @@ -70,7 +70,7 @@ struct Restoration { block_chunks_left: HashSet, state: StateRebuilder, blocks: BlockRebuilder, - writer: LooseWriter, + writer: Option, snappy_buffer: Bytes, final_state_root: H256, guard: Guard, @@ -80,8 +80,8 @@ struct RestorationParams<'a> { manifest: ManifestData, // manifest to base restoration on. pruning: Algorithm, // pruning algorithm for the database. db_path: PathBuf, // database path - db_config: &'a DatabaseConfig, - writer: LooseWriter, // writer for recovered snapshot. + db_config: &'a DatabaseConfig, // configuration for the database. + writer: Option, // writer for recovered snapshot. genesis: &'a [u8], // genesis block of the chain. guard: Guard, // guard for the restoration directory. } @@ -120,7 +120,10 @@ impl Restoration { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); try!(self.state.feed(&self.snappy_buffer[..len])); - try!(self.writer.write_state_chunk(hash, chunk)); + + if let Some(ref mut writer) = self.writer.as_mut() { + try!(writer.write_state_chunk(hash, chunk)); + } } Ok(()) @@ -132,7 +135,9 @@ impl Restoration { let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); try!(self.blocks.feed(&self.snappy_buffer[..len], engine)); - try!(self.writer.write_block_chunk(hash, chunk)); + if let Some(ref mut writer) = self.writer.as_mut() { + try!(writer.write_block_chunk(hash, chunk)); + } } Ok(()) @@ -157,7 +162,9 @@ impl Restoration { // connect out-of-order chunks. self.blocks.glue_chunks(); - try!(self.writer.finish(self.manifest)); + if let Some(writer) = self.writer { + try!(writer.finish(self.manifest)); + } self.guard.disarm(); Ok(()) @@ -187,9 +194,6 @@ pub struct ServiceParams { /// The directory to put snapshots in. /// Usually "/snapshot" pub snapshot_root: PathBuf, - /// The client's database directory. - /// Usually "//db". - pub client_db: PathBuf, /// A handle for database restoration. pub db_restore: Arc, } @@ -198,7 +202,6 @@ pub struct ServiceParams { /// This controls taking snapshots and restoring from them. pub struct Service { restoration: Mutex>, - client_db: PathBuf, snapshot_root: PathBuf, db_config: DatabaseConfig, io_channel: Channel, @@ -219,7 +222,6 @@ impl Service { pub fn new(params: ServiceParams) -> Result { let mut service = Service { restoration: Mutex::new(None), - client_db: params.client_db, snapshot_root: params.snapshot_root, db_config: params.db_config, io_channel: params.channel, @@ -301,11 +303,15 @@ impl Service { fn replace_client_db(&self) -> Result<(), Error> { let our_db = self.restoration_db(); - trace!(target: "snapshot", "replacing {:?} with {:?}", self.client_db, our_db); - try!(self.db_restore.restore_db(our_db.to_str().unwrap())); + try!(self.db_restore.restore_db(&*our_db.to_string_lossy())); Ok(()) } + /// Get a reference to the snapshot reader. + pub fn reader(&self) -> RwLockReadGuard> { + self.reader.read() + } + /// Tick the snapshot service. This will log any active snapshot /// being taken. pub fn tick(&self) { @@ -348,6 +354,10 @@ impl Service { // destroy the old snapshot reader. *reader = None; + if snapshot_dir.exists() { + try!(fs::remove_dir_all(&snapshot_dir)); + } + try!(fs::rename(temp_dir, &snapshot_dir)); *reader = Some(try!(LooseReader::new(snapshot_dir))); @@ -357,11 +367,15 @@ impl Service { } /// Initialize the restoration synchronously. 
- pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> { + /// The recover flag indicates whether to recover the restored snapshot. + pub fn init_restore(&self, manifest: ManifestData, recover: bool) -> Result<(), Error> { let rest_dir = self.restoration_dir(); let mut res = self.restoration.lock(); + self.state_chunks.store(0, Ordering::SeqCst); + self.block_chunks.store(0, Ordering::SeqCst); + // tear down existing restoration. *res = None; @@ -376,7 +390,10 @@ impl Service { try!(fs::create_dir_all(&rest_dir)); // make new restoration. - let writer = try!(LooseWriter::new(self.temp_recovery_dir())); + let writer = match recover { + true => Some(try!(LooseWriter::new(self.temp_recovery_dir()))), + false => None + }; let params = RestorationParams { manifest: manifest, @@ -391,8 +408,8 @@ impl Service { *res = Some(try!(Restoration::new(params))); *self.status.lock() = RestorationStatus::Ongoing { - state_chunks_done: self.state_chunks.load(Ordering::Relaxed) as u32, - block_chunks_done: self.block_chunks.load(Ordering::Relaxed) as u32, + state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32, + block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32, }; Ok(()) } @@ -403,35 +420,30 @@ impl Service { fn finalize_restoration(&self, rest: &mut Option) -> Result<(), Error> { trace!(target: "snapshot", "finalizing restoration"); - self.state_chunks.store(0, Ordering::SeqCst); - self.block_chunks.store(0, Ordering::SeqCst); + let recover = rest.as_ref().map_or(false, |rest| rest.writer.is_some()); // destroy the restoration before replacing databases and snapshot. try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(()))); try!(self.replace_client_db()); - let mut reader = self.reader.write(); - *reader = None; // destroy the old reader if it existed. + if recover { + let mut reader = self.reader.write(); + *reader = None; // destroy the old reader if it existed. 
- let snapshot_dir = self.snapshot_dir(); + let snapshot_dir = self.snapshot_dir(); - trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); - if let Err(e) = fs::remove_dir_all(&snapshot_dir) { - match e.kind() { - ErrorKind::NotFound => {} - _ => return Err(e.into()), + if snapshot_dir.exists() { + trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); + try!(fs::remove_dir_all(&snapshot_dir)); } + + trace!(target: "snapshot", "copying restored snapshot files over"); + try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); + + *reader = Some(try!(LooseReader::new(snapshot_dir))); } - try!(fs::create_dir(&snapshot_dir)); - - trace!(target: "snapshot", "copying restored snapshot files over"); - try!(fs::rename(self.temp_recovery_dir(), &snapshot_dir)); - let _ = fs::remove_dir_all(self.restoration_dir()); - - *reader = Some(try!(LooseReader::new(snapshot_dir))); - *self.status.lock() = RestorationStatus::Inactive; Ok(()) @@ -512,7 +524,13 @@ impl SnapshotService for Service { } fn status(&self) -> RestorationStatus { - *self.status.lock() + let mut cur_status = self.status.lock(); + if let RestorationStatus::Ongoing { ref mut state_chunks_done, ref mut block_chunks_done } = *cur_status { + *state_chunks_done = self.state_chunks.load(Ordering::SeqCst) as u32; + *block_chunks_done = self.block_chunks.load(Ordering::SeqCst) as u32; + } + + cur_status.clone() } fn begin_restore(&self, manifest: ManifestData) { @@ -523,12 +541,6 @@ impl SnapshotService for Service { fn abort_restore(&self) { *self.restoration.lock() = None; *self.status.lock() = RestorationStatus::Inactive; - if let Err(e) = fs::remove_dir_all(&self.restoration_dir()) { - match e.kind() { - ErrorKind::NotFound => {}, - _ => warn!("encountered error {} while deleting snapshot restoration dir.", e), - } - } } fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { @@ -585,7 +597,6 @@ mod tests { pruning: Algorithm::Archive, channel: service.channel(), snapshot_root: dir, - client_db: client_db, db_restore: Arc::new(NoopDBRestore), }; diff --git a/ethcore/src/snapshot/tests/mod.rs b/ethcore/src/snapshot/tests/mod.rs index 84096bead..d9c0abc73 100644 --- a/ethcore/src/snapshot/tests/mod.rs +++ b/ethcore/src/snapshot/tests/mod.rs @@ -18,6 +18,7 @@ mod blocks; mod state; +mod service; pub mod helpers; diff --git a/ethcore/src/snapshot/tests/service.rs b/ethcore/src/snapshot/tests/service.rs new file mode 100644 index 000000000..e136985c6 --- /dev/null +++ b/ethcore/src/snapshot/tests/service.rs @@ -0,0 +1,143 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tests for the snapshot service. 
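Turning the restoration writer into an Option<LooseWriter> is what allows init_restore to skip re-saving a snapshot that is already on disk (the new recover flag). A stripped-down sketch of that pattern, with std::io::Write standing in for LooseWriter and chunk processing reduced to a byte count; everything else about the real Restoration is omitted:

use std::io::{self, Write};

/// Minimal stand-in for the restoration state: chunks are always processed,
/// but only persisted when a writer was attached at construction time.
struct Restoration<W: Write> {
    writer: Option<W>,
    bytes_fed: usize,
}

impl<W: Write> Restoration<W> {
    /// `recover == true` wires up a writer (the real code builds a LooseWriter
    /// into a temporary recovery dir); `false` restores without keeping a copy.
    fn new(recover: bool, make_writer: impl FnOnce() -> W) -> Self {
        Restoration {
            writer: if recover { Some(make_writer()) } else { None },
            bytes_fed: 0,
        }
    }

    /// Feed one chunk: always consumed, conditionally written through.
    fn feed_chunk(&mut self, chunk: &[u8]) -> io::Result<()> {
        self.bytes_fed += chunk.len();
        if let Some(ref mut writer) = self.writer {
            writer.write_all(chunk)?;
        }
        Ok(())
    }

    /// Finalize: flush the writer only if one exists.
    fn finalize(mut self) -> io::Result<usize> {
        if let Some(ref mut writer) = self.writer {
            writer.flush()?;
        }
        Ok(self.bytes_fed)
    }
}

fn main() -> io::Result<()> {
    // With recovery the chunks end up in the buffer (stand-in for the snapshot dir).
    let mut saving = Restoration::<Vec<u8>>::new(true, Vec::new);
    saving.feed_chunk(b"state chunk")?;
    assert_eq!(saving.finalize()?, 11);

    // Without recovery the chunks are processed but nothing is persisted.
    let mut transient = Restoration::<Vec<u8>>::new(false, Vec::new);
    transient.feed_chunk(b"state chunk")?;
    assert_eq!(transient.finalize()?, 11);
    Ok(())
}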
+ +use std::sync::Arc; + +use client::{BlockChainClient, Client}; +use ids::BlockID; +use snapshot::service::{Service, ServiceParams}; +use snapshot::{self, ManifestData, SnapshotService}; +use spec::Spec; +use tests::helpers::generate_dummy_client_with_spec_and_data; + +use devtools::RandomTempPath; +use io::IoChannel; +use util::kvdb::DatabaseConfig; + +struct NoopDBRestore; + +impl snapshot::DatabaseRestore for NoopDBRestore { + fn restore_db(&self, _new_db: &str) -> Result<(), ::error::Error> { + Ok(()) + } +} + +#[test] +fn restored_is_equivalent() { + const NUM_BLOCKS: u32 = 400; + const TX_PER: usize = 5; + + let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()]; + + let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices); + + let path = RandomTempPath::create_dir(); + let mut path = path.as_path().clone(); + let mut client_db = path.clone(); + + client_db.push("client_db"); + path.push("snapshot"); + + let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); + + let spec = Spec::new_null(); + let client2 = Client::new( + Default::default(), + &spec, + &client_db, + Arc::new(::miner::Miner::with_spec(&spec)), + IoChannel::disconnected(), + &db_config, + ).unwrap(); + + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: db_config, + pruning: ::util::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: path, + db_restore: client2.clone(), + }; + + let service = Service::new(service_params).unwrap(); + service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap(); + + let manifest = service.manifest().unwrap(); + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(service.init_restore(manifest.clone(), true).is_ok()); + + for hash in manifest.state_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_state_chunk(hash, &chunk); + } + + for hash in manifest.block_hashes { + let chunk = service.chunk(hash).unwrap(); + service.feed_block_chunk(hash, &chunk); + } + + assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive); + + for x in 0..NUM_BLOCKS { + let block1 = client.block(BlockID::Number(x as u64)).unwrap(); + let block2 = client2.block(BlockID::Number(x as u64)).unwrap(); + + assert_eq!(block1, block2); + } +} + +#[test] +fn guards_delete_folders() { + let spec = Spec::new_null(); + let path = RandomTempPath::create_dir(); + let mut path = path.as_path().clone(); + let service_params = ServiceParams { + engine: spec.engine.clone(), + genesis_block: spec.genesis_block(), + db_config: DatabaseConfig::with_columns(::db::NUM_COLUMNS), + pruning: ::util::journaldb::Algorithm::Archive, + channel: IoChannel::disconnected(), + snapshot_root: path.clone(), + db_restore: Arc::new(NoopDBRestore), + }; + + let service = Service::new(service_params).unwrap(); + path.push("restoration"); + + let manifest = ManifestData { + state_hashes: vec![], + block_hashes: vec![], + block_number: 0, + block_hash: Default::default(), + state_root: Default::default(), + }; + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); + + service.abort_restore(); + assert!(!path.exists()); + + service.init_restore(manifest.clone(), true).unwrap(); + assert!(path.exists()); + + drop(service); + assert!(!path.exists()); +} \ No newline at end of file diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 58317e97b..a6b5ad649 100644 --- a/ethcore/src/spec/spec.rs +++ 
b/ethcore/src/spec/spec.rs @@ -260,6 +260,11 @@ impl Spec { pub fn new_null() -> Self { Spec::load(include_bytes!("../../res/null.json") as &[u8]).expect("null.json is invalid") } + + /// Create a new Spec with InstantSeal consensus which does internal sealing (not requiring work). + pub fn new_test_instant() -> Self { + Spec::load(include_bytes!("../../res/instant_seal.json") as &[u8]).expect("instant_seal.json is invalid") + } } #[cfg(test)] diff --git a/ethcore/src/tests/rpc.rs b/ethcore/src/tests/rpc.rs index 202e42988..d5d88c087 100644 --- a/ethcore/src/tests/rpc.rs +++ b/ethcore/src/tests/rpc.rs @@ -56,7 +56,7 @@ fn can_handshake() { let stop_guard = StopGuard::new(); let socket_path = "ipc:///tmp/parity-client-rpc-10.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); - let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + let remote_client = nanoipc::generic_client::>(socket_path).unwrap(); assert!(remote_client.handshake().is_ok()); }) @@ -68,7 +68,7 @@ fn can_query_block() { let stop_guard = StopGuard::new(); let socket_path = "ipc:///tmp/parity-client-rpc-20.ipc"; run_test_worker(scope, stop_guard.share(), socket_path); - let remote_client = nanoipc::init_client::>(socket_path).unwrap(); + let remote_client = nanoipc::generic_client::>(socket_path).unwrap(); let non_existant_block = remote_client.block_header(BlockID::Number(999)); diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index 3cfd464e9..78b8b04ce 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -240,7 +240,7 @@ mod tests { ::std::thread::spawn(move || { while !hypervisor_ready.load(Ordering::Relaxed) { } - let client = nanoipc::init_client::>(url).unwrap(); + let client = nanoipc::fast_client::>(url).unwrap(); client.handshake().unwrap(); client.module_ready(test_module_id); }); diff --git a/ipc/hypervisor/src/service.rs.in b/ipc/hypervisor/src/service.rs.in index 938cea345..74d289f50 100644 --- a/ipc/hypervisor/src/service.rs.in +++ b/ipc/hypervisor/src/service.rs.in @@ -110,7 +110,7 @@ impl HypervisorService { let modules = self.modules.read().unwrap(); modules.get(&module_id).map(|module| { trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url); - let client = nanoipc::init_client::>(&module.control_url).unwrap(); + let client = nanoipc::fast_client::>(&module.control_url).unwrap(); client.shutdown(); trace!(target: "hypervisor", "Sent shutdown to {}", module_id); }); diff --git a/ipc/nano/Cargo.toml b/ipc/nano/Cargo.toml index ee399e60f..b358eb23a 100644 --- a/ipc/nano/Cargo.toml +++ b/ipc/nano/Cargo.toml @@ -10,4 +10,4 @@ license = "GPL-3.0" ethcore-ipc = { path = "../rpc" } nanomsg = { git = "https://github.com/ethcore/nanomsg.rs.git" } log = "0.3" - +lazy_static = "0.2" diff --git a/ipc/nano/src/lib.rs b/ipc/nano/src/lib.rs index da48151a6..1157e75d3 100644 --- a/ipc/nano/src/lib.rs +++ b/ipc/nano/src/lib.rs @@ -19,6 +19,7 @@ extern crate ethcore_ipc as ipc; extern crate nanomsg; #[macro_use] extern crate log; +#[macro_use] extern crate lazy_static; pub use ipc::{WithSocket, IpcInterface, IpcConfig}; pub use nanomsg::Socket as NanoSocket; @@ -28,7 +29,8 @@ use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut} use std::ops::Deref; const POLL_TIMEOUT: isize = 200; -const CLIENT_CONNECTION_TIMEOUT: isize = 120000; +const DEFAULT_CONNECTION_TIMEOUT: isize = 30000; +const DEBUG_CONNECTION_TIMEOUT: isize = 5000; /// Generic worker to handle service (binded) sockets pub struct Worker where 
S: IpcInterface { @@ -68,7 +70,7 @@ pub fn init_duplex_client(socket_addr: &str) -> Result, Sock SocketError::DuplexLink })); - socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap(); + socket.set_receive_timeout(DEFAULT_CONNECTION_TIMEOUT).unwrap(); let endpoint = try!(socket.connect(socket_addr).map_err(|e| { warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); @@ -84,26 +86,58 @@ pub fn init_duplex_client(socket_addr: &str) -> Result, Sock /// Spawns client <`S`> over specified address /// creates socket and connects endpoint to it /// for request-reply connections to the service -pub fn init_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { +pub fn client(socket_addr: &str, receive_timeout: Option) -> Result, SocketError> where S: WithSocket { let mut socket = try!(Socket::new(Protocol::Req).map_err(|e| { warn!(target: "ipc", "Failed to create ipc socket: {:?}", e); SocketError::RequestLink })); - socket.set_receive_timeout(CLIENT_CONNECTION_TIMEOUT).unwrap(); + if let Some(timeout) = receive_timeout { + socket.set_receive_timeout(timeout).unwrap(); + } let endpoint = try!(socket.connect(socket_addr).map_err(|e| { warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", socket_addr, e); SocketError::RequestLink })); - trace!(target: "ipc", "Created cleint for {}", socket_addr); + trace!(target: "ipc", "Created client for {}", socket_addr); Ok(GuardedSocket { client: Arc::new(S::init(socket)), _endpoint: endpoint, }) } +lazy_static! { + /// Set PARITY_IPC_DEBUG=1 for fail-fast connectivity problems diagnostic + pub static ref DEBUG_FLAG: bool = { + use std::env; + + if let Ok(debug) = env::var("PARITY_IPC_DEBUG") { + debug == "1" || debug.to_uppercase() == "TRUE" + } + else { false } + }; +} + +/// Client with no default timeout on operations +pub fn generic_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { + if *DEBUG_FLAG { + client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT)) + } else { + client(socket_addr, None) + } +} + +/// Client over interface that is supposed to give quick almost non-blocking responses +pub fn fast_client(socket_addr: &str) -> Result, SocketError> where S: WithSocket { + if *DEBUG_FLAG { + client(socket_addr, Some(DEBUG_CONNECTION_TIMEOUT)) + } else { + client(socket_addr, Some(DEFAULT_CONNECTION_TIMEOUT)) + } +} + /// Error occurred while establising socket or endpoint #[derive(Debug)] pub enum SocketError { diff --git a/parity/blockchain.rs b/parity/blockchain.rs index ccdf61130..1c22a9844 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -31,6 +31,7 @@ use ethcore::error::ImportError; use ethcore::miner::Miner; use cache::CacheConfig; use informant::Informant; +use io_handler::ImportIoHandler; use params::{SpecType, Pruning}; use helpers::{to_client_config, execute_upgrades}; use dir::Directories; @@ -170,6 +171,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result { let informant = Informant::new(client.clone(), None, None, cmd.logger_config.color); + try!(service.register_io_handler(Arc::new(ImportIoHandler { + info: Arc::new(informant), + })).map_err(|_| "Unable to register informant handler".to_owned())); + let do_import = |bytes| { while client.queue_info().is_full() { sleep(Duration::from_secs(1)); } match client.import_block(bytes) { @@ -181,7 +186,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result { }, Ok(_) => {}, } - informant.tick(); Ok(()) }; @@ -266,10 +270,10 @@ fn execute_export(cmd: ExportBlockchain) -> Result { }; let from = 
try!(client.block_number(cmd.from_block).ok_or("From block could not be found")); - let to = try!(client.block_number(cmd.to_block).ok_or("From block could not be found")); + let to = try!(client.block_number(cmd.to_block).ok_or("To block could not be found")); for i in from..(to + 1) { - let b = client.block(BlockID::Number(i)).unwrap(); + let b = try!(client.block(BlockID::Number(i)).ok_or("Error exporting incomplete chain")); match format { DataFormat::Binary => { out.write(&b).expect("Couldn't write to stream."); } DataFormat::Hex => { out.write_fmt(format_args!("{}", b.pretty())).expect("Couldn't write to stream."); } diff --git a/parity/boot.rs b/parity/boot.rs index aa0e4b82b..0b0e6b670 100644 --- a/parity/boot.rs +++ b/parity/boot.rs @@ -63,7 +63,7 @@ pub fn payload() -> Result { } pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket>{ - let hypervisor_client = nanoipc::init_client::>(hv_url).unwrap(); + let hypervisor_client = nanoipc::fast_client::>(hv_url).unwrap(); hypervisor_client.handshake().unwrap(); hypervisor_client.module_ready(module_id, control_url.to_owned()); @@ -73,7 +73,7 @@ pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> Guar pub fn dependency>(url: &str) -> Result, BootError> { - nanoipc::init_client::(url).map_err(|socket_err| BootError::DependencyConnect(socket_err)) + nanoipc::generic_client::(url).map_err(|socket_err| BootError::DependencyConnect(socket_err)) } pub fn main_thread() -> Arc { diff --git a/parity/cli.rs b/parity/cli.rs index bb46bda13..a234d9d7d 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -33,7 +33,7 @@ Usage: parity export [ ] [options] parity signer new-token [options] parity snapshot [options] - parity restore [options] + parity restore [ ] [options] Operating Options: --mode MODE Set the operating mode. MODE can be one of: diff --git a/parity/io_handler.rs b/parity/io_handler.rs index d60f80f9a..bf73f55bb 100644 --- a/parity/io_handler.rs +++ b/parity/io_handler.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
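With boot.rs and modules.rs now choosing between generic_client (no receive timeout, for long-lived links) and fast_client (bounded timeout, for calls that should fail fast), the selection logic itself is small enough to sketch in isolation. The constants and the PARITY_IPC_DEBUG handling follow the ipc/nano/src/lib.rs hunk above; the real code caches the flag with lazy_static, while this dependency-free sketch simply re-reads the environment:

use std::env;

const DEFAULT_CONNECTION_TIMEOUT: isize = 30_000; // ms
const DEBUG_CONNECTION_TIMEOUT: isize = 5_000;    // ms, fail-fast diagnostics

/// True when PARITY_IPC_DEBUG is set to "1" or "true"/"TRUE".
fn ipc_debug() -> bool {
    match env::var("PARITY_IPC_DEBUG") {
        Ok(v) => v == "1" || v.to_uppercase() == "TRUE",
        Err(_) => false,
    }
}

/// Receive timeout for clients that may legitimately block for a long time.
fn generic_client_timeout() -> Option<isize> {
    if ipc_debug() { Some(DEBUG_CONNECTION_TIMEOUT) } else { None }
}

/// Receive timeout for clients that expect quick, almost non-blocking replies.
fn fast_client_timeout() -> Option<isize> {
    if ipc_debug() { Some(DEBUG_CONNECTION_TIMEOUT) } else { Some(DEFAULT_CONNECTION_TIMEOUT) }
}

fn main() {
    // Without PARITY_IPC_DEBUG, generic clients never time out on receive,
    // while fast clients give up after 30 seconds.
    println!("generic: {:?}", generic_client_timeout());
    println!("fast:    {:?}", fast_client_timeout());
}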
use std::sync::Arc; +use std::sync::atomic::{AtomicBool, Ordering}; use ethcore::client::Client; use ethcore::service::ClientIoMessage; use ethsync::{SyncProvider, ManageNetwork}; @@ -31,6 +32,7 @@ pub struct ClientIoHandler { pub net: Arc, pub accounts: Arc, pub info: Arc, + pub shutdown: Arc } impl IoHandler for ClientIoHandler { @@ -39,8 +41,24 @@ impl IoHandler for ClientIoHandler { } fn timeout(&self, _io: &IoContext, timer: TimerToken) { - if let INFO_TIMER = timer { + if timer == INFO_TIMER && !self.shutdown.load(Ordering::SeqCst) { self.info.tick(); } } } + +pub struct ImportIoHandler { + pub info: Arc, +} + +impl IoHandler for ImportIoHandler { + fn initialize(&self, io: &IoContext) { + io.register_timer(INFO_TIMER, 5000).expect("Error registering timer"); + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if let INFO_TIMER = timer { + self.info.tick() + } + } +} \ No newline at end of file diff --git a/parity/modules.rs b/parity/modules.rs index 73de6ca29..53cef4741 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -71,7 +71,7 @@ mod ipc_deps { pub use ethsync::{SyncClient, NetworkManagerClient, ServiceConfiguration}; pub use ethcore::client::ChainNotifyClient; pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL}; - pub use nanoipc::{GuardedSocket, NanoSocket, init_client}; + pub use nanoipc::{GuardedSocket, NanoSocket, generic_client, fast_client}; pub use ipc::IpcSocket; pub use ipc::binary::serialize; } @@ -134,11 +134,11 @@ pub fn sync hypervisor.start(); hypervisor.wait_for_startup(); - let sync_client = init_client::>( + let sync_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap(); - let notify_client = init_client::>( + let notify_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap(); - let manage_client = init_client::>( + let manage_client = generic_client::>( &service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap(); *hypervisor_ref = Some(hypervisor); diff --git a/parity/run.rs b/parity/run.rs index 720e6f1bf..cefd8bb21 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -257,8 +257,9 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { sync: sync_provider.clone(), net: manage_network.clone(), accounts: account_provider.clone(), + shutdown: Default::default(), }); - service.register_io_handler(io_handler).expect("Error registering IO handler"); + service.register_io_handler(io_handler.clone()).expect("Error registering IO handler"); // the watcher must be kept alive. 
let _watcher = match cmd.no_periodic_snapshot { @@ -289,6 +290,11 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // Handle exit wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); + // to make sure timer does not spawn requests while shutdown is in progress + io_handler.shutdown.store(true, ::std::sync::atomic::Ordering::SeqCst); + // just Arc is dropping here, to allow other reference release in its default time + drop(io_handler); + // hypervisor should be shutdown first while everything still works and can be // terminated gracefully drop(hypervisor); diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 8c0bdd8fc..73d06426f 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -21,8 +21,9 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use ethcore_logger::{setup_log, Config as LogConfig}; -use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService}; +use ethcore::snapshot::{Progress, RestorationStatus, SnapshotService as SS}; use ethcore::snapshot::io::{SnapshotReader, PackedReader, PackedWriter}; +use ethcore::snapshot::service::Service as SnapshotService; use ethcore::service::ClientService; use ethcore::client::{Mode, DatabaseCompactionProfile, Switch, VMType}; use ethcore::miner::Miner; @@ -62,6 +63,60 @@ pub struct SnapshotCommand { pub block_at: BlockID, } +// helper for reading chunks from arbitrary reader and feeding them into the +// service. +fn restore_using(snapshot: Arc, reader: &R, recover: bool) -> Result<(), String> { + let manifest = reader.manifest(); + + info!("Restoring to block #{} (0x{:?})", manifest.block_number, manifest.block_hash); + + try!(snapshot.init_restore(manifest.clone(), recover).map_err(|e| { + format!("Failed to begin restoration: {}", e) + })); + + let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); + + let informant_handle = snapshot.clone(); + ::std::thread::spawn(move || { + while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { + info!("Processed {}/{} state chunks and {}/{} block chunks.", + state_chunks_done, num_state, block_chunks_done, num_blocks); + ::std::thread::sleep(Duration::from_secs(5)); + } + }); + + info!("Restoring state"); + for &state_hash in &manifest.state_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration failed".into()); + } + + let chunk = try!(reader.chunk(state_hash) + .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))); + snapshot.feed_state_chunk(state_hash, &chunk); + } + + info!("Restoring blocks"); + for &block_hash in &manifest.block_hashes { + if snapshot.status() == RestorationStatus::Failed { + return Err("Restoration failed".into()); + } + + let chunk = try!(reader.chunk(block_hash) + .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))); + snapshot.feed_block_chunk(block_hash, &chunk); + } + + match snapshot.status() { + RestorationStatus::Ongoing { .. 
} => Err("Snapshot file is incomplete and missing chunks.".into()), + RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), + RestorationStatus::Inactive => { + info!("Restoration complete."); + Ok(()) + } + } +} + impl SnapshotCommand { // shared portion of snapshot commands: start the client service fn start_service(self) -> Result<(ClientService, Arc), String> { @@ -106,69 +161,35 @@ impl SnapshotCommand { /// restore from a snapshot pub fn restore(self) -> Result<(), String> { - let file = try!(self.file_path.clone().ok_or("No file path provided.".to_owned())); + let file = self.file_path.clone(); let (service, _panic_handler) = try!(self.start_service()); warn!("Snapshot restoration is experimental and the format may be subject to change."); warn!("On encountering an unexpected error, please ensure that you have a recent snapshot."); let snapshot = service.snapshot_service(); - let reader = PackedReader::new(Path::new(&file)) - .map_err(|e| format!("Couldn't open snapshot file: {}", e)) - .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); - let reader = try!(reader); - let manifest = reader.manifest(); + if let Some(file) = file { + info!("Attempting to restore from snapshot at '{}'", file); - // drop the client so we don't restore while it has open DB handles. - drop(service); + let reader = PackedReader::new(Path::new(&file)) + .map_err(|e| format!("Couldn't open snapshot file: {}", e)) + .and_then(|x| x.ok_or("Snapshot file has invalid format.".into())); - try!(snapshot.init_restore(manifest.clone()).map_err(|e| { - format!("Failed to begin restoration: {}", e) - })); + let reader = try!(reader); + try!(restore_using(snapshot, &reader, true)); + } else { + info!("Attempting to restore from local snapshot."); - let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); - - let informant_handle = snapshot.clone(); - ::std::thread::spawn(move || { - while let RestorationStatus::Ongoing { state_chunks_done, block_chunks_done } = informant_handle.status() { - info!("Processed {}/{} state chunks and {}/{} block chunks.", - state_chunks_done, num_state, block_chunks_done, num_blocks); - - ::std::thread::sleep(Duration::from_secs(5)); - } - }); - - info!("Restoring state"); - for &state_hash in &manifest.state_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } - - let chunk = try!(reader.chunk(state_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", state_hash, e))); - snapshot.feed_state_chunk(state_hash, &chunk); - } - - info!("Restoring blocks"); - for &block_hash in &manifest.block_hashes { - if snapshot.status() == RestorationStatus::Failed { - return Err("Restoration failed".into()); - } - - let chunk = try!(reader.chunk(block_hash) - .map_err(|e| format!("Encountered error while reading chunk {:?}: {}", block_hash, e))); - snapshot.feed_block_chunk(block_hash, &chunk); - } - - match snapshot.status() { - RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()), - RestorationStatus::Failed => Err("Snapshot restoration failed.".into()), - RestorationStatus::Inactive => { - info!("Restoration complete."); - Ok(()) + // attempting restoration with recovery will lead to deadlock + // as we currently hold a read lock on the service's reader. 
+ match *snapshot.reader() { + Some(ref reader) => try!(restore_using(snapshot.clone(), reader, false)), + None => return Err("No local snapshot found.".into()), } } + + Ok(()) } /// Take a snapshot from the head of the chain. diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 177df5fa0..3a89ae293 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -458,8 +458,6 @@ impl Database { let mut backup_db = PathBuf::from(&self.path); backup_db.pop(); backup_db.push("backup_db"); - println!("Path at {:?}", self.path); - println!("Backup at {:?}", backup_db); let existed = match fs::rename(&self.path, &backup_db) { Ok(_) => true,
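The restore_using helper factored out in parity/snapshot.rs above is the shared driver for both file-based and local-snapshot restoration: feed every state and block chunk while watching for failure, then interpret the final status. A toy version of that control flow; SnapshotSink is a made-up stand-in for the snapshot service and the chunk handling is reduced to a counter:

/// Restoration progress as reported by the snapshot service.
#[derive(Clone, Copy, PartialEq)]
enum RestorationStatus {
    Inactive,
    Ongoing { state_chunks_done: u32, block_chunks_done: u32 },
    Failed,
}

/// Stand-in for the snapshot service: a chunk just bumps a counter,
/// and an empty chunk simulates a corrupt one.
struct SnapshotSink {
    fed: u32,
    total: u32,
    failed: bool,
}

impl SnapshotSink {
    fn status(&self) -> RestorationStatus {
        if self.failed {
            RestorationStatus::Failed
        } else if self.fed == self.total {
            RestorationStatus::Inactive
        } else {
            RestorationStatus::Ongoing { state_chunks_done: self.fed, block_chunks_done: 0 }
        }
    }

    fn feed_chunk(&mut self, chunk: &[u8]) {
        if chunk.is_empty() { self.failed = true; } else { self.fed += 1; }
    }
}

/// Feed every chunk, checking for failure before each one, then interpret
/// the final status the way restore_using above does.
fn restore_all(sink: &mut SnapshotSink, chunks: &[Vec<u8>]) -> Result<(), String> {
    for chunk in chunks {
        if sink.status() == RestorationStatus::Failed {
            return Err("Restoration failed".into());
        }
        sink.feed_chunk(chunk);
    }
    match sink.status() {
        RestorationStatus::Ongoing { .. } => Err("Snapshot file is incomplete and missing chunks.".into()),
        RestorationStatus::Failed => Err("Snapshot restoration failed.".into()),
        RestorationStatus::Inactive => Ok(()),
    }
}

fn main() {
    let mut sink = SnapshotSink { fed: 0, total: 3, failed: false };
    assert!(restore_all(&mut sink, &[vec![1u8], vec![2], vec![3]]).is_ok());

    // A corrupt (here: empty) chunk leaves the restoration marked as failed.
    let mut sink = SnapshotSink { fed: 0, total: 3, failed: false };
    assert!(restore_all(&mut sink, &[vec![1u8], vec![], vec![3]]).is_err());
}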