From a41db5469a66d045b13f831c4f8a0f18b03e7bff Mon Sep 17 00:00:00 2001
From: Gabx
Date: Fri, 19 Aug 2016 13:38:32 +0200
Subject: [PATCH 01/17] add systemd service file

---
 scripts/parity.service | 11 +++++++++++
 1 file changed, 11 insertions(+)
 create mode 100644 scripts/parity.service

diff --git a/scripts/parity.service b/scripts/parity.service
new file mode 100644
index 000000000..9a2a1f29f
--- /dev/null
+++ b/scripts/parity.service
@@ -0,0 +1,11 @@
+[Unit]
+Description=Parity Daemon
+
+[Service]
+EnvironmentFile=%h/.parity/parity.conf
+ExecStart=/usr/bin/parity $ARGS
+
+[Install]
+WantedBy=default.target
+
+

From 87b882dec1a6714d7a7edcfca2d3025c232c2ced Mon Sep 17 00:00:00 2001
From: Gabx
Date: Fri, 19 Aug 2016 16:53:38 +0200
Subject: [PATCH 02/17] start parity with systemd

---
 README.md | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index f3a8a92bc..da2359b0f 100644
--- a/README.md
+++ b/README.md
@@ -84,9 +84,21 @@ $ cargo build --release
 ```
 This will produce an executable in the `./target/release` subdirectory.
 
-To get started, just run
+## Start parity
+### manually
+To start manually parity, just run
 ```bash
 $ ./target/release/parity
 ```
 
 and parity will begin syncing the Ethereum blockchain.
+
+### systemd service file
+To start parity as a regular user using systemd init:
+
+1. copy the ```parity/scripts/parity.service``` in your
+systemd user directory (usually ```~/.config/systemd/user```).
+2. to pass any argument to parity, write a ```~/.parity/prity.conf``` file this way:
+```ARGS="ARG1 ARG2 ARG3"```.
+
+ Example: ```ARGS=ui --geth --identity MyMachine"```.

From 32530c61eda82aeeb5b0cb727c7ab7130ed5fc8e Mon Sep 17 00:00:00 2001
From: Gabx
Date: Sun, 21 Aug 2016 19:41:54 +0200
Subject: [PATCH 03/17] fix typos

---
 README.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index da2359b0f..150db47f1 100644
--- a/README.md
+++ b/README.md
@@ -98,7 +98,7 @@ To start parity as a regular user using systemd init:
 
 1. copy the ```parity/scripts/parity.service``` in your
 systemd user directory (usually ```~/.config/systemd/user```).
-2. to pass any argument to parity, write a ```~/.parity/prity.conf``` file this way:
+2. to pass any argument to parity, write a ```~/.parity/parity.conf``` file this way:
 ```ARGS="ARG1 ARG2 ARG3"```.
 
- Example: ```ARGS=ui --geth --identity MyMachine"```.
+ Example: ```ARGS="ui --geth --identity MyMachine"```.

From ef0bb691bc3020677e88edc51bac4dfa393989db Mon Sep 17 00:00:00 2001
From: Gabx
Date: Tue, 23 Aug 2016 20:10:13 +0200
Subject: [PATCH 04/17] capitalize some words

---
 README.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/README.md b/README.md
index 150db47f1..876bb9776 100644
--- a/README.md
+++ b/README.md
@@ -86,19 +86,19 @@ This will produce an executable in the `./target/release` subdirectory.
 
 ## Start parity
 ### manually
-To start manually parity, just run
+To start Manually Parity, just run
 ```bash
 $ ./target/release/parity
 ```
 
-and parity will begin syncing the Ethereum blockchain.
+and Parity will begin syncing the Ethereum blockchain.
 
 ### systemd service file
-To start parity as a regular user using systemd init:
+To start Parity as a regular user using systemd init:
 
-1. copy the ```parity/scripts/parity.service``` in your
+1. Copy the ```parity/scripts/parity.service``` in your
 systemd user directory (usually ```~/.config/systemd/user```).
-2. 
to pass any argument to parity, write a ```~/.parity/parity.conf``` file this way: +2. To pass any argument to parity, write a ```~/.parity/parity.conf``` file this way: ```ARGS="ARG1 ARG2 ARG3"```. Example: ```ARGS="ui --geth --identity MyMachine"```. From a8d26470e26b6ba562d713e573b70579e7f55f5c Mon Sep 17 00:00:00 2001 From: Gabx Date: Tue, 23 Aug 2016 23:17:06 +0200 Subject: [PATCH 05/17] capitalize some words --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 876bb9776..b2ddc80b4 100644 --- a/README.md +++ b/README.md @@ -86,7 +86,7 @@ This will produce an executable in the `./target/release` subdirectory. ## Start parity ### manually -To start Manually Parity, just run +To start Parity manually, just run ```bash $ ./target/release/parity ``` From 0baa8a53a50dfeb5a3a3a95666ea84a422fa6f2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Thu, 25 Aug 2016 08:57:13 +0200 Subject: [PATCH 06/17] dapps-hosts configuration --- Cargo.lock | 1 - dapps/src/lib.rs | 62 ++++++++++++++++++++++++++--- dapps/src/router/host_validation.rs | 8 ++-- dapps/src/router/mod.rs | 14 ++++--- parity/cli.rs | 6 +++ parity/configuration.rs | 28 +++++++++++++ parity/dapps.rs | 10 +++-- 7 files changed, 108 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c06c5a62..119e87fdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,7 +290,6 @@ version = "1.4.0" dependencies = [ "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 574c38acf..e50bc2006 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -108,14 +108,28 @@ impl ServerBuilder { /// Asynchronously start server with no authentication, /// returns result with `Server` handle on success or an error. - pub fn start_unsecure_http(&self, addr: &SocketAddr) -> Result { - Server::start_http(addr, NoAuth, self.handler.clone(), self.dapps_path.clone(), self.registrar.clone()) + pub fn start_unsecured_http(&self, addr: &SocketAddr, hosts: Option>) -> Result { + Server::start_http( + addr, + hosts, + NoAuth, + self.handler.clone(), + self.dapps_path.clone(), + self.registrar.clone() + ) } /// Asynchronously start server with `HTTP Basic Authentication`, /// return result with `Server` handle on success or an error. - pub fn start_basic_auth_http(&self, addr: &SocketAddr, username: &str, password: &str) -> Result { - Server::start_http(addr, HttpBasicAuth::single_user(username, password), self.handler.clone(), self.dapps_path.clone(), self.registrar.clone()) + pub fn start_basic_auth_http(&self, addr: &SocketAddr, hosts: Option>, username: &str, password: &str) -> Result { + Server::start_http( + addr, + hosts, + HttpBasicAuth::single_user(username, password), + self.handler.clone(), + self.dapps_path.clone(), + self.registrar.clone() + ) } } @@ -126,8 +140,24 @@ pub struct Server { } impl Server { + /// Returns a list of allowed hosts or `None` if all hosts are allowed. + fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { + let mut allowed = Vec::new(); + + match hosts { + Some(hosts) => allowed.extend_from_slice(&hosts), + None => return None, + } + + // Add localhost domain as valid too if listening on loopback interface. 
+ allowed.push(bind_address.replace("127.0.0.1", "localhost").into()); + allowed.push(bind_address.into()); + Some(allowed) + } + fn start_http( addr: &SocketAddr, + hosts: Option>, authorization: A, handler: Arc, dapps_path: String, @@ -144,7 +174,7 @@ impl Server { special.insert(router::SpecialEndpoint::Utils, apps::utils()); special }); - let bind_address = format!("{}", addr); + let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); try!(hyper::Server::http(addr)) .handle(move |ctrl| router::Router::new( @@ -154,7 +184,7 @@ impl Server { endpoints.clone(), special.clone(), authorization.clone(), - bind_address.clone(), + hosts.clone(), )) .map(|(l, srv)| { @@ -207,3 +237,23 @@ pub fn random_filename() -> String { rng.gen_ascii_chars().take(12).collect() } +#[cfg(test)] +mod tests { + use super::Server; + + #[test] + fn should_return_allowed_hosts() { + // given + let bind_address = "127.0.0.1".to_owned(); + + // when + let all = Server::allowed_hosts(None, bind_address.clone()); + let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone()); + let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone()); + + // then + assert_eq!(all, None); + assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()])); + assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()])); + } +} diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs index 62813500f..e0f974482 100644 --- a/dapps/src/router/host_validation.rs +++ b/dapps/src/router/host_validation.rs @@ -22,13 +22,11 @@ use hyper::net::HttpStream; use jsonrpc_http_server::{is_host_header_valid}; use handlers::ContentHandler; -pub fn is_valid(request: &server::Request, bind_address: &str, endpoints: Vec) -> bool { - let mut endpoints = endpoints.into_iter() +pub fn is_valid(request: &server::Request, allowed_hosts: &[String], endpoints: Vec) -> bool { + let mut endpoints = endpoints.iter() .map(|endpoint| format!("{}{}", endpoint, DAPPS_DOMAIN)) .collect::>(); - // Add localhost domain as valid too if listening on loopback interface. 
- endpoints.push(bind_address.replace("127.0.0.1", "localhost").into()); - endpoints.push(bind_address.into()); + endpoints.extend_from_slice(allowed_hosts); let header_valid = is_host_header_valid(request, &endpoints); diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index 568dc00da..359337047 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -48,7 +48,7 @@ pub struct Router { fetch: Arc, special: Arc>>, authorization: Arc, - bind_address: String, + allowed_hosts: Option>, handler: Box + Send>, } @@ -56,9 +56,11 @@ impl server::Handler for Router { fn on_request(&mut self, req: server::Request) -> Next { // Validate Host header - if !host_validation::is_valid(&req, &self.bind_address, self.endpoints.keys().cloned().collect()) { - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); + if let Some(ref hosts) = self.allowed_hosts { + if !host_validation::is_valid(&req, hosts, self.endpoints.keys().cloned().collect()) { + self.handler = host_validation::host_invalid_response(); + return self.handler.on_request(req); + } } // Check authorization @@ -125,7 +127,7 @@ impl Router { endpoints: Arc, special: Arc>>, authorization: Arc, - bind_address: String, + allowed_hosts: Option>, ) -> Self { let handler = special.get(&SpecialEndpoint::Rpc).unwrap().to_handler(EndpointPath::default()); @@ -136,7 +138,7 @@ impl Router { fetch: app_fetcher, special: special, authorization: authorization, - bind_address: bind_address, + allowed_hosts: allowed_hosts, handler: handler, } } diff --git a/parity/cli.rs b/parity/cli.rs index 366c73a5b..8f33489dc 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -134,6 +134,11 @@ API and Console Options: --dapps-interface IP Specify the hostname portion of the Dapps server, IP should be an interface's IP address, or local [default: local]. + --dapps-hosts HOSTS List of allowed Host header values. This option will + validate the Host header sent by the browser, it + is additional security against some attack + vectors. Special options: "all", "none", + [default: none]. --dapps-user USERNAME Specify username for Dapps server. It will be used in HTTP Basic Authentication Scheme. 
If --dapps-pass is not specified you will be @@ -346,6 +351,7 @@ pub struct Args { pub flag_no_dapps: bool, pub flag_dapps_port: u16, pub flag_dapps_interface: String, + pub flag_dapps_hosts: String, pub flag_dapps_user: Option, pub flag_dapps_pass: Option, pub flag_dapps_path: String, diff --git a/parity/configuration.rs b/parity/configuration.rs index b1dbaa3fe..f2fd34853 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -356,6 +356,7 @@ impl Configuration { enabled: self.dapps_enabled(), interface: self.dapps_interface(), port: self.args.flag_dapps_port, + hosts: self.dapps_hosts(), user: self.args.flag_dapps_user.clone(), pass: self.args.flag_dapps_pass.clone(), dapps_path: self.directories().dapps, @@ -485,6 +486,16 @@ impl Configuration { Some(hosts) } + fn dapps_hosts(&self) -> Option> { + match self.args.flag_dapps_hosts.as_ref() { + "none" => return Some(Vec::new()), + "all" => return None, + _ => {} + } + let hosts = self.args.flag_dapps_hosts.split(',').map(|h| h.into()).collect(); + Some(hosts) + } + fn ipc_config(&self) -> Result { let conf = IpcConfiguration { enabled: !(self.args.flag_ipcdisable || self.args.flag_ipc_off || self.args.flag_no_ipc), @@ -860,6 +871,23 @@ mod tests { assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); } + #[test] + fn should_parse_dapps_hosts() { + // given + + // when + let conf0 = parse(&["parity"]); + let conf1 = parse(&["parity", "--dapps-hosts", "none"]); + let conf2 = parse(&["parity", "--dapps-hosts", "all"]); + let conf3 = parse(&["parity", "--dapps-hosts", "ethcore.io,something.io"]); + + // then + assert_eq!(conf0.dapps_hosts(), Some(Vec::new())); + assert_eq!(conf1.dapps_hosts(), Some(Vec::new())); + assert_eq!(conf2.dapps_hosts(), None); + assert_eq!(conf3.dapps_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); + } + #[test] fn should_disable_signer_in_geth_compat() { // given diff --git a/parity/dapps.rs b/parity/dapps.rs index 71c6d7f9d..7f759ed3c 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -25,6 +25,7 @@ pub struct Configuration { pub enabled: bool, pub interface: String, pub port: u16, + pub hosts: Option>, pub user: Option, pub pass: Option, pub dapps_path: String, @@ -36,6 +37,7 @@ impl Default for Configuration { enabled: true, interface: "127.0.0.1".into(), port: 8080, + hosts: Some(Vec::new()), user: None, pass: None, dapps_path: replace_home("$HOME/.parity/dapps"), @@ -68,7 +70,7 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result>, _auth: Option<(String, String)>, ) -> Result { Err("Your Parity version has been compiled without WebApps support.".into()) @@ -109,6 +112,7 @@ mod server { deps: Dependencies, dapps_path: String, url: &SocketAddr, + allowed_hosts: Option>, auth: Option<(String, String)> ) -> Result { use ethcore_dapps as dapps; @@ -119,10 +123,10 @@ mod server { let server = rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::UnsafeContext); let start_result = match auth { None => { - server.start_unsecure_http(url) + server.start_unsecured_http(url, allowed_hosts) }, Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password) + server.start_basic_auth_http(url, allowed_hosts, &username, &password) }, }; From 1c19a807d9996e30e9e2f772c5a7f73e7e80ff07 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 25 Aug 2016 22:20:44 +0200 Subject: [PATCH 07/17] Take control of recovered snapshots, start restoration asynchronously (#2010) * take control of given snapshot * 
start snapshot restoration asynchronously, --- ethcore/src/service.rs | 8 + ethcore/src/snapshot/mod.rs | 8 +- ethcore/src/snapshot/service.rs | 240 ++++++++++++++++++--------- ethcore/src/snapshot/tests/blocks.rs | 1 - ethcore/src/snapshot/tests/state.rs | 3 +- parity/snapshot.rs | 6 +- 6 files changed, 181 insertions(+), 85 deletions(-) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 355c7d580..e2e4772a4 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -22,6 +22,7 @@ use spec::Spec; use error::*; use client::{Client, ClientConfig, ChainNotify}; use miner::Miner; +use snapshot::ManifestData; use snapshot::service::Service as SnapshotService; use std::sync::atomic::AtomicBool; @@ -39,6 +40,8 @@ pub enum ClientIoMessage { BlockVerified, /// New transaction RLPs are ready to be imported NewTransactions(Vec), + /// Begin snapshot restoration + BeginRestoration(ManifestData), /// Feed a state chunk to the snapshot service FeedStateChunk(H256, Bytes), /// Feed a block chunk to the snapshot service @@ -160,6 +163,11 @@ impl IoHandler for ClientIoHandler { match *net_message { ClientIoMessage::BlockVerified => { self.client.import_verified_blocks(); } ClientIoMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(transactions); } + ClientIoMessage::BeginRestoration(ref manifest) => { + if let Err(e) = self.snapshot.init_restore(manifest.clone()) { + warn!("Failed to initialize snapshot restoration: {}", e); + } + } ClientIoMessage::FeedStateChunk(ref hash, ref chunk) => self.snapshot.feed_state_chunk(*hash, chunk), ClientIoMessage::FeedBlockChunk(ref hash, ref chunk) => self.snapshot.feed_block_chunk(*hash, chunk), _ => {} // ignore other messages diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index a1f9812d5..d1ad077fe 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -501,7 +501,7 @@ impl StateRebuilder { /// Check for accounts missing code. Once all chunks have been fed, there should /// be none. - pub fn check_missing(&self) -> Result<(), Error> { + pub fn check_missing(self) -> Result<(), Error> { let missing = self.missing_code.keys().cloned().collect::>(); match missing.is_empty() { true => Ok(()), @@ -640,8 +640,8 @@ impl BlockRebuilder { } /// Glue together any disconnected chunks. To be called at the end. - pub fn glue_chunks(&mut self) { - for &(ref first_num, ref first_hash) in &self.disconnected { + pub fn glue_chunks(self) { + for (first_num, first_hash) in self.disconnected { let parent_num = first_num - 1; // check if the parent is even in the chain. @@ -649,7 +649,7 @@ impl BlockRebuilder { // the first block of the first chunks has nothing to connect to. if let Some(parent_hash) = self.chain.block_hash(parent_num) { // if so, add the child to it. 
- self.chain.add_child(parent_hash, *first_hash); + self.chain.add_child(parent_hash, first_hash); } } } diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 45e1184b4..9f2b3f34a 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -24,7 +24,7 @@ use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; use super::{ManifestData, StateRebuilder, BlockRebuilder}; -use super::io::{SnapshotReader, LooseReader}; +use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter}; use blockchain::BlockChain; use engines::Engine; @@ -34,7 +34,7 @@ use spec::Spec; use io::IoChannel; -use util::{Bytes, H256, Mutex, UtilError}; +use util::{Bytes, H256, Mutex, RwLock, UtilError}; use util::journaldb::Algorithm; use util::kvdb::{Database, DatabaseConfig}; use util::snappy; @@ -50,8 +50,6 @@ pub enum RestorationStatus { Failed, } -/// Restoration info. - /// The interface for a snapshot network service. /// This handles: /// - restoration of snapshots to temporary databases. @@ -74,8 +72,10 @@ pub trait SnapshotService { /// Begin snapshot restoration. /// If restoration in-progress, this will reset it. /// From this point on, any previous snapshot may become unavailable. - /// Returns true if successful, false otherwise. - fn begin_restore(&self, manifest: ManifestData) -> bool; + fn begin_restore(&self, manifest: ManifestData); + + /// Abort an in-progress restoration if there is one. + fn abort_restore(&self); /// Feed a raw state chunk to the service to be processed asynchronously. /// no-op if not currently restoring. @@ -88,51 +88,59 @@ pub trait SnapshotService { /// State restoration manager. struct Restoration { + manifest: ManifestData, state_chunks_left: HashSet, block_chunks_left: HashSet, state: StateRebuilder, blocks: BlockRebuilder, + writer: LooseWriter, snappy_buffer: Bytes, final_state_root: H256, } +struct RestorationParams<'a> { + manifest: ManifestData, // manifest to base restoration on. + pruning: Algorithm, // pruning algorithm for the database. + db_path: PathBuf, // database path + writer: LooseWriter, // writer for recovered snapshot. + genesis: &'a [u8], // genesis block of the chain. +} + impl Restoration { - // make a new restoration, building databases in the given path. - fn new(manifest: &ManifestData, pruning: Algorithm, path: &Path, gb: &[u8]) -> Result { + // make a new restoration using the given parameters. 
+ fn new(params: RestorationParams) -> Result { + let manifest = params.manifest; + + let state_chunks = manifest.state_hashes.iter().cloned().collect(); + let block_chunks = manifest.block_hashes.iter().cloned().collect(); + let cfg = DatabaseConfig::with_columns(::db::NUM_COLUMNS); - let raw_db = Arc::new(try!(Database::open(&cfg, &*path.to_string_lossy()) + let raw_db = Arc::new(try!(Database::open(&cfg, &*params.db_path.to_string_lossy()) .map_err(UtilError::SimpleString))); - let chain = BlockChain::new(Default::default(), gb, raw_db.clone()); + let chain = BlockChain::new(Default::default(), params.genesis, raw_db.clone()); let blocks = try!(BlockRebuilder::new(chain, manifest.block_number)); + let root = manifest.state_root.clone(); Ok(Restoration { - state_chunks_left: manifest.state_hashes.iter().cloned().collect(), - block_chunks_left: manifest.block_hashes.iter().cloned().collect(), - state: StateRebuilder::new(raw_db, pruning), + manifest: manifest, + state_chunks_left: state_chunks, + block_chunks_left: block_chunks, + state: StateRebuilder::new(raw_db, params.pruning), blocks: blocks, + writer: params.writer, snappy_buffer: Vec::new(), - final_state_root: manifest.state_root, + final_state_root: root, }) } // feeds a state chunk fn feed_state(&mut self, hash: H256, chunk: &[u8]) -> Result<(), Error> { - use util::trie::TrieError; - if self.state_chunks_left.remove(&hash) { - let len = try!(snappy::decompress_into(&chunk, &mut self.snappy_buffer)); + let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); + try!(self.state.feed(&self.snappy_buffer[..len])); - - if self.state_chunks_left.is_empty() { - try!(self.state.check_missing()); - - let root = self.state.state_root(); - if root != self.final_state_root { - warn!("Final restored state has wrong state root: expected {:?}, got {:?}", root, self.final_state_root); - return Err(TrieError::InvalidStateRoot(root).into()); - } - } + try!(self.writer.write_state_chunk(hash, chunk)); } Ok(()) @@ -141,18 +149,39 @@ impl Restoration { // feeds a block chunk fn feed_blocks(&mut self, hash: H256, chunk: &[u8], engine: &Engine) -> Result<(), Error> { if self.block_chunks_left.remove(&hash) { - let len = try!(snappy::decompress_into(&chunk, &mut self.snappy_buffer)); - try!(self.blocks.feed(&self.snappy_buffer[..len], engine)); + let len = try!(snappy::decompress_into(chunk, &mut self.snappy_buffer)); - if self.block_chunks_left.is_empty() { - // connect out-of-order chunks. - self.blocks.glue_chunks(); - } + try!(self.blocks.feed(&self.snappy_buffer[..len], engine)); + try!(self.writer.write_block_chunk(hash, chunk)); } Ok(()) } + // finish up restoration. + fn finalize(self) -> Result<(), Error> { + use util::trie::TrieError; + + if !self.is_done() { return Ok(()) } + + // verify final state root. + let root = self.state.state_root(); + if root != self.final_state_root { + warn!("Final restored state has wrong state root: expected {:?}, got {:?}", root, self.final_state_root); + return Err(TrieError::InvalidStateRoot(root).into()); + } + + // check for missing code. + try!(self.state.check_missing()); + + // connect out-of-order chunks. + self.blocks.glue_chunks(); + + try!(self.writer.finish(self.manifest)); + + Ok(()) + } + // is everything done? 
fn is_done(&self) -> bool { self.block_chunks_left.is_empty() && self.state_chunks_left.is_empty() @@ -174,7 +203,7 @@ pub struct Service { io_channel: Channel, pruning: Algorithm, status: Mutex, - reader: Option, + reader: RwLock>, engine: Arc, genesis_block: Bytes, state_chunks: AtomicUsize, @@ -190,6 +219,7 @@ impl Service { let reader = { let mut snapshot_path = db_path.clone(); snapshot_path.push("snapshot"); + snapshot_path.push("current"); LooseReader::new(snapshot_path).ok() }; @@ -201,15 +231,15 @@ impl Service { io_channel: io_channel, pruning: pruning, status: Mutex::new(RestorationStatus::Inactive), - reader: reader, + reader: RwLock::new(reader), engine: spec.engine.clone(), genesis_block: spec.genesis_block(), state_chunks: AtomicUsize::new(0), block_chunks: AtomicUsize::new(0), }; - // create the snapshot dir if it doesn't exist. - if let Err(e) = fs::create_dir_all(service.snapshot_dir()) { + // create the root snapshot dir if it doesn't exist. + if let Err(e) = fs::create_dir_all(service.root_dir()) { if e.kind() != ErrorKind::AlreadyExists { return Err(e.into()) } @@ -225,16 +255,23 @@ impl Service { Ok(service) } - // get the snapshot path. - fn snapshot_dir(&self) -> PathBuf { + // get the root path. + fn root_dir(&self) -> PathBuf { let mut dir = self.db_path.clone(); dir.push("snapshot"); dir } + // get the current snapshot dir. + fn snapshot_dir(&self) -> PathBuf { + let mut dir = self.root_dir(); + dir.push("current"); + dir + } + // get the restoration directory. fn restoration_dir(&self) -> PathBuf { - let mut dir = self.snapshot_dir(); + let mut dir = self.root_dir(); dir.push("restoration"); dir } @@ -246,6 +283,13 @@ impl Service { dir } + // temporary snapshot recovery path. + fn temp_recovery_dir(&self) -> PathBuf { + let mut dir = self.restoration_dir(); + dir.push("temp"); + dir + } + // replace one the client's database with our own. fn replace_client_db(&self) -> Result<(), Error> { let our_db = self.restoration_db(); @@ -284,6 +328,42 @@ impl Service { } } + /// Initialize the restoration synchronously. + pub fn init_restore(&self, manifest: ManifestData) -> Result<(), Error> { + let rest_dir = self.restoration_dir(); + + let mut res = self.restoration.lock(); + + // tear down existing restoration. + *res = None; + + // delete and restore the restoration dir. + if let Err(e) = fs::remove_dir_all(&rest_dir) { + match e.kind() { + ErrorKind::NotFound => {}, + _ => return Err(e.into()), + } + } + + try!(fs::create_dir_all(&rest_dir)); + + // make new restoration. + let writer = try!(LooseWriter::new(self.temp_recovery_dir())); + + let params = RestorationParams { + manifest: manifest, + pruning: self.pruning, + db_path: self.restoration_db(), + writer: writer, + genesis: &self.genesis_block, + }; + + *res = Some(try!(Restoration::new(params))); + + *self.status.lock() = RestorationStatus::Ongoing; + Ok(()) + } + // finalize the restoration. this accepts an already-locked // restoration as an argument -- so acquiring it again _will_ // lead to deadlock. @@ -293,27 +373,52 @@ impl Service { self.state_chunks.store(0, Ordering::SeqCst); self.block_chunks.store(0, Ordering::SeqCst); - // destroy the restoration before replacing databases. - *rest = None; - + // destroy the restoration before replacing databases and snapshot. 
+ try!(rest.take().map(Restoration::finalize).unwrap_or(Ok(()))); try!(self.replace_client_db()); - *self.status.lock() = RestorationStatus::Inactive; + let mut reader = self.reader.write(); + *reader = None; // destroy the old reader if it existed. + + let snapshot_dir = self.snapshot_dir(); + + trace!(target: "snapshot", "removing old snapshot dir at {}", snapshot_dir.to_string_lossy()); + if let Err(e) = fs::remove_dir_all(&snapshot_dir) { + match e.kind() { + ErrorKind::NotFound => {} + _ => return Err(e.into()), + } + } + + try!(fs::create_dir(&snapshot_dir)); + + trace!(target: "snapshot", "copying restored snapshot files over"); + for maybe_file in try!(fs::read_dir(self.temp_recovery_dir())) { + let path = try!(maybe_file).path(); + if let Some(name) = path.file_name().map(|x| x.to_owned()) { + let mut new_path = snapshot_dir.clone(); + new_path.push(name); + try!(fs::rename(path, new_path)); + } + } - // TODO: take control of restored snapshot. let _ = fs::remove_dir_all(self.restoration_dir()); + *reader = Some(try!(LooseReader::new(snapshot_dir))); + + *self.status.lock() = RestorationStatus::Inactive; + Ok(()) } /// Feed a chunk of either kind. no-op if no restoration or status is wrong. fn feed_chunk(&self, hash: H256, chunk: &[u8], is_state: bool) -> Result<(), Error> { + // TODO: be able to process block chunks and state chunks at same time? + let mut restoration = self.restoration.lock(); + match self.status() { RestorationStatus::Inactive | RestorationStatus::Failed => Ok(()), RestorationStatus::Ongoing => { - // TODO: be able to process block chunks and state chunks at same time? - let mut restoration = self.restoration.lock(); - let res = { let rest = match *restoration { Some(ref mut r) => r, @@ -373,11 +478,11 @@ impl Service { impl SnapshotService for Service { fn manifest(&self) -> Option { - self.reader.as_ref().map(|r| r.manifest().clone()) + self.reader.read().as_ref().map(|r| r.manifest().clone()) } fn chunk(&self, hash: H256) -> Option { - self.reader.as_ref().and_then(|r| r.chunk(hash).ok()) + self.reader.read().as_ref().and_then(|r| r.chunk(hash).ok()) } fn status(&self) -> RestorationStatus { @@ -388,37 +493,20 @@ impl SnapshotService for Service { (self.state_chunks.load(Ordering::Relaxed), self.block_chunks.load(Ordering::Relaxed)) } - fn begin_restore(&self, manifest: ManifestData) -> bool { - let rest_dir = self.restoration_dir(); + fn begin_restore(&self, manifest: ManifestData) { + self.io_channel.send(ClientIoMessage::BeginRestoration(manifest)) + .expect("snapshot service and io service are kept alive by client service; qed"); + } - let mut res = self.restoration.lock(); - - // tear down existing restoration. - *res = None; - - // delete and restore the restoration dir. - if let Err(e) = fs::remove_dir_all(&rest_dir).and_then(|_| fs::create_dir_all(&rest_dir)) { + fn abort_restore(&self) { + *self.restoration.lock() = None; + *self.status.lock() = RestorationStatus::Inactive; + if let Err(e) = fs::remove_dir_all(&self.restoration_dir()) { match e.kind() { ErrorKind::NotFound => {}, - _ => { - warn!("encountered error {} while beginning snapshot restoration.", e); - return false; - } + _ => warn!("encountered error {} while deleting snapshot restoration dir.", e), } } - - // make new restoration. 
- let db_path = self.restoration_db(); - *res = match Restoration::new(&manifest, self.pruning, &db_path, &self.genesis_block) { - Ok(b) => Some(b), - Err(e) => { - warn!("encountered error {} while beginning snapshot restoration.", e); - return false; - } - }; - - *self.status.lock() = RestorationStatus::Ongoing; - true } fn restore_state_chunk(&self, hash: H256, chunk: Bytes) { diff --git a/ethcore/src/snapshot/tests/blocks.rs b/ethcore/src/snapshot/tests/blocks.rs index ac9880263..6c4344b6e 100644 --- a/ethcore/src/snapshot/tests/blocks.rs +++ b/ethcore/src/snapshot/tests/blocks.rs @@ -79,7 +79,6 @@ fn chunk_and_restore(amount: u64) { } rebuilder.glue_chunks(); - drop(rebuilder); // and test it. let new_chain = BlockChain::new(Default::default(), &genesis, new_db); diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index a293cdb44..fba6d56f6 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -72,8 +72,9 @@ fn snap_and_restore() { rebuilder.feed(&chunk).unwrap(); } - rebuilder.check_missing().unwrap(); assert_eq!(rebuilder.state_root(), state_root); + rebuilder.check_missing().unwrap(); + new_db }; diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 650123d73..ecc463a2e 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -121,9 +121,9 @@ impl SnapshotCommand { // drop the client so we don't restore while it has open DB handles. drop(service); - if !snapshot.begin_restore(manifest.clone()) { - return Err("Failed to begin restoration.".into()); - } + try!(snapshot.init_restore(manifest.clone()).map_err(|e| { + format!("Failed to begin restoration: {}", e) + })); let (num_state, num_blocks) = (manifest.state_hashes.len(), manifest.block_hashes.len()); From 7b945fcda475618616374ffae67c4f870d6bbf66 Mon Sep 17 00:00:00 2001 From: "Denis S. 
Soldatov aka General-Beck" Date: Fri, 26 Aug 2016 22:07:05 +0700 Subject: [PATCH 08/17] Update gitlab-ci add test stage --- .gitlab-ci.yml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e950996ac..20e31d44c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,5 +1,6 @@ stages: - build + - test - deploy variables: GIT_DEPTH: "3" @@ -19,6 +20,7 @@ linux-beta: script: - cargo build --release --verbose - strip target/release/parity + - cp target/release/parity parity tags: - rust - rust-beta @@ -26,6 +28,12 @@ linux-beta: paths: - target/release/parity name: "${CI_BUILD_NAME}_parity" + stage: deploy + tags: + - rust + - rust-beta + script: + - ./deploy.sh linux-stable: stage: build image: ethcore/rust:stable @@ -92,6 +100,11 @@ linux-armv7: - tags - stable script: + - rm -rf .cargo + - mkdir -p .cargo + - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config + - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config + - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity tags: @@ -110,6 +123,11 @@ linux-arm: - tags - stable script: + - rm -rf .cargo + - mkdir -p .cargo + - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config + - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config + - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity tags: @@ -129,6 +147,11 @@ linux-armv6: - tags - stable script: + - rm -rf .cargo + - mkdir -p .cargo + - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config + - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config + - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity tags: @@ -148,6 +171,11 @@ linux-aarch64: - tags - stable script: + - rm -rf .cargo + - mkdir -p .cargo + - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config + - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config + - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity tags: @@ -193,3 +221,11 @@ windows: - target/release/parity.exe - target/release/parity.pdb name: "${CI_BUILD_NAME}_parity" +test-linux: + stage: test + before_script: + - git submodule update --init --recursive + script: + - ./test.sh --verbose + dependencies: + - linux-stable From 73958ae8f88046a8a75e22fdfb112c666f50304b Mon Sep 17 00:00:00 2001 From: "Denis S. 
Soldatov aka General-Beck" Date: Fri, 26 Aug 2016 23:12:44 +0700 Subject: [PATCH 09/17] Update gitlab-ci replace build positions --- .gitlab-ci.yml | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 20e31d44c..ea9581753 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -34,24 +34,6 @@ linux-beta: - rust-beta script: - ./deploy.sh -linux-stable: - stage: build - image: ethcore/rust:stable - only: - - master - - beta - - tags - - stable - script: - - cargo build --release --verbose - - strip target/release/parity - tags: - - rust - - rust-stable - artifacts: - paths: - - target/release/parity - name: "${CI_BUILD_NAME}_parity" linux-nightly: stage: build image: ethcore/rust:nightly @@ -221,6 +203,24 @@ windows: - target/release/parity.exe - target/release/parity.pdb name: "${CI_BUILD_NAME}_parity" +linux-stable: + stage: build + image: ethcore/rust:stable + only: + - master + - beta + - tags + - stable + script: + - cargo build --release --verbose + - strip target/release/parity + tags: + - rust + - rust-stable + artifacts: + paths: + - target/release/parity + name: "${CI_BUILD_NAME}_parity" test-linux: stage: test before_script: From 2d883c43c93231314b527a6c50ff91b9c46a7408 Mon Sep 17 00:00:00 2001 From: "Denis S. Soldatov aka General-Beck" Date: Sat, 27 Aug 2016 02:58:32 +0700 Subject: [PATCH 10/17] Update gitlab-ci.yml add export to see ENV --- .gitlab-ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ea9581753..65e60d6eb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -18,6 +18,7 @@ linux-beta: - tags - stable script: + - export - cargo build --release --verbose - strip target/release/parity - cp target/release/parity parity @@ -82,6 +83,7 @@ linux-armv7: - tags - stable script: + - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config @@ -105,6 +107,7 @@ linux-arm: - tags - stable script: + - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config @@ -129,6 +132,7 @@ linux-armv6: - tags - stable script: + - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config @@ -153,6 +157,7 @@ linux-aarch64: - tags - stable script: + - export - rm -rf .cargo - mkdir -p .cargo - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config @@ -212,6 +217,7 @@ linux-stable: - tags - stable script: + - export - cargo build --release --verbose - strip target/release/parity tags: From 3fa0cfe803faaf81ca64ce269eb68646ca3b736c Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 29 Aug 2016 11:33:42 +0200 Subject: [PATCH 11/17] Updated wording Updating wording a bit and fixed remaining capitalization issues. --- README.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index b2ddc80b4..26913183c 100644 --- a/README.md +++ b/README.md @@ -84,8 +84,8 @@ $ cargo build --release This will produce an executable in the `./target/release` subdirectory. -## Start parity -### manually +## Start Parity +### Manually To start Parity manually, just run ```bash $ ./target/release/parity @@ -93,12 +93,12 @@ $ ./target/release/parity and Parity will begin syncing the Ethereum blockchain. -### systemd service file +### Using systemd service file To start Parity as a regular user using systemd init: -1. Copy the ```parity/scripts/parity.service``` in your +1. 
Copy ```parity/scripts/parity.service``` to your systemd user directory (usually ```~/.config/systemd/user```). -2. To pass any argument to parity, write a ```~/.parity/parity.conf``` file this way: +2. To pass any argument to Parity, write a ```~/.parity/parity.conf``` file this way: ```ARGS="ARG1 ARG2 ARG3"```. Example: ```ARGS="ui --geth --identity MyMachine"```. From 4389742ca39b51c49f516ef7a2fa8a4fddfae4ed Mon Sep 17 00:00:00 2001 From: Nipunn Koorapati Date: Mon, 29 Aug 2016 02:35:24 -0700 Subject: [PATCH 12/17] Make the block header struct's internals private (#2000) * Make the block header struct's internals private Currently, this involves a lot of explicit cloning, but we could migrate the return types of the get_* functions to be copies rather than references since they are mostly copy types anyway. I opted to eliminate the constructor in favor of using Default::default() plus calling a bunch of setters. This is similar to the model that a Google Protobuf client uses and I think it looks fine. * Drop some unnecessary cloning by comparing references * Fix compiler errors from callsites in tests. --- ethcore/src/block.rs | 56 +++--- ethcore/src/block_queue.rs | 6 +- ethcore/src/blockchain/blockchain.rs | 2 +- ethcore/src/blockchain/generator/block.rs | 7 +- ethcore/src/blockchain/generator/generator.rs | 4 +- ethcore/src/client/client.rs | 10 +- ethcore/src/client/test_client.rs | 34 ++-- ethcore/src/engines/basic_authority.rs | 23 ++- ethcore/src/engines/mod.rs | 5 +- ethcore/src/ethereum/ethash.rs | 100 ++++++----- ethcore/src/ethereum/mod.rs | 2 +- ethcore/src/header.rs | 39 ++-- ethcore/src/snapshot/block.rs | 31 ++-- ethcore/src/spec/spec.rs | 52 +++--- ethcore/src/tests/client.rs | 4 +- ethcore/src/tests/helpers.rs | 66 +++---- ethcore/src/verification/verification.rs | 169 +++++++++--------- parity/informant.rs | 2 +- rpc/src/v1/impls/eth.rs | 32 ++-- sync/src/blocks.rs | 8 +- sync/src/chain.rs | 20 +-- 21 files changed, 336 insertions(+), 336 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index cd02b9a1b..784e71dc0 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -245,11 +245,11 @@ impl<'x> OpenBlock<'x> { last_hashes: last_hashes, }; - r.block.base.header.parent_hash = parent.hash(); - r.block.base.header.number = parent.number + 1; - r.block.base.header.author = author; + r.block.base.header.set_parent_hash(parent.hash()); + r.block.base.header.set_number(parent.number() + 1); + r.block.base.header.set_author(author); r.block.base.header.set_timestamp_now(parent.timestamp()); - r.block.base.header.extra_data = extra_data; + r.block.base.header.set_extra_data(extra_data); r.block.base.header.note_dirty(); engine.populate_from_parent(&mut r.block.base.header, parent, gas_range_target.0, gas_range_target.1); @@ -309,13 +309,13 @@ impl<'x> OpenBlock<'x> { pub fn env_info(&self) -> EnvInfo { // TODO: memoise. 
EnvInfo { - number: self.block.base.header.number, - author: self.block.base.header.author.clone(), - timestamp: self.block.base.header.timestamp, - difficulty: self.block.base.header.difficulty.clone(), + number: self.block.base.header.number(), + author: self.block.base.header.author().clone(), + timestamp: self.block.base.header.timestamp(), + difficulty: self.block.base.header.difficulty().clone(), last_hashes: self.last_hashes.clone(), gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used), - gas_limit: self.block.base.header.gas_limit.clone(), + gas_limit: self.block.base.header.gas_limit().clone(), } } @@ -349,14 +349,13 @@ impl<'x> OpenBlock<'x> { let unclosed_state = s.block.state.clone(); s.engine.on_close_block(&mut s.block); - s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()); + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); - s.block.base.header.uncles_hash = uncle_bytes.sha3(); - s.block.base.header.state_root = s.block.state.root().clone(); - s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()); - s.block.base.header.log_bloom = s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b}); //TODO: use |= operator - s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used); - s.block.base.header.note_dirty(); + s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); + s.block.base.header.set_state_root(s.block.state.root().clone()); + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); + s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator + s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); ClosedBlock { block: s.block, @@ -371,20 +370,19 @@ impl<'x> OpenBlock<'x> { let mut s = self; s.engine.on_close_block(&mut s.block); - if s.block.base.header.transactions_root.is_zero() || s.block.base.header.transactions_root == SHA3_NULL_RLP { - s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()); + if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP { + s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect())); } let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out(); - if s.block.base.header.uncles_hash.is_zero() { - s.block.base.header.uncles_hash = uncle_bytes.sha3(); + if s.block.base.header.uncles_hash().is_zero() { + s.block.base.header.set_uncles_hash(uncle_bytes.sha3()); } - if s.block.base.header.receipts_root.is_zero() || s.block.base.header.receipts_root == SHA3_NULL_RLP { - s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()); + if s.block.base.header.receipts_root().is_zero() || 
s.block.base.header.receipts_root() == &SHA3_NULL_RLP { + s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect())); } - s.block.base.header.state_root = s.block.state.root().clone(); - s.block.base.header.log_bloom = s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b}); //TODO: use |= operator - s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used); - s.block.base.header.note_dirty(); + s.block.base.header.set_state_root(s.block.state.root().clone()); + s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator + s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used)); LockedBlock { block: s.block, @@ -625,9 +623,9 @@ mod tests { let last_hashes = Arc::new(vec![genesis_header.hash()]); let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle1_header = Header::new(); - uncle1_header.extra_data = b"uncle1".to_vec(); + uncle1_header.set_extra_data(b"uncle1".to_vec()); let mut uncle2_header = Header::new(); - uncle2_header.extra_data = b"uncle2".to_vec(); + uncle2_header.set_extra_data(b"uncle2".to_vec()); open_block.push_uncle(uncle1_header).unwrap(); open_block.push_uncle(uncle2_header).unwrap(); let b = open_block.close_and_lock().seal(engine, vec![]).unwrap(); @@ -643,7 +641,7 @@ mod tests { let bytes = e.rlp_bytes(); assert_eq!(bytes, orig_bytes); let uncles = BlockView::new(&bytes).uncles(); - assert_eq!(uncles[1].extra_data, b"uncle2"); + assert_eq!(uncles[1].extra_data(), b"uncle2"); let db = e.drain(); assert_eq!(orig_db.keys(), db.keys()); diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs index 89a620493..7d686cec0 100644 --- a/ethcore/src/block_queue.rs +++ b/ethcore/src/block_queue.rs @@ -260,7 +260,7 @@ impl BlockQueue { fn drain_verifying(verifying: &mut VecDeque, verified: &mut VecDeque, bad: &mut HashSet) { while !verifying.is_empty() && verifying.front().unwrap().block.is_some() { let block = verifying.pop_front().unwrap().block.unwrap(); - if bad.contains(&block.header.parent_hash) { + if bad.contains(block.header.parent_hash()) { bad.insert(block.header.hash()); } else { @@ -313,7 +313,7 @@ impl BlockQueue { return Err(ImportError::KnownBad.into()); } - if bad.contains(&header.parent_hash) { + if bad.contains(header.parent_hash()) { bad.insert(h.clone()); return Err(ImportError::KnownBad.into()); } @@ -351,7 +351,7 @@ impl BlockQueue { let mut new_verified = VecDeque::new(); for block in verified.drain(..) 
{ - if bad.contains(&block.header.parent_hash) { + if bad.contains(block.header.parent_hash()) { bad.insert(block.header.hash()); processing.remove(&block.header.hash()); } else { diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 379d77407..a581e59e9 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -1434,7 +1434,7 @@ mod tests { let mut block_header = bc.block_header(&best_hash); while !block_header.is_none() { - block_header = bc.block_header(&block_header.unwrap().parent_hash); + block_header = bc.block_header(&block_header.unwrap().parent_hash()); } assert!(bc.cache_size().blocks > 1024 * 1024); diff --git a/ethcore/src/blockchain/generator/block.rs b/ethcore/src/blockchain/generator/block.rs index 0a3dad399..238051d2a 100644 --- a/ethcore/src/blockchain/generator/block.rs +++ b/ethcore/src/blockchain/generator/block.rs @@ -44,21 +44,22 @@ impl Encodable for Block { impl Forkable for Block { fn fork(mut self, fork_number: usize) -> Self where Self: Sized { - self.header.difficulty = self.header.difficulty - U256::from(fork_number); + let difficulty = self.header.difficulty().clone() - U256::from(fork_number); + self.header.set_difficulty(difficulty); self } } impl WithBloom for Block { fn with_bloom(mut self, bloom: H2048) -> Self where Self: Sized { - self.header.log_bloom = bloom; + self.header.set_log_bloom(bloom); self } } impl CompleteBlock for Block { fn complete(mut self, parent_hash: H256) -> Bytes { - self.header.parent_hash = parent_hash; + self.header.set_parent_hash(parent_hash); encode(&self).to_vec() } } diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs index 07ce7242b..179839b5a 100644 --- a/ethcore/src/blockchain/generator/generator.rs +++ b/ethcore/src/blockchain/generator/generator.rs @@ -73,8 +73,8 @@ pub struct ChainGenerator { impl ChainGenerator { fn prepare_block(&self) -> Block { let mut block = Block::default(); - block.header.number = self.number; - block.header.difficulty = self.difficulty; + block.header.set_number(self.number); + block.header.set_difficulty(self.difficulty); block } } diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 78669912c..edd671b70 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -99,7 +99,7 @@ impl ClientReport { pub fn accrue_block(&mut self, block: &PreverifiedBlock) { self.blocks_imported += 1; self.transactions_applied += block.transactions.len(); - self.gas_processed = self.gas_processed + block.header.gas_used; + self.gas_processed = self.gas_processed + block.header.gas_used().clone(); } } @@ -284,15 +284,15 @@ impl Client { }; // Check if Parent is in chain - let chain_has_parent = self.chain.block_header(&header.parent_hash); + let chain_has_parent = self.chain.block_header(header.parent_hash()); if let None = chain_has_parent { - warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash); + warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash()); return Err(()); }; // Enact Verified Block let parent = chain_has_parent.unwrap(); - let last_hashes = self.build_last_hashes(header.parent_hash.clone()); + let last_hashes = self.build_last_hashes(header.parent_hash().clone()); let db = self.state_db.lock().boxed_clone(); let enact_result = enact_verified(block, engine, 
self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); @@ -352,7 +352,7 @@ impl Client { for block in blocks { let header = &block.header; - if invalid_blocks.contains(&header.parent_hash) { + if invalid_blocks.contains(header.parent_hash()) { invalid_blocks.insert(header.hash()); continue; } diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 8e26a6b0c..fb7f9083e 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -169,19 +169,19 @@ impl TestBlockChainClient { let len = self.numbers.read().len(); for n in len..(len + count) { let mut header = BlockHeader::new(); - header.difficulty = From::from(n); - header.parent_hash = self.last_hash.read().clone(); - header.number = n as BlockNumber; - header.gas_limit = U256::from(1_000_000); + header.set_difficulty(From::from(n)); + header.set_parent_hash(self.last_hash.read().clone()); + header.set_number(n as BlockNumber); + header.set_gas_limit(U256::from(1_000_000)); let uncles = match with { EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => { let mut uncles = RlpStream::new_list(1); let mut uncle_header = BlockHeader::new(); - uncle_header.difficulty = From::from(n); - uncle_header.parent_hash = self.last_hash.read().clone(); - uncle_header.number = n as BlockNumber; + uncle_header.set_difficulty(From::from(n)); + uncle_header.set_parent_hash(self.last_hash.read().clone()); + uncle_header.set_number(n as BlockNumber); uncles.append(&uncle_header); - header.uncles_hash = uncles.as_raw().sha3(); + header.set_uncles_hash(uncles.as_raw().sha3()); uncles }, _ => RlpStream::new_list(0) @@ -219,7 +219,7 @@ impl TestBlockChainClient { pub fn corrupt_block(&mut self, n: BlockNumber) { let hash = self.block_hash(BlockID::Number(n)).unwrap(); let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap()); - header.extra_data = b"This extra data is way too long to be considered valid".to_vec(); + header.set_extra_data(b"This extra data is way too long to be considered valid".to_vec()); let mut rlp = RlpStream::new_list(3); rlp.append(&header); rlp.append_raw(&rlp::NULL_RLP, 1); @@ -231,7 +231,7 @@ impl TestBlockChainClient { pub fn corrupt_block_parent(&mut self, n: BlockNumber) { let hash = self.block_hash(BlockID::Number(n)).unwrap(); let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap()); - header.parent_hash = H256::from(42); + header.set_parent_hash(H256::from(42)); let mut rlp = RlpStream::new_list(3); rlp.append(&header); rlp.append_raw(&rlp::NULL_RLP, 1); @@ -470,20 +470,20 @@ impl BlockChainClient for TestBlockChainClient { fn import_block(&self, b: Bytes) -> Result { let header = Rlp::new(&b).val_at::(0); let h = header.hash(); - let number: usize = header.number as usize; + let number: usize = header.number() as usize; if number > self.blocks.read().len() { panic!("Unexpected block number. 
Expected {}, got {}", self.blocks.read().len(), number); } if number > 0 { - match self.blocks.read().get(&header.parent_hash) { + match self.blocks.read().get(header.parent_hash()) { Some(parent) => { let parent = Rlp::new(parent).val_at::(0); - if parent.number != (header.number - 1) { + if parent.number() != (header.number() - 1) { panic!("Unexpected block parent"); } }, None => { - panic!("Unknown block parent {:?} for block {}", header.parent_hash, number); + panic!("Unknown block parent {:?} for block {}", header.parent_hash(), number); } } } @@ -491,18 +491,18 @@ impl BlockChainClient for TestBlockChainClient { if number == len { { let mut difficulty = self.difficulty.write(); - *difficulty = *difficulty + header.difficulty; + *difficulty = *difficulty + header.difficulty().clone(); } mem::replace(&mut *self.last_hash.write(), h.clone()); self.blocks.write().insert(h.clone(), b); self.numbers.write().insert(number, h.clone()); - let mut parent_hash = header.parent_hash; + let mut parent_hash = header.parent_hash().clone(); if number > 0 { let mut n = number - 1; while n > 0 && self.numbers.read()[&n] != parent_hash { *self.numbers.write().get_mut(&n).unwrap() = parent_hash.clone(); n -= 1; - parent_hash = Rlp::new(&self.blocks.read()[&parent_hash]).val_at::(0).parent_hash; + parent_hash = Rlp::new(&self.blocks.read()[&parent_hash]).val_at::(0).parent_hash().clone(); } } } diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 926399d7b..332d947c3 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -82,17 +82,16 @@ impl Engine for BasicAuthority { } fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { - header.difficulty = parent.difficulty; - header.gas_limit = { - let gas_limit = parent.gas_limit; + header.set_difficulty(parent.difficulty().clone()); + header.set_gas_limit({ + let gas_limit = parent.gas_limit().clone(); let bound_divisor = self.our_params.gas_limit_bound_divisor; if gas_limit < gas_floor_target { min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into()) } else { max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into()) } - }; - header.note_dirty(); + }); // info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit); } @@ -123,9 +122,9 @@ impl Engine for BasicAuthority { fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // check the seal fields. // TODO: pull this out into common code. - if header.seal.len() != self.seal_fields() { + if header.seal().len() != self.seal_fields() { return Err(From::from(BlockError::InvalidSealArity( - Mismatch { expected: self.seal_fields(), found: header.seal.len() } + Mismatch { expected: self.seal_fields(), found: header.seal().len() } ))); } Ok(()) @@ -133,7 +132,7 @@ impl Engine for BasicAuthority { fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // check the signature is legit. 
- let sig = try!(UntrustedRlp::new(&header.seal[0]).as_val::()); + let sig = try!(UntrustedRlp::new(&header.seal()[0]).as_val::()); let signer = public_to_address(&try!(recover(&sig.into(), &header.bare_hash()))); if !self.our_params.authorities.contains(&signer) { return try!(Err(BlockError::InvalidSeal)); @@ -152,10 +151,10 @@ impl Engine for BasicAuthority { return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: *parent.difficulty(), found: *header.difficulty() }))) } let gas_limit_divisor = self.our_params.gas_limit_bound_divisor; - let min_gas = parent.gas_limit - parent.gas_limit / gas_limit_divisor; - let max_gas = parent.gas_limit + parent.gas_limit / gas_limit_divisor; - if header.gas_limit <= min_gas || header.gas_limit >= max_gas { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit }))); + let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; + let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; + if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }))); } Ok(()) } diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index e7738fbaa..6414ba5e4 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -108,9 +108,8 @@ pub trait Engine : Sync + Send { /// Don't forget to call Super::populate_from_parent when subclassing & overriding. // TODO: consider including State in the params. fn populate_from_parent(&self, header: &mut Header, parent: &Header, _gas_floor_target: U256, _gas_ceil_target: U256) { - header.difficulty = parent.difficulty; - header.gas_limit = parent.gas_limit; - header.note_dirty(); + header.set_difficulty(parent.difficulty().clone()); + header.set_gas_limit(parent.gas_limit().clone()); } // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index c658432a2..82a74d9ea 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -114,9 +114,9 @@ impl Engine for Ethash { } fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, gas_ceil_target: U256) { - header.difficulty = self.calculate_difficuty(header, parent); - header.gas_limit = { - let gas_limit = parent.gas_limit; + let difficulty = self.calculate_difficulty(header, parent); + let gas_limit = { + let gas_limit = parent.gas_limit().clone(); let bound_divisor = self.ethash_params.gas_limit_bound_divisor; if gas_limit < gas_floor_target { min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into()) @@ -126,21 +126,23 @@ impl Engine for Ethash { min(gas_ceil_target, max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into() + - (header.gas_used * 6.into() / 5.into()) / bound_divisor)) + (header.gas_used().clone() * 6.into() / 5.into()) / bound_divisor)) } }; - if header.number >= self.ethash_params.dao_hardfork_transition && - header.number <= self.ethash_params.dao_hardfork_transition + 9 { - header.extra_data = b"dao-hard-fork"[..].to_owned(); + header.set_difficulty(difficulty); + header.set_gas_limit(gas_limit); + if header.number() >= self.ethash_params.dao_hardfork_transition && + header.number() <= 
self.ethash_params.dao_hardfork_transition + 9 { + header.set_extra_data(b"dao-hard-fork"[..].to_owned()); } header.note_dirty(); -// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit); +// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number(), header.difficulty(), header.gas_limit()); } fn on_new_block(&self, block: &mut ExecutedBlock) { - if block.fields().header.number == self.ethash_params.dao_hardfork_transition { + if block.fields().header.number() == self.ethash_params.dao_hardfork_transition { // TODO: enable trigger function maybe? -// if block.fields().header.gas_limit <= 4_000_000.into() { +// if block.fields().header.gas_limit() <= 4_000_000.into() { let mut state = block.fields_mut().state; for child in &self.ethash_params.dao_hardfork_accounts { let b = state.balance(child); @@ -157,7 +159,7 @@ impl Engine for Ethash { let fields = block.fields_mut(); // Bestow block reward - fields.state.add_balance(&fields.header.author, &(reward + reward / U256::from(32) * U256::from(fields.uncles.len()))); + fields.state.add_balance(&fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len()))); // Bestow uncle rewards let current_number = fields.header.number(); @@ -171,18 +173,18 @@ impl Engine for Ethash { fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // check the seal fields. - if header.seal.len() != self.seal_fields() { + if header.seal().len() != self.seal_fields() { return Err(From::from(BlockError::InvalidSealArity( - Mismatch { expected: self.seal_fields(), found: header.seal.len() } + Mismatch { expected: self.seal_fields(), found: header.seal().len() } ))); } - try!(UntrustedRlp::new(&header.seal[0]).as_val::()); - try!(UntrustedRlp::new(&header.seal[1]).as_val::()); + try!(UntrustedRlp::new(&header.seal()[0]).as_val::()); + try!(UntrustedRlp::new(&header.seal()[1]).as_val::()); // TODO: consider removing these lines. let min_difficulty = self.ethash_params.minimum_difficulty; - if header.difficulty < min_difficulty { - return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty }))) + if header.difficulty() < &min_difficulty { + return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty().clone() }))) } let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty( @@ -190,37 +192,37 @@ impl Engine for Ethash { header.nonce().low_u64(), &Ethash::to_ethash(header.mix_hash()) ))); - if difficulty < header.difficulty { - return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty }))); + if &difficulty < header.difficulty() { + return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty }))); } - if header.number >= self.ethash_params.dao_hardfork_transition && - header.number <= self.ethash_params.dao_hardfork_transition + 9 && - header.extra_data[..] != b"dao-hard-fork"[..] { + if header.number() >= self.ethash_params.dao_hardfork_transition && + header.number() <= self.ethash_params.dao_hardfork_transition + 9 && + header.extra_data()[..] != b"dao-hard-fork"[..] 
{ return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: None, found: 0 }))); } - if header.gas_limit > 0x7fffffffffffffffu64.into() { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit }))); + if header.gas_limit() > &0x7fffffffffffffffu64.into() { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit().clone() }))); } Ok(()) } fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { - if header.seal.len() != self.seal_fields() { + if header.seal().len() != self.seal_fields() { return Err(From::from(BlockError::InvalidSealArity( - Mismatch { expected: self.seal_fields(), found: header.seal.len() } + Mismatch { expected: self.seal_fields(), found: header.seal().len() } ))); } - let result = self.pow.compute_light(header.number as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64()); + let result = self.pow.compute_light(header.number() as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64()); let mix = Ethash::from_ethash(result.mix_hash); let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(result.value)); if mix != header.mix_hash() { return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() }))); } - if difficulty < header.difficulty { - return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty }))); + if &difficulty < header.difficulty() { + return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty }))); } Ok(()) } @@ -232,15 +234,15 @@ impl Engine for Ethash { } // Check difficulty is correct given the two timestamps. 
- let expected_difficulty = self.calculate_difficuty(header, parent); - if header.difficulty != expected_difficulty { - return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty }))) + let expected_difficulty = self.calculate_difficulty(header, parent); + if header.difficulty() != &expected_difficulty { + return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() }))) } let gas_limit_divisor = self.ethash_params.gas_limit_bound_divisor; - let min_gas = parent.gas_limit - parent.gas_limit / gas_limit_divisor; - let max_gas = parent.gas_limit + parent.gas_limit / gas_limit_divisor; - if header.gas_limit <= min_gas || header.gas_limit >= max_gas { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit }))); + let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; + let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; + if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }))); } Ok(()) } @@ -259,9 +261,9 @@ impl Engine for Ethash { #[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self impl Ethash { - fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 { + fn calculate_difficulty(&self, header: &Header, parent: &Header) -> U256 { const EXP_DIFF_PERIOD: u64 = 100000; - if header.number == 0 { + if header.number() == 0 { panic!("Can't calculate genesis block difficulty"); } @@ -270,25 +272,25 @@ impl Ethash { let duration_limit = self.ethash_params.duration_limit; let frontier_limit = self.ethash_params.frontier_compatibility_mode_limit; - let mut target = if header.number < frontier_limit { - if header.timestamp >= parent.timestamp + duration_limit { - parent.difficulty - (parent.difficulty / difficulty_bound_divisor) + let mut target = if header.number() < frontier_limit { + if header.timestamp() >= parent.timestamp() + duration_limit { + parent.difficulty().clone() - (parent.difficulty().clone() / difficulty_bound_divisor) } else { - parent.difficulty + (parent.difficulty / difficulty_bound_divisor) + parent.difficulty().clone() + (parent.difficulty().clone() / difficulty_bound_divisor) } } else { - trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty, header.timestamp, parent.timestamp); + trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp()); //block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99) - let diff_inc = (header.timestamp - parent.timestamp) / 10; + let diff_inc = (header.timestamp() - parent.timestamp()) / 10; if diff_inc <= 1 { - parent.difficulty + parent.difficulty / From::from(2048) * From::from(1 - diff_inc) + parent.difficulty().clone() + parent.difficulty().clone() / From::from(2048) * From::from(1 - diff_inc) } else { - parent.difficulty - parent.difficulty / From::from(2048) * From::from(min(diff_inc - 1, 99)) + parent.difficulty().clone() - parent.difficulty().clone() / From::from(2048) * From::from(min(diff_inc - 1, 99)) } }; target = 
max(min_difficulty, target); - let period = ((parent.number + 1) / EXP_DIFF_PERIOD) as usize; + let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize; if period > 1 { target = max(min_difficulty, target + (U256::from(1) << (period - 2))); } @@ -336,7 +338,7 @@ impl Header { /// Set the nonce and mix hash fields of the header. pub fn set_nonce_and_mix_hash(&mut self, nonce: &H64, mix_hash: &H256) { - self.seal = vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()]; + self.set_seal(vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()]); } } @@ -374,7 +376,7 @@ mod tests { let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle = Header::new(); let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); - uncle.author = uncle_author.clone(); + uncle.set_author(uncle_author); b.push_uncle(uncle).unwrap(); let b = b.close(); diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 40e85d619..1efe001e5 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -68,7 +68,7 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce(), Default::default()).unwrap(); + let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap(); assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into()); assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()), 1u64.into()); diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs index a123197a9..9b0e155f4 100644 --- a/ethcore/src/header.rs +++ b/ethcore/src/header.rs @@ -33,43 +33,42 @@ pub type BlockNumber = u64; /// Doesn't do all that much on its own. #[derive(Debug, Clone, Eq)] pub struct Header { - // TODO: make all private. /// Parent hash. - pub parent_hash: H256, + parent_hash: H256, /// Block timestamp. - pub timestamp: u64, + timestamp: u64, /// Block number. - pub number: BlockNumber, + number: BlockNumber, /// Block author. - pub author: Address, + author: Address, /// Transactions root. - pub transactions_root: H256, + transactions_root: H256, /// Block uncles hash. - pub uncles_hash: H256, + uncles_hash: H256, /// Block extra data. - pub extra_data: Bytes, + extra_data: Bytes, /// State root. - pub state_root: H256, + state_root: H256, /// Block receipts root. - pub receipts_root: H256, + receipts_root: H256, /// Block bloom. - pub log_bloom: LogBloom, + log_bloom: LogBloom, /// Gas used for contracts execution. - pub gas_used: U256, + gas_used: U256, /// Block gas limit. - pub gas_limit: U256, + gas_limit: U256, /// Block difficulty. - pub difficulty: U256, + difficulty: U256, /// Vector of post-RLP-encoded fields. - pub seal: Vec, + seal: Vec, /// The memoized hash of the RLP representation *including* the seal fields. - pub hash: RefCell>, + hash: RefCell>, /// The memoized hash of the RLP representation *without* the seal fields. - pub bare_hash: RefCell>, + bare_hash: RefCell>, } impl PartialEq for Header { @@ -134,15 +133,21 @@ impl Header { /// Get the extra data field of the header. 
pub fn extra_data(&self) -> &Bytes { &self.extra_data } + /// Get a mutable reference to extra_data + pub fn extra_data_mut(&mut self) -> &mut Bytes { self.note_dirty(); &mut self.extra_data } /// Get the state root field of the header. pub fn state_root(&self) -> &H256 { &self.state_root } /// Get the receipts root field of the header. pub fn receipts_root(&self) -> &H256 { &self.receipts_root } + /// Get the log bloom field of the header. + pub fn log_bloom(&self) -> &LogBloom { &self.log_bloom } /// Get the transactions root field of the header. pub fn transactions_root(&self) -> &H256 { &self.transactions_root } /// Get the uncles hash field of the header. pub fn uncles_hash(&self) -> &H256 { &self.uncles_hash } + /// Get the gas used field of the header. + pub fn gas_used(&self) -> &U256 { &self.gas_used } /// Get the gas limit field of the header. pub fn gas_limit(&self) -> &U256 { &self.gas_limit } diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index f317cf54e..66fc06066 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -90,27 +90,26 @@ impl AbridgedBlock { let rlp = UntrustedRlp::new(&self.rlp).decompress(RlpType::Blocks); let rlp = UntrustedRlp::new(&rlp); - let mut header = Header { - parent_hash: parent_hash, - author: try!(rlp.val_at(0)), - state_root: try!(rlp.val_at(1)), - transactions_root: try!(rlp.val_at(2)), - receipts_root: try!(rlp.val_at(3)), - log_bloom: try!(rlp.val_at(4)), - difficulty: try!(rlp.val_at(5)), - number: number, - gas_limit: try!(rlp.val_at(6)), - gas_used: try!(rlp.val_at(7)), - timestamp: try!(rlp.val_at(8)), - extra_data: try!(rlp.val_at(9)), - ..Default::default() - }; + let mut header: Header = Default::default(); + header.set_parent_hash(parent_hash); + header.set_author(try!(rlp.val_at(0))); + header.set_state_root(try!(rlp.val_at(1))); + header.set_transactions_root(try!(rlp.val_at(2))); + header.set_receipts_root(try!(rlp.val_at(3))); + header.set_log_bloom(try!(rlp.val_at(4))); + header.set_difficulty(try!(rlp.val_at(5))); + header.set_number(number); + header.set_gas_limit(try!(rlp.val_at(6))); + header.set_gas_used(try!(rlp.val_at(7))); + header.set_timestamp(try!(rlp.val_at(8))); + header.set_extra_data(try!(rlp.val_at(9))); + let transactions = try!(rlp.val_at(10)); let uncles: Vec
= try!(rlp.val_at(11)); let mut uncles_rlp = RlpStream::new(); uncles_rlp.append(&uncles); - header.uncles_hash = uncles_rlp.as_raw().sha3(); + header.set_uncles_hash(uncles_rlp.as_raw().sha3()); let mut seal_fields = Vec::new(); for i in (HEADER_FIELDS + BLOCK_FIELDS)..rlp.item_count() { diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index a0c32d51a..7a03a2acd 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -25,8 +25,6 @@ use super::seal::Generic as GenericSeal; use ethereum; use ethjson; -use std::cell::RefCell; - /// Parameters common to all engines. #[derive(Debug, PartialEq, Clone)] #[cfg_attr(test, derive(Default))] @@ -161,32 +159,30 @@ impl Spec { /// Get the header of the genesis block. pub fn genesis_header(&self) -> Header { - Header { - parent_hash: self.parent_hash.clone(), - timestamp: self.timestamp, - number: 0, - author: self.author.clone(), - transactions_root: self.transactions_root.clone(), - uncles_hash: RlpStream::new_list(0).out().sha3(), - extra_data: self.extra_data.clone(), - state_root: self.state_root().clone(), - receipts_root: self.receipts_root.clone(), - log_bloom: H2048::new().clone(), - gas_used: self.gas_used.clone(), - gas_limit: self.gas_limit.clone(), - difficulty: self.difficulty.clone(), - seal: { - let seal = { - let mut s = RlpStream::new_list(self.seal_fields); - s.append_raw(&self.seal_rlp, self.seal_fields); - s.out() - }; - let r = Rlp::new(&seal); - (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() - }, - hash: RefCell::new(None), - bare_hash: RefCell::new(None), - } + let mut header: Header = Default::default(); + header.set_parent_hash(self.parent_hash.clone()); + header.set_timestamp(self.timestamp); + header.set_number(0); + header.set_author(self.author.clone()); + header.set_transactions_root(self.transactions_root.clone()); + header.set_uncles_hash(RlpStream::new_list(0).out().sha3()); + header.set_extra_data(self.extra_data.clone()); + header.set_state_root(self.state_root().clone()); + header.set_receipts_root(self.receipts_root.clone()); + header.set_log_bloom(H2048::new().clone()); + header.set_gas_used(self.gas_used.clone()); + header.set_gas_limit(self.gas_limit.clone()); + header.set_difficulty(self.difficulty.clone()); + header.set_seal({ + let seal = { + let mut s = RlpStream::new_list(self.seal_fields); + s.append_raw(&self.seal_rlp, self.seal_fields); + s.out() + }; + let r = Rlp::new(&seal); + (0..self.seal_fields).map(|i| r.at(i).as_raw().to_vec()).collect() + }); + return header; } /// Compose the genesis block for this chain. 
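The hunks above and below repeatedly replace direct field access on `Header` with `set_*` setters and reference-returning getters. As a minimal sketch of what a call site looks like after this change, assuming only the accessor names visible in these hunks (`Header::new`, `set_parent_hash`, `set_difficulty`, `hash()`, `difficulty()`, and so on); the helper function, its import path, and the assumption that every setter invalidates the memoized hashes are illustrative, not part of the patch:

```rust
// Illustrative sketch only -- not taken from the patch. It assumes the
// accessor API introduced above (private fields, `set_*` setters,
// reference-returning getters) and an in-crate import path.
use header::Header;

fn child_header(parent: &Header) -> Header {
    let mut header = Header::new();
    // Writes now go through setters; `extra_data_mut()` above shows the
    // pattern of calling `note_dirty()` so memoized hashes are recomputed.
    header.set_parent_hash(parent.hash());
    header.set_number(parent.number() + 1);
    header.set_timestamp(parent.timestamp() + 10);
    header.set_difficulty(parent.difficulty().clone());
    header.set_gas_limit(parent.gas_limit().clone());

    // Getters return references, hence the explicit clone()/deref at call sites.
    assert_eq!(*header.parent_hash(), parent.hash());
    header
}
```

This mirrors the conversions in `spec.rs`, `test_client.rs`, and the test helpers below, where struct-literal construction and bare field assignments are rewritten into the same setter calls.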
diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 1ac26c83b..26256a760 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -45,9 +45,9 @@ fn returns_state_root_basic() { let client_result = generate_dummy_client(6); let client = client_result.reference(); let test_spec = get_test_spec(); - let state_root = test_spec.genesis_header().state_root; + let genesis_header = test_spec.genesis_header(); - assert!(client.state_data(&state_root).is_some()); + assert!(client.state_data(genesis_header.state_root()).is_some()); } #[test] diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 4942ace5a..f3975a0d0 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -84,26 +84,26 @@ pub fn create_test_block(header: &Header) -> Bytes { fn create_unverifiable_block_header(order: u32, parent_hash: H256) -> Header { let mut header = Header::new(); - header.gas_limit = 0.into(); - header.difficulty = (order * 100).into(); - header.timestamp = (order * 10) as u64; - header.number = order as u64; - header.parent_hash = parent_hash; - header.state_root = H256::zero(); + header.set_gas_limit(0.into()); + header.set_difficulty((order * 100).into()); + header.set_timestamp((order * 10) as u64); + header.set_number(order as u64); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); header } fn create_unverifiable_block_with_extra(order: u32, parent_hash: H256, extra: Option) -> Bytes { let mut header = create_unverifiable_block_header(order, parent_hash); - header.extra_data = match extra { + header.set_extra_data(match extra { Some(extra_data) => extra_data, None => { let base = (order & 0x000000ff) as u8; let generated: Vec = vec![base + 1, base + 2, base + 3]; generated } - }; + }); create_test_block(&header) } @@ -204,7 +204,7 @@ pub fn push_blocks_to_client(client: &Arc, timestamp_salt: u64, starting let test_spec = get_test_spec(); let test_engine = &test_spec.engine; //let test_engine = test_spec.to_engine().unwrap(); - let state_root = test_spec.genesis_header().state_root; + let state_root = test_spec.genesis_header().state_root().clone(); let mut rolling_hash = client.chain_info().best_block_hash; let mut rolling_block_number = starting_number as u64; let mut rolling_timestamp = timestamp_salt + starting_number as u64 * 10; @@ -212,12 +212,12 @@ pub fn push_blocks_to_client(client: &Arc, timestamp_salt: u64, starting for _ in 0..block_number { let mut header = Header::new(); - header.gas_limit = test_engine.params().min_gas_limit; - header.difficulty = U256::from(0x20000); - header.timestamp = rolling_timestamp; - header.number = rolling_block_number; - header.parent_hash = rolling_hash; - header.state_root = state_root.clone(); + header.set_gas_limit(test_engine.params().min_gas_limit); + header.set_difficulty(U256::from(0x20000)); + header.set_timestamp(rolling_timestamp); + header.set_number(rolling_block_number); + header.set_parent_hash(rolling_hash); + header.set_state_root(state_root); rolling_hash = header.hash(); rolling_block_number = rolling_block_number + 1; @@ -345,12 +345,12 @@ pub fn get_good_dummy_block_fork_seq(start_number: usize, count: usize, parent_h let mut r = Vec::new(); for i in start_number .. 
start_number + count + 1 { let mut block_header = Header::new(); - block_header.gas_limit = test_engine.params().min_gas_limit; - block_header.difficulty = U256::from(i).mul(U256([0, 1, 0, 0])); - block_header.timestamp = rolling_timestamp; - block_header.number = i as u64; - block_header.parent_hash = parent; - block_header.state_root = test_spec.genesis_header().state_root; + block_header.set_gas_limit(test_engine.params().min_gas_limit); + block_header.set_difficulty(U256::from(i).mul(U256([0, 1, 0, 0]))); + block_header.set_timestamp(rolling_timestamp); + block_header.set_number(i as u64); + block_header.set_parent_hash(parent); + block_header.set_state_root(test_spec.genesis_header().state_root().clone()); parent = block_header.hash(); rolling_timestamp = rolling_timestamp + 10; @@ -365,12 +365,12 @@ pub fn get_good_dummy_block() -> Bytes { let mut block_header = Header::new(); let test_spec = get_test_spec(); let test_engine = &test_spec.engine; - block_header.gas_limit = test_engine.params().min_gas_limit; - block_header.difficulty = U256::from(0x20000); - block_header.timestamp = 40; - block_header.number = 1; - block_header.parent_hash = test_spec.genesis_header().hash(); - block_header.state_root = test_spec.genesis_header().state_root; + block_header.set_gas_limit(test_engine.params().min_gas_limit); + block_header.set_difficulty(U256::from(0x20000)); + block_header.set_timestamp(40); + block_header.set_number(1); + block_header.set_parent_hash(test_spec.genesis_header().hash()); + block_header.set_state_root(test_spec.genesis_header().state_root().clone()); create_test_block(&block_header) } @@ -379,12 +379,12 @@ pub fn get_bad_state_dummy_block() -> Bytes { let mut block_header = Header::new(); let test_spec = get_test_spec(); let test_engine = &test_spec.engine; - block_header.gas_limit = test_engine.params().min_gas_limit; - block_header.difficulty = U256::from(0x20000); - block_header.timestamp = 40; - block_header.number = 1; - block_header.parent_hash = test_spec.genesis_header().hash(); - block_header.state_root = 0xbad.into(); + block_header.set_gas_limit(test_engine.params().min_gas_limit); + block_header.set_difficulty(U256::from(0x20000)); + block_header.set_timestamp(40); + block_header.set_number(1); + block_header.set_parent_hash(test_spec.genesis_header().hash()); + block_header.set_state_root(0xbad.into()); create_test_block(&block_header) } diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index 9cea3bede..aa719cf23 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -38,7 +38,7 @@ pub struct PreverifiedBlock { /// Phase 1 quick block verification. Only does checks that are cheap. Operates on a single block pub fn verify_block_basic(header: &Header, bytes: &[u8], engine: &Engine) -> Result<(), Error> { try!(verify_header(&header, engine)); - try!(verify_block_integrity(bytes, &header.transactions_root, &header.uncles_hash)); + try!(verify_block_integrity(bytes, &header.transactions_root(), &header.uncles_hash())); try!(engine.verify_block_basic(&header, Some(bytes))); for u in try!(UntrustedRlp::new(bytes).at(2)).iter().map(|rlp| rlp.as_val::
()) { let u = try!(u); @@ -81,7 +81,7 @@ pub fn verify_block_unordered(header: Header, bytes: Bytes, engine: &Engine) -> /// Phase 3 verification. Check block information against parent and uncles. pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: &BlockProvider) -> Result<(), Error> { // TODO: verify timestamp - let parent = try!(bc.block_header(&header.parent_hash).ok_or_else(|| Error::from(BlockError::UnknownParent(header.parent_hash.clone())))); + let parent = try!(bc.block_header(&header.parent_hash()).ok_or_else(|| Error::from(BlockError::UnknownParent(header.parent_hash().clone())))); try!(verify_parent(&header, &parent)); try!(engine.verify_block_family(&header, &parent, Some(bytes))); @@ -93,7 +93,7 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: & let mut excluded = HashSet::new(); excluded.insert(header.hash()); - let mut hash = header.parent_hash.clone(); + let mut hash = header.parent_hash().clone(); excluded.insert(hash.clone()); for _ in 0..engine.maximum_uncle_age() { match bc.block_details(&hash) { @@ -122,12 +122,12 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: & // 6 7 // (8 Invalid) - let depth = if header.number > uncle.number { header.number - uncle.number } else { 0 }; + let depth = if header.number() > uncle.number() { header.number() - uncle.number() } else { 0 }; if depth > engine.maximum_uncle_age() as u64 { - return Err(From::from(BlockError::UncleTooOld(OutOfBounds { min: Some(header.number - depth), max: Some(header.number - 1), found: uncle.number }))); + return Err(From::from(BlockError::UncleTooOld(OutOfBounds { min: Some(header.number() - depth), max: Some(header.number() - 1), found: uncle.number() }))); } else if depth < 1 { - return Err(From::from(BlockError::UncleIsBrother(OutOfBounds { min: Some(header.number - depth), max: Some(header.number - 1), found: uncle.number }))); + return Err(From::from(BlockError::UncleIsBrother(OutOfBounds { min: Some(header.number() - depth), max: Some(header.number() - 1), found: uncle.number() }))); } // cB @@ -139,8 +139,8 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: & // cB.p^6 -----------/ 6 // cB.p^7 -------------/ // cB.p^8 - let mut expected_uncle_parent = header.parent_hash.clone(); - let uncle_parent = try!(bc.block_header(&uncle.parent_hash).ok_or_else(|| Error::from(BlockError::UnknownUncleParent(uncle.parent_hash.clone())))); + let mut expected_uncle_parent = header.parent_hash().clone(); + let uncle_parent = try!(bc.block_header(&uncle.parent_hash()).ok_or_else(|| Error::from(BlockError::UnknownUncleParent(uncle.parent_hash().clone())))); for _ in 0..depth { match bc.block_details(&expected_uncle_parent) { Some(details) => { @@ -162,50 +162,50 @@ pub fn verify_block_family(header: &Header, bytes: &[u8], engine: &Engine, bc: & /// Phase 4 verification. 
Check block information against transaction enactment results, pub fn verify_block_final(expected: &Header, got: &Header) -> Result<(), Error> { - if expected.gas_used != got.gas_used { - return Err(From::from(BlockError::InvalidGasUsed(Mismatch { expected: expected.gas_used, found: got.gas_used }))) + if expected.gas_used() != got.gas_used() { + return Err(From::from(BlockError::InvalidGasUsed(Mismatch { expected: expected.gas_used().clone(), found: got.gas_used().clone() }))) } - if expected.log_bloom != got.log_bloom { - return Err(From::from(BlockError::InvalidLogBloom(Mismatch { expected: expected.log_bloom.clone(), found: got.log_bloom.clone() }))) + if expected.log_bloom() != got.log_bloom() { + return Err(From::from(BlockError::InvalidLogBloom(Mismatch { expected: expected.log_bloom().clone(), found: got.log_bloom().clone() }))) } - if expected.state_root != got.state_root { - return Err(From::from(BlockError::InvalidStateRoot(Mismatch { expected: expected.state_root.clone(), found: got.state_root.clone() }))) + if expected.state_root() != got.state_root() { + return Err(From::from(BlockError::InvalidStateRoot(Mismatch { expected: expected.state_root().clone(), found: got.state_root().clone() }))) } - if expected.receipts_root != got.receipts_root { - return Err(From::from(BlockError::InvalidReceiptsRoot(Mismatch { expected: expected.receipts_root.clone(), found: got.receipts_root.clone() }))) + if expected.receipts_root() != got.receipts_root() { + return Err(From::from(BlockError::InvalidReceiptsRoot(Mismatch { expected: expected.receipts_root().clone(), found: got.receipts_root().clone() }))) } Ok(()) } /// Check basic header parameters. fn verify_header(header: &Header, engine: &Engine) -> Result<(), Error> { - if header.number >= From::from(BlockNumber::max_value()) { - return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number }))) + if header.number() >= From::from(BlockNumber::max_value()) { + return Err(From::from(BlockError::RidiculousNumber(OutOfBounds { max: Some(From::from(BlockNumber::max_value())), min: None, found: header.number() }))) } - if header.gas_used > header.gas_limit { - return Err(From::from(BlockError::TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit), min: None, found: header.gas_used }))); + if header.gas_used() > header.gas_limit() { + return Err(From::from(BlockError::TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit().clone()), min: None, found: header.gas_used().clone() }))); } let min_gas_limit = engine.params().min_gas_limit; - if header.gas_limit < min_gas_limit { - return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit }))); + if header.gas_limit() < &min_gas_limit { + return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit().clone() }))); } let maximum_extra_data_size = engine.maximum_extra_data_size(); - if header.number != 0 && header.extra_data.len() > maximum_extra_data_size { - return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: Some(maximum_extra_data_size), found: header.extra_data.len() }))); + if header.number() != 0 && header.extra_data().len() > maximum_extra_data_size { + return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: Some(maximum_extra_data_size), found: header.extra_data().len() }))); } Ok(()) } /// Check header parameters 
agains parent header. fn verify_parent(header: &Header, parent: &Header) -> Result<(), Error> { - if !header.parent_hash.is_zero() && parent.hash() != header.parent_hash { - return Err(From::from(BlockError::InvalidParentHash(Mismatch { expected: parent.hash(), found: header.parent_hash.clone() }))) + if !header.parent_hash().is_zero() && &parent.hash() != header.parent_hash() { + return Err(From::from(BlockError::InvalidParentHash(Mismatch { expected: parent.hash(), found: header.parent_hash().clone() }))) } - if header.timestamp <= parent.timestamp { - return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp + 1), found: header.timestamp }))) + if header.timestamp() <= parent.timestamp() { + return Err(From::from(BlockError::InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp() + 1), found: header.timestamp() }))) } - if header.number != parent.number + 1 { - return Err(From::from(BlockError::InvalidNumber(Mismatch { expected: parent.number + 1, found: header.number }))); + if header.number() != parent.number() + 1 { + return Err(From::from(BlockError::InvalidNumber(Mismatch { expected: parent.number() + 1, found: header.number() }))); } Ok(()) } @@ -307,9 +307,9 @@ mod tests { self.blocks.get(hash).map(|bytes| { let header = BlockView::new(bytes).header(); BlockDetails { - number: header.number, - total_difficulty: header.difficulty, - parent: header.parent_hash, + number: header.number(), + total_difficulty: header.difficulty().clone(), + parent: header.parent_hash().clone(), children: Vec::new(), } }) @@ -352,9 +352,9 @@ mod tests { let engine = &*spec.engine; let min_gas_limit = engine.params().min_gas_limit; - good.gas_limit = min_gas_limit; - good.timestamp = 40; - good.number = 10; + good.set_gas_limit(min_gas_limit); + good.set_timestamp(40); + good.set_number(10); let keypair = Random.generate().unwrap(); @@ -381,31 +381,31 @@ mod tests { let diff_inc = U256::from(0x40); let mut parent6 = good.clone(); - parent6.number = 6; + parent6.set_number(6); let mut parent7 = good.clone(); - parent7.number = 7; - parent7.parent_hash = parent6.hash(); - parent7.difficulty = parent6.difficulty + diff_inc; - parent7.timestamp = parent6.timestamp + 10; + parent7.set_number(7); + parent7.set_parent_hash(parent6.hash()); + parent7.set_difficulty(parent6.difficulty().clone() + diff_inc); + parent7.set_timestamp(parent6.timestamp() + 10); let mut parent8 = good.clone(); - parent8.number = 8; - parent8.parent_hash = parent7.hash(); - parent8.difficulty = parent7.difficulty + diff_inc; - parent8.timestamp = parent7.timestamp + 10; + parent8.set_number(8); + parent8.set_parent_hash(parent7.hash()); + parent8.set_difficulty(parent7.difficulty().clone() + diff_inc); + parent8.set_timestamp(parent7.timestamp() + 10); let mut good_uncle1 = good.clone(); - good_uncle1.number = 9; - good_uncle1.parent_hash = parent8.hash(); - good_uncle1.difficulty = parent8.difficulty + diff_inc; - good_uncle1.timestamp = parent8.timestamp + 10; - good_uncle1.extra_data.push(1u8); + good_uncle1.set_number(9); + good_uncle1.set_parent_hash(parent8.hash()); + good_uncle1.set_difficulty(parent8.difficulty().clone() + diff_inc); + good_uncle1.set_timestamp(parent8.timestamp() + 10); + good_uncle1.extra_data_mut().push(1u8); let mut good_uncle2 = good.clone(); - good_uncle2.number = 8; - good_uncle2.parent_hash = parent7.hash(); - good_uncle2.difficulty = parent7.difficulty + diff_inc; - good_uncle2.timestamp = parent7.timestamp + 10; - 
good_uncle2.extra_data.push(2u8); + good_uncle2.set_number(8); + good_uncle2.set_parent_hash(parent7.hash()); + good_uncle2.set_difficulty(parent7.difficulty().clone() + diff_inc); + good_uncle2.set_timestamp(parent7.timestamp() + 10); + good_uncle2.extra_data_mut().push(2u8); let good_uncles = vec![ good_uncle1.clone(), good_uncle2.clone() ]; let mut uncles_rlp = RlpStream::new(); @@ -414,14 +414,14 @@ mod tests { let good_transactions_root = ordered_trie_root(good_transactions.iter().map(|t| encode::(t).to_vec()).collect()); let mut parent = good.clone(); - parent.number = 9; - parent.timestamp = parent8.timestamp + 10; - parent.parent_hash = parent8.hash(); - parent.difficulty = parent8.difficulty + diff_inc; + parent.set_number(9); + parent.set_timestamp(parent8.timestamp() + 10); + parent.set_parent_hash(parent8.hash()); + parent.set_difficulty(parent8.difficulty().clone() + diff_inc); - good.parent_hash = parent.hash(); - good.difficulty = parent.difficulty + diff_inc; - good.timestamp = parent.timestamp + 10; + good.set_parent_hash(parent.hash()); + good.set_difficulty(parent.difficulty().clone() + diff_inc); + good.set_timestamp(parent.timestamp() + 10); let mut bc = TestBlockChain::new(); bc.insert(create_test_block(&good)); @@ -433,61 +433,62 @@ mod tests { check_ok(basic_test(&create_test_block(&good), engine)); let mut header = good.clone(); - header.transactions_root = good_transactions_root.clone(); - header.uncles_hash = good_uncles_hash.clone(); + header.set_transactions_root(good_transactions_root.clone()); + header.set_uncles_hash(good_uncles_hash.clone()); check_ok(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine)); - header.gas_limit = min_gas_limit - From::from(1); + header.set_gas_limit(min_gas_limit - From::from(1)); check_fail(basic_test(&create_test_block(&header), engine), - InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit })); + InvalidGasLimit(OutOfBounds { min: Some(min_gas_limit), max: None, found: header.gas_limit().clone() })); header = good.clone(); - header.number = BlockNumber::max_value(); + header.set_number(BlockNumber::max_value()); check_fail(basic_test(&create_test_block(&header), engine), - RidiculousNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number })); + RidiculousNumber(OutOfBounds { max: Some(BlockNumber::max_value()), min: None, found: header.number() })); header = good.clone(); - header.gas_used = header.gas_limit + From::from(1); + let gas_used = header.gas_limit().clone() + 1.into(); + header.set_gas_used(gas_used); check_fail(basic_test(&create_test_block(&header), engine), - TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit), min: None, found: header.gas_used })); + TooMuchGasUsed(OutOfBounds { max: Some(header.gas_limit().clone()), min: None, found: header.gas_used().clone() })); header = good.clone(); - header.extra_data.resize(engine.maximum_extra_data_size() + 1, 0u8); + header.extra_data_mut().resize(engine.maximum_extra_data_size() + 1, 0u8); check_fail(basic_test(&create_test_block(&header), engine), - ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data.len() })); + ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data().len() })); header = good.clone(); - header.extra_data.resize(engine.maximum_extra_data_size() + 1, 0u8); + 
header.extra_data_mut().resize(engine.maximum_extra_data_size() + 1, 0u8); check_fail(basic_test(&create_test_block(&header), engine), - ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data.len() })); + ExtraDataOutOfBounds(OutOfBounds { max: Some(engine.maximum_extra_data_size()), min: None, found: header.extra_data().len() })); header = good.clone(); - header.uncles_hash = good_uncles_hash.clone(); + header.set_uncles_hash(good_uncles_hash.clone()); check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), - InvalidTransactionsRoot(Mismatch { expected: good_transactions_root.clone(), found: header.transactions_root })); + InvalidTransactionsRoot(Mismatch { expected: good_transactions_root.clone(), found: header.transactions_root().clone() })); header = good.clone(); - header.transactions_root = good_transactions_root.clone(); + header.set_transactions_root(good_transactions_root.clone()); check_fail(basic_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine), - InvalidUnclesHash(Mismatch { expected: good_uncles_hash.clone(), found: header.uncles_hash })); + InvalidUnclesHash(Mismatch { expected: good_uncles_hash.clone(), found: header.uncles_hash().clone() })); check_ok(family_test(&create_test_block(&good), engine, &bc)); check_ok(family_test(&create_test_block_with_data(&good, &good_transactions, &good_uncles), engine, &bc)); header = good.clone(); - header.parent_hash = H256::random(); + header.set_parent_hash(H256::random()); check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), - UnknownParent(header.parent_hash)); + UnknownParent(header.parent_hash().clone())); header = good.clone(); - header.timestamp = 10; + header.set_timestamp(10); check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), - InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp + 1), found: header.timestamp })); + InvalidTimestamp(OutOfBounds { max: None, min: Some(parent.timestamp() + 1), found: header.timestamp() })); header = good.clone(); - header.number = 9; + header.set_number(9); check_fail(family_test(&create_test_block_with_data(&header, &good_transactions, &good_uncles), engine, &bc), - InvalidNumber(Mismatch { expected: parent.number + 1, found: header.number })); + InvalidNumber(Mismatch { expected: parent.number() + 1, found: header.number() })); header = good.clone(); let mut bad_uncles = good_uncles.clone(); diff --git a/parity/informant.rs b/parity/informant.rs index b6e8b7a84..58accd140 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -169,7 +169,7 @@ impl ChainNotify for Informant { Colour::White.bold().paint(format!("#{}", header.number())), Colour::White.bold().paint(format!("{}", header.hash())), Colour::Yellow.bold().paint(format!("{}", tx_count)), - Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used.low_u64() as f32 / 1000000f32)), + Colour::Yellow.bold().paint(format!("{:.2}", header.gas_used().low_u64() as f32 / 1000000f32)), Colour::Purple.bold().paint(format!("{:.2}", duration as f32 / 1000000f32)), Colour::Blue.bold().paint(format!("{:.2}", size as f32 / 1024f32)), if skipped > 0 { format!(" + another {} block(s)", Colour::Red.bold().paint(format!("{}", skipped))) } else { String::new() } diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 3628f99a9..d805d57ee 100644 --- 
a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -158,22 +158,22 @@ impl EthClient where let block = Block { hash: Some(uncle.hash().into()), size: None, - parent_hash: uncle.parent_hash.into(), - uncles_hash: uncle.uncles_hash.into(), - author: uncle.author.into(), - miner: uncle.author.into(), - state_root: uncle.state_root.into(), - transactions_root: uncle.transactions_root.into(), - number: Some(uncle.number.into()), - gas_used: uncle.gas_used.into(), - gas_limit: uncle.gas_limit.into(), - logs_bloom: uncle.log_bloom.into(), - timestamp: uncle.timestamp.into(), - difficulty: uncle.difficulty.into(), - total_difficulty: (uncle.difficulty + parent_difficulty).into(), - receipts_root: uncle.receipts_root.into(), - extra_data: uncle.extra_data.into(), - seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(), + parent_hash: uncle.parent_hash().clone().into(), + uncles_hash: uncle.uncles_hash().clone().into(), + author: uncle.author().clone().into(), + miner: uncle.author().clone().into(), + state_root: uncle.state_root().clone().into(), + transactions_root: uncle.transactions_root().clone().into(), + number: Some(uncle.number().into()), + gas_used: uncle.gas_used().clone().into(), + gas_limit: uncle.gas_limit().clone().into(), + logs_bloom: uncle.log_bloom().clone().into(), + timestamp: uncle.timestamp().into(), + difficulty: uncle.difficulty().clone().into(), + total_difficulty: (uncle.difficulty().clone() + parent_difficulty).into(), + receipts_root: uncle.receipts_root().clone().into(), + extra_data: uncle.extra_data().clone().into(), + seal_fields: uncle.seal().clone().into_iter().map(|f| decode(&f)).map(Bytes::new).collect(), uncles: vec![], transactions: BlockTransactions::Hashes(vec![]), }; diff --git a/sync/src/blocks.rs b/sync/src/blocks.rs index 2e0aa5d4e..498b35f43 100644 --- a/sync/src/blocks.rs +++ b/sync/src/blocks.rs @@ -270,7 +270,7 @@ impl BlockCollection { match self.head { None if hash == self.heads[0] => { trace!("New head {}", hash); - self.head = Some(info.parent_hash); + self.head = Some(info.parent_hash().clone()); }, _ => () } @@ -280,8 +280,8 @@ impl BlockCollection { body: None, }; let header_id = HeaderId { - transactions_root: info.transactions_root, - uncles: info.uncles_hash + transactions_root: info.transactions_root().clone(), + uncles: info.uncles_hash().clone(), }; if header_id.transactions_root == rlp::SHA3_NULL_RLP && header_id.uncles == rlp::SHA3_EMPTY_LIST_RLP { // empty body, just mark as downloaded @@ -294,7 +294,7 @@ impl BlockCollection { self.header_ids.insert(header_id, hash.clone()); } - self.parents.insert(info.parent_hash.clone(), hash.clone()); + self.parents.insert(info.parent_hash().clone(), hash.clone()); self.blocks.insert(hash.clone(), block); trace!(target: "sync", "New header: {}", hash.hex()); Ok(hash) diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 80ed82596..9abd05398 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -503,7 +503,7 @@ impl ChainSync { let mut valid_response = item_count == 0; //empty response is valid for i in 0..item_count { let info: BlockHeader = try!(r.val_at(i)); - let number = BlockNumber::from(info.number); + let number = BlockNumber::from(info.number()); // Check if any of the headers matches the hash we requested if !valid_response { if let Some(expected) = expected_hash { @@ -645,11 +645,11 @@ impl ChainSync { trace!(target: "sync", "New block already queued {:?}", h); }, Ok(_) => { - if header.number == self.last_imported_block + 1 { - 
self.last_imported_block = header.number; + if header.number() == self.last_imported_block + 1 { + self.last_imported_block = header.number(); self.last_imported_hash = header.hash(); } - trace!(target: "sync", "New block queued {:?} ({})", h, header.number); + trace!(target: "sync", "New block queued {:?} ({})", h, header.number()); }, Err(BlockImportError::Block(BlockError::UnknownParent(p))) => { unknown = true; @@ -1539,12 +1539,12 @@ mod tests { fn get_dummy_block(order: u32, parent_hash: H256) -> Bytes { let mut header = Header::new(); - header.gas_limit = 0.into(); - header.difficulty = (order * 100).into(); - header.timestamp = (order * 10) as u64; - header.number = order as u64; - header.parent_hash = parent_hash; - header.state_root = H256::zero(); + header.set_gas_limit(0.into()); + header.set_difficulty((order * 100).into()); + header.set_timestamp((order * 10) as u64); + header.set_number(order as u64); + header.set_parent_hash(parent_hash); + header.set_state_root(H256::zero()); let mut rlp = RlpStream::new_list(3); rlp.append(&header); From a34bd389ce98625846393bacc26fb71417164c2a Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 30 Aug 2016 11:10:59 +0200 Subject: [PATCH 13/17] builtin trait refactoring --- ethcore/src/builtin.rs | 501 +++++++++++++++++++++++------------------ 1 file changed, 284 insertions(+), 217 deletions(-) diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index d4ea5e30e..bcd02cbf3 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -14,283 +14,350 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use crypto::sha2::Sha256; -use crypto::ripemd160::Ripemd160; +use crypto::sha2::Sha256 as Sha256Digest; +use crypto::ripemd160::Ripemd160 as Ripemd160Digest; use crypto::digest::Digest; use util::*; -use ethkey::{Signature, recover}; +use ethkey::{Signature, recover as ec_recover}; use ethjson; -/// Definition of a contract whose implementation is built-in. -pub struct Builtin { - /// The gas cost of running this built-in for the given size of input data. - pub cost: Box U256>, // TODO: U256 should be bignum. - /// Run this built-in function with the input being the first argument and the output - /// being placed into the second. - pub execute: Box, +/// Native implementation of a built-in contract. +pub trait Impl: Send + Sync { + /// execute this built-in on the given input, writing to the given output. + fn execute(&self, input: &[u8], out: &mut [u8]); } -// Rust does not mark closurer that do not capture as Sync -// We promise that all builtins are thread safe since they only operate on given input. -unsafe impl Sync for Builtin {} -unsafe impl Send for Builtin {} +/// A gas pricing scheme for built-in contracts. +pub trait Pricer: Send + Sync { + /// The gas cost of running this built-in for the given size of input data. + fn cost(&self, in_size: usize) -> U256; +} -impl fmt::Debug for Builtin { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "") +/// A linear pricing model. This computes a price using a base cost and a cost per-word. +struct Linear { + base: usize, + word: usize, +} + +impl Pricer for Linear { + fn cost(&self, in_size: usize) -> U256 { + U256::from(self.base) + U256::from(self.word) * U256::from((in_size + 31) / 32) } } +/// Pricing scheme and execution definition for a built-in contract. +pub struct Builtin { + pricer: Box, + native: Box, +} + impl Builtin { - /// Create a new object from components. 
- pub fn new(cost: Box U256>, execute: Box) -> Builtin { - Builtin {cost: cost, execute: execute} - } - - /// Create a new object from a builtin-function name with a linear cost associated with input size. - pub fn from_named_linear(name: &str, base_cost: usize, word_cost: usize) -> Builtin { - let cost = Box::new(move|s: usize| -> U256 { - U256::from(base_cost) + U256::from(word_cost) * U256::from((s + 31) / 32) - }); - - Self::new(cost, new_builtin_exec(name)) - } - /// Simple forwarder for cost. - pub fn cost(&self, s: usize) -> U256 { (*self.cost)(s) } + pub fn cost(&self, s: usize) -> U256 { self.pricer.cost(s) } /// Simple forwarder for execute. - pub fn execute(&self, input: &[u8], output: &mut[u8]) { (*self.execute)(input, output); } + pub fn execute(&self, input: &[u8], output: &mut[u8]) { self.native.execute(input, output) } } impl From for Builtin { fn from(b: ethjson::spec::Builtin) -> Self { - match b.pricing { + let pricer = match b.pricing { ethjson::spec::Pricing::Linear(linear) => { - Self::from_named_linear(b.name.as_ref(), linear.base, linear.word) + Box::new(Linear { + base: linear.base, + word: linear.word, + }) } + }; + + Builtin { + pricer: pricer, + native: ethereum_builtin(&b.name), } } } -/// Copy a bunch of bytes to a destination; if the `src` is too small to fill `dest`, -/// leave the rest unchanged. -pub fn copy_to(src: &[u8], dest: &mut[u8]) { - // NICE: optimise - for i in 0..min(src.len(), dest.len()) { - dest[i] = src[i]; +// Ethereum builtin creator. +fn ethereum_builtin(name: &str) -> Box { + match name { + "identity" => Box::new(Identity) as Box, + "ecrecover" => Box::new(EcRecover) as Box, + "sha256" => Box::new(Sha256) as Box, + "ripemd160" => Box::new(Ripemd160) as Box, + _ => panic!("invalid builtin name: {}", name), } } -/// Create a new builtin executor according to `name`. -/// TODO: turn in to a factory with dynamic registration. 
-pub fn new_builtin_exec(name: &str) -> Box { - match name { - "identity" => Box::new(move|input: &[u8], output: &mut[u8]| { - for i in 0..min(input.len(), output.len()) { - output[i] = input[i]; - } - }), - "ecrecover" => Box::new(move|input: &[u8], output: &mut[u8]| { - #[repr(packed)] - #[derive(Debug, Default)] - struct InType { - hash: H256, - v: H256, - r: H256, - s: H256, - } - let mut it = InType::default(); - it.copy_raw(input); - if it.v == H256::from(&U256::from(27)) || it.v == H256::from(&U256::from(28)) { - let s = Signature::from_rsv(&it.r, &it.s, it.v[31] - 27); - if s.is_valid() { - if let Ok(p) = recover(&s, &it.hash) { - let r = p.as_slice().sha3(); - // NICE: optimise and separate out into populate-like function - for i in 0..min(32, output.len()) { - output[i] = if i < 12 {0} else {r[i]}; - } - } +// Ethereum builtins: +// +// - The identity function +// - ec recovery +// - sha256 +// - ripemd160 + +#[derive(Debug)] +struct Identity; + +#[derive(Debug)] +struct EcRecover; + +#[derive(Debug)] +struct Sha256; + +#[derive(Debug)] +struct Ripemd160; + +impl Impl for Identity { + fn execute(&self, input: &[u8], output: &mut [u8]) { + let len = min(input.len(), output.len()); + output[..len].copy_from_slice(&input[..len]); + } +} + +impl Impl for EcRecover { + fn execute(&self, i: &[u8], output: &mut [u8]) { + let len = min(i.len(), 128); + + let mut input = [0; 128]; + input[..len].copy_from_slice(&i[..len]); + + let hash = H256::from_slice(&input[0..32]); + let v = H256::from_slice(&input[32..64]); + let r = H256::from_slice(&input[64..96]); + let s = H256::from_slice(&input[96..128]); + + let bit = match v[31] { + 27 | 28 if &v.as_slice()[..31] == &[0; 31] => v[31] - 27, + _ => return, + }; + + let s = Signature::from_rsv(&r, &s, bit); + if s.is_valid() { + if let Ok(p) = ec_recover(&s, &hash) { + let r = p.as_slice().sha3(); + + let out_len = min(output.len(), 32); + + for x in &mut output[0.. 
min(12, out_len)] { + *x = 0; + } + + if out_len > 12 { + output[12..out_len].copy_from_slice(&r[12..out_len]); } } - }), - "sha256" => Box::new(move|input: &[u8], output: &mut[u8]| { - let mut sha = Sha256::new(); - sha.input(input); - if output.len() >= 32 { - sha.result(output); - } else { - let mut ret = H256::new(); - sha.result(ret.as_slice_mut()); - copy_to(&ret, output); - } - }), - "ripemd160" => Box::new(move|input: &[u8], output: &mut[u8]| { - let mut sha = Ripemd160::new(); - sha.input(input); - let mut ret = H256::new(); - sha.result(&mut ret.as_slice_mut()[12..32]); - copy_to(&ret, output); - }), - _ => { - panic!("invalid builtin name {}", name); } } } -#[test] -fn identity() { - let f = new_builtin_exec("identity"); - let i = [0u8, 1, 2, 3]; +impl Impl for Sha256 { + fn execute(&self, input: &[u8], output: &mut [u8]) { + let out_len = min(output.len(), 32); - let mut o2 = [255u8; 2]; - f(&i[..], &mut o2[..]); - assert_eq!(i[0..2], o2); + let mut sha = Sha256Digest::new(); + sha.input(input); - let mut o4 = [255u8; 4]; - f(&i[..], &mut o4[..]); - assert_eq!(i, o4); + if out_len == 32 { + sha.result(&mut output[0..32]); + } else { + let mut out = [0; 32]; + sha.result(&mut out); - let mut o8 = [255u8; 8]; - f(&i[..], &mut o8[..]); - assert_eq!(i, o8[..4]); - assert_eq!([255u8; 4], o8[4..]); + output.copy_from_slice(&out[..out_len]) + } + } } -#[test] -fn sha256() { - use rustc_serialize::hex::FromHex; - let f = new_builtin_exec("sha256"); - let i = [0u8; 0]; +impl Impl for Ripemd160 { + fn execute(&self, input: &[u8], output: &mut [u8]) { + let out_len = min(output.len(), 32); - let mut o = [255u8; 32]; - f(&i[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); + let mut sha = Ripemd160Digest::new(); + sha.input(input); - let mut o8 = [255u8; 8]; - f(&i[..], &mut o8[..]); - assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]); + for x in &mut output[0.. 
min(12, out_len)] { + *x = 0; + } - let mut o34 = [255u8; 34]; - f(&i[..], &mut o34[..]); - assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]); + if out_len >= 32 { + sha.result(&mut output[12..32]); + } else if out_len > 12 { + let mut out = [0; 20]; + sha.result(&mut out); + + output.copy_from_slice(&out[12..out_len]) + } + } } -#[test] -fn ripemd160() { - use rustc_serialize::hex::FromHex; - let f = new_builtin_exec("ripemd160"); - let i = [0u8; 0]; +#[cfg(test)] +mod tests { + use super::{Builtin, Linear, ethereum_builtin, Pricer}; + use ethjson; + use util::U256; - let mut o = [255u8; 32]; - f(&i[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]); + #[test] + fn identity() { + let f = ethereum_builtin("identity"); - let mut o8 = [255u8; 8]; - f(&i[..], &mut o8[..]); - assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); + let i = [0u8, 1, 2, 3]; - let mut o34 = [255u8; 34]; - f(&i[..], &mut o34[..]); - assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]); -} + let mut o2 = [255u8; 2]; + f.execute(&i[..], &mut o2[..]); + assert_eq!(i[0..2], o2); -#[test] -fn ecrecover() { - use rustc_serialize::hex::FromHex; - /*let k = KeyPair::from_secret(b"test".sha3()).unwrap(); - let a: Address = From::from(k.public().sha3()); - println!("Address: {}", a); - let m = b"hello world".sha3(); - println!("Message: {}", m); - let s = k.sign(&m).unwrap(); - println!("Signed: {}", s);*/ + let mut o4 = [255u8; 4]; + f.execute(&i[..], &mut o4[..]); + assert_eq!(i, o4); - let f = new_builtin_exec("ecrecover"); - let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut o8[..]); + assert_eq!(i, o8[..4]); + assert_eq!([255u8; 4], o8[4..]); + } - let mut o = [255u8; 32]; - f(&i[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]); + #[test] + fn sha256() { + use rustc_serialize::hex::FromHex; + let f = ethereum_builtin("sha256"); - let mut o8 = [255u8; 8]; - f(&i[..], &mut o8[..]); - assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); + let i = [0u8; 0]; - let mut o34 = [255u8; 34]; - f(&i[..], &mut o34[..]); - assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855").unwrap())[..]); - let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut o8[..]); + 
assert_eq!(&o8[..], &(FromHex::from_hex("e3b0c44298fc1c14").unwrap())[..]); - let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut o34[..]); + assert_eq!(&o34[..], &(FromHex::from_hex("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855ffff").unwrap())[..]); + } - let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + #[test] + fn ripemd160() { + use rustc_serialize::hex::FromHex; + let f = ethereum_builtin("ripemd160"); - let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + let i = [0u8; 0]; - let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31").unwrap())[..]); - // TODO: Should this (corrupted version of the above) fail rather than returning some address? 
-/* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); - let mut o = [255u8; 32]; - f(&i_bad[..], &mut o[..]); - assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ -} + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut o8[..]); + assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); -#[test] -#[should_panic] -fn from_unknown_linear() { - let _ = Builtin::from_named_linear("dw", 10, 20); -} + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut o34[..]); + assert_eq!(&o34[..], &(FromHex::from_hex("0000000000000000000000009c1185a5c5e9fc54612808977ee8f548b2258d31ffff").unwrap())[..]); + } -#[test] -fn from_named_linear() { - let b = Builtin::from_named_linear("identity", 10, 20); - assert_eq!((*b.cost)(0), U256::from(10)); - assert_eq!((*b.cost)(1), U256::from(30)); - assert_eq!((*b.cost)(32), U256::from(30)); - assert_eq!((*b.cost)(33), U256::from(50)); + #[test] + fn ecrecover() { + use rustc_serialize::hex::FromHex; + /*let k = KeyPair::from_secret(b"test".sha3()).unwrap(); + let a: Address = From::from(k.public().sha3()); + println!("Address: {}", a); + let m = b"hello world".sha3(); + println!("Message: {}", m); + let s = k.sign(&m).unwrap(); + println!("Signed: {}", s);*/ - let i = [0u8, 1, 2, 3]; - let mut o = [255u8; 4]; - (*b.execute)(&i[..], &mut o[..]); - assert_eq!(i, o); -} + let f = ethereum_builtin("ecrecover"); -#[test] -fn from_json() { - let b = Builtin::from(ethjson::spec::Builtin { - name: "identity".to_owned(), - pricing: ethjson::spec::Pricing::Linear(ethjson::spec::Linear { - base: 10, - word: 20, - }) - }); + let i = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); - assert_eq!((*b.cost)(0), U256::from(10)); - assert_eq!((*b.cost)(1), U256::from(30)); - assert_eq!((*b.cost)(32), U256::from(30)); - assert_eq!((*b.cost)(33), U256::from(50)); + let mut o = [255u8; 32]; + f.execute(&i[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddb").unwrap())[..]); - let i = [0u8, 1, 2, 3]; - let mut o = [255u8; 4]; - (*b.execute)(&i[..], &mut o[..]); - assert_eq!(i, o); -} + let mut o8 = [255u8; 8]; + f.execute(&i[..], &mut o8[..]); + assert_eq!(&o8[..], &(FromHex::from_hex("0000000000000000").unwrap())[..]); + + let mut o34 = [255u8; 34]; + f.execute(&i[..], &mut o34[..]); + assert_eq!(&o34[..], &(FromHex::from_hex("000000000000000000000000c08b5542d177ac6686946920409741463a15dddbffff").unwrap())[..]); + + let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001a650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + + let i_bad = 
FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + + let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + + let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000000000001b").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + + let i_bad = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b000000000000000000000000000000000000000000000000000000000000001bffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]); + + // TODO: Should this (corrupted version of the above) fail rather than returning some address? 
+ /* let i_bad = FromHex::from_hex("48173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); + let mut o = [255u8; 32]; + f.execute(&i_bad[..], &mut o[..]); + assert_eq!(&o[..], &(FromHex::from_hex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff").unwrap())[..]);*/ + } + + #[test] + #[should_panic] + fn from_unknown_linear() { + let _ = ethereum_builtin("foo"); + } + + #[test] + fn from_named_linear() { + let pricer = Box::new(Linear { base: 10, word: 20 }); + let b = Builtin { + pricer: pricer as Box, + native: ethereum_builtin("identity"), + }; + + assert_eq!(b.cost(0), U256::from(10)); + assert_eq!(b.cost(1), U256::from(30)); + assert_eq!(b.cost(32), U256::from(30)); + assert_eq!(b.cost(33), U256::from(50)); + + let i = [0u8, 1, 2, 3]; + let mut o = [255u8; 4]; + b.execute(&i[..], &mut o[..]); + assert_eq!(i, o); + } + + #[test] + fn from_json() { + let b = Builtin::from(ethjson::spec::Builtin { + name: "identity".to_owned(), + pricing: ethjson::spec::Pricing::Linear(ethjson::spec::Linear { + base: 10, + word: 20, + }) + }); + + assert_eq!(b.cost(0), U256::from(10)); + assert_eq!(b.cost(1), U256::from(30)); + assert_eq!(b.cost(32), U256::from(30)); + assert_eq!(b.cost(33), U256::from(50)); + + let i = [0u8, 1, 2, 3]; + let mut o = [255u8; 4]; + b.execute(&i[..], &mut o[..]); + assert_eq!(i, o); + } +} \ No newline at end of file From 6f321d9849c8be23c8e89558d653d3ebd6173fff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 30 Aug 2016 14:04:52 +0200 Subject: [PATCH 14/17] LRU cache for dapps (#2006) Conflicts: dapps/Cargo.toml dapps/src/lib.rs --- Cargo.lock | 7 ++ dapps/Cargo.toml | 1 + dapps/src/apps/cache.rs | 128 ++++++++++++++++++++++++ dapps/src/apps/fetcher.rs | 51 +++++----- dapps/src/apps/mod.rs | 1 + dapps/src/handlers/client/fetch_file.rs | 32 +++++- dapps/src/handlers/fetch.rs | 26 +++-- dapps/src/handlers/mod.rs | 2 +- dapps/src/lib.rs | 1 + dapps/src/page/local.rs | 4 + 10 files changed, 214 insertions(+), 39 deletions(-) create mode 100644 dapps/src/apps/cache.rs diff --git a/Cargo.lock b/Cargo.lock index 119e87fdf..79bb6dba9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -295,6 +295,7 @@ dependencies = [ "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)", + "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-dapps 1.4.0 (git+https://github.com/ethcore/parity-ui.git)", @@ -793,6 +794,11 @@ name = "libc" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "linked-hash-map" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "log" version = "0.3.6" @@ -1699,6 +1705,7 @@ dependencies = [ "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = 
"49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "97def9dc7ce1d8e153e693e3a33020bc69972181adb2f871e87e888876feae49" +"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e" "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 2c7c9db9c..1f5a0c491 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -22,6 +22,7 @@ serde_json = "0.7.0" serde_macros = { version = "0.7.0", optional = true } zip = { version = "0.1", default-features = false } ethabi = "0.2.1" +linked-hash-map = "0.3" ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } parity-dapps = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } diff --git a/dapps/src/apps/cache.rs b/dapps/src/apps/cache.rs new file mode 100644 index 000000000..bf1c5f3cc --- /dev/null +++ b/dapps/src/apps/cache.rs @@ -0,0 +1,128 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Fetchable Dapps support. 
+ +use std::fs; +use std::sync::{Arc}; +use std::sync::atomic::{AtomicBool, Ordering}; + +use linked_hash_map::LinkedHashMap; +use page::LocalPageEndpoint; + +pub enum ContentStatus { + Fetching(Arc), + Ready(LocalPageEndpoint), +} + +#[derive(Default)] +pub struct ContentCache { + cache: LinkedHashMap, +} + +impl ContentCache { + pub fn insert(&mut self, content_id: String, status: ContentStatus) -> Option { + self.cache.insert(content_id, status) + } + + pub fn remove(&mut self, content_id: &str) -> Option { + self.cache.remove(content_id) + } + + pub fn get(&mut self, content_id: &str) -> Option<&mut ContentStatus> { + self.cache.get_refresh(content_id) + } + + pub fn clear_garbage(&mut self, expected_size: usize) -> Vec<(String, ContentStatus)> { + let mut len = self.cache.len(); + + if len <= expected_size { + return Vec::new(); + } + + let mut removed = Vec::with_capacity(len - expected_size); + while len > expected_size { + let entry = self.cache.pop_front().unwrap(); + match entry.1 { + ContentStatus::Fetching(ref abort) => { + trace!(target: "dapps", "Aborting {} because of limit.", entry.0); + // Mark as aborted + abort.store(true, Ordering::Relaxed); + }, + ContentStatus::Ready(ref endpoint) => { + trace!(target: "dapps", "Removing {} because of limit.", entry.0); + // Remove path + let res = fs::remove_dir_all(&endpoint.path()); + if let Err(e) = res { + warn!(target: "dapps", "Unable to remove dapp: {:?}", e); + } + } + } + + removed.push(entry); + len -= 1; + } + removed + } + + #[cfg(test)] + pub fn len(&self) -> usize { + self.cache.len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn only_keys(data: Vec<(String, ContentStatus)>) -> Vec { + data.into_iter().map(|x| x.0).collect() + } + + #[test] + fn should_remove_least_recently_used() { + // given + let mut cache = ContentCache::default(); + cache.insert("a".into(), ContentStatus::Fetching(Default::default())); + cache.insert("b".into(), ContentStatus::Fetching(Default::default())); + cache.insert("c".into(), ContentStatus::Fetching(Default::default())); + + // when + let res = cache.clear_garbage(2); + + // then + assert_eq!(cache.len(), 2); + assert_eq!(only_keys(res), vec!["a"]); + } + + #[test] + fn should_update_lru_if_accessed() { + // given + let mut cache = ContentCache::default(); + cache.insert("a".into(), ContentStatus::Fetching(Default::default())); + cache.insert("b".into(), ContentStatus::Fetching(Default::default())); + cache.insert("c".into(), ContentStatus::Fetching(Default::default())); + + // when + cache.get("a"); + let res = cache.clear_garbage(2); + + // then + assert_eq!(cache.len(), 2); + assert_eq!(only_keys(res), vec!["b"]); + } + +} diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 347c8da5f..e31aae55d 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -23,7 +23,7 @@ use std::{fs, env}; use std::io::{self, Read, Write}; use std::path::PathBuf; use std::sync::Arc; -use std::collections::HashMap; +use std::sync::atomic::{AtomicBool}; use rustc_serialize::hex::FromHex; use hyper::Control; @@ -33,20 +33,18 @@ use random_filename; use util::{Mutex, H256}; use util::sha3::sha3; use page::LocalPageEndpoint; -use handlers::{ContentHandler, AppFetcherHandler, DappHandler}; +use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator}; use endpoint::{Endpoint, EndpointPath, Handler}; +use apps::cache::{ContentCache, ContentStatus}; use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest}; use 
apps::urlhint::{URLHintContract, URLHint}; -enum AppStatus { - Fetching, - Ready(LocalPageEndpoint), -} +const MAX_CACHED_DAPPS: usize = 10; pub struct AppFetcher { dapps_path: PathBuf, resolver: R, - dapps: Arc>>, + dapps: Arc>, } impl Drop for AppFetcher { @@ -65,17 +63,17 @@ impl AppFetcher { AppFetcher { dapps_path: dapps_path, resolver: resolver, - dapps: Arc::new(Mutex::new(HashMap::new())), + dapps: Arc::new(Mutex::new(ContentCache::default())), } } #[cfg(test)] - fn set_status(&self, app_id: &str, status: AppStatus) { + fn set_status(&self, app_id: &str, status: ContentStatus) { self.dapps.lock().insert(app_id.to_owned(), status); } pub fn contains(&self, app_id: &str) -> bool { - let dapps = self.dapps.lock(); + let mut dapps = self.dapps.lock(); match dapps.get(app_id) { // Check if we already have the app Some(_) => true, @@ -95,11 +93,11 @@ impl AppFetcher { let status = dapps.get(&app_id); match status { // Just server dapp - Some(&AppStatus::Ready(ref endpoint)) => { + Some(&mut ContentStatus::Ready(ref endpoint)) => { (None, endpoint.to_handler(path)) }, // App is already being fetched - Some(&AppStatus::Fetching) => { + Some(&mut ContentStatus::Fetching(_)) => { (None, Box::new(ContentHandler::html( StatusCode::ServiceUnavailable, format!( @@ -111,11 +109,13 @@ impl AppFetcher { }, // We need to start fetching app None => { - // TODO [todr] Keep only last N dapps available! let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true."); let app = self.resolver.resolve(app_hex).expect("to_handler is called only when `contains` returns true."); - (Some(AppStatus::Fetching), Box::new(AppFetcherHandler::new( + let abort = Arc::new(AtomicBool::new(false)); + + (Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new( app, + abort, control, path.using_dapps_domains, DappInstaller { @@ -129,6 +129,7 @@ impl AppFetcher { }; if let Some(status) = new_status { + dapps.clear_garbage(MAX_CACHED_DAPPS); dapps.insert(app_id, status); } @@ -161,7 +162,7 @@ impl From for ValidationError { struct DappInstaller { dapp_id: String, dapps_path: PathBuf, - dapps: Arc>>, + dapps: Arc>, } impl DappInstaller { @@ -196,7 +197,7 @@ impl DappInstaller { } } -impl DappHandler for DappInstaller { +impl ContentValidator for DappInstaller { type Error = ValidationError; fn validate_and_install(&self, app_path: PathBuf) -> Result { @@ -262,7 +263,7 @@ impl DappHandler for DappInstaller { Some(manifest) => { let path = self.dapp_target_path(manifest); let app = LocalPageEndpoint::new(path, manifest.clone().into()); - dapps.insert(self.dapp_id.clone(), AppStatus::Ready(app)); + dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app)); }, // In case of error None => { @@ -274,12 +275,13 @@ impl DappHandler for DappInstaller { #[cfg(test)] mod tests { - use std::path::PathBuf; - use super::{AppFetcher, AppStatus}; - use apps::urlhint::{GithubApp, URLHint}; + use std::env; + use util::Bytes; use endpoint::EndpointInfo; use page::LocalPageEndpoint; - use util::Bytes; + use apps::cache::ContentStatus; + use apps::urlhint::{GithubApp, URLHint}; + use super::AppFetcher; struct FakeResolver; impl URLHint for FakeResolver { @@ -291,8 +293,9 @@ mod tests { #[test] fn should_true_if_contains_the_app() { // given + let path = env::temp_dir(); let fetcher = AppFetcher::new(FakeResolver); - let handler = LocalPageEndpoint::new(PathBuf::from("/tmp/test"), EndpointInfo { + let handler = LocalPageEndpoint::new(path, EndpointInfo { name: "fake".into(), 
description: "".into(), version: "".into(), @@ -301,8 +304,8 @@ mod tests { }); // when - fetcher.set_status("test", AppStatus::Ready(handler)); - fetcher.set_status("test2", AppStatus::Fetching); + fetcher.set_status("test", ContentStatus::Ready(handler)); + fetcher.set_status("test2", ContentStatus::Fetching(Default::default())); // then assert_eq!(fetcher.contains("test"), true); diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index 84a3c5ddf..65bee587d 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -19,6 +19,7 @@ use page::PageEndpoint; use proxypac::ProxyPac; use parity_dapps::WebApp; +mod cache; mod fs; pub mod urlhint; pub mod fetcher; diff --git a/dapps/src/handlers/client/fetch_file.rs b/dapps/src/handlers/client/fetch_file.rs index 27b8bbe8e..f11827ed8 100644 --- a/dapps/src/handlers/client/fetch_file.rs +++ b/dapps/src/handlers/client/fetch_file.rs @@ -18,7 +18,8 @@ use std::{env, io, fs, fmt}; use std::path::PathBuf; -use std::sync::mpsc; +use std::sync::{mpsc, Arc}; +use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use random_filename; @@ -29,6 +30,7 @@ use hyper::{self, Decoder, Encoder, Next}; #[derive(Debug)] pub enum Error { + Aborted, NotStarted, UnexpectedStatus(StatusCode), IoError(io::Error), @@ -40,6 +42,7 @@ pub type OnDone = Box; pub struct Fetch { path: PathBuf, + abort: Arc, file: Option, result: Option, sender: mpsc::Sender, @@ -56,7 +59,7 @@ impl Drop for Fetch { fn drop(&mut self) { let res = self.result.take().unwrap_or(Err(Error::NotStarted)); // Remove file if there was an error - if res.is_err() { + if res.is_err() || self.is_aborted() { if let Some(file) = self.file.take() { drop(file); // Remove file @@ -72,12 +75,13 @@ impl Drop for Fetch { } impl Fetch { - pub fn new(sender: mpsc::Sender, on_done: OnDone) -> Self { + pub fn new(sender: mpsc::Sender, abort: Arc, on_done: OnDone) -> Self { let mut dir = env::temp_dir(); dir.push(random_filename()); Fetch { path: dir, + abort: abort, file: None, result: None, sender: sender, @@ -86,17 +90,36 @@ impl Fetch { } } +impl Fetch { + fn is_aborted(&self) -> bool { + self.abort.load(Ordering::Relaxed) + } + fn mark_aborted(&mut self) -> Next { + self.result = Some(Err(Error::Aborted)); + Next::end() + } +} + impl hyper::client::Handler for Fetch { fn on_request(&mut self, req: &mut Request) -> Next { + if self.is_aborted() { + return self.mark_aborted(); + } req.headers_mut().set(Connection::close()); read() } fn on_request_writable(&mut self, _encoder: &mut Encoder) -> Next { + if self.is_aborted() { + return self.mark_aborted(); + } read() } fn on_response(&mut self, res: Response) -> Next { + if self.is_aborted() { + return self.mark_aborted(); + } if *res.status() != StatusCode::Ok { self.result = Some(Err(Error::UnexpectedStatus(*res.status()))); return Next::end(); @@ -117,6 +140,9 @@ impl hyper::client::Handler for Fetch { } fn on_response_readable(&mut self, decoder: &mut Decoder) -> Next { + if self.is_aborted() { + return self.mark_aborted(); + } match io::copy(decoder, self.file.as_mut().expect("File is there because on_response has created it.")) { Ok(0) => Next::end(), Ok(_) => read(), diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 94bce1492..d4919562a 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -18,7 +18,8 @@ use std::{fs, fmt}; use std::path::PathBuf; -use std::sync::mpsc; +use std::sync::{mpsc, Arc}; +use std::sync::atomic::AtomicBool; use std::time::{Instant, Duration}; use 
hyper::{header, server, Decoder, Encoder, Next, Method, Control, Client}; @@ -38,19 +39,20 @@ enum FetchState { Error(ContentHandler), InProgress { deadline: Instant, - receiver: mpsc::Receiver + receiver: mpsc::Receiver, }, Done(Manifest), } -pub trait DappHandler { +pub trait ContentValidator { type Error: fmt::Debug; fn validate_and_install(&self, app: PathBuf) -> Result; fn done(&self, Option<&Manifest>); } -pub struct AppFetcherHandler { +pub struct ContentFetcherHandler { + abort: Arc, control: Option, status: FetchState, client: Option>, @@ -58,7 +60,7 @@ pub struct AppFetcherHandler { dapp: H, } -impl Drop for AppFetcherHandler { +impl Drop for ContentFetcherHandler { fn drop(&mut self) { let manifest = match self.status { FetchState::Done(ref manifest) => Some(manifest), @@ -68,16 +70,18 @@ impl Drop for AppFetcherHandler { } } -impl AppFetcherHandler { +impl ContentFetcherHandler { pub fn new( app: GithubApp, + abort: Arc, control: Control, using_dapps_domains: bool, handler: H) -> Self { let client = Client::new().expect("Failed to create a Client"); - AppFetcherHandler { + ContentFetcherHandler { + abort: abort, control: Some(control), client: Some(client), status: FetchState::NotStarted(app), @@ -94,12 +98,12 @@ impl AppFetcherHandler { // TODO [todr] https support - fn fetch_app(client: &mut Client, app: &GithubApp, control: Control) -> Result, String> { + fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc, control: Control) -> Result, String> { let url = try!(app.url().parse().map_err(|e| format!("{:?}", e))); trace!(target: "dapps", "Fetching from: {:?}", url); let (tx, rx) = mpsc::channel(); - let res = client.request(url, Fetch::new(tx, Box::new(move || { + let res = client.request(url, Fetch::new(tx, abort, Box::new(move || { trace!(target: "dapps", "Fetching finished."); // Ignoring control errors let _ = control.ready(Next::read()); @@ -111,7 +115,7 @@ impl AppFetcherHandler { } } -impl server::Handler for AppFetcherHandler { +impl server::Handler for ContentFetcherHandler { fn on_request(&mut self, request: server::Request) -> Next { let status = if let FetchState::NotStarted(ref app) = self.status { Some(match *request.method() { @@ -120,7 +124,7 @@ impl server::Handler for AppFetcherHandler { trace!(target: "dapps", "Fetching dapp: {:?}", app); let control = self.control.take().expect("on_request is called only once, thus control is always Some"); let client = self.client.as_mut().expect("on_request is called before client is closed."); - let fetch = Self::fetch_app(client, app, control); + let fetch = Self::fetch_app(client, app, self.abort.clone(), control); match fetch { Ok(receiver) => FetchState::InProgress { deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT), diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index 85a8bd439..6f6423b58 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -27,7 +27,7 @@ pub use self::auth::AuthRequiredHandler; pub use self::echo::EchoHandler; pub use self::content::ContentHandler; pub use self::redirect::Redirection; -pub use self::fetch::{AppFetcherHandler, DappHandler}; +pub use self::fetch::{ContentFetcherHandler, ContentValidator}; use url::Url; use hyper::{server, header, net, uri}; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index e50bc2006..a2c17a42c 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -60,6 +60,7 @@ extern crate rustc_serialize; extern crate parity_dapps; extern crate ethcore_rpc; extern crate ethcore_util as util; +extern crate 
linked_hash_map; mod endpoint; mod apps; diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs index 52e32bf5e..dcfd9bed2 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -33,6 +33,10 @@ impl LocalPageEndpoint { info: info, } } + + pub fn path(&self) -> PathBuf { + self.path.clone() + } } impl Endpoint for LocalPageEndpoint { From efc846bb3e4c38e1130e4735b6c055e8328d3e28 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 30 Aug 2016 16:05:02 +0400 Subject: [PATCH 15/17] Control service for IPC (#2013) * hypervisor extension * sorted with shutdown-wait * hypervisor lifecycle alter --- ipc/hypervisor/src/lib.rs | 33 +++++++++++----- ipc/hypervisor/src/service.rs.in | 68 ++++++++++++++++++++++++++------ parity/boot.rs | 4 +- parity/modules.rs | 1 + parity/run.rs | 4 ++ parity/sync.rs | 46 ++++++++++++++++----- 6 files changed, 123 insertions(+), 33 deletions(-) diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index b0e1564ab..3cfd464e9 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -33,7 +33,7 @@ use service::{HypervisorService, IpcModuleId}; use std::process::{Command,Child}; use std::collections::HashMap; -pub use service::{HypervisorServiceClient, CLIENT_MODULE_ID, SYNC_MODULE_ID}; +pub use service::{HypervisorServiceClient, ControlService, CLIENT_MODULE_ID, SYNC_MODULE_ID}; pub type BinaryId = &'static str; @@ -174,6 +174,10 @@ impl Hypervisor { self.service.unchecked_count() == 0 } + pub fn modules_shutdown(&self) -> bool { + self.service.running_count() == 0 + } + /// Waits for every required module to check in pub fn wait_for_startup(&self) { let mut worker = self.ipc_worker.write().unwrap(); @@ -182,21 +186,30 @@ impl Hypervisor { } } - /// Shutdown the ipc and all managed child processes - pub fn shutdown(&self, wait_time: Option) { - if wait_time.is_some() { std::thread::sleep(wait_time.unwrap()) } - - let mut childs = self.processes.write().unwrap(); - for (ref mut module, ref mut child) in childs.iter_mut() { - trace!(target: "hypervisor", "Stopping process module: {}", module); - child.kill().unwrap(); + /// Waits for every required module to check in + pub fn wait_for_shutdown(&self) { + let mut worker = self.ipc_worker.write().unwrap(); + while !self.modules_shutdown() { + worker.poll() } } + + /// Shutdown the ipc and all managed child processes + pub fn shutdown(&self) { + let mut childs = self.processes.write().unwrap(); + for (ref mut module, _) in childs.iter_mut() { + trace!(target: "hypervisor", "Stopping process module: {}", module); + self.service.send_shutdown(**module); + } + trace!(target: "hypervisor", "Waiting for shutdown..."); + self.wait_for_shutdown(); + trace!(target: "hypervisor", "All modules reported shutdown"); + } } impl Drop for Hypervisor { fn drop(&mut self) { - self.shutdown(Some(std::time::Duration::new(1, 0))); + self.shutdown(); } } diff --git a/ipc/hypervisor/src/service.rs.in b/ipc/hypervisor/src/service.rs.in index 3b1a4d145..69585ee6c 100644 --- a/ipc/hypervisor/src/service.rs.in +++ b/ipc/hypervisor/src/service.rs.in @@ -17,6 +17,7 @@ use std::sync::{RwLock,Arc}; use ipc::IpcConfig; use std::collections::HashMap; +use nanoipc; pub type IpcModuleId = u64; @@ -28,15 +29,43 @@ pub const SYNC_MODULE_ID: IpcModuleId = 2100; /// IPC service that handles module management pub struct HypervisorService { - check_list: RwLock>, + modules: RwLock>, +} + +#[derive(Default)] +pub struct ModuleState { + started: bool, + control_url: String, + shutdown: bool, +} + + 
+#[derive(Ipc)] +pub trait ControlService { + fn shutdown(&self); } #[derive(Ipc)] impl HypervisorService { - fn module_ready(&self, module_id: u64) -> bool { - let mut check_list = self.check_list.write().unwrap(); - check_list.get_mut(&module_id).map(|mut status| *status = true); - check_list.iter().any(|(_, status)| !status) + // return type for making method synchronous + fn module_ready(&self, module_id: u64, control_url: String) -> bool { + let mut modules = self.modules.write().unwrap(); + modules.get_mut(&module_id).map(|mut module| { + module.started = true; + module.control_url = control_url; + }); + trace!(target: "hypervisor", "Module ready: {}", module_id); + true + } + + // return type for making method synchronous + fn module_shutdown(&self, module_id: u64) -> bool { + let mut modules = self.modules.write().unwrap(); + modules.get_mut(&module_id).map(|mut module| { + module.shutdown = true; + }); + trace!(target: "hypervisor", "Module shutdown: {}", module_id); + true } } @@ -48,29 +77,46 @@ impl HypervisorService { /// New service with list of modules that will report for being ready pub fn with_modules(module_ids: Vec) -> Arc { - let mut check_list = HashMap::new(); + let mut modules = HashMap::new(); for module_id in module_ids { - check_list.insert(module_id, false); + modules.insert(module_id, ModuleState::default()); } Arc::new(HypervisorService { - check_list: RwLock::new(check_list), + modules: RwLock::new(modules), }) } /// Add the module to the check-list pub fn add_module(&self, module_id: IpcModuleId) { - self.check_list.write().unwrap().insert(module_id, false); + self.modules.write().unwrap().insert(module_id, ModuleState::default()); } /// Number of modules still being waited for check-in pub fn unchecked_count(&self) -> usize { - self.check_list.read().unwrap().iter().filter(|&(_, status)| !status).count() + self.modules.read().unwrap().iter().filter(|&(_, module)| !module.started).count() } /// List of all modules within this service pub fn module_ids(&self) -> Vec { - self.check_list.read().unwrap().iter().map(|(module_id, _)| module_id).cloned().collect() + self.modules.read().unwrap().iter().map(|(module_id, _)| module_id).cloned().collect() + } + + /// Number of modules started and running + pub fn running_count(&self) -> usize { + self.modules.read().unwrap().iter().filter(|&(_, module)| module.started && !module.shutdown).count() + } + + pub fn send_shutdown(&self, module_id: IpcModuleId) { + let modules = self.modules.read().unwrap(); + modules.get(&module_id).map(|module| { + trace!(target: "hypervisor", "Sending shutdown to {}({})", module_id, &module.control_url); + let client = nanoipc::init_client::>(&module.control_url).unwrap(); + client.shutdown(); + trace!(target: "hypervisor", "Sent shutdown to {}", module_id); + }); } } impl ::ipc::IpcConfig for HypervisorService {} + +impl ::ipc::IpcConfig for ControlService {} diff --git a/parity/boot.rs b/parity/boot.rs index ddc05437c..1614317b8 100644 --- a/parity/boot.rs +++ b/parity/boot.rs @@ -62,10 +62,10 @@ pub fn payload() -> Result { .map_err(|binary_error| BootError::DecodeArgs(binary_error)) } -pub fn register(hv_url: &str, module_id: IpcModuleId) -> GuardedSocket>{ +pub fn register(hv_url: &str, control_url: &str, module_id: IpcModuleId) -> GuardedSocket>{ let hypervisor_client = nanoipc::init_client::>(hv_url).unwrap(); hypervisor_client.handshake().unwrap(); - hypervisor_client.module_ready(module_id); + hypervisor_client.module_ready(module_id, control_url.to_owned()); hypervisor_client } 
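
For orientation, the graceful-shutdown handshake introduced here works as follows: each child module checks in with `module_ready(module_id, control_url)`, the hypervisor later calls `shutdown()` on that control URL over nanoipc, and the module confirms with `module_shutdown(module_id)`, which is what `wait_for_shutdown()` polls for via `running_count()`. A minimal sketch of the module-side `ControlService` implementation (illustrative only; the real instance is `SyncControlService` in `parity/sync.rs` below, and the trait is assumed to be in scope from the hypervisor crate):

```rust
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};

// Sketch of a module-side control service: the hypervisor invokes shutdown()
// over nanoipc, the module's main loop watches the flag, exits its poll loop,
// and then reports back with module_shutdown().
#[derive(Default)]
struct ExampleControlService {
    stop: Arc<AtomicBool>,
}

impl ControlService for ExampleControlService {
    fn shutdown(&self) {
        self.stop.store(true, Ordering::Relaxed);
    }
}
```
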
diff --git a/parity/modules.rs b/parity/modules.rs index 83ae44802..5edbca702 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -32,6 +32,7 @@ pub mod service_urls { pub const SYNC: &'static str = "parity-sync.ipc"; pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc"; pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc"; + pub const SYNC_CONTROL: &'static str = "parity-sync-control.ipc"; #[cfg(feature="stratum")] pub const STRATUM: &'static str = "parity-stratum.ipc"; #[cfg(feature="stratum")] diff --git a/parity/run.rs b/parity/run.rs index 220f77376..71995cd5f 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -260,6 +260,10 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { // Handle exit wait_for_exit(panic_handler, http_server, ipc_server, dapps_server, signer_server); + // hypervisor should be shutdown first while everything still works and can be + // terminated gracefully + drop(hypervisor); + Ok(()) } diff --git a/parity/sync.rs b/parity/sync.rs index 5d3056acd..95c9924c6 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -16,14 +16,26 @@ //! Parity sync service -use std; use std::sync::Arc; -use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL}; +use std::sync::atomic::AtomicBool; +use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL, ControlService}; use ethcore::client::{RemoteClient, ChainNotify}; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; -use std::thread; use modules::service_urls; use boot; +use nanoipc; + +#[derive(Default)] +struct SyncControlService { + pub stop: Arc, +} + +impl ControlService for SyncControlService { + fn shutdown(&self) { + trace!(target: "hypervisor", "Received shutdown from control service"); + self.stop.store(true, ::std::sync::atomic::Ordering::Relaxed); + } +} pub fn main() { boot::setup_cli_logger("sync"); @@ -33,31 +45,45 @@ pub fn main() { let remote_client = dependency!(RemoteClient, &service_urls::with_base(&service_config.io_path, service_urls::CLIENT)); - let stop = boot::main_thread(); let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); - let _ = boot::register( + let _ = boot::main_thread(); + let service_stop = Arc::new(AtomicBool::new(false)); + + let hypervisor = boot::register( &service_urls::with_base(&service_config.io_path, HYPERVISOR_IPC_URL), + &service_urls::with_base(&service_config.io_path, service_urls::SYNC_CONTROL), SYNC_MODULE_ID ); boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::SYNC), - stop.clone(), + service_stop.clone(), sync.clone() as Arc ); boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::NETWORK_MANAGER), - stop.clone(), + service_stop.clone(), sync.clone() as Arc ); boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::SYNC_NOTIFY), - stop.clone(), + service_stop.clone(), sync.clone() as Arc ); - while !stop.load(::std::sync::atomic::Ordering::Relaxed) { - thread::park_timeout(std::time::Duration::from_millis(1000)); + let control_service = Arc::new(SyncControlService::default()); + let as_control = control_service.clone() as Arc; + let mut worker = nanoipc::Worker::::new(&as_control); + worker.add_reqrep( + &service_urls::with_base(&service_config.io_path, service_urls::SYNC_CONTROL) + ).unwrap(); + + while !control_service.stop.load(::std::sync::atomic::Ordering::Relaxed) { + worker.poll(); } + service_stop.store(true, ::std::sync::atomic::Ordering::Relaxed); + + 
hypervisor.module_shutdown(SYNC_MODULE_ID); + trace!(target: "hypervisor", "Sync process terminated gracefully"); } From 76966ab2fc191f3aaa5dbf4bf212cffdc8c63bea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 30 Aug 2016 16:05:18 +0200 Subject: [PATCH 16/17] Fixing redirection. Adding tests --- dapps/src/handlers/redirect.rs | 3 +- dapps/src/lib.rs | 9 +- dapps/src/router/mod.rs | 7 +- dapps/src/tests/api.rs | 84 ++++++++++++++ dapps/src/tests/authorization.rs | 79 +++++++++++++ dapps/src/tests/helpers.rs | 123 ++++++++++++++++++++ dapps/src/tests/mod.rs | 25 +++++ dapps/src/tests/redirection.rs | 185 +++++++++++++++++++++++++++++++ dapps/src/tests/validation.rs | 79 +++++++++++++ 9 files changed, 591 insertions(+), 3 deletions(-) create mode 100644 dapps/src/tests/api.rs create mode 100644 dapps/src/tests/authorization.rs create mode 100644 dapps/src/tests/helpers.rs create mode 100644 dapps/src/tests/mod.rs create mode 100644 dapps/src/tests/redirection.rs create mode 100644 dapps/src/tests/validation.rs diff --git a/dapps/src/handlers/redirect.rs b/dapps/src/handlers/redirect.rs index dbe5f6e4a..8b6158266 100644 --- a/dapps/src/handlers/redirect.rs +++ b/dapps/src/handlers/redirect.rs @@ -42,7 +42,8 @@ impl server::Handler for Redirection { } fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(StatusCode::MovedPermanently); + // Don't use `MovedPermanently` here to prevent browser from caching the redirections. + res.set_status(StatusCode::Found); res.headers_mut().set(header::Location(self.to_url.to_owned())); Next::write() } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index a2c17a42c..2beef0639 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -71,6 +71,8 @@ mod rpc; mod api; mod proxypac; mod url; +#[cfg(test)] +mod tests; pub use self::apps::urlhint::ContractClient; @@ -205,6 +207,11 @@ impl Server { pub fn set_panic_handler(&self, handler: F) where F : Fn() -> () + Send + 'static { *self.panic_handler.lock().unwrap() = Some(Box::new(handler)); } + + /// Returns address that this server is bound to. + pub fn addr(&self) -> &SocketAddr { + self.server.as_ref().unwrap().addr() + } } impl Drop for Server { @@ -239,7 +246,7 @@ pub fn random_filename() -> String { } #[cfg(test)] -mod tests { +mod util_tests { use super::Server; #[test] diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index 359337047..94d0a0fc0 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -83,7 +83,7 @@ impl server::Handler for Router { (Some(ref path), _) if self.endpoints.contains_key(&path.app_id) => { self.endpoints.get(&path.app_id).unwrap().to_handler(path.clone()) }, - // Try to resolve and fetch dapp + // Try to resolve and fetch the dapp (Some(ref path), _) if self.fetch.contains(&path.app_id) => { let control = self.control.take().expect("on_request is called only once, thus control is always defined."); self.fetch.to_handler(path.clone(), control) @@ -93,6 +93,11 @@ impl server::Handler for Router { let address = apps::redirection_address(path.using_dapps_domains, self.main_page); Redirection::new(address.as_str()) }, + // Redirect any GET request to home. 
+ _ if *req.method() == hyper::method::Method::Get => { + let address = apps::redirection_address(false, self.main_page); + Redirection::new(address.as_str()) + }, // RPC by default _ => { self.special.get(&SpecialEndpoint::Rpc).unwrap().to_handler(EndpointPath::default()) diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs new file mode 100644 index 000000000..a9d3eba3b --- /dev/null +++ b/dapps/src/tests/api.rs @@ -0,0 +1,84 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use tests::helpers::{serve, request}; + +#[test] +fn should_return_error() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /api/empty HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 404 Not Found".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Content-Type: application/json"); + assert_eq!(response.body, format!("58\n{}\n0\n\n", r#"{"code":"404","title":"Not Found","detail":"Resource you requested has not been found."}"#)); +} + +#[test] +fn should_serve_apps() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /api/apps HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Content-Type: application/json"); + assert!(response.body.contains("Parity Home Screen")); +} + +#[test] +fn should_handle_ping() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + POST /api/ping HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Content-Type: application/json"); + assert_eq!(response.body, "0\n\n".to_owned()); +} + diff --git a/dapps/src/tests/authorization.rs b/dapps/src/tests/authorization.rs new file mode 100644 index 000000000..dceb194b7 --- /dev/null +++ b/dapps/src/tests/authorization.rs @@ -0,0 +1,79 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
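
A quick aside on the literal `Authorization` header used by the tests in this new file: HTTP Basic auth is simply `base64("user:password")`, and `QWxhZGRpbjpPcGVuU2VzYW1l` decodes to `Aladdin:OpenSesame`, the credentials passed to `serve_with_auth` in the last test. A small sketch (not part of the patch) using the `rustc-serialize` base64 helpers this crate already depends on:

```rust
extern crate rustc_serialize;
use rustc_serialize::base64::{ToBase64, STANDARD};

fn main() {
    // Basic auth credentials are "user:password" encoded as base64.
    let token = "Aladdin:OpenSesame".as_bytes().to_base64(STANDARD);
    assert_eq!(token, "QWxhZGRpbjpPcGVuU2VzYW1l");
    println!("Authorization: Basic {}", token);
}
```
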
+ +use tests::helpers::{serve_with_auth, request}; + +#[test] +fn should_require_authorization() { + // given + let server = serve_with_auth("test", "test"); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "WWW-Authenticate: Basic realm=\"Parity\""); +} + +#[test] +fn should_reject_on_invalid_auth() { + // given + let server = serve_with_auth("test", "test"); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); + assert_eq!(response.body, "15\n

<h1>Unauthorized</h1>
\n0\n\n".to_owned()); + assert_eq!(response.headers_raw.contains("WWW-Authenticate"), false); +} + +#[test] +fn should_allow_on_valid_auth() { + // given + let server = serve_with_auth("Aladdin", "OpenSesame"); + + // when + let response = request(server, + "\ + GET /home/ HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); +} diff --git a/dapps/src/tests/helpers.rs b/dapps/src/tests/helpers.rs new file mode 100644 index 000000000..84f638b34 --- /dev/null +++ b/dapps/src/tests/helpers.rs @@ -0,0 +1,123 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::env; +use std::io::{Read, Write}; +use std::str::{self, Lines}; +use std::sync::Arc; +use std::net::TcpStream; +use rustc_serialize::hex::{ToHex, FromHex}; + +use ServerBuilder; +use Server; +use apps::urlhint::ContractClient; +use util::{Bytes, Address, Mutex, ToPretty}; + +const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; +const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; + +pub struct FakeRegistrar { + pub calls: Arc>>, + pub responses: Mutex>>, +} + +impl FakeRegistrar { + fn new() -> Self { + FakeRegistrar { + calls: Arc::new(Mutex::new(Vec::new())), + responses: Mutex::new( + vec![ + Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), + Ok(Vec::new()) + ] + ), + } + } +} + +impl ContractClient for FakeRegistrar { + fn registrar(&self) -> Result { + Ok(REGISTRAR.parse().unwrap()) + } + + fn call(&self, address: Address, data: Bytes) -> Result { + self.calls.lock().push((address.to_hex(), data.to_hex())); + self.responses.lock().remove(0) + } +} + +pub fn serve_hosts(hosts: Option>) -> Server { + let registrar = Arc::new(FakeRegistrar::new()); + let mut dapps_path = env::temp_dir(); + dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); + let builder = ServerBuilder::new(dapps_path.to_str().unwrap().into(), registrar); + builder.start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), hosts).unwrap() +} + +pub fn serve_with_auth(user: &str, pass: &str) -> Server { + let registrar = Arc::new(FakeRegistrar::new()); + let builder = ServerBuilder::new(env::temp_dir().to_str().unwrap().into(), registrar); + builder.start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), None, user, pass).unwrap() +} + +pub fn serve() -> Server { + serve_hosts(None) +} + +pub struct Response { + pub status: String, + pub headers: Vec, + pub headers_raw: String, + pub body: String, +} + +pub fn read_block(lines: &mut Lines, all: bool) -> String { + let mut block = String::new(); + loop { + let line = lines.next(); + match line { + None => break, + Some("") if !all => break, + Some(v) => { + block.push_str(v); + block.push_str("\n"); + }, + } + } + block +} + +pub fn request(server: Server, 
request: &str) -> Response { + let mut req = TcpStream::connect(server.addr()).unwrap(); + req.write_all(request.as_bytes()).unwrap(); + + let mut response = String::new(); + req.read_to_string(&mut response).unwrap(); + + let mut lines = response.lines(); + let status = lines.next().unwrap().to_owned(); + let headers_raw = read_block(&mut lines, false); + let headers = headers_raw.split('\n').map(|v| v.to_owned()).collect(); + let body = read_block(&mut lines, true); + + Response { + status: status, + headers: headers, + headers_raw: headers_raw, + body: body, + } +} + diff --git a/dapps/src/tests/mod.rs b/dapps/src/tests/mod.rs new file mode 100644 index 000000000..8c5bf2283 --- /dev/null +++ b/dapps/src/tests/mod.rs @@ -0,0 +1,25 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Dapps server test suite + +mod helpers; + +mod api; +mod authorization; +mod redirection; +mod validation; + diff --git a/dapps/src/tests/redirection.rs b/dapps/src/tests/redirection.rs new file mode 100644 index 000000000..53aa393e2 --- /dev/null +++ b/dapps/src/tests/redirection.rs @@ -0,0 +1,185 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
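
One thing that makes the assertions below easier to read: `request()` in `helpers.rs` returns the response body as it came off the socket (re-joined line by line), so bodies appear in HTTP/1.1 chunked transfer framing, i.e. `<chunk length in hex>\n<chunk>\n0\n\n`. That is why the expected strings look like `format!("57\n{}\n0\n\n", json)`: 0x57 is 87, the byte length of the JSON-RPC parse-error chunk. A tiny sketch (not part of the patch) of that framing:

```rust
// Builds the single-chunk body shape the tests below assert against:
// "<length in hex>\n<payload>\n0\n\n".
fn chunked(body: &str) -> String {
    format!("{:x}\n{}\n0\n\n", body.len(), body)
}

fn main() {
    let json = r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#;
    assert_eq!(chunked(json), format!("57\n{}\n0\n\n", json)); // 87 bytes -> 0x57
}
```
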
+ +use tests::helpers::{serve, request}; + +#[test] +fn should_redirect_to_home() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Location: /home/"); +} + +#[test] +fn should_redirect_to_home_when_trailing_slash_is_missing() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /app HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Location: /home/"); +} + +#[test] +fn should_redirect_to_home_on_invalid_dapp() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /invaliddapp/ HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Location: /home/"); +} + +#[test] +fn should_redirect_to_home_on_invalid_dapp_with_domain() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: invaliddapp.parity\r\n\ + Connection: close\r\n\ + \r\n\ + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 302 Found".to_owned()); + assert_eq!(response.headers.get(0).unwrap(), "Location: http://home.parity/"); +} + +#[test] +fn should_serve_rpc() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + POST / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + Content-Type: application/json\r\n + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); +} + +#[test] +fn should_serve_rpc_at_slash_rpc() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + POST /rpc HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + Content-Type: application/json\r\n + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.body, format!("57\n{}\n0\n\n", r#"{"jsonrpc":"2.0","error":{"code":-32700,"message":"Parse error","data":null},"id":null}"#)); +} + + +#[test] +fn should_serve_proxy_pac() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /proxy/proxy.pac HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.body, "86\n\nfunction FindProxyForURL(url, host) {\n\tif (shExpMatch(host, \"*.parity\"))\n\t{\n\t\treturn \"PROXY 127.0.0.1:8080\";\n\t}\n\n\treturn \"DIRECT\";\n}\n\n0\n\n".to_owned()); +} + +#[test] +fn should_serve_utils() { + // given + let server = serve(); + + // when + let response = request(server, + "\ + GET /parity-utils/inject.js HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + assert_eq!(response.body.contains("function(){"), true); +} + diff --git a/dapps/src/tests/validation.rs 
b/dapps/src/tests/validation.rs new file mode 100644 index 000000000..b233a07d8 --- /dev/null +++ b/dapps/src/tests/validation.rs @@ -0,0 +1,79 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use tests::helpers::{serve_hosts, request}; + +#[test] +fn should_reject_invalid_host() { + // given + let server = serve_hosts(Some(vec!["localhost:8080".into()])); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); + assert_eq!(response.body, "85\n\n\t\t

Request with disallowed Host header has been blocked.

\n\t\t

Check the URL in your browser address bar.

\n\t\t\n0\n\n".to_owned()); +} + +#[test] +fn should_allow_valid_host() { + // given + let server = serve_hosts(Some(vec!["localhost:8080".into()])); + + // when + let response = request(server, + "\ + GET /home/ HTTP/1.1\r\n\ + Host: localhost:8080\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); +} + +#[test] +fn should_serve_dapps_domains() { + // given + let server = serve_hosts(Some(vec!["localhost:8080".into()])); + + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); +} + From dcea5c252667c9b474b4d24270d417f9fefc7754 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 30 Aug 2016 16:56:46 +0200 Subject: [PATCH 17/17] Adding proof --- dapps/src/lib.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 2beef0639..bc54b0f37 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -208,9 +208,10 @@ impl Server { *self.panic_handler.lock().unwrap() = Some(Box::new(handler)); } + #[cfg(test)] /// Returns address that this server is bound to. pub fn addr(&self) -> &SocketAddr { - self.server.as_ref().unwrap().addr() + self.server.as_ref().expect("server is always Some at the start; it's consumed only when object is dropped; qed").addr() } }
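
Taken together, the series leaves the dapps server testable end to end. A condensed sketch (not from any patch; `FakeRegistrar` and the temporary dapps path are stand-ins borrowed from `dapps/src/tests/helpers.rs`) of how the pieces fit: build a server with an allowed-hosts list, bind to an ephemeral port, and talk to it over a raw socket via the now test-only `addr()`:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;
use std::sync::Arc;

fn poke_server() {
    // Assumes FakeRegistrar and ServerBuilder as used in dapps/src/tests/helpers.rs.
    let registrar = Arc::new(FakeRegistrar::new());
    let builder = ServerBuilder::new("/tmp/parity-dapps-test".into(), registrar);

    // Restrict the Host header to "localhost"; port 0 picks a free port.
    let server = builder
        .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), Some(vec!["localhost".into()]))
        .unwrap();

    // addr() is #[cfg(test)] after the last patch, so this only compiles in tests.
    let mut stream = TcpStream::connect(server.addr()).unwrap();
    stream
        .write_all(b"GET /home/ HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")
        .unwrap();

    let mut response = String::new();
    stream.read_to_string(&mut response).unwrap();
    assert!(response.starts_with("HTTP/1.1 200 OK"));
}
```
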