diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index e950996ac..65e60d6eb 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,6 @@
stages:
- build
+ - test
- deploy
variables:
GIT_DEPTH: "3"
@@ -17,8 +18,10 @@ linux-beta:
- tags
- stable
script:
+ - export
- cargo build --release --verbose
- strip target/release/parity
+ - cp target/release/parity parity
tags:
- rust
- rust-beta
@@ -26,24 +29,12 @@ linux-beta:
paths:
- target/release/parity
name: "${CI_BUILD_NAME}_parity"
-linux-stable:
- stage: build
- image: ethcore/rust:stable
- only:
- - master
- - beta
- - tags
- - stable
- script:
- - cargo build --release --verbose
- - strip target/release/parity
+ stage: deploy
tags:
- rust
- - rust-stable
- artifacts:
- paths:
- - target/release/parity
- name: "${CI_BUILD_NAME}_parity"
+ - rust-beta
+ script:
+ - ./deploy.sh
linux-nightly:
stage: build
image: ethcore/rust:nightly
@@ -92,6 +83,12 @@ linux-armv7:
- tags
- stable
script:
+ - export
+ - rm -rf .cargo
+ - mkdir -p .cargo
+ - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config
+ - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
+ - cat .cargo/config
- cargo build --target armv7-unknown-linux-gnueabihf --release --verbose
- arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity
tags:
@@ -110,6 +107,12 @@ linux-arm:
- tags
- stable
script:
+ - export
+ - rm -rf .cargo
+ - mkdir -p .cargo
+ - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config
+ - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config
+ - cat .cargo/config
- cargo build --target arm-unknown-linux-gnueabihf --release --verbose
- arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity
tags:
@@ -129,6 +132,12 @@ linux-armv6:
- tags
- stable
script:
+ - export
+ - rm -rf .cargo
+ - mkdir -p .cargo
+ - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config
+ - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config
+ - cat .cargo/config
- cargo build --target arm-unknown-linux-gnueabi --release --verbose
- arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity
tags:
@@ -148,6 +157,12 @@ linux-aarch64:
- tags
- stable
script:
+ - export
+ - rm -rf .cargo
+ - mkdir -p .cargo
+ - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config
+ - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config
+ - cat .cargo/config
- cargo build --target aarch64-unknown-linux-gnu --release --verbose
- aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity
tags:
@@ -193,3 +208,30 @@ windows:
- target/release/parity.exe
- target/release/parity.pdb
name: "${CI_BUILD_NAME}_parity"
+linux-stable:
+ stage: build
+ image: ethcore/rust:stable
+ only:
+ - master
+ - beta
+ - tags
+ - stable
+ script:
+ - export
+ - cargo build --release --verbose
+ - strip target/release/parity
+ tags:
+ - rust
+ - rust-stable
+ artifacts:
+ paths:
+ - target/release/parity
+ name: "${CI_BUILD_NAME}_parity"
+test-linux:
+ stage: test
+ before_script:
+ - git submodule update --init --recursive
+ script:
+ - ./test.sh --verbose
+ dependencies:
+ - linux-stable
diff --git a/Cargo.lock b/Cargo.lock
index 8312b0b71..515f61218 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -301,6 +301,7 @@ dependencies = [
"hyper 0.9.4 (git+https://github.com/ethcore/hyper)",
"jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)",
+ "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-dapps 1.4.0 (git+https://github.com/ethcore/parity-ui.git)",
@@ -808,6 +809,11 @@ name = "libc"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
+[[package]]
+name = "linked-hash-map"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
[[package]]
name = "log"
version = "0.3.6"
@@ -1752,7 +1758,8 @@ dependencies = [
"checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d"
"checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a"
"checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f"
-"checksum libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "23e3757828fa702a20072c37ff47938e9dd331b92fac6e223d26d4b7a55f7ee2"
+"checksum libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "97def9dc7ce1d8e153e693e3a33020bc69972181adb2f871e87e888876feae49"
+"checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd"
"checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054"
"checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e"
"checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20"
diff --git a/README.md b/README.md
index f3a8a92bc..26913183c 100644
--- a/README.md
+++ b/README.md
@@ -84,9 +84,21 @@ $ cargo build --release
This will produce an executable in the `./target/release` subdirectory.
-To get started, just run
+## Start Parity
+### Manually
+To start Parity manually, just run
```bash
$ ./target/release/parity
```
-and parity will begin syncing the Ethereum blockchain.
+and Parity will begin syncing the Ethereum blockchain.
+
+### Using systemd service file
+To start Parity as a regular user using systemd init:
+
+1. Copy ```parity/scripts/parity.service``` to your
+systemd user directory (usually ```~/.config/systemd/user```).
+2. To pass any argument to Parity, write a ```~/.parity/parity.conf``` file this way:
+```ARGS="ARG1 ARG2 ARG3"```.
+
+ Example: ```ARGS="ui --geth --identity MyMachine"```.
diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml
index 0ea3d0de4..14f3940bd 100644
--- a/dapps/Cargo.toml
+++ b/dapps/Cargo.toml
@@ -22,6 +22,7 @@ serde_json = "0.7.0"
serde_macros = { version = "0.7.0", optional = true }
zip = { version = "0.1", default-features = false }
ethabi = "0.2.1"
+linked-hash-map = "0.3"
ethcore-rpc = { path = "../rpc" }
ethcore-util = { path = "../util" }
https-fetch = { path = "../util/https-fetch" }
diff --git a/dapps/src/apps/cache.rs b/dapps/src/apps/cache.rs
new file mode 100644
index 000000000..bf1c5f3cc
--- /dev/null
+++ b/dapps/src/apps/cache.rs
@@ -0,0 +1,128 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Fetchable Dapps support.
+
+use std::fs;
+use std::sync::{Arc};
+use std::sync::atomic::{AtomicBool, Ordering};
+
+use linked_hash_map::LinkedHashMap;
+use page::LocalPageEndpoint;
+
+pub enum ContentStatus {
+ Fetching(Arc<AtomicBool>),
+ Ready(LocalPageEndpoint),
+}
+
+#[derive(Default)]
+pub struct ContentCache {
+ cache: LinkedHashMap<String, ContentStatus>,
+}
+
+impl ContentCache {
+ pub fn insert(&mut self, content_id: String, status: ContentStatus) -> Option<ContentStatus> {
+ self.cache.insert(content_id, status)
+ }
+
+ pub fn remove(&mut self, content_id: &str) -> Option<ContentStatus> {
+ self.cache.remove(content_id)
+ }
+
+ pub fn get(&mut self, content_id: &str) -> Option<&mut ContentStatus> {
+ self.cache.get_refresh(content_id)
+ }
+
+ pub fn clear_garbage(&mut self, expected_size: usize) -> Vec<(String, ContentStatus)> {
+ let mut len = self.cache.len();
+
+ if len <= expected_size {
+ return Vec::new();
+ }
+
+ let mut removed = Vec::with_capacity(len - expected_size);
+ while len > expected_size {
+ let entry = self.cache.pop_front().unwrap();
+ match entry.1 {
+ ContentStatus::Fetching(ref abort) => {
+ trace!(target: "dapps", "Aborting {} because of limit.", entry.0);
+ // Mark as aborted
+ abort.store(true, Ordering::Relaxed);
+ },
+ ContentStatus::Ready(ref endpoint) => {
+ trace!(target: "dapps", "Removing {} because of limit.", entry.0);
+ // Remove path
+ let res = fs::remove_dir_all(&endpoint.path());
+ if let Err(e) = res {
+ warn!(target: "dapps", "Unable to remove dapp: {:?}", e);
+ }
+ }
+ }
+
+ removed.push(entry);
+ len -= 1;
+ }
+ removed
+ }
+
+ #[cfg(test)]
+ pub fn len(&self) -> usize {
+ self.cache.len()
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ fn only_keys(data: Vec<(String, ContentStatus)>) -> Vec<String> {
+ data.into_iter().map(|x| x.0).collect()
+ }
+
+ #[test]
+ fn should_remove_least_recently_used() {
+ // given
+ let mut cache = ContentCache::default();
+ cache.insert("a".into(), ContentStatus::Fetching(Default::default()));
+ cache.insert("b".into(), ContentStatus::Fetching(Default::default()));
+ cache.insert("c".into(), ContentStatus::Fetching(Default::default()));
+
+ // when
+ let res = cache.clear_garbage(2);
+
+ // then
+ assert_eq!(cache.len(), 2);
+ assert_eq!(only_keys(res), vec!["a"]);
+ }
+
+ #[test]
+ fn should_update_lru_if_accessed() {
+ // given
+ let mut cache = ContentCache::default();
+ cache.insert("a".into(), ContentStatus::Fetching(Default::default()));
+ cache.insert("b".into(), ContentStatus::Fetching(Default::default()));
+ cache.insert("c".into(), ContentStatus::Fetching(Default::default()));
+
+ // when
+ cache.get("a");
+ let res = cache.clear_garbage(2);
+
+ // then
+ assert_eq!(cache.len(), 2);
+ assert_eq!(only_keys(res), vec!["b"]);
+ }
+
+}
diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs
index 91b6ed990..214c3e2b5 100644
--- a/dapps/src/apps/fetcher.rs
+++ b/dapps/src/apps/fetcher.rs
@@ -23,7 +23,7 @@ use std::{fs, env, fmt};
use std::io::{self, Read, Write};
use std::path::PathBuf;
use std::sync::Arc;
-use std::collections::HashMap;
+use std::sync::atomic::{AtomicBool};
use rustc_serialize::hex::FromHex;
use hyper::Control;
@@ -33,20 +33,18 @@ use random_filename;
use util::{Mutex, H256};
use util::sha3::sha3;
use page::LocalPageEndpoint;
-use handlers::{ContentHandler, AppFetcherHandler, DappHandler};
+use handlers::{ContentHandler, ContentFetcherHandler, ContentValidator};
use endpoint::{Endpoint, EndpointPath, Handler};
+use apps::cache::{ContentCache, ContentStatus};
use apps::manifest::{MANIFEST_FILENAME, deserialize_manifest, serialize_manifest, Manifest};
use apps::urlhint::{URLHintContract, URLHint};
-enum AppStatus {
- Fetching,
- Ready(LocalPageEndpoint),
-}
+const MAX_CACHED_DAPPS: usize = 10;
pub struct AppFetcher {
dapps_path: PathBuf,
resolver: R,
- dapps: Arc<Mutex<HashMap<String, AppStatus>>>,
+ dapps: Arc<Mutex<ContentCache>>,
}
impl Drop for AppFetcher {
@@ -65,17 +63,17 @@ impl AppFetcher {
AppFetcher {
dapps_path: dapps_path,
resolver: resolver,
- dapps: Arc::new(Mutex::new(HashMap::new())),
+ dapps: Arc::new(Mutex::new(ContentCache::default())),
}
}
#[cfg(test)]
- fn set_status(&self, app_id: &str, status: AppStatus) {
+ fn set_status(&self, app_id: &str, status: ContentStatus) {
self.dapps.lock().insert(app_id.to_owned(), status);
}
pub fn contains(&self, app_id: &str) -> bool {
- let dapps = self.dapps.lock();
+ let mut dapps = self.dapps.lock();
match dapps.get(app_id) {
// Check if we already have the app
Some(_) => true,
@@ -95,11 +93,11 @@ impl AppFetcher {
let status = dapps.get(&app_id);
match status {
// Just server dapp
- Some(&AppStatus::Ready(ref endpoint)) => {
+ Some(&mut ContentStatus::Ready(ref endpoint)) => {
(None, endpoint.to_handler(path))
},
// App is already being fetched
- Some(&AppStatus::Fetching) => {
+ Some(&mut ContentStatus::Fetching(_)) => {
(None, Box::new(ContentHandler::html(
StatusCode::ServiceUnavailable,
format!(
@@ -111,11 +109,13 @@ impl AppFetcher {
},
// We need to start fetching app
None => {
- // TODO [todr] Keep only last N dapps available!
let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true.");
let app = self.resolver.resolve(app_hex).expect("to_handler is called only when `contains` returns true.");
- (Some(AppStatus::Fetching), Box::new(AppFetcherHandler::new(
+ let abort = Arc::new(AtomicBool::new(false));
+
+ (Some(ContentStatus::Fetching(abort.clone())), Box::new(ContentFetcherHandler::new(
app,
+ abort,
control,
path.using_dapps_domains,
DappInstaller {
@@ -129,6 +129,7 @@ impl AppFetcher {
};
if let Some(status) = new_status {
+ dapps.clear_garbage(MAX_CACHED_DAPPS);
dapps.insert(app_id, status);
}
@@ -178,7 +179,7 @@ impl From for ValidationError {
struct DappInstaller {
dapp_id: String,
dapps_path: PathBuf,
- dapps: Arc<Mutex<HashMap<String, AppStatus>>>,
+ dapps: Arc<Mutex<ContentCache>>,
}
impl DappInstaller {
@@ -213,7 +214,7 @@ impl DappInstaller {
}
}
-impl DappHandler for DappInstaller {
+impl ContentValidator for DappInstaller {
type Error = ValidationError;
fn validate_and_install(&self, app_path: PathBuf) -> Result {
@@ -280,7 +281,7 @@ impl DappHandler for DappInstaller {
Some(manifest) => {
let path = self.dapp_target_path(manifest);
let app = LocalPageEndpoint::new(path, manifest.clone().into());
- dapps.insert(self.dapp_id.clone(), AppStatus::Ready(app));
+ dapps.insert(self.dapp_id.clone(), ContentStatus::Ready(app));
},
// In case of error
None => {
@@ -292,12 +293,13 @@ impl DappHandler for DappInstaller {
#[cfg(test)]
mod tests {
- use std::path::PathBuf;
- use super::{AppFetcher, AppStatus};
- use apps::urlhint::{GithubApp, URLHint};
+ use std::env;
+ use util::Bytes;
use endpoint::EndpointInfo;
use page::LocalPageEndpoint;
- use util::Bytes;
+ use apps::cache::ContentStatus;
+ use apps::urlhint::{GithubApp, URLHint};
+ use super::AppFetcher;
struct FakeResolver;
impl URLHint for FakeResolver {
@@ -309,8 +311,9 @@ mod tests {
#[test]
fn should_true_if_contains_the_app() {
// given
+ let path = env::temp_dir();
let fetcher = AppFetcher::new(FakeResolver);
- let handler = LocalPageEndpoint::new(PathBuf::from("/tmp/test"), EndpointInfo {
+ let handler = LocalPageEndpoint::new(path, EndpointInfo {
name: "fake".into(),
description: "".into(),
version: "".into(),
@@ -319,8 +322,8 @@ mod tests {
});
// when
- fetcher.set_status("test", AppStatus::Ready(handler));
- fetcher.set_status("test2", AppStatus::Fetching);
+ fetcher.set_status("test", ContentStatus::Ready(handler));
+ fetcher.set_status("test2", ContentStatus::Fetching(Default::default()));
// then
assert_eq!(fetcher.contains("test"), true);
diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs
index 84a3c5ddf..65bee587d 100644
--- a/dapps/src/apps/mod.rs
+++ b/dapps/src/apps/mod.rs
@@ -19,6 +19,7 @@ use page::PageEndpoint;
use proxypac::ProxyPac;
use parity_dapps::WebApp;
+mod cache;
mod fs;
pub mod urlhint;
pub mod fetcher;
diff --git a/dapps/src/handlers/client/fetch_file.rs b/dapps/src/handlers/client/fetch_file.rs
index 835de1145..763d193f4 100644
--- a/dapps/src/handlers/client/fetch_file.rs
+++ b/dapps/src/handlers/client/fetch_file.rs
@@ -18,7 +18,8 @@
use std::{env, io, fs, fmt};
use std::path::PathBuf;
-use std::sync::mpsc;
+use std::sync::{mpsc, Arc};
+use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use random_filename;
@@ -31,6 +32,7 @@ use super::FetchError;
#[derive(Debug)]
pub enum Error {
+ Aborted,
NotStarted,
UnexpectedStatus(StatusCode),
IoError(io::Error),
@@ -42,6 +44,7 @@ pub type OnDone = Box;
pub struct Fetch {
path: PathBuf,
+ abort: Arc<AtomicBool>,
file: Option,
result: Option,
sender: mpsc::Sender,
@@ -58,7 +61,7 @@ impl Drop for Fetch {
fn drop(&mut self) {
let res = self.result.take().unwrap_or(Err(Error::NotStarted.into()));
// Remove file if there was an error
- if res.is_err() {
+ if res.is_err() || self.is_aborted() {
if let Some(file) = self.file.take() {
drop(file);
// Remove file
@@ -74,12 +77,13 @@ impl Drop for Fetch {
}
impl Fetch {
- pub fn new(sender: mpsc::Sender, on_done: OnDone) -> Self {
+ pub fn new(sender: mpsc::Sender, abort: Arc, on_done: OnDone) -> Self {
let mut dir = env::temp_dir();
dir.push(random_filename());
Fetch {
path: dir,
+ abort: abort,
file: None,
result: None,
sender: sender,
@@ -88,17 +92,36 @@ impl Fetch {
}
}
+impl Fetch {
+ fn is_aborted(&self) -> bool {
+ self.abort.load(Ordering::Relaxed)
+ }
+ fn mark_aborted(&mut self) -> Next {
+ self.result = Some(Err(Error::Aborted));
+ Next::end()
+ }
+}
+
impl hyper::client::Handler for Fetch {
fn on_request(&mut self, req: &mut Request) -> Next {
+ if self.is_aborted() {
+ return self.mark_aborted();
+ }
req.headers_mut().set(Connection::close());
read()
}
fn on_request_writable(&mut self, _encoder: &mut Encoder) -> Next {
+ if self.is_aborted() {
+ return self.mark_aborted();
+ }
read()
}
fn on_response(&mut self, res: Response) -> Next {
+ if self.is_aborted() {
+ return self.mark_aborted();
+ }
if *res.status() != StatusCode::Ok {
self.result = Some(Err(Error::UnexpectedStatus(*res.status()).into()));
return Next::end();
@@ -119,6 +142,9 @@ impl hyper::client::Handler for Fetch {
}
fn on_response_readable(&mut self, decoder: &mut Decoder) -> Next {
+ if self.is_aborted() {
+ return self.mark_aborted();
+ }
match io::copy(decoder, self.file.as_mut().expect("File is there because on_response has created it.")) {
Ok(0) => Next::end(),
Ok(_) => read(),
diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs
index 94110e534..8448bd16b 100644
--- a/dapps/src/handlers/fetch.rs
+++ b/dapps/src/handlers/fetch.rs
@@ -18,7 +18,8 @@
use std::fmt;
use std::path::PathBuf;
-use std::sync::mpsc;
+use std::sync::{mpsc, Arc};
+use std::sync::atomic::AtomicBool;
use std::time::{Instant, Duration};
use hyper::{header, server, Decoder, Encoder, Next, Method, Control};
@@ -38,19 +39,20 @@ enum FetchState {
Error(ContentHandler),
InProgress {
deadline: Instant,
- receiver: mpsc::Receiver
+ receiver: mpsc::Receiver,
},
Done(Manifest),
}
-pub trait DappHandler {
+pub trait ContentValidator {
type Error: fmt::Debug + fmt::Display;
fn validate_and_install(&self, app: PathBuf) -> Result;
fn done(&self, Option<&Manifest>);
}
-pub struct AppFetcherHandler {
+pub struct ContentFetcherHandler {
+ abort: Arc<AtomicBool>,
control: Option,
status: FetchState,
client: Option,
@@ -58,7 +60,7 @@ pub struct AppFetcherHandler {
dapp: H,
}
-impl Drop for AppFetcherHandler {
+impl Drop for ContentFetcherHandler {
fn drop(&mut self) {
let manifest = match self.status {
FetchState::Done(ref manifest) => Some(manifest),
@@ -68,16 +70,18 @@ impl Drop for AppFetcherHandler {
}
}
-impl AppFetcherHandler {
+impl ContentFetcherHandler {
pub fn new(
app: GithubApp,
+ abort: Arc<AtomicBool>,
control: Control,
using_dapps_domains: bool,
handler: H) -> Self {
let client = Client::new();
- AppFetcherHandler {
+ ContentFetcherHandler {
+ abort: abort,
control: Some(control),
client: Some(client),
status: FetchState::NotStarted(app),
@@ -93,9 +97,8 @@ impl AppFetcherHandler {
}
- // TODO [todr] https support
- fn fetch_app(client: &mut Client, app: &GithubApp, control: Control) -> Result, String> {
- client.request(app.url(), Box::new(move || {
+ fn fetch_app(client: &mut Client, app: &GithubApp, abort: Arc, control: Control) -> Result, String> {
+ client.request(app.url(), abort, Box::new(move || {
trace!(target: "dapps", "Fetching finished.");
// Ignoring control errors
let _ = control.ready(Next::read());
@@ -103,7 +106,7 @@ impl AppFetcherHandler {
}
}
-impl server::Handler for AppFetcherHandler {
+impl server::Handler for ContentFetcherHandler {
fn on_request(&mut self, request: server::Request) -> Next {
let status = if let FetchState::NotStarted(ref app) = self.status {
Some(match *request.method() {
@@ -112,7 +115,7 @@ impl server::Handler for AppFetcherHandler {
trace!(target: "dapps", "Fetching dapp: {:?}", app);
let control = self.control.take().expect("on_request is called only once, thus control is always Some");
let client = self.client.as_mut().expect("on_request is called before client is closed.");
- let fetch = Self::fetch_app(client, app, control);
+ let fetch = Self::fetch_app(client, app, self.abort.clone(), control);
match fetch {
Ok(receiver) => FetchState::InProgress {
deadline: Instant::now() + Duration::from_secs(FETCH_TIMEOUT),
diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs
index 85a8bd439..6f6423b58 100644
--- a/dapps/src/handlers/mod.rs
+++ b/dapps/src/handlers/mod.rs
@@ -27,7 +27,7 @@ pub use self::auth::AuthRequiredHandler;
pub use self::echo::EchoHandler;
pub use self::content::ContentHandler;
pub use self::redirect::Redirection;
-pub use self::fetch::{AppFetcherHandler, DappHandler};
+pub use self::fetch::{ContentFetcherHandler, ContentValidator};
use url::Url;
use hyper::{server, header, net, uri};
diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs
index 9f08e4dbc..0a2e7d8c5 100644
--- a/dapps/src/lib.rs
+++ b/dapps/src/lib.rs
@@ -61,6 +61,7 @@ extern crate parity_dapps;
extern crate https_fetch;
extern crate ethcore_rpc;
extern crate ethcore_util as util;
+extern crate linked_hash_map;
mod endpoint;
mod apps;
@@ -109,14 +110,28 @@ impl ServerBuilder {
/// Asynchronously start server with no authentication,
/// returns result with `Server` handle on success or an error.
- pub fn start_unsecure_http(&self, addr: &SocketAddr) -> Result {
- Server::start_http(addr, NoAuth, self.handler.clone(), self.dapps_path.clone(), self.registrar.clone())
+ pub fn start_unsecured_http(&self, addr: &SocketAddr, hosts: Option<Vec<String>>) -> Result<Server, ServerError> {
+ Server::start_http(
+ addr,
+ hosts,
+ NoAuth,
+ self.handler.clone(),
+ self.dapps_path.clone(),
+ self.registrar.clone()
+ )
}
/// Asynchronously start server with `HTTP Basic Authentication`,
/// return result with `Server` handle on success or an error.
- pub fn start_basic_auth_http(&self, addr: &SocketAddr, username: &str, password: &str) -> Result {
- Server::start_http(addr, HttpBasicAuth::single_user(username, password), self.handler.clone(), self.dapps_path.clone(), self.registrar.clone())
+ pub fn start_basic_auth_http(&self, addr: &SocketAddr, hosts: Option<Vec<String>>, username: &str, password: &str) -> Result<Server, ServerError> {
+ Server::start_http(
+ addr,
+ hosts,
+ HttpBasicAuth::single_user(username, password),
+ self.handler.clone(),
+ self.dapps_path.clone(),
+ self.registrar.clone()
+ )
}
}
@@ -127,8 +142,24 @@ pub struct Server {
}
impl Server {
+ /// Returns a list of allowed hosts or `None` if all hosts are allowed.
+ fn allowed_hosts(hosts: Option<Vec<String>>, bind_address: String) -> Option<Vec<String>> {
+ let mut allowed = Vec::new();
+
+ match hosts {
+ Some(hosts) => allowed.extend_from_slice(&hosts),
+ None => return None,
+ }
+
+ // Add localhost domain as valid too if listening on loopback interface.
+ allowed.push(bind_address.replace("127.0.0.1", "localhost").into());
+ allowed.push(bind_address.into());
+ Some(allowed)
+ }
+
fn start_http(
addr: &SocketAddr,
+ hosts: Option<Vec<String>>,
authorization: A,
handler: Arc,
dapps_path: String,
@@ -145,7 +176,7 @@ impl Server {
special.insert(router::SpecialEndpoint::Utils, apps::utils());
special
});
- let bind_address = format!("{}", addr);
+ let hosts = Self::allowed_hosts(hosts, format!("{}", addr));
try!(hyper::Server::http(addr))
.handle(move |ctrl| router::Router::new(
@@ -155,7 +186,7 @@ impl Server {
endpoints.clone(),
special.clone(),
authorization.clone(),
- bind_address.clone(),
+ hosts.clone(),
))
.map(|(l, srv)| {
@@ -208,3 +239,23 @@ pub fn random_filename() -> String {
rng.gen_ascii_chars().take(12).collect()
}
+#[cfg(test)]
+mod tests {
+ use super::Server;
+
+ #[test]
+ fn should_return_allowed_hosts() {
+ // given
+ let bind_address = "127.0.0.1".to_owned();
+
+ // when
+ let all = Server::allowed_hosts(None, bind_address.clone());
+ let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone());
+ let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone());
+
+ // then
+ assert_eq!(all, None);
+ assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()]));
+ assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()]));
+ }
+}
diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs
index 52e32bf5e..dcfd9bed2 100644
--- a/dapps/src/page/local.rs
+++ b/dapps/src/page/local.rs
@@ -33,6 +33,10 @@ impl LocalPageEndpoint {
info: info,
}
}
+
+ pub fn path(&self) -> PathBuf {
+ self.path.clone()
+ }
}
impl Endpoint for LocalPageEndpoint {
diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs
index 62813500f..e0f974482 100644
--- a/dapps/src/router/host_validation.rs
+++ b/dapps/src/router/host_validation.rs
@@ -22,13 +22,11 @@ use hyper::net::HttpStream;
use jsonrpc_http_server::{is_host_header_valid};
use handlers::ContentHandler;
-pub fn is_valid(request: &server::Request, bind_address: &str, endpoints: Vec) -> bool {
- let mut endpoints = endpoints.into_iter()
+pub fn is_valid(request: &server::Request, allowed_hosts: &[String], endpoints: Vec) -> bool {
+ let mut endpoints = endpoints.iter()
.map(|endpoint| format!("{}{}", endpoint, DAPPS_DOMAIN))
.collect::<Vec<String>>();
- // Add localhost domain as valid too if listening on loopback interface.
- endpoints.push(bind_address.replace("127.0.0.1", "localhost").into());
- endpoints.push(bind_address.into());
+ endpoints.extend_from_slice(allowed_hosts);
let header_valid = is_host_header_valid(request, &endpoints);
diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs
index 568dc00da..359337047 100644
--- a/dapps/src/router/mod.rs
+++ b/dapps/src/router/mod.rs
@@ -48,7 +48,7 @@ pub struct Router {
fetch: Arc,
special: Arc>>,
authorization: Arc,
- bind_address: String,
+ allowed_hosts: Option<Vec<String>>,
handler: Box + Send>,
}
@@ -56,9 +56,11 @@ impl server::Handler for Router {
fn on_request(&mut self, req: server::Request) -> Next {
// Validate Host header
- if !host_validation::is_valid(&req, &self.bind_address, self.endpoints.keys().cloned().collect()) {
- self.handler = host_validation::host_invalid_response();
- return self.handler.on_request(req);
+ if let Some(ref hosts) = self.allowed_hosts {
+ if !host_validation::is_valid(&req, hosts, self.endpoints.keys().cloned().collect()) {
+ self.handler = host_validation::host_invalid_response();
+ return self.handler.on_request(req);
+ }
}
// Check authorization
@@ -125,7 +127,7 @@ impl Router {
endpoints: Arc,
special: Arc>>,
authorization: Arc,
- bind_address: String,
+ allowed_hosts: Option<Vec<String>>,
) -> Self {
let handler = special.get(&SpecialEndpoint::Rpc).unwrap().to_handler(EndpointPath::default());
@@ -136,7 +138,7 @@ impl Router {
fetch: app_fetcher,
special: special,
authorization: authorization,
- bind_address: bind_address,
+ allowed_hosts: allowed_hosts,
handler: handler,
}
}
diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs
index cd02b9a1b..784e71dc0 100644
--- a/ethcore/src/block.rs
+++ b/ethcore/src/block.rs
@@ -245,11 +245,11 @@ impl<'x> OpenBlock<'x> {
last_hashes: last_hashes,
};
- r.block.base.header.parent_hash = parent.hash();
- r.block.base.header.number = parent.number + 1;
- r.block.base.header.author = author;
+ r.block.base.header.set_parent_hash(parent.hash());
+ r.block.base.header.set_number(parent.number() + 1);
+ r.block.base.header.set_author(author);
r.block.base.header.set_timestamp_now(parent.timestamp());
- r.block.base.header.extra_data = extra_data;
+ r.block.base.header.set_extra_data(extra_data);
r.block.base.header.note_dirty();
engine.populate_from_parent(&mut r.block.base.header, parent, gas_range_target.0, gas_range_target.1);
@@ -309,13 +309,13 @@ impl<'x> OpenBlock<'x> {
pub fn env_info(&self) -> EnvInfo {
// TODO: memoise.
EnvInfo {
- number: self.block.base.header.number,
- author: self.block.base.header.author.clone(),
- timestamp: self.block.base.header.timestamp,
- difficulty: self.block.base.header.difficulty.clone(),
+ number: self.block.base.header.number(),
+ author: self.block.base.header.author().clone(),
+ timestamp: self.block.base.header.timestamp(),
+ difficulty: self.block.base.header.difficulty().clone(),
last_hashes: self.last_hashes.clone(),
gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
- gas_limit: self.block.base.header.gas_limit.clone(),
+ gas_limit: self.block.base.header.gas_limit().clone(),
}
}
@@ -349,14 +349,13 @@ impl<'x> OpenBlock<'x> {
let unclosed_state = s.block.state.clone();
s.engine.on_close_block(&mut s.block);
- s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect());
+ s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()));
let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
- s.block.base.header.uncles_hash = uncle_bytes.sha3();
- s.block.base.header.state_root = s.block.state.root().clone();
- s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect());
- s.block.base.header.log_bloom = s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b}); //TODO: use |= operator
- s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used);
- s.block.base.header.note_dirty();
+ s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
+ s.block.base.header.set_state_root(s.block.state.root().clone());
+ s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()));
+ s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
+ s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
ClosedBlock {
block: s.block,
@@ -371,20 +370,19 @@ impl<'x> OpenBlock<'x> {
let mut s = self;
s.engine.on_close_block(&mut s.block);
- if s.block.base.header.transactions_root.is_zero() || s.block.base.header.transactions_root == SHA3_NULL_RLP {
- s.block.base.header.transactions_root = ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect());
+ if s.block.base.header.transactions_root().is_zero() || s.block.base.header.transactions_root() == &SHA3_NULL_RLP {
+ s.block.base.header.set_transactions_root(ordered_trie_root(s.block.base.transactions.iter().map(|e| e.rlp_bytes().to_vec()).collect()));
}
let uncle_bytes = s.block.base.uncles.iter().fold(RlpStream::new_list(s.block.base.uncles.len()), |mut s, u| {s.append_raw(&u.rlp(Seal::With), 1); s} ).out();
- if s.block.base.header.uncles_hash.is_zero() {
- s.block.base.header.uncles_hash = uncle_bytes.sha3();
+ if s.block.base.header.uncles_hash().is_zero() {
+ s.block.base.header.set_uncles_hash(uncle_bytes.sha3());
}
- if s.block.base.header.receipts_root.is_zero() || s.block.base.header.receipts_root == SHA3_NULL_RLP {
- s.block.base.header.receipts_root = ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect());
+ if s.block.base.header.receipts_root().is_zero() || s.block.base.header.receipts_root() == &SHA3_NULL_RLP {
+ s.block.base.header.set_receipts_root(ordered_trie_root(s.block.receipts.iter().map(|r| r.rlp_bytes().to_vec()).collect()));
}
- s.block.base.header.state_root = s.block.state.root().clone();
- s.block.base.header.log_bloom = s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b}); //TODO: use |= operator
- s.block.base.header.gas_used = s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used);
- s.block.base.header.note_dirty();
+ s.block.base.header.set_state_root(s.block.state.root().clone());
+ s.block.base.header.set_log_bloom(s.block.receipts.iter().fold(LogBloom::zero(), |mut b, r| {b = &b | &r.log_bloom; b})); //TODO: use |= operator
+ s.block.base.header.set_gas_used(s.block.receipts.last().map_or(U256::zero(), |r| r.gas_used));
LockedBlock {
block: s.block,
@@ -625,9 +623,9 @@ mod tests {
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let mut uncle1_header = Header::new();
- uncle1_header.extra_data = b"uncle1".to_vec();
+ uncle1_header.set_extra_data(b"uncle1".to_vec());
let mut uncle2_header = Header::new();
- uncle2_header.extra_data = b"uncle2".to_vec();
+ uncle2_header.set_extra_data(b"uncle2".to_vec());
open_block.push_uncle(uncle1_header).unwrap();
open_block.push_uncle(uncle2_header).unwrap();
let b = open_block.close_and_lock().seal(engine, vec![]).unwrap();
@@ -643,7 +641,7 @@ mod tests {
let bytes = e.rlp_bytes();
assert_eq!(bytes, orig_bytes);
let uncles = BlockView::new(&bytes).uncles();
- assert_eq!(uncles[1].extra_data, b"uncle2");
+ assert_eq!(uncles[1].extra_data(), b"uncle2");
let db = e.drain();
assert_eq!(orig_db.keys(), db.keys());
diff --git a/ethcore/src/block_queue.rs b/ethcore/src/block_queue.rs
index 89a620493..7d686cec0 100644
--- a/ethcore/src/block_queue.rs
+++ b/ethcore/src/block_queue.rs
@@ -260,7 +260,7 @@ impl BlockQueue {
fn drain_verifying(verifying: &mut VecDeque, verified: &mut VecDeque, bad: &mut HashSet) {
while !verifying.is_empty() && verifying.front().unwrap().block.is_some() {
let block = verifying.pop_front().unwrap().block.unwrap();
- if bad.contains(&block.header.parent_hash) {
+ if bad.contains(block.header.parent_hash()) {
bad.insert(block.header.hash());
}
else {
@@ -313,7 +313,7 @@ impl BlockQueue {
return Err(ImportError::KnownBad.into());
}
- if bad.contains(&header.parent_hash) {
+ if bad.contains(header.parent_hash()) {
bad.insert(h.clone());
return Err(ImportError::KnownBad.into());
}
@@ -351,7 +351,7 @@ impl BlockQueue {
let mut new_verified = VecDeque::new();
for block in verified.drain(..) {
- if bad.contains(&block.header.parent_hash) {
+ if bad.contains(block.header.parent_hash()) {
bad.insert(block.header.hash());
processing.remove(&block.header.hash());
} else {
diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs
index 379d77407..a581e59e9 100644
--- a/ethcore/src/blockchain/blockchain.rs
+++ b/ethcore/src/blockchain/blockchain.rs
@@ -1434,7 +1434,7 @@ mod tests {
let mut block_header = bc.block_header(&best_hash);
while !block_header.is_none() {
- block_header = bc.block_header(&block_header.unwrap().parent_hash);
+ block_header = bc.block_header(&block_header.unwrap().parent_hash());
}
assert!(bc.cache_size().blocks > 1024 * 1024);
diff --git a/ethcore/src/blockchain/generator/block.rs b/ethcore/src/blockchain/generator/block.rs
index 0a3dad399..238051d2a 100644
--- a/ethcore/src/blockchain/generator/block.rs
+++ b/ethcore/src/blockchain/generator/block.rs
@@ -44,21 +44,22 @@ impl Encodable for Block {
impl Forkable for Block {
fn fork(mut self, fork_number: usize) -> Self where Self: Sized {
- self.header.difficulty = self.header.difficulty - U256::from(fork_number);
+ let difficulty = self.header.difficulty().clone() - U256::from(fork_number);
+ self.header.set_difficulty(difficulty);
self
}
}
impl WithBloom for Block {
fn with_bloom(mut self, bloom: H2048) -> Self where Self: Sized {
- self.header.log_bloom = bloom;
+ self.header.set_log_bloom(bloom);
self
}
}
impl CompleteBlock for Block {
fn complete(mut self, parent_hash: H256) -> Bytes {
- self.header.parent_hash = parent_hash;
+ self.header.set_parent_hash(parent_hash);
encode(&self).to_vec()
}
}
diff --git a/ethcore/src/blockchain/generator/generator.rs b/ethcore/src/blockchain/generator/generator.rs
index 07ce7242b..179839b5a 100644
--- a/ethcore/src/blockchain/generator/generator.rs
+++ b/ethcore/src/blockchain/generator/generator.rs
@@ -73,8 +73,8 @@ pub struct ChainGenerator {
impl ChainGenerator {
fn prepare_block(&self) -> Block {
let mut block = Block::default();
- block.header.number = self.number;
- block.header.difficulty = self.difficulty;
+ block.header.set_number(self.number);
+ block.header.set_difficulty(self.difficulty);
block
}
}
diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs
index 78669912c..edd671b70 100644
--- a/ethcore/src/client/client.rs
+++ b/ethcore/src/client/client.rs
@@ -99,7 +99,7 @@ impl ClientReport {
pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
self.blocks_imported += 1;
self.transactions_applied += block.transactions.len();
- self.gas_processed = self.gas_processed + block.header.gas_used;
+ self.gas_processed = self.gas_processed + block.header.gas_used().clone();
}
}
@@ -284,15 +284,15 @@ impl Client {
};
// Check if Parent is in chain
- let chain_has_parent = self.chain.block_header(&header.parent_hash);
+ let chain_has_parent = self.chain.block_header(header.parent_hash());
if let None = chain_has_parent {
- warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
+ warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash());
return Err(());
};
// Enact Verified Block
let parent = chain_has_parent.unwrap();
- let last_hashes = self.build_last_hashes(header.parent_hash.clone());
+ let last_hashes = self.build_last_hashes(header.parent_hash().clone());
let db = self.state_db.lock().boxed_clone();
let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.factories.clone());
@@ -352,7 +352,7 @@ impl Client {
for block in blocks {
let header = &block.header;
- if invalid_blocks.contains(&header.parent_hash) {
+ if invalid_blocks.contains(header.parent_hash()) {
invalid_blocks.insert(header.hash());
continue;
}
diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs
index 8e26a6b0c..fb7f9083e 100644
--- a/ethcore/src/client/test_client.rs
+++ b/ethcore/src/client/test_client.rs
@@ -169,19 +169,19 @@ impl TestBlockChainClient {
let len = self.numbers.read().len();
for n in len..(len + count) {
let mut header = BlockHeader::new();
- header.difficulty = From::from(n);
- header.parent_hash = self.last_hash.read().clone();
- header.number = n as BlockNumber;
- header.gas_limit = U256::from(1_000_000);
+ header.set_difficulty(From::from(n));
+ header.set_parent_hash(self.last_hash.read().clone());
+ header.set_number(n as BlockNumber);
+ header.set_gas_limit(U256::from(1_000_000));
let uncles = match with {
EachBlockWith::Uncle | EachBlockWith::UncleAndTransaction => {
let mut uncles = RlpStream::new_list(1);
let mut uncle_header = BlockHeader::new();
- uncle_header.difficulty = From::from(n);
- uncle_header.parent_hash = self.last_hash.read().clone();
- uncle_header.number = n as BlockNumber;
+ uncle_header.set_difficulty(From::from(n));
+ uncle_header.set_parent_hash(self.last_hash.read().clone());
+ uncle_header.set_number(n as BlockNumber);
uncles.append(&uncle_header);
- header.uncles_hash = uncles.as_raw().sha3();
+ header.set_uncles_hash(uncles.as_raw().sha3());
uncles
},
_ => RlpStream::new_list(0)
@@ -219,7 +219,7 @@ impl TestBlockChainClient {
pub fn corrupt_block(&mut self, n: BlockNumber) {
let hash = self.block_hash(BlockID::Number(n)).unwrap();
let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap());
- header.extra_data = b"This extra data is way too long to be considered valid".to_vec();
+ header.set_extra_data(b"This extra data is way too long to be considered valid".to_vec());
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&rlp::NULL_RLP, 1);
@@ -231,7 +231,7 @@ impl TestBlockChainClient {
pub fn corrupt_block_parent(&mut self, n: BlockNumber) {
let hash = self.block_hash(BlockID::Number(n)).unwrap();
let mut header: BlockHeader = decode(&self.block_header(BlockID::Number(n)).unwrap());
- header.parent_hash = H256::from(42);
+ header.set_parent_hash(H256::from(42));
let mut rlp = RlpStream::new_list(3);
rlp.append(&header);
rlp.append_raw(&rlp::NULL_RLP, 1);
@@ -470,20 +470,20 @@ impl BlockChainClient for TestBlockChainClient {
fn import_block(&self, b: Bytes) -> Result {
let header = Rlp::new(&b).val_at::(0);
let h = header.hash();
- let number: usize = header.number as usize;
+ let number: usize = header.number() as usize;
if number > self.blocks.read().len() {
panic!("Unexpected block number. Expected {}, got {}", self.blocks.read().len(), number);
}
if number > 0 {
- match self.blocks.read().get(&header.parent_hash) {
+ match self.blocks.read().get(header.parent_hash()) {
Some(parent) => {
let parent = Rlp::new(parent).val_at::(0);
- if parent.number != (header.number - 1) {
+ if parent.number() != (header.number() - 1) {
panic!("Unexpected block parent");
}
},
None => {
- panic!("Unknown block parent {:?} for block {}", header.parent_hash, number);
+ panic!("Unknown block parent {:?} for block {}", header.parent_hash(), number);
}
}
}
@@ -491,18 +491,18 @@ impl BlockChainClient for TestBlockChainClient {
if number == len {
{
let mut difficulty = self.difficulty.write();
- *difficulty = *difficulty + header.difficulty;
+ *difficulty = *difficulty + header.difficulty().clone();
}
mem::replace(&mut *self.last_hash.write(), h.clone());
self.blocks.write().insert(h.clone(), b);
self.numbers.write().insert(number, h.clone());
- let mut parent_hash = header.parent_hash;
+ let mut parent_hash = header.parent_hash().clone();
if number > 0 {
let mut n = number - 1;
while n > 0 && self.numbers.read()[&n] != parent_hash {
*self.numbers.write().get_mut(&n).unwrap() = parent_hash.clone();
n -= 1;
- parent_hash = Rlp::new(&self.blocks.read()[&parent_hash]).val_at::(0).parent_hash;
+ parent_hash = Rlp::new(&self.blocks.read()[&parent_hash]).val_at::(0).parent_hash().clone();
}
}
}
diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs
index 926399d7b..332d947c3 100644
--- a/ethcore/src/engines/basic_authority.rs
+++ b/ethcore/src/engines/basic_authority.rs
@@ -82,17 +82,16 @@ impl Engine for BasicAuthority {
}
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) {
- header.difficulty = parent.difficulty;
- header.gas_limit = {
- let gas_limit = parent.gas_limit;
+ header.set_difficulty(parent.difficulty().clone());
+ header.set_gas_limit({
+ let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.our_params.gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
- };
- header.note_dirty();
+ });
// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
}
@@ -123,9 +122,9 @@ impl Engine for BasicAuthority {
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
// check the seal fields.
// TODO: pull this out into common code.
- if header.seal.len() != self.seal_fields() {
+ if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
- Mismatch { expected: self.seal_fields(), found: header.seal.len() }
+ Mismatch { expected: self.seal_fields(), found: header.seal().len() }
)));
}
Ok(())
@@ -133,7 +132,7 @@ impl Engine for BasicAuthority {
fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
// check the signature is legit.
- let sig = try!(UntrustedRlp::new(&header.seal[0]).as_val::());
+ let sig = try!(UntrustedRlp::new(&header.seal()[0]).as_val::());
let signer = public_to_address(&try!(recover(&sig.into(), &header.bare_hash())));
if !self.our_params.authorities.contains(&signer) {
return try!(Err(BlockError::InvalidSeal));
@@ -152,10 +151,10 @@ impl Engine for BasicAuthority {
return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: *parent.difficulty(), found: *header.difficulty() })))
}
let gas_limit_divisor = self.our_params.gas_limit_bound_divisor;
- let min_gas = parent.gas_limit - parent.gas_limit / gas_limit_divisor;
- let max_gas = parent.gas_limit + parent.gas_limit / gas_limit_divisor;
- if header.gas_limit <= min_gas || header.gas_limit >= max_gas {
- return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit })));
+ let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
+ let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
+ if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
+ return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
Ok(())
}
diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs
index e7738fbaa..6414ba5e4 100644
--- a/ethcore/src/engines/mod.rs
+++ b/ethcore/src/engines/mod.rs
@@ -108,9 +108,8 @@ pub trait Engine : Sync + Send {
/// Don't forget to call Super::populate_from_parent when subclassing & overriding.
// TODO: consider including State in the params.
fn populate_from_parent(&self, header: &mut Header, parent: &Header, _gas_floor_target: U256, _gas_ceil_target: U256) {
- header.difficulty = parent.difficulty;
- header.gas_limit = parent.gas_limit;
- header.note_dirty();
+ header.set_difficulty(parent.difficulty().clone());
+ header.set_gas_limit(parent.gas_limit().clone());
}
// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs
index c658432a2..82a74d9ea 100644
--- a/ethcore/src/ethereum/ethash.rs
+++ b/ethcore/src/ethereum/ethash.rs
@@ -114,9 +114,9 @@ impl Engine for Ethash {
}
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, gas_ceil_target: U256) {
- header.difficulty = self.calculate_difficuty(header, parent);
- header.gas_limit = {
- let gas_limit = parent.gas_limit;
+ let difficulty = self.calculate_difficulty(header, parent);
+ let gas_limit = {
+ let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.ethash_params.gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
@@ -126,21 +126,23 @@ impl Engine for Ethash {
min(gas_ceil_target,
max(gas_floor_target,
gas_limit - gas_limit / bound_divisor + 1.into() +
- (header.gas_used * 6.into() / 5.into()) / bound_divisor))
+ (header.gas_used().clone() * 6.into() / 5.into()) / bound_divisor))
}
};
- if header.number >= self.ethash_params.dao_hardfork_transition &&
- header.number <= self.ethash_params.dao_hardfork_transition + 9 {
- header.extra_data = b"dao-hard-fork"[..].to_owned();
+ header.set_difficulty(difficulty);
+ header.set_gas_limit(gas_limit);
+ if header.number() >= self.ethash_params.dao_hardfork_transition &&
+ header.number() <= self.ethash_params.dao_hardfork_transition + 9 {
+ header.set_extra_data(b"dao-hard-fork"[..].to_owned());
}
header.note_dirty();
-// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number, header.difficulty, header.gas_limit);
+// info!("ethash: populate_from_parent #{}: difficulty={} and gas_limit={}", header.number(), header.difficulty(), header.gas_limit());
}
fn on_new_block(&self, block: &mut ExecutedBlock) {
- if block.fields().header.number == self.ethash_params.dao_hardfork_transition {
+ if block.fields().header.number() == self.ethash_params.dao_hardfork_transition {
// TODO: enable trigger function maybe?
-// if block.fields().header.gas_limit <= 4_000_000.into() {
+// if block.fields().header.gas_limit() <= 4_000_000.into() {
let mut state = block.fields_mut().state;
for child in &self.ethash_params.dao_hardfork_accounts {
let b = state.balance(child);
@@ -157,7 +159,7 @@ impl Engine for Ethash {
let fields = block.fields_mut();
// Bestow block reward
- fields.state.add_balance(&fields.header.author, &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())));
+ fields.state.add_balance(&fields.header.author(), &(reward + reward / U256::from(32) * U256::from(fields.uncles.len())));
// Bestow uncle rewards
let current_number = fields.header.number();
@@ -171,18 +173,18 @@ impl Engine for Ethash {
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
// check the seal fields.
- if header.seal.len() != self.seal_fields() {
+ if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
- Mismatch { expected: self.seal_fields(), found: header.seal.len() }
+ Mismatch { expected: self.seal_fields(), found: header.seal().len() }
)));
}
- try!(UntrustedRlp::new(&header.seal[0]).as_val::());
- try!(UntrustedRlp::new(&header.seal[1]).as_val::());
+ try!(UntrustedRlp::new(&header.seal()[0]).as_val::());
+ try!(UntrustedRlp::new(&header.seal()[1]).as_val::());
// TODO: consider removing these lines.
let min_difficulty = self.ethash_params.minimum_difficulty;
- if header.difficulty < min_difficulty {
- return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty })))
+ if header.difficulty() < &min_difficulty {
+ return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty().clone() })))
}
let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(quick_get_difficulty(
@@ -190,37 +192,37 @@ impl Engine for Ethash {
header.nonce().low_u64(),
&Ethash::to_ethash(header.mix_hash())
)));
- if difficulty < header.difficulty {
- return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty })));
+ if &difficulty < header.difficulty() {
+ return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
}
- if header.number >= self.ethash_params.dao_hardfork_transition &&
- header.number <= self.ethash_params.dao_hardfork_transition + 9 &&
- header.extra_data[..] != b"dao-hard-fork"[..] {
+ if header.number() >= self.ethash_params.dao_hardfork_transition &&
+ header.number() <= self.ethash_params.dao_hardfork_transition + 9 &&
+ header.extra_data()[..] != b"dao-hard-fork"[..] {
return Err(From::from(BlockError::ExtraDataOutOfBounds(OutOfBounds { min: None, max: None, found: 0 })));
}
- if header.gas_limit > 0x7fffffffffffffffu64.into() {
- return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit })));
+ if header.gas_limit() > &0x7fffffffffffffffu64.into() {
+ return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: None, max: Some(0x7fffffffffffffffu64.into()), found: header.gas_limit().clone() })));
}
Ok(())
}
fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
- if header.seal.len() != self.seal_fields() {
+ if header.seal().len() != self.seal_fields() {
return Err(From::from(BlockError::InvalidSealArity(
- Mismatch { expected: self.seal_fields(), found: header.seal.len() }
+ Mismatch { expected: self.seal_fields(), found: header.seal().len() }
)));
}
- let result = self.pow.compute_light(header.number as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64());
+ let result = self.pow.compute_light(header.number() as u64, &Ethash::to_ethash(header.bare_hash()), header.nonce().low_u64());
let mix = Ethash::from_ethash(result.mix_hash);
let difficulty = Ethash::boundary_to_difficulty(&Ethash::from_ethash(result.value));
if mix != header.mix_hash() {
return Err(From::from(BlockError::MismatchedH256SealElement(Mismatch { expected: mix, found: header.mix_hash() })));
}
- if difficulty < header.difficulty {
- return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty), max: None, found: difficulty })));
+ if &difficulty < header.difficulty() {
+ return Err(From::from(BlockError::InvalidProofOfWork(OutOfBounds { min: Some(header.difficulty().clone()), max: None, found: difficulty })));
}
Ok(())
}
@@ -232,15 +234,15 @@ impl Engine for Ethash {
}
// Check difficulty is correct given the two timestamps.
- let expected_difficulty = self.calculate_difficuty(header, parent);
- if header.difficulty != expected_difficulty {
- return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty })))
+ let expected_difficulty = self.calculate_difficulty(header, parent);
+ if header.difficulty() != &expected_difficulty {
+ return Err(From::from(BlockError::InvalidDifficulty(Mismatch { expected: expected_difficulty, found: header.difficulty().clone() })))
}
let gas_limit_divisor = self.ethash_params.gas_limit_bound_divisor;
- let min_gas = parent.gas_limit - parent.gas_limit / gas_limit_divisor;
- let max_gas = parent.gas_limit + parent.gas_limit / gas_limit_divisor;
- if header.gas_limit <= min_gas || header.gas_limit >= max_gas {
- return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit })));
+ let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
+ let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
+ if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
+ return Err(From::from(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
Ok(())
}
@@ -259,9 +261,9 @@ impl Engine for Ethash {
#[cfg_attr(feature="dev", allow(wrong_self_convention))] // to_ethash should take self
impl Ethash {
- fn calculate_difficuty(&self, header: &Header, parent: &Header) -> U256 {
+ fn calculate_difficulty(&self, header: &Header, parent: &Header) -> U256 {
const EXP_DIFF_PERIOD: u64 = 100000;
- if header.number == 0 {
+ if header.number() == 0 {
panic!("Can't calculate genesis block difficulty");
}
@@ -270,25 +272,25 @@ impl Ethash {
let duration_limit = self.ethash_params.duration_limit;
let frontier_limit = self.ethash_params.frontier_compatibility_mode_limit;
- let mut target = if header.number < frontier_limit {
- if header.timestamp >= parent.timestamp + duration_limit {
- parent.difficulty - (parent.difficulty / difficulty_bound_divisor)
+ let mut target = if header.number() < frontier_limit {
+ if header.timestamp() >= parent.timestamp() + duration_limit {
+ parent.difficulty().clone() - (parent.difficulty().clone() / difficulty_bound_divisor)
} else {
- parent.difficulty + (parent.difficulty / difficulty_bound_divisor)
+ parent.difficulty().clone() + (parent.difficulty().clone() / difficulty_bound_divisor)
}
}
else {
- trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty, header.timestamp, parent.timestamp);
+ trace!(target: "ethash", "Calculating difficulty parent.difficulty={}, header.timestamp={}, parent.timestamp={}", parent.difficulty(), header.timestamp(), parent.timestamp());
//block_diff = parent_diff + parent_diff // 2048 * max(1 - (block_timestamp - parent_timestamp) // 10, -99)
- let diff_inc = (header.timestamp - parent.timestamp) / 10;
+ let diff_inc = (header.timestamp() - parent.timestamp()) / 10;
if diff_inc <= 1 {
- parent.difficulty + parent.difficulty / From::from(2048) * From::from(1 - diff_inc)
+ parent.difficulty().clone() + parent.difficulty().clone() / From::from(2048) * From::from(1 - diff_inc)
} else {
- parent.difficulty - parent.difficulty / From::from(2048) * From::from(min(diff_inc - 1, 99))
+ parent.difficulty().clone() - parent.difficulty().clone() / From::from(2048) * From::from(min(diff_inc - 1, 99))
}
};
target = max(min_difficulty, target);
- let period = ((parent.number + 1) / EXP_DIFF_PERIOD) as usize;
+ let period = ((parent.number() + 1) / EXP_DIFF_PERIOD) as usize;
if period > 1 {
target = max(min_difficulty, target + (U256::from(1) << (period - 2)));
}
@@ -336,7 +338,7 @@ impl Header {
/// Set the nonce and mix hash fields of the header.
pub fn set_nonce_and_mix_hash(&mut self, nonce: &H64, mix_hash: &H256) {
- self.seal = vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()];
+ self.set_seal(vec![encode(mix_hash).to_vec(), encode(nonce).to_vec()]);
}
}
@@ -374,7 +376,7 @@ mod tests {
let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let mut uncle = Header::new();
let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into();
- uncle.author = uncle_author.clone();
+ uncle.set_author(uncle_author);
b.push_uncle(uncle).unwrap();
let b = b.close();
diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs
index 40e85d619..1efe001e5 100644
--- a/ethcore/src/ethereum/mod.rs
+++ b/ethcore/src/ethereum/mod.rs
@@ -68,7 +68,7 @@ mod tests {
let mut db_result = get_temp_journal_db();
let mut db = db_result.take();
spec.ensure_db_good(db.as_hashdb_mut()).unwrap();
- let s = State::from_existing(db, genesis_header.state_root.clone(), engine.account_start_nonce(), Default::default()).unwrap();
+ let s = State::from_existing(db, genesis_header.state_root().clone(), engine.account_start_nonce(), Default::default()).unwrap();
assert_eq!(s.balance(&"0000000000000000000000000000000000000001".into()), 1u64.into());
assert_eq!(s.balance(&"0000000000000000000000000000000000000002".into()), 1u64.into());
assert_eq!(s.balance(&"0000000000000000000000000000000000000003".into()), 1u64.into());
diff --git a/ethcore/src/header.rs b/ethcore/src/header.rs
index a123197a9..9b0e155f4 100644
--- a/ethcore/src/header.rs
+++ b/ethcore/src/header.rs
@@ -33,43 +33,42 @@ pub type BlockNumber = u64;
/// Doesn't do all that much on its own.
#[derive(Debug, Clone, Eq)]
pub struct Header {
- // TODO: make all private.
/// Parent hash.
- pub parent_hash: H256,
+ parent_hash: H256,
/// Block timestamp.
- pub timestamp: u64,
+ timestamp: u64,
/// Block number.
- pub number: BlockNumber,
+ number: BlockNumber,
/// Block author.
- pub author: Address,
+ author: Address,
/// Transactions root.
- pub transactions_root: H256,
+ transactions_root: H256,
/// Block uncles hash.
- pub uncles_hash: H256,
+ uncles_hash: H256,
/// Block extra data.
- pub extra_data: Bytes,
+ extra_data: Bytes,
/// State root.
- pub state_root: H256,
+ state_root: H256,
/// Block receipts root.
- pub receipts_root: H256,
+ receipts_root: H256,
/// Block bloom.
- pub log_bloom: LogBloom,
+ log_bloom: LogBloom,
/// Gas used for contracts execution.
- pub gas_used: U256,
+ gas_used: U256,
/// Block gas limit.
- pub gas_limit: U256,
+ gas_limit: U256,
/// Block difficulty.
- pub difficulty: U256,
+ difficulty: U256,
/// Vector of post-RLP-encoded fields.
- pub seal: Vec,
+ seal: Vec,
/// The memoized hash of the RLP representation *including* the seal fields.
- pub hash: RefCell