From d996754927c82ae81f54fa3dec6541cdb1ca084b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 19 Aug 2016 14:18:57 +0200 Subject: [PATCH 01/29] Getting rid of syntex --- Cargo.lock | 7 +------ Cargo.toml | 1 - dapps/Cargo.toml | 1 - dapps/build.rs | 6 +----- db/Cargo.toml | 1 - db/build.rs | 9 ++------- ethstore/Cargo.toml | 1 - ethstore/build.rs | 6 +----- ipc/codegen/src/lib.rs | 7 +++++++ ipc/tests/Cargo.toml | 1 - json/Cargo.toml | 1 - json/build.rs | 6 +----- rpc/Cargo.toml | 1 - rpc/build.rs | 6 +----- 14 files changed, 14 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b5194c97b..d4a97ba0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -36,7 +36,6 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -292,7 +291,6 @@ dependencies = [ "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", @@ -370,7 +368,6 @@ dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "nanomsg 0.5.1 (git+https://github.com/ethcore/nanomsg.rs.git)", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -429,7 +426,6 @@ dependencies = [ "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -504,7 +500,6 @@ dependencies = [ "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -533,7 +528,6 @@ dependencies = [ "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", - "syntex 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1702,6 +1696,7 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.4.13 
(registry+https://github.com/rust-lang/crates.io-index)" = "086e1fa5fe48840b1cfdef3a20c7e3115599f8d5c4c87ef32a794a7cdd184d76" "checksum json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)" = "" +"checksum json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)" = "" "checksum jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ec4477e4e8218da23caa5dd31f4eb39999aa0ea9035660617eccfb19a23bf5ad" "checksum jsonrpc-http-server 6.1.0 (git+https://github.com/ethcore/jsonrpc-http-server.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" diff --git a/Cargo.toml b/Cargo.toml index 2ef6c24a9..106cb357a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,7 +8,6 @@ build = "build.rs" [build-dependencies] rustc_version = "0.1" -syntex = "*" ethcore-ipc-codegen = { path = "ipc/codegen" } ethcore-ipc-tests = { path = "ipc/tests" } diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 3bd5d0875..30b161355 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -33,7 +33,6 @@ clippy = { version = "0.0.82", optional = true} [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } -syntex = "*" [features] default = ["serde_codegen", "extra-dapps"] diff --git a/dapps/build.rs b/dapps/build.rs index 0776c03ec..b178027ae 100644 --- a/dapps/build.rs +++ b/dapps/build.rs @@ -16,7 +16,6 @@ #[cfg(not(feature = "serde_macros"))] mod inner { - extern crate syntex; extern crate serde_codegen; use std::env; @@ -28,10 +27,7 @@ mod inner { let src = Path::new("./src/api/types.rs.in"); let dst = Path::new(&out_dir).join("types.rs"); - let mut registry = syntex::Registry::new(); - - serde_codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + serde_codegen::expand(&src, &dst).unwrap(); } } diff --git a/db/Cargo.toml b/db/Cargo.toml index edbbbaad1..5b2805c60 100644 --- a/db/Cargo.toml +++ b/db/Cargo.toml @@ -8,7 +8,6 @@ authors = ["Ethcore "] build = "build.rs" [build-dependencies] -syntex = "*" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] diff --git a/db/build.rs b/db/build.rs index 0f70bd68c..239185d84 100644 --- a/db/build.rs +++ b/db/build.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-extern crate syntex; extern crate ethcore_ipc_codegen as codegen; use std::env; @@ -27,17 +26,13 @@ pub fn main() { { let src = Path::new("src/lib.rs.in"); let dst = Path::new(&out_dir).join("lib.intermediate.rs.in"); - let mut registry = syntex::Registry::new(); - codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + codegen::expand(&src, &dst); } // binary serialization pass { let src = Path::new(&out_dir).join("lib.intermediate.rs.in"); let dst = Path::new(&out_dir).join("lib.rs"); - let mut registry = syntex::Registry::new(); - codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + codegen::expand(&src, &dst); } } diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index dd6a391e6..691cfd969 100644 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -21,7 +21,6 @@ itertools = "0.4" [build-dependencies] serde_codegen = { version = "0.7", optional = true } -syntex = "0.33.0" [features] default = ["serde_codegen"] diff --git a/ethstore/build.rs b/ethstore/build.rs index 1c4e05f84..65606cab9 100644 --- a/ethstore/build.rs +++ b/ethstore/build.rs @@ -16,7 +16,6 @@ #[cfg(not(feature = "serde_macros"))] mod inner { - extern crate syntex; extern crate serde_codegen; use std::env; @@ -28,10 +27,7 @@ mod inner { let src = Path::new("src/json/mod.rs.in"); let dst = Path::new(&out_dir).join("mod.rs"); - let mut registry = syntex::Registry::new(); - - serde_codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + serde_codegen::expand(&src, &dst).unwrap(); } } diff --git a/ipc/codegen/src/lib.rs b/ipc/codegen/src/lib.rs index 8a3b4ba56..ce1ca8592 100644 --- a/ipc/codegen/src/lib.rs +++ b/ipc/codegen/src/lib.rs @@ -48,6 +48,13 @@ include!(concat!(env!("OUT_DIR"), "/lib.rs")); #[cfg(not(feature = "with-syntex"))] include!("lib.rs.in"); +#[cfg(feature = "with-syntex")] +pub fn expand(src: &std::path::Path, dst: &std::path::Path) { + let mut registry = syntex::Registry::new(); + register(&mut registry); + registry.expand("", src, dst).unwrap(); +} + #[cfg(feature = "with-syntex")] pub fn register(reg: &mut syntex::Registry) { use syntax::{ast, fold}; diff --git a/ipc/tests/Cargo.toml b/ipc/tests/Cargo.toml index 61903434f..23bde87e2 100644 --- a/ipc/tests/Cargo.toml +++ b/ipc/tests/Cargo.toml @@ -17,5 +17,4 @@ ethcore-util = { path = "../../util" } log = "0.3" [build-dependencies] -syntex = "0.33" ethcore-ipc-codegen = { path = "../codegen" } diff --git a/json/Cargo.toml b/json/Cargo.toml index cd26b3fa3..ccfa27a5a 100644 --- a/json/Cargo.toml +++ b/json/Cargo.toml @@ -14,7 +14,6 @@ clippy = { version = "0.0.82", optional = true} [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } -syntex = "*" [features] default = ["serde_codegen"] diff --git a/json/build.rs b/json/build.rs index a23790d86..8be54476e 100644 --- a/json/build.rs +++ b/json/build.rs @@ -16,7 +16,6 @@ #[cfg(not(feature = "serde_macros"))] mod inner { - extern crate syntex; extern crate serde_codegen; use std::env; @@ -28,10 +27,7 @@ mod inner { let src = Path::new("src/lib.rs.in"); let dst = Path::new(&out_dir).join("lib.rs"); - let mut registry = syntex::Registry::new(); - - serde_codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + serde_codegen::expand(&src, &dst).unwrap(); } } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 8a3df0623..38f855631 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -31,7 +31,6 @@ ethcore-ipc = { path = "../ipc/rpc" } [build-dependencies] serde_codegen = { version = 
"0.7.0", optional = true } -syntex = "*" [features] default = ["serde_codegen"] diff --git a/rpc/build.rs b/rpc/build.rs index 659bc35eb..1d1a9c19d 100644 --- a/rpc/build.rs +++ b/rpc/build.rs @@ -16,7 +16,6 @@ #[cfg(not(feature = "serde_macros"))] mod inner { - extern crate syntex; extern crate serde_codegen; use std::env; @@ -28,10 +27,7 @@ mod inner { let src = Path::new("src/v1/types/mod.rs.in"); let dst = Path::new(&out_dir).join("mod.rs"); - let mut registry = syntex::Registry::new(); - - serde_codegen::register(&mut registry); - registry.expand("", &src, &dst).unwrap(); + serde_codegen::expand(&src, &dst).unwrap(); } } From 34de330ed90c729d93291526461738a77cc66c89 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Fri, 19 Aug 2016 16:49:58 +0400 Subject: [PATCH 02/29] remove binaries from hypervisor (#1960) --- ipc/hypervisor/src/lib.rs | 35 +++++++++++++++-------------------- parity/modules.rs | 2 +- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index 16b6896bd..3e82908cd 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -41,8 +41,8 @@ pub struct Hypervisor { ipc_addr: String, service: Arc, ipc_worker: RwLock>, - processes: RwLock>, - modules: HashMap, + processes: RwLock>, + modules: HashMap, } /// Boot arguments for binary @@ -79,8 +79,8 @@ impl Hypervisor { Hypervisor::with_url(HYPERVISOR_IPC_URL) } - pub fn module(mut self, module_id: IpcModuleId, binary_id: BinaryId, args: BootArgs) -> Hypervisor { - self.modules.insert(module_id, (binary_id, args)); + pub fn module(mut self, module_id: IpcModuleId, args: BootArgs) -> Hypervisor { + self.modules.insert(module_id, args); self.service.add_module(module_id); self } @@ -106,7 +106,7 @@ impl Hypervisor { /// Since one binary can host multiple modules /// we match binaries - fn match_module(&self, module_id: &IpcModuleId) -> Option<&(BinaryId, BootArgs)> { + fn match_module(&self, module_id: &IpcModuleId) -> Option<&BootArgs> { self.modules.get(module_id) } @@ -126,24 +126,19 @@ impl Hypervisor { fn start_module(&self, module_id: IpcModuleId) { use std::io::Write; - self.match_module(&module_id).map(|&(ref binary_id, ref binary_args)| { + self.match_module(&module_id).map(|boot_args| { let mut processes = self.processes.write().unwrap(); { - if processes.get(binary_id).is_some() { + if processes.get(&module_id).is_some() { // already started for another module return; } } - let mut executable_path = std::env::current_exe().unwrap(); - executable_path.pop(); - executable_path.push(binary_id); - - let executable_path = executable_path.to_str().unwrap(); - let mut command = Command::new(&executable_path); + let mut command = Command::new(&std::env::current_exe().unwrap()); command.stderr(std::process::Stdio::inherit()); - if let Some(ref cli_args) = binary_args.cli { + if let Some(ref cli_args) = boot_args.cli { for arg in cli_args { command.arg(arg); } } @@ -152,18 +147,18 @@ impl Hypervisor { trace!(target: "hypervisor", "Spawn executable: {:?}", command); let mut child = command.spawn().unwrap_or_else( - |e| panic!("Hypervisor cannot start binary ({:?}): {}", executable_path, e)); + |e| panic!("Hypervisor cannot execute command ({:?}): {}", command, e)); - if let Some(ref std_in) = binary_args.stdin { + if let Some(ref std_in) = boot_args.stdin { trace!(target: "hypervisor", "Pushing std-in payload..."); child.stdin.as_mut() .expect("std-in should be piped above") .write(std_in) - .unwrap_or_else(|e| panic!(format!("Error trying to pipe 
stdin for {}: {:?}", &executable_path, e))); + .unwrap_or_else(|e| panic!(format!("Error trying to pipe stdin for {:?}: {:?}", &command, e))); drop(child.stdin.take()); } - processes.insert(binary_id, child); + processes.insert(module_id, child); }); } @@ -185,8 +180,8 @@ impl Hypervisor { if wait_time.is_some() { std::thread::sleep(wait_time.unwrap()) } let mut childs = self.processes.write().unwrap(); - for (ref mut binary, ref mut child) in childs.iter_mut() { - trace!(target: "hypervisor", "Stopping process module: {}", binary); + for (ref mut module, ref mut child) in childs.iter_mut() { + trace!(target: "hypervisor", "Stopping process module: {}", module); child.kill().unwrap(); } } diff --git a/parity/modules.rs b/parity/modules.rs index 5ae5db231..a41fc58b6 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -105,7 +105,7 @@ pub fn sync -> Result { let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration"); - hypervisor = hypervisor.module(SYNC_MODULE_ID, "parity", sync_arguments(sync_cfg, net_cfg, log_settings)); + hypervisor = hypervisor.module(SYNC_MODULE_ID, sync_arguments(sync_cfg, net_cfg, log_settings)); hypervisor.start(); hypervisor.wait_for_startup(); From f69b3f85228ca2ac8e6f02d623037889ee5ab4b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Sun, 21 Aug 2016 11:23:47 +0200 Subject: [PATCH 03/29] Create network-specific nodes files (#1970) --- parity/configuration.rs | 14 ++++++++++++-- parity/helpers.rs | 1 + sync/src/api.rs | 6 +++++- util/network/src/host.rs | 7 +++++-- 4 files changed, 23 insertions(+), 5 deletions(-) diff --git a/parity/configuration.rs b/parity/configuration.rs index 908dcb954..5b14660a9 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -32,7 +32,7 @@ use ethcore_rpc::NetworkSettings; use cache::CacheConfig; use helpers::{to_duration, to_mode, to_block_id, to_u256, to_pending_set, to_price, replace_home, geth_ipc_path, parity_ipc_path, to_bootnodes, to_addresses, to_address}; -use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras}; +use params::{ResealPolicy, AccountsConfig, GasPricerConfig, MinerExtras, SpecType}; use ethcore_logger::Config as LogConfig; use dir::Directories; use dapps::Configuration as DappsConfiguration; @@ -440,13 +440,23 @@ impl Configuration { ret.min_peers = self.min_peers(); let mut net_path = PathBuf::from(self.directories().db); net_path.push("network"); + let net_specific_path = net_path.join(&try!(self.network_specific_path())); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); + ret.net_config_path = Some(net_specific_path.to_str().unwrap().to_owned()); ret.reserved_nodes = try!(self.init_reserved_nodes()); - ret.allow_non_reserved = !self.args.flag_reserved_only; Ok(ret) } + fn network_specific_path(&self) -> Result { + let spec_type : SpecType = try!(self.chain().parse()); + let spec = try!(spec_type.spec()); + let id = try!(self.network_id()); + let mut path = PathBuf::new(); + path.push(format!("{}", id.unwrap_or_else(|| spec.network_id()))); + Ok(path) + } + fn network_id(&self) -> Result, String> { let net_id = self.args.flag_network_id.as_ref().or(self.args.flag_networkid.as_ref()); match net_id { diff --git a/parity/helpers.rs b/parity/helpers.rs index d0688d3de..778dc1265 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -172,6 +172,7 @@ pub fn default_network_config() -> ::ethsync::NetworkConfiguration { use ethsync::NetworkConfiguration; NetworkConfiguration { config_path: 
Some(replace_home("$HOME/.parity/network")), + net_config_path: Some(replace_home("$HOME/.parity/network/1")), listen_address: Some("0.0.0.0:30303".into()), public_address: None, udp_port: None, diff --git a/sync/src/api.rs b/sync/src/api.rs index 608d9d521..f98387abd 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -215,8 +215,10 @@ impl ManageNetwork for EthSync { #[derive(Binary, Debug, Clone, PartialEq, Eq)] /// Network service configuration pub struct NetworkConfiguration { - /// Directory path to store network configuration. None means nothing will be saved + /// Directory path to store general network configuration. None means nothing will be saved pub config_path: Option, + /// Directory path to store network-specific configuration. None means nothing will be saved + pub net_config_path: Option, /// IP address to listen for incoming connections. Listen to all connections by default pub listen_address: Option, /// IP address to advertise. Detected automatically if none. @@ -264,6 +266,7 @@ impl NetworkConfiguration { Ok(BasicNetworkConfiguration { config_path: self.config_path, + net_config_path: self.net_config_path, listen_address: match self.listen_address { None => None, Some(addr) => Some(try!(SocketAddr::from_str(&addr))) }, public_address: match self.public_address { None => None, Some(addr) => Some(try!(SocketAddr::from_str(&addr))) }, udp_port: self.udp_port, @@ -283,6 +286,7 @@ impl From for NetworkConfiguration { fn from(other: BasicNetworkConfiguration) -> Self { NetworkConfiguration { config_path: other.config_path, + net_config_path: other.net_config_path, listen_address: other.listen_address.and_then(|addr| Some(format!("{}", addr))), public_address: other.public_address.and_then(|addr| Some(format!("{}", addr))), udp_port: other.udp_port, diff --git a/util/network/src/host.rs b/util/network/src/host.rs index 5951288f0..a414ade40 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -53,8 +53,10 @@ const MAINTENANCE_TIMEOUT: u64 = 1000; #[derive(Debug, PartialEq, Clone)] /// Network service configuration pub struct NetworkConfiguration { - /// Directory path to store network configuration. None means nothing will be saved + /// Directory path to store general network configuration. None means nothing will be saved pub config_path: Option, + /// Directory path to store network-specific configuration. None means nothing will be saved + pub net_config_path: Option, /// IP address to listen for incoming connections. Listen to all connections by default pub listen_address: Option, /// IP address to advertise. Detected automatically if none. 
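A minimal stand-alone sketch of the path derivation these hunks rely on (the helper name and the plain u64 id are illustrative assumptions, not the crate's actual API): the network-specific directory is simply the generic network directory with the chain's network id appended, so nodes files for different chains no longer collide.

use std::path::PathBuf;

// Hypothetical helper (not part of the patch itself): the network-specific
// directory is the generic network directory with the chain's network id
// appended, e.g. "$HOME/.parity/network/1" for the main net.
fn network_specific_path(config_path: &str, network_id: u64) -> PathBuf {
    let mut path = PathBuf::from(config_path);
    path.push(format!("{}", network_id));
    path
}

fn main() {
    let dir = network_specific_path("/home/user/.parity/network", 1);
    assert_eq!(dir, PathBuf::from("/home/user/.parity/network/1"));
}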
@@ -90,6 +92,7 @@ impl NetworkConfiguration { pub fn new() -> Self { NetworkConfiguration { config_path: None, + net_config_path: None, listen_address: None, public_address: None, udp_port: None, @@ -367,7 +370,7 @@ impl Host { }, |s| KeyPair::from_secret(s).expect("Error creating node secret key")) }; - let path = config.config_path.clone(); + let path = config.net_config_path.clone(); // Setup the server socket let tcp_listener = try!(TcpListener::bind(&listen_address)); listen_address = SocketAddr::new(listen_address.ip(), try!(tcp_listener.local_addr()).port()); From 880b7b811e8a45ddb32b06b41512b308ebd260b0 Mon Sep 17 00:00:00 2001 From: Nipunn Koorapati Date: Mon, 22 Aug 2016 06:16:23 -0700 Subject: [PATCH 04/29] Upgrade hyper dependency to 0.9 (#1973) --- Cargo.lock | 45 +-------------------------------------------- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 45 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d4a97ba0d..7a5cf566e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -24,7 +24,7 @@ dependencies = [ "ethcore-util 1.4.0", "ethsync 1.4.0", "fdlimit 0.1.0", - "hyper 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "hyper 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -605,26 +605,6 @@ name = "httparse" version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "hyper" -version = "0.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", - "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)", - "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "hyper" version = "0.9.4" @@ -1534,18 +1514,6 @@ name = "unicode-xid" version = "0.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "url" -version = "0.5.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", - "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "uuid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "url" version = "1.2.0" @@ -1564,14 +1532,6 @@ name = "utf8-ranges" version = "0.1.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -[[package]] -name = "uuid" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -dependencies = [ - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "vecio" version = "0.1.0" @@ -1688,7 +1648,6 @@ dependencies = [ "checksum heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "abb306abb8d398e053cfb1b3e7b72c2f580be048b85745c52652954f8ad1439c" "checksum hpack 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2da7d3a34cf6406d9d700111b8eafafe9a251de41ae71d8052748259343b58" "checksum httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "46534074dbb80b070d60a5cb8ecadd8963a00a438ae1a95268850a7ef73b67ae" -"checksum hyper 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bb0f4d00bb781e559b6e66ae4b5479df0fdf9ab15949f52fa2f1f5de16d4cc07" "checksum hyper 0.9.10 (registry+https://github.com/rust-lang/crates.io-index)" = "eb27e8a3e8f17ac43ffa41bbda9cf5ad3f9f13ef66fa4873409d4902310275f7" "checksum hyper 0.9.4 (git+https://github.com/ethcore/hyper)" = "" "checksum idna 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1053236e00ce4f668aeca4a769a09b3bf5a682d802abd6f3cb39374f6b162c11" @@ -1793,10 +1752,8 @@ dependencies = [ "checksum unicode-bidi 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c1f7ceb96afdfeedee42bade65a0d585a6a0106f681b6749c8ff4daa8df30b3f" "checksum unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "26643a2f83bac55f1976fb716c10234485f9202dcd65cfbdf9da49867b271172" "checksum unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "36dff09cafb4ec7c8cf0023eb0b686cb6ce65499116a12201c9e11840ca01beb" -"checksum url 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)" = "f6d04073d0fcd045a1cf57aea560d1be5ba812d8f28814e1e1cf0e90ff4d2f03" "checksum url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afe9ec54bc4db14bc8744b7fed060d785ac756791450959b2248443319d5b119" "checksum utf8-ranges 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "a1ca13c08c41c9c3e04224ed9ff80461d97e121589ff27c753a16cb10830ae0f" -"checksum uuid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9767696a9e1bc7a73f2d5f8e0f5428b076cecd9199c200c0364aa0b2d57b8dfa" "checksum vecio 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0795a11576d29ae80525a3fda315bf7b534f8feb9d34101e5fe63fb95bb2fd24" "checksum vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56b639f935488eb40f06d17c3e3bcc3054f6f75d264e187b1107c8d1cba8d31c" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" diff --git a/Cargo.toml b/Cargo.toml index 106cb357a..602a8985f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ winapi = "0.2" daemonize = "0.2" [dependencies.hyper] -version = "0.8" +version = "0.9" default-features = false [features] From 7c5435d3bbb4719b93e3175bc3f49fcbf90e1c81 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Mon, 22 Aug 2016 19:41:58 +0300 Subject: [PATCH 05/29] fix to use relative socket path --- ethcore/src/service.rs | 13 ++++++++---- ipc/hypervisor/src/lib.rs | 9 +++++++- ipc/nano/src/lib.rs | 5 +++++ parity/blockchain.rs | 2 ++ parity/configuration.rs | 9 ++++++++ parity/dir.rs | 7 +++++++ parity/modules.rs | 43 
+++++++++++++++++++++++++++------------ parity/run.rs | 3 ++- parity/snapshot.rs | 1 + parity/sync.rs | 26 ++++++++++++++++++----- sync/src/api.rs | 1 + 11 files changed, 95 insertions(+), 24 deletions(-) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 5c6e2e93a..6d81f6b49 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -60,6 +60,7 @@ impl ClientService { config: ClientConfig, spec: &Spec, db_path: &Path, + ipc_path: &Path, miner: Arc, ) -> Result { @@ -86,7 +87,7 @@ impl ClientService { try!(io_service.register_handler(client_io)); let stop_guard = ::devtools::StopGuard::new(); - run_ipc(client.clone(), stop_guard.share()); + run_ipc(ipc_path, client.clone(), stop_guard.share()); Ok(ClientService { io_service: Arc::new(io_service), @@ -167,10 +168,13 @@ impl IoHandler for ClientIoHandler { } #[cfg(feature="ipc")] -fn run_ipc(client: Arc, stop: Arc) { +fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { + let mut path = ::std::path::PathBuf::from(base_path); + path.push("parity-chain.ipc"); + let socket_addr = format!("ipc://{}", path.to_str().unwrap()); ::std::thread::spawn(move || { let mut worker = nanoipc::Worker::new(&(client as Arc)); - worker.add_reqrep("ipc:///tmp/parity-chain.ipc").expect("Ipc expected to initialize with no issues"); + worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); while !stop.load(::std::sync::atomic::Ordering::Relaxed) { worker.poll(); @@ -179,7 +183,7 @@ fn run_ipc(client: Arc, stop: Arc) { } #[cfg(not(feature="ipc"))] -fn run_ipc(_client: Arc, _stop: Arc) { +fn run_ipc(_base_path: &Path, _client: Arc, _stop: Arc) { } #[cfg(test)] @@ -203,6 +207,7 @@ mod tests { ClientConfig::default(), &spec, &path, + &path, Arc::new(Miner::with_spec(&spec)), ); assert!(service.is_ok()); diff --git a/ipc/hypervisor/src/lib.rs b/ipc/hypervisor/src/lib.rs index 3e82908cd..b0e1564ab 100644 --- a/ipc/hypervisor/src/lib.rs +++ b/ipc/hypervisor/src/lib.rs @@ -26,7 +26,7 @@ extern crate semver; pub mod service; /// Default value for hypervisor ipc listener -pub const HYPERVISOR_IPC_URL: &'static str = "ipc:///tmp/parity-internal-hyper-status.ipc"; +pub const HYPERVISOR_IPC_URL: &'static str = "parity-internal-hyper-status.ipc"; use std::sync::{Arc,RwLock}; use service::{HypervisorService, IpcModuleId}; @@ -43,6 +43,7 @@ pub struct Hypervisor { ipc_worker: RwLock>, processes: RwLock>, modules: HashMap, + pub io_path: String, } /// Boot arguments for binary @@ -90,6 +91,11 @@ impl Hypervisor { self } + pub fn io_path(mut self, directory: &str) -> Hypervisor { + self.io_path = directory.to_owned(); + self + } + /// Starts with the specified address for the ipc listener and /// the specified list of modules in form of created service pub fn with_url(addr: &str) -> Hypervisor { @@ -101,6 +107,7 @@ impl Hypervisor { ipc_worker: RwLock::new(worker), processes: RwLock::new(HashMap::new()), modules: HashMap::new(), + io_path: "/tmp".to_owned(), } } diff --git a/ipc/nano/src/lib.rs b/ipc/nano/src/lib.rs index 58c2d22de..df2e34d04 100644 --- a/ipc/nano/src/lib.rs +++ b/ipc/nano/src/lib.rs @@ -97,6 +97,7 @@ pub fn init_client(socket_addr: &str) -> Result, SocketError SocketError::RequestLink })); + trace!(target: "ipc", "Created cleint for {}", socket_addr); Ok(GuardedSocket { client: Arc::new(S::init(socket)), _endpoint: endpoint, @@ -189,6 +190,8 @@ impl Worker where S: IpcInterface { self.rebuild_poll_request(); + trace!(target: "ipc", "Started duplex worker at {}", addr); + Ok(()) } @@ -200,6 +203,7 @@ impl 
Worker where S: IpcInterface { SocketError::DuplexLink })); + let endpoint = try!(socket.bind(addr).map_err(|e| { warn!(target: "ipc", "Failed to bind socket to address '{}': {:?}", addr, e); SocketError::DuplexLink @@ -209,6 +213,7 @@ impl Worker where S: IpcInterface { self.rebuild_poll_request(); + trace!(target: "ipc", "Started request-reply worker at {}", addr); Ok(()) } } diff --git a/parity/blockchain.rs b/parity/blockchain.rs index b0c5d95a7..06e03ae72 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -138,6 +138,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result { client_config, &spec, Path::new(&client_path), + Path::new(&cmd.dirs.ipc_path()), Arc::new(Miner::with_spec(&spec)), ).map_err(|e| format!("Client service error: {:?}", e))); @@ -248,6 +249,7 @@ fn execute_export(cmd: ExportBlockchain) -> Result { client_config, &spec, Path::new(&client_path), + Path::new(&cmd.dirs.ipc_path()), Arc::new(Miner::with_spec(&spec)), ).map_err(|e| format!("Client service error: {:?}", e))); diff --git a/parity/configuration.rs b/parity/configuration.rs index 5b14660a9..5786b10de 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -540,6 +540,15 @@ impl Configuration { |e| warn!("Failed to create '{}' for geth mode: {}", &geth_path.to_str().unwrap(), e)); } + if cfg!(feature = "ipc") && !cfg!(feature = "windows") { + let mut path_buf = PathBuf::from(db_path.clone()); + path_buf.push("ipc"); + let ipc_path = path_buf.to_str().unwrap(); + ::std::fs::create_dir_all(ipc_path).unwrap_or_else( + |e| warn!("Failed to directory '{}' for ipc sockets: {}", ipc_path, e) + ); + } + Directories { keys: keys_path, db: db_path, diff --git a/parity/dir.rs b/parity/dir.rs index bb92e1277..f1f230163 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -66,6 +66,13 @@ impl Directories { dir.push("db"); dir } + + /// Get the ipc sockets path + pub fn ipc_path(&self) -> PathBuf { + let mut dir = Path::new(&self.db).to_path_buf(); + dir.push("ipc"); + dir + } } #[cfg(test)] diff --git a/parity/modules.rs b/parity/modules.rs index a41fc58b6..20f2567ce 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -23,12 +23,22 @@ use self::no_ipc_deps::*; #[cfg(feature="ipc")] use self::ipc_deps::*; use ethcore_logger::Config as LogConfig; +use std::path::Path; pub mod service_urls { - pub const CLIENT: &'static str = "ipc:///tmp/parity-chain.ipc"; - pub const SYNC: &'static str = "ipc:///tmp/parity-sync.ipc"; - pub const SYNC_NOTIFY: &'static str = "ipc:///tmp/parity-sync-notify.ipc"; - pub const NETWORK_MANAGER: &'static str = "ipc:///tmp/parity-manage-net.ipc"; + use std::path::PathBuf; + + pub const CLIENT: &'static str = "parity-chain.ipc"; + pub const SYNC: &'static str = "parity-sync.ipc"; + pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc"; + pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc"; + + pub fn with_base(data_dir: &str, service_path: &str) -> String { + let mut path = PathBuf::from(data_dir); + path.push(service_path); + + format!("ipc://{}", path.to_str().unwrap()) + } } #[cfg(not(feature="ipc"))] @@ -51,27 +61,30 @@ pub type SyncModules = (Arc, Arc, Arc) mod ipc_deps { pub use ethsync::{SyncClient, NetworkManagerClient, ServiceConfiguration}; pub use ethcore::client::ChainNotifyClient; - pub use hypervisor::{SYNC_MODULE_ID, BootArgs}; + pub use hypervisor::{SYNC_MODULE_ID, BootArgs, HYPERVISOR_IPC_URL}; pub use nanoipc::{GuardedSocket, NanoSocket, init_client}; pub use ipc::IpcSocket; pub use ipc::binary::serialize; } #[cfg(feature="ipc")] 
-pub fn hypervisor() -> Option { - Some(Hypervisor::new()) +pub fn hypervisor(base_path: &Path) -> Option { + Some(Hypervisor + ::with_url(&service_urls::with_base(base_path.to_str().unwrap(), HYPERVISOR_IPC_URL)) + .io_path(base_path.to_str().unwrap())) } #[cfg(not(feature="ipc"))] -pub fn hypervisor() -> Option { +pub fn hypervisor(_: &Path) -> Option { None } #[cfg(feature="ipc")] -fn sync_arguments(sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs { +fn sync_arguments(io_path: &str, sync_cfg: SyncConfig, net_cfg: NetworkConfiguration, log_settings: &LogConfig) -> BootArgs { let service_config = ServiceConfiguration { sync: sync_cfg, net: net_cfg, + io_path: io_path.to_owned(), }; // initialisation payload is passed via stdin @@ -105,14 +118,18 @@ pub fn sync -> Result { let mut hypervisor = hypervisor_ref.take().expect("There should be hypervisor for ipc configuration"); - hypervisor = hypervisor.module(SYNC_MODULE_ID, sync_arguments(sync_cfg, net_cfg, log_settings)); + let args = sync_arguments(&hypervisor.io_path, sync_cfg, net_cfg, log_settings); + hypervisor = hypervisor.module(SYNC_MODULE_ID, args); hypervisor.start(); hypervisor.wait_for_startup(); - let sync_client = init_client::>(service_urls::SYNC).unwrap(); - let notify_client = init_client::>(service_urls::SYNC_NOTIFY).unwrap(); - let manage_client = init_client::>(service_urls::NETWORK_MANAGER).unwrap(); + let sync_client = init_client::>( + &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC)).unwrap(); + let notify_client = init_client::>( + &service_urls::with_base(&hypervisor.io_path, service_urls::SYNC_NOTIFY)).unwrap(); + let manage_client = init_client::>( + &service_urls::with_base(&hypervisor.io_path, service_urls::NETWORK_MANAGER)).unwrap(); *hypervisor_ref = Some(hypervisor); Ok((sync_client, manage_client, notify_client)) diff --git a/parity/run.rs b/parity/run.rs index c659d4d25..91f8d5bfa 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -163,13 +163,14 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { } // create supervisor - let mut hypervisor = modules::hypervisor(); + let mut hypervisor = modules::hypervisor(Path::new(&cmd.dirs.ipc_path())); // create client service. 
let service = try!(ClientService::start( client_config, &spec, Path::new(&client_path), + Path::new(&cmd.dirs.ipc_path()), miner.clone(), ).map_err(|e| format!("Client service error: {:?}", e))); diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 2a3c12567..c3e43e89f 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -95,6 +95,7 @@ impl SnapshotCommand { client_config, &spec, Path::new(&client_path), + Path::new(&self.dirs.ipc_path()), Arc::new(Miner::with_spec(&spec)) ).map_err(|e| format!("Client service error: {:?}", e))); diff --git a/parity/sync.rs b/parity/sync.rs index 447f3678c..382c1806d 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -86,18 +86,34 @@ pub fn main() { io::stdin().read_to_end(&mut buffer).expect("Failed to read initialisation payload"); let service_config = ipc::binary::deserialize::(&buffer).expect("Failed deserializing initialisation payload"); - let remote_client = nanoipc::init_client::>(service_urls::CLIENT).unwrap(); + let remote_client = nanoipc::init_client::>( + &service_urls::with_base(&service_config.io_path, service_urls::CLIENT), + ).unwrap(); remote_client.handshake().unwrap(); let stop = Arc::new(AtomicBool::new(false)); let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); - run_service(service_urls::SYNC, stop.clone(), sync.clone() as Arc); - run_service(service_urls::NETWORK_MANAGER, stop.clone(), sync.clone() as Arc); - run_service(service_urls::SYNC_NOTIFY, stop.clone(), sync.clone() as Arc); + run_service( + &service_urls::with_base(&service_config.io_path, service_urls::SYNC), + stop.clone(), + sync.clone() as Arc + ); + run_service( + &service_urls::with_base(&service_config.io_path, service_urls::NETWORK_MANAGER), + stop.clone(), + sync.clone() as Arc + ); + run_service( + &service_urls::with_base(&service_config.io_path, service_urls::SYNC_NOTIFY), + stop.clone(), + sync.clone() as Arc + ); - let hypervisor_client = nanoipc::init_client::>(HYPERVISOR_IPC_URL).unwrap(); + let hypervisor_client = nanoipc::init_client::>( + &service_urls::with_base(&service_config.io_path, HYPERVISOR_IPC_URL), + ).unwrap(); hypervisor_client.handshake().unwrap(); hypervisor_client.module_ready(SYNC_MODULE_ID); diff --git a/sync/src/api.rs b/sync/src/api.rs index f98387abd..1ded32367 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -306,4 +306,5 @@ impl From for NetworkConfiguration { pub struct ServiceConfiguration { pub sync: SyncConfig, pub net: NetworkConfiguration, + pub io_path: String, } From 869803f60e67e19ef5dd7f15aa3cf6e4e48c6b84 Mon Sep 17 00:00:00 2001 From: Chiu-Hsiang Hsu Date: Tue, 23 Aug 2016 13:53:24 +0800 Subject: [PATCH 06/29] Fix open on FreeBSD --- parity/url.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/parity/url.rs b/parity/url.rs index 129812387..fca7c6620 100644 --- a/parity/url.rs +++ b/parity/url.rs @@ -46,7 +46,7 @@ pub fn open(url: &str) { } } -#[cfg(target_os="macos")] +#[cfg(any(target_os="macos", target_os="freebsd"))] pub fn open(url: &str) { use std; let _ = std::process::Command::new("open").arg(url).spawn(); From 6cb439fbc84894e10880ebb1e0eca5f6ac9a0b62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 Aug 2016 10:10:12 +0200 Subject: [PATCH 07/29] Fixing serde overflow error (#1977) --- rpc/src/v1/types/trace.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/rpc/src/v1/types/trace.rs b/rpc/src/v1/types/trace.rs index abde04b96..f4380b0a3 100644 --- a/rpc/src/v1/types/trace.rs +++ 
b/rpc/src/v1/types/trace.rs @@ -99,6 +99,7 @@ pub struct VMOperation { /// Information concerning the execution of the operation. pub ex: Option, /// Subordinate trace of the CALL/CREATE if applicable. + #[serde(bound="VMTrace: Serialize")] pub sub: Option, } From f4826d1b2a2b46f6c89a90f8ef4f9c0da2340ab2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 Aug 2016 10:10:19 +0200 Subject: [PATCH 08/29] Bump clippy (#1982) --- Cargo.lock | 24 ++++++++++++------------ Cargo.toml | 2 +- dapps/Cargo.toml | 2 +- db/Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- json/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- signer/Cargo.toml | 2 +- sync/Cargo.toml | 2 +- util/Cargo.toml | 2 +- util/network/Cargo.toml | 2 +- 11 files changed, 22 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7a5cf566e..2ea22f715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,7 +3,7 @@ name = "parity" version = "1.4.0" dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)", "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", @@ -133,15 +133,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "clippy" -version = "0.0.82" +version = "0.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "clippy_lints 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "clippy_lints" -version = "0.0.82" +version = "0.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -245,7 +245,7 @@ version = "1.4.0" dependencies = [ "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", @@ -274,7 +274,7 @@ dependencies = [ name = "ethcore-dapps" version = "1.4.0" dependencies = [ - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", @@ -408,7 +408,7 @@ dependencies = [ name = "ethcore-rpc" version = "1.4.0" dependencies = [ - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.4.0", "ethcore 1.4.0", "ethcore-devtools 1.4.0", @@ -433,7 +433,7 @@ dependencies = [ name = "ethcore-signer" version = "1.4.0" dependencies = [ - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-io 1.4.0", "ethcore-rpc 1.4.0", @@ 
-467,7 +467,7 @@ dependencies = [ "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)", "bigint 0.1.0", - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "elastic-array 0.4.0 (git+https://github.com/ethcore/elastic-array)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", @@ -536,7 +536,7 @@ dependencies = [ name = "ethsync" version = "1.4.0" dependencies = [ - "clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.4.0", "ethcore-io 1.4.0", @@ -1630,8 +1630,8 @@ dependencies = [ "checksum bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c129aff112dcc562970abb69e2508b40850dd24c274761bb50fb8a0067ba6c27" "checksum bytes 0.4.0-dev (git+https://github.com/carllerche/bytes)" = "" "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c" -"checksum clippy 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)" = "8be61845840f25e9abc06b930d1211c3207f3eb5db92bc001b0510b7e4f361aa" -"checksum clippy_lints 0.0.82 (registry+https://github.com/rust-lang/crates.io-index)" = "5de435cbb0abacae719e2424a5702afcdf6b51d99b4d52ed5de86094a30e0a80" +"checksum clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "97f6d6efa6d7aec74d4eca1be62164b605d43b7fcb5256e9db0449f685130cba" +"checksum clippy_lints 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)" = "dc96d3c877b63943b08ce3037c0ae8fd3bd5dead5fab11178b93afc71ca16031" "checksum cookie 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "90266f45846f14a1e986c77d1e9c2626b8c342ed806fe60241ec38cc8697b245" "checksum crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "fb974f835e90390c5f9dfac00f05b06dc117299f5ea4e85fbc7bb443af4911cc" "checksum ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)" = "" diff --git a/Cargo.toml b/Cargo.toml index 602a8985f..6ef8bcc7e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,7 @@ ethcore-ipc-hypervisor = { path = "ipc/hypervisor" } ethcore-logger = { path = "logger" } json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-dapps = { path = "dapps", optional = true } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} ethcore-stratum = { path = "stratum" } [target.'cfg(windows)'.dependencies] diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 30b161355..8212538f8 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -29,7 +29,7 @@ parity-dapps-status = { git = "https://github.com/ethcore/parity-ui.git", versio parity-dapps-home = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } parity-dapps-wallet = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true } mime_guess = { version = "1.6.1" } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } diff --git a/db/Cargo.toml b/db/Cargo.toml index 5b2805c60..c7cbba2ea 100644 --- 
a/db/Cargo.toml +++ b/db/Cargo.toml @@ -11,7 +11,7 @@ build = "build.rs" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} ethcore-devtools = { path = "../devtools" } ethcore-ipc = { path = "../ipc/rpc" } rocksdb = { git = "https://github.com/ethcore/rust-rocksdb" } diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 46496ffa0..729f9c268 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -25,7 +25,7 @@ semver = "0.2" bit-set = "0.4" time = "0.1" evmjit = { path = "../evmjit", optional = true } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} ethash = { path = "../ethash" } ethcore-util = { path = "../util" } ethcore-io = { path = "../util/io" } diff --git a/json/Cargo.toml b/json/Cargo.toml index ccfa27a5a..b5eb7afdd 100644 --- a/json/Cargo.toml +++ b/json/Cargo.toml @@ -10,7 +10,7 @@ rustc-serialize = "0.3" serde = "0.7.0" serde_json = "0.7.0" serde_macros = { version = "0.7.0", optional = true } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 38f855631..6a9478eb3 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -25,7 +25,7 @@ ethcore-devtools = { path = "../devtools" } rustc-serialize = "0.3" transient-hashmap = "0.1" serde_macros = { version = "0.7.0", optional = true } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-ipc = { path = "../ipc/rpc" } diff --git a/signer/Cargo.toml b/signer/Cargo.toml index ac9496421..5bb6325f1 100644 --- a/signer/Cargo.toml +++ b/signer/Cargo.toml @@ -21,7 +21,7 @@ ethcore-io = { path = "../util/io" } ethcore-rpc = { path = "../rpc" } parity-dapps-signer = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4", optional = true} -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} [features] dev = ["clippy"] diff --git a/sync/Cargo.toml b/sync/Cargo.toml index 2b3ce16e2..3a2e8aec2 100644 --- a/sync/Cargo.toml +++ b/sync/Cargo.toml @@ -16,7 +16,7 @@ ethcore-util = { path = "../util" } ethcore-network = { path = "../util/network" } ethcore-io = { path = "../util/io" } ethcore = { path = "../ethcore" } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} log = "0.3" env_logger = "0.3" time = "0.1.34" diff --git a/util/Cargo.toml b/util/Cargo.toml index 13d8229eb..3a9505e15 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -22,7 +22,7 @@ elastic-array = { git = "https://github.com/ethcore/elastic-array" } heapsize = { version = "0.3", features = ["unstable"] } itertools = "0.4" sha3 = { path = "sha3" } -clippy = { version = "0.0.82", optional = true} +clippy = { version = "0.0.85", optional = true} ethcore-devtools = { path = "../devtools" } libc = "0.2.7" vergen = "0.1" diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 6cd5ee365..661d25c51 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -14,7 +14,7 @@ time = "0.1.34" tiny-keccak = "1.0" rust-crypto = "0.2.34" slab = "0.2" -clippy = { version = "0.0.79", optional = true} +clippy = { version = "0.0.85", optional = true} igd = "0.5.0" libc = "0.2.7" parking_lot = "0.2.6" From 
2d0a7c33bb62a851156c8bfc21c84a9437fc18c6 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 23 Aug 2016 13:41:12 +0300 Subject: [PATCH 09/29] address grumbles --- ethcore/src/service.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index 6d81f6b49..355c7d580 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -169,9 +169,9 @@ impl IoHandler for ClientIoHandler { #[cfg(feature="ipc")] fn run_ipc(base_path: &Path, client: Arc, stop: Arc) { - let mut path = ::std::path::PathBuf::from(base_path); + let mut path = base_path.to_owned(); path.push("parity-chain.ipc"); - let socket_addr = format!("ipc://{}", path.to_str().unwrap()); + let socket_addr = format!("ipc://{}", path.to_string_lossy()); ::std::thread::spawn(move || { let mut worker = nanoipc::Worker::new(&(client as Arc)); worker.add_reqrep(&socket_addr).expect("Ipc expected to initialize with no issues"); From 35ecb396b60886fc89af3bfee84f67d523d33513 Mon Sep 17 00:00:00 2001 From: Gav Wood Date: Tue, 23 Aug 2016 13:30:33 +0200 Subject: [PATCH 10/29] Market-orientated transaction pricing (#1963) * Market-orientated transaction pricing Avoid a strict gas-limit and let the market decide through using a priority queue based around gas pricing for transactions. In periods of low transaction volume, they'll be processed for a lower fee. * Fix tests, add/clarify documentation, fix some logic. * Change default to reflect CLI. * Specify type. * Make test more precise. * Fix doc test --- ethcore/src/miner/transaction_queue.rs | 466 ++++++++++++++++--------- parity/cli.rs | 2 +- parity/params.rs | 2 +- 3 files changed, 295 insertions(+), 175 deletions(-) diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 9d3a85995..8a2a37145 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -43,14 +43,14 @@ //! //! let st1 = t1.sign(&key.secret()); //! let st2 = t2.sign(&key.secret()); -//! let default_nonce = |_a: &Address| AccountDetails { +//! let default_account_details = |_a: &Address| AccountDetails { //! nonce: U256::from(10), //! balance: U256::from(1_000_000), //! }; //! //! let mut txq = TransactionQueue::new(); -//! txq.add(st2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); -//! txq.add(st1.clone(), &default_nonce, TransactionOrigin::External).unwrap(); +//! txq.add(st2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); +//! txq.add(st1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); //! //! // Check status //! assert_eq!(txq.status().pending, 2); @@ -62,7 +62,7 @@ //! //! // And when transaction is removed (but nonce haven't changed) //! // it will move subsequent transactions to future -//! txq.remove_invalid(&st1.hash(), &default_nonce); +//! txq.remove_invalid(&st1.hash(), &default_account_details); //! assert_eq!(txq.status().pending, 0); //! assert_eq!(txq.status().future, 1); //! 
assert_eq!(txq.top_transactions().len(), 0); @@ -82,7 +82,7 @@ use std::cmp::Ordering; use std::cmp; -use std::collections::{HashMap, BTreeSet}; +use std::collections::{HashSet, HashMap, BTreeSet, BTreeMap}; use util::{Address, H256, Uint, U256}; use util::table::Table; use transaction::*; @@ -226,23 +226,34 @@ impl VerifiedTransaction { struct TransactionSet { by_priority: BTreeSet, by_address: Table, + by_gas_price: BTreeMap>, limit: usize, } impl TransactionSet { - /// Inserts `TransactionOrder` to this set + /// Inserts `TransactionOrder` to this set. Transaction does not need to be unique - + /// the same transaction may be validly inserted twice. Any previous transaction that + /// it replaces (i.e. with the same `sender` and `nonce`) should be returned. fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option { - self.by_priority.insert(order.clone()); - let r = self.by_address.insert(sender, nonce, order); - // If transaction was replaced remove it from priority queue - if let Some(ref old_order) = r { - self.by_priority.remove(old_order); + if !self.by_priority.insert(order.clone()) { + return Some(order.clone()); } - assert_eq!(self.by_priority.len(), self.by_address.len()); - r + let order_hash = order.hash.clone(); + let order_gas_price = order.gas_price.clone(); + let by_address_replaced = self.by_address.insert(sender, nonce, order); + // If transaction was replaced remove it from priority queue + if let Some(ref old_order) = by_address_replaced { + assert!(self.by_priority.remove(old_order), "hash is in `by_address`; all transactions in `by_address` must be in `by_priority`; qed"); + assert!(Self::remove_item(&mut self.by_gas_price, &old_order.gas_price, &old_order.hash), + "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); + } + Self::insert_item(&mut self.by_gas_price, order_gas_price, order_hash); + debug_assert_eq!(self.by_priority.len(), self.by_address.len()); + debug_assert_eq!(self.by_gas_price.iter().map(|(_, v)| v.len()).fold(0, |a, b| a + b), self.by_address.len()); + by_address_replaced } - /// Remove low priority transactions if there is more then specified by given `limit`. + /// Remove low priority transactions if there is more than specified by given `limit`. /// /// It drops transactions from this set but also removes associated `VerifiedTransaction`. /// Returns addresses and lowest nonces of transactions removed because of limit. 
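A compact stand-alone sketch of the BTreeMap/HashSet "multimap" bookkeeping this patch introduces (types shrunk to u64 for brevity; the real code keys by U256 gas price and stores H256 hashes, and its remove_item is written slightly differently): keeping the keys sorted means the cheapest queued gas price is always the first key.

use std::collections::{BTreeMap, HashSet};

// Insert a transaction hash under its gas price; BTreeMap keeps keys sorted,
// so the cheapest bucket is always `keys().next()`.
fn insert_item(into: &mut BTreeMap<u64, HashSet<u64>>, gas_price: u64, hash: u64) -> bool {
    into.entry(gas_price).or_insert_with(HashSet::new).insert(hash)
}

// Remove a hash from its gas-price bucket and drop the bucket once empty,
// so the minimum key keeps pointing at a real entry.
fn remove_item(from: &mut BTreeMap<u64, HashSet<u64>>, gas_price: &u64, hash: &u64) -> bool {
    let removed = from.get_mut(gas_price).map_or(false, |bucket| bucket.remove(hash));
    if removed && from.get(gas_price).map_or(false, |bucket| bucket.is_empty()) {
        from.remove(gas_price);
    }
    removed
}

fn main() {
    let mut index = BTreeMap::new();
    insert_item(&mut index, 2, 0xaa);
    insert_item(&mut index, 5, 0xbb);
    assert_eq!(index.keys().next(), Some(&2)); // cheapest queued gas price
    assert!(remove_item(&mut index, &2, &0xaa));
    assert_eq!(index.keys().next(), Some(&5));
}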
@@ -267,7 +278,7 @@ impl TransactionSet { .expect("Transaction has just been found in `by_priority`; so it is in `by_address` also."); by_hash.remove(&order.hash) - .expect("Hash found in `by_priorty` matches the one dropped; so it is included in `by_hash`"); + .expect("hash is in `by_priorty`; all hashes in `by_priority` must be in `by_hash`; qed"); let min = removed.get(&sender).map_or(nonce, |val| cmp::min(*val, nonce)); removed.insert(sender, min); @@ -278,6 +289,8 @@ impl TransactionSet { /// Drop transaction from this set (remove from `by_priority` and `by_address`) fn drop(&mut self, sender: &Address, nonce: &U256) -> Option { if let Some(tx_order) = self.by_address.remove(sender, nonce) { + assert!(Self::remove_item(&mut self.by_gas_price, &tx_order.gas_price, &tx_order.hash), + "hash is in `by_address`; all transactions' gas_prices in `by_address` must be in `by_gas_limit`; qed"); self.by_priority.remove(&tx_order); assert_eq!(self.by_priority.len(), self.by_address.len()); return Some(tx_order); @@ -290,6 +303,7 @@ impl TransactionSet { fn clear(&mut self) { self.by_priority.clear(); self.by_address.clear(); + self.by_gas_price.clear(); } /// Sets new limit for number of transactions in this `TransactionSet`. @@ -297,6 +311,41 @@ impl TransactionSet { fn set_limit(&mut self, limit: usize) { self.limit = limit; } + + /// Get the minimum gas price that we can accept into this queue that wouldn't cause the transaction to + /// immediately be dropped. 0 if the queue isn't at capacity; 1 plus the lowest if it is. + fn gas_price_entry_limit(&self) -> U256 { + match self.by_gas_price.keys().next() { + Some(k) if self.by_priority.len() >= self.limit => *k + 1.into(), + _ => U256::default(), + } + } + + /// Insert an item into a BTreeMap/HashSet "multimap". + fn insert_item(into: &mut BTreeMap>, gas_price: U256, hash: H256) -> bool { + into.entry(gas_price).or_insert_with(Default::default).insert(hash) + } + + /// Remove an item from a BTreeMap/HashSet "multimap". + /// Returns true if the item was removed successfully. + fn remove_item(from: &mut BTreeMap>, gas_price: &U256, hash: &H256) -> bool { + if let Some(mut hashes) = from.get_mut(gas_price) { + let only_one_left = hashes.len() == 1; + if !only_one_left { + // Operation may be ok: only if hash is in gas-price's Set. + return hashes.remove(hash); + } + if hashes.iter().next().unwrap() != hash { + // Operation failed: hash not the single item in gas-price's Set. + return false; + } + } else { + // Operation failed: gas-price not found in Map. + return false; + } + // Operation maybe ok: only if hash not found in gas-price Set. + from.remove(gas_price).is_some() + } } #[derive(Debug)] @@ -316,7 +365,6 @@ pub struct AccountDetails { pub balance: U256, } - /// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue. const GAS_LIMIT_HYSTERESIS: usize = 10; // % @@ -355,12 +403,14 @@ impl TransactionQueue { let current = TransactionSet { by_priority: BTreeSet::new(), by_address: Table::new(), + by_gas_price: Default::default(), limit: limit, }; let future = TransactionSet { by_priority: BTreeSet::new(), by_address: Table::new(), + by_gas_price: Default::default(), limit: limit, }; @@ -400,6 +450,12 @@ impl TransactionQueue { self.minimal_gas_price = min_gas_price; } + /// Get one more than the lowest gas price in the queue iff the pool is + /// full, otherwise 0. + pub fn effective_minimum_gas_price(&self) -> U256 { + self.current.gas_price_entry_limit() + } + /// Sets new gas limit. 
Transactions with gas slightly (`GAS_LIMIT_HYSTERESIS`) above the limit won't be imported. /// Any transaction already imported to the queue is not affected. pub fn set_gas_limit(&mut self, gas_limit: U256) { @@ -445,6 +501,21 @@ impl TransactionQueue { })); } + let full_queues_lowest = self.effective_minimum_gas_price(); + if tx.gas_price < full_queues_lowest && origin != TransactionOrigin::Local { + trace!(target: "txqueue", + "Dropping transaction below lowest gas price in a full queue: {:?} (gp: {} < {})", + tx.hash(), + tx.gas_price, + full_queues_lowest + ); + + return Err(Error::Transaction(TransactionError::InsufficientGasPrice { + minimal: full_queues_lowest, + got: tx.gas_price, + })); + } + try!(tx.check_low_s()); if tx.gas > self.gas_limit || tx.gas > self.tx_gas_limit { @@ -811,59 +882,86 @@ mod test { } } - fn new_unsigned_tx(nonce: U256) -> Transaction { + fn default_nonce() -> U256 { 123.into() } + fn default_gas_price() -> U256 { 1.into() } + + fn new_unsigned_tx(nonce: U256, gas_price: U256) -> Transaction { Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), gas: U256::from(100_000), - gas_price: U256::one(), + gas_price: gas_price, nonce: nonce } } - fn new_tx() -> SignedTransaction { + fn new_tx(nonce: U256, gas_price: U256) -> SignedTransaction { let keypair = KeyPair::create().unwrap(); - new_unsigned_tx(U256::from(123)).sign(keypair.secret()) + new_unsigned_tx(nonce, gas_price).sign(keypair.secret()) } - - fn default_nonce_val() -> U256 { - U256::from(123) + fn new_tx_default() -> SignedTransaction { + new_tx(default_nonce(), default_gas_price()) } - fn default_nonce(_address: &Address) -> AccountDetails { + fn default_account_details(_address: &Address) -> AccountDetails { AccountDetails { - nonce: default_nonce_val(), + nonce: default_nonce(), balance: !U256::zero() } } - /// Returns two transactions with identical (sender, nonce) but different hashes - fn new_similar_txs() -> (SignedTransaction, SignedTransaction) { + fn new_tx_pair(nonce: U256, gas_price: U256, nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { + let tx1 = new_unsigned_tx(nonce, gas_price); + let tx2 = new_unsigned_tx(nonce + nonce_increment, gas_price + gas_price_increment); + let keypair = KeyPair::create().unwrap(); let secret = &keypair.secret(); - let nonce = U256::from(123); - let tx = new_unsigned_tx(nonce); - let mut tx2 = new_unsigned_tx(nonce); - tx2.gas_price = U256::from(2); - - (tx.sign(secret), tx2.sign(secret)) + (tx1.sign(secret), tx2.sign(secret)) } - fn new_txs(second_nonce: U256) -> (SignedTransaction, SignedTransaction) { - new_txs_with_gas_price_diff(second_nonce, U256::zero()) + fn new_tx_pair_default(nonce_increment: U256, gas_price_increment: U256) -> (SignedTransaction, SignedTransaction) { + new_tx_pair(default_nonce(), default_gas_price(), nonce_increment, gas_price_increment) } - fn new_txs_with_gas_price_diff(second_nonce: U256, gas_price: U256) -> (SignedTransaction, SignedTransaction) { - let keypair = KeyPair::create().unwrap(); - let secret = &keypair.secret(); - let nonce = U256::from(123); - let tx = new_unsigned_tx(nonce); - let mut tx2 = new_unsigned_tx(nonce + second_nonce); - tx2.gas_price = tx2.gas_price + gas_price; + /// Returns two transactions with identical (sender, nonce) but different gas_price/hash. 
+ fn new_similar_tx_pair() -> (SignedTransaction, SignedTransaction) { + new_tx_pair_default(0.into(), 1.into()) + } - (tx.sign(secret), tx2.sign(secret)) + #[test] + fn should_return_correct_nonces_when_dropped_because_of_limit() { + // given + let mut txq = TransactionQueue::with_limits(2, !U256::zero()); + let (tx1, tx2) = new_tx_pair(123.into(), 1.into(), 1.into(), 0.into()); + let sender = tx1.sender().unwrap(); + let nonce = tx1.nonce; + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + assert_eq!(txq.status().pending, 2); + assert_eq!(txq.last_nonce(&sender), Some(nonce + U256::one())); + + // when + let tx = new_tx(123.into(), 1.into()); + let res = txq.add(tx.clone(), &default_account_details, TransactionOrigin::External); + + // then + // No longer the case as we don't even consider a transaction that isn't above a full + // queue's minimum gas price. + // We may want to reconsider this in the near future so leaving this code in as a + // possible alternative. + /* + assert_eq!(res.unwrap(), TransactionImportResult::Current); + assert_eq!(txq.status().pending, 2); + assert_eq!(txq.last_nonce(&sender), Some(nonce)); + */ + assert_eq!(unwrap_tx_err(res), TransactionError::InsufficientGasPrice { + minimal: 2.into(), + got: 1.into(), + }); + assert_eq!(txq.status().pending, 2); + assert_eq!(txq.last_nonce(&sender), Some(tx2.nonce)); } #[test] @@ -872,9 +970,10 @@ mod test { let mut set = TransactionSet { by_priority: BTreeSet::new(), by_address: Table::new(), + by_gas_price: Default::default(), limit: 1 }; - let (tx1, tx2) = new_txs(U256::from(1)); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let tx1 = VerifiedTransaction::new(tx1, TransactionOrigin::External).unwrap(); let tx2 = VerifiedTransaction::new(tx2, TransactionOrigin::External).unwrap(); let mut by_hash = { @@ -911,11 +1010,12 @@ mod test { let mut set = TransactionSet { by_priority: BTreeSet::new(), by_address: Table::new(), + by_gas_price: Default::default(), limit: 1 }; // Create two transactions with same nonce // (same hash) - let (tx1, tx2) = new_txs(U256::from(0)); + let (tx1, tx2) = new_tx_pair_default(0.into(), 0.into()); let tx1 = VerifiedTransaction::new(tx1, TransactionOrigin::External).unwrap(); let tx2 = VerifiedTransaction::new(tx2, TransactionOrigin::External).unwrap(); let by_hash = { @@ -931,25 +1031,68 @@ mod test { set.insert(tx1.sender(), tx1.nonce(), order1.clone()); assert_eq!(set.by_priority.len(), 1); assert_eq!(set.by_address.len(), 1); + assert_eq!(set.by_gas_price.len(), 1); + assert_eq!(*set.by_gas_price.iter().next().unwrap().0, 1.into()); + assert_eq!(set.by_gas_price.iter().next().unwrap().1.len(), 1); // Two different orders (imagine nonce changed in the meantime) let order2 = TransactionOrder::for_transaction(&tx2, U256::one()); set.insert(tx2.sender(), tx2.nonce(), order2.clone()); assert_eq!(set.by_priority.len(), 1); assert_eq!(set.by_address.len(), 1); + assert_eq!(set.by_gas_price.len(), 1); + assert_eq!(*set.by_gas_price.iter().next().unwrap().0, 1.into()); + assert_eq!(set.by_gas_price.iter().next().unwrap().1.len(), 1); // then assert_eq!(by_hash.len(), 1); assert_eq!(set.by_priority.len(), 1); assert_eq!(set.by_address.len(), 1); + assert_eq!(set.by_gas_price.len(), 1); + assert_eq!(*set.by_gas_price.iter().next().unwrap().0, 1.into()); + assert_eq!(set.by_gas_price.iter().next().unwrap().1.len(), 1); 
assert_eq!(set.by_priority.iter().next().unwrap().clone(), order2); } + #[test] + fn should_not_insert_same_transaction_twice_into_set() { + let mut set = TransactionSet { + by_priority: BTreeSet::new(), + by_address: Table::new(), + by_gas_price: Default::default(), + limit: 2 + }; + let tx = new_tx_default(); + let tx1 = VerifiedTransaction::new(tx.clone(), TransactionOrigin::External).unwrap(); + let order1 = TransactionOrder::for_transaction(&tx1, U256::zero()); + assert!(set.insert(tx1.sender(), tx1.nonce(), order1).is_none()); + let tx2 = VerifiedTransaction::new(tx, TransactionOrigin::External).unwrap(); + let order2 = TransactionOrder::for_transaction(&tx2, U256::zero()); + assert!(set.insert(tx2.sender(), tx2.nonce(), order2).is_some()); + } + + #[test] + fn should_give_correct_gas_price_entry_limit() { + let mut set = TransactionSet { + by_priority: BTreeSet::new(), + by_address: Table::new(), + by_gas_price: Default::default(), + limit: 1 + }; + + assert_eq!(set.gas_price_entry_limit(), 0.into()); + let tx = new_tx_default(); + let tx1 = VerifiedTransaction::new(tx.clone(), TransactionOrigin::External).unwrap(); + let order1 = TransactionOrder::for_transaction(&tx1, U256::zero()); + assert!(set.insert(tx1.sender(), tx1.nonce(), order1.clone()).is_none()); + assert_eq!(set.gas_price_entry_limit(), 2.into()); + } + #[test] fn should_handle_same_transaction_imported_twice_with_different_state_nonces() { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_similar_txs(); - let prev_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce - U256::one(), balance: + let (tx, tx2) = new_similar_tx_pair(); + let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: !U256::zero() }; // First insert one transaction to future @@ -958,7 +1101,7 @@ mod test { assert_eq!(txq.status().future, 1); // now import second transaction to current - let res = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External); + let res = txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External); // and then there should be only one transaction in current (the one with higher gas_price) assert_eq!(res.unwrap(), TransactionImportResult::Current); @@ -974,10 +1117,10 @@ mod test { fn should_import_tx() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); // when - let res = txq.add(tx, &default_nonce, TransactionOrigin::External); + let res = txq.add(tx, &default_account_details, TransactionOrigin::External); // then assert_eq!(res.unwrap(), TransactionImportResult::Current); @@ -1003,13 +1146,13 @@ mod test { fn should_not_import_transaction_above_gas_limit() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); let gas = tx.gas; let limit = gas / U256::from(2); txq.set_gas_limit(limit); // when - let res = txq.add(tx, &default_nonce, TransactionOrigin::External); + let res = txq.add(tx, &default_account_details, TransactionOrigin::External); // then assert_eq!(unwrap_tx_err(res), TransactionError::GasLimitExceeded { @@ -1026,9 +1169,9 @@ mod test { fn should_drop_transactions_from_senders_without_balance() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); let account = |a: &Address| AccountDetails { - nonce: default_nonce(a).nonce, + nonce: default_account_details(a).nonce, balance: U256::one() }; @@ -1049,11 +1192,11 @@ mod test { fn 
should_not_import_transaction_below_min_gas_price_threshold_if_external() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); txq.set_minimal_gas_price(tx.gas_price + U256::one()); // when - let res = txq.add(tx, &default_nonce, TransactionOrigin::External); + let res = txq.add(tx, &default_account_details, TransactionOrigin::External); // then assert_eq!(unwrap_tx_err(res), TransactionError::InsufficientGasPrice { @@ -1069,11 +1212,11 @@ mod test { fn should_import_transaction_below_min_gas_price_threshold_if_local() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); txq.set_minimal_gas_price(tx.gas_price + U256::one()); // when - let res = txq.add(tx, &default_nonce, TransactionOrigin::Local); + let res = txq.add(tx, &default_account_details, TransactionOrigin::Local); // then assert_eq!(res.unwrap(), TransactionImportResult::Current); @@ -1086,7 +1229,7 @@ mod test { fn should_reject_incorectly_signed_transaction() { // given let mut txq = TransactionQueue::new(); - let tx = new_unsigned_tx(U256::from(123)); + let tx = new_unsigned_tx(123.into(), 1.into()); let stx = { let mut s = RlpStream::new_list(9); s.append(&tx.nonce); @@ -1101,7 +1244,7 @@ mod test { decode(s.as_raw()) }; // when - let res = txq.add(stx, &default_nonce, TransactionOrigin::External); + let res = txq.add(stx, &default_account_details, TransactionOrigin::External); // then assert!(res.is_err()); @@ -1112,11 +1255,11 @@ mod test { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); // when - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then let top = txq.top_transactions(); @@ -1129,15 +1272,15 @@ mod test { fn should_prioritize_local_transactions_within_same_nonce_height() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); // the second one has same nonce but higher `gas_price` - let (_, tx2) = new_similar_txs(); + let (_, tx2) = new_similar_tx_pair(); // when // first insert the one with higher gas price - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then the one with lower gas price, but local - txq.add(tx.clone(), &default_nonce, TransactionOrigin::Local).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); // then let top = txq.top_transactions(); @@ -1150,11 +1293,11 @@ mod test { fn should_not_prioritize_local_transactions_with_different_nonce_height() { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); // when - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::Local).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::Local).unwrap(); // then let top = txq.top_transactions(); @@ -1168,11 +1311,11 @@ mod test { // given let mut txq = 
TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); // when - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then let top = txq.pending_hashes(); @@ -1186,11 +1329,11 @@ mod test { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(2)); + let (tx, tx2) = new_tx_pair_default(2.into(), 0.into()); // when - let res1 = txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - let res2 = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let res1 = txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + let res2 = txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then assert_eq!(res1, TransactionImportResult::Current); @@ -1206,13 +1349,13 @@ mod test { #[test] fn should_correctly_update_futures_when_removing() { // given - let prev_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce - U256::one(), balance: + let prev_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: !U256::zero() }; - let next2_nonce = default_nonce_val() + U256::from(3); + let next2_nonce = default_nonce() + U256::from(3); let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); txq.add(tx.clone(), &prev_nonce, TransactionOrigin::External).unwrap(); txq.add(tx2.clone(), &prev_nonce, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 2); @@ -1232,17 +1375,17 @@ mod test { let mut txq = TransactionQueue::new(); let kp = KeyPair::create().unwrap(); let secret = kp.secret(); - let tx = new_unsigned_tx(U256::from(123)).sign(secret); - let tx1 = new_unsigned_tx(U256::from(124)).sign(secret); - let tx2 = new_unsigned_tx(U256::from(125)).sign(secret); + let tx = new_unsigned_tx(123.into(), 1.into()).sign(secret); + let tx1 = new_unsigned_tx(124.into(), 1.into()).sign(secret); + let tx2 = new_unsigned_tx(125.into(), 1.into()).sign(secret); - txq.add(tx, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx, &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 1); - txq.add(tx2, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx2, &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); // when - txq.add(tx1, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx1, &default_account_details, TransactionOrigin::External).unwrap(); // then let stats = txq.status(); @@ -1254,9 +1397,9 @@ mod test { fn should_remove_transaction() { // given let mut txq2 = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(3)); - txq2.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq2.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (tx, tx2) = new_tx_pair_default(3.into(), 0.into()); + txq2.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq2.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq2.status().pending, 1); 
assert_eq!(txq2.status().future, 1); @@ -1275,16 +1418,16 @@ mod test { fn should_move_transactions_to_future_if_gap_introduced() { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); - let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); + let tx3 = new_tx_default(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx3.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 3); // when - txq.remove_invalid(&tx.hash(), &default_nonce); + txq.remove_invalid(&tx.hash(), &default_account_details); // then let stats = txq.status(); @@ -1296,11 +1439,11 @@ mod test { fn should_clear_queue() { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::one()); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); // add - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); let stats = txq.status(); assert_eq!(stats.pending, 2); @@ -1316,60 +1459,38 @@ mod test { fn should_drop_old_transactions_when_hitting_the_limit() { // given let mut txq = TransactionQueue::with_limits(1, !U256::zero()); - let (tx, tx2) = new_txs(U256::one()); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); let sender = tx.sender().unwrap(); let nonce = tx.nonce; - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 1); // when - let res = txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External); + let res = txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External); // then let t = txq.top_transactions(); - assert_eq!(unwrap_tx_err(res), TransactionError::LimitReached); + assert_eq!(unwrap_tx_err(res), TransactionError::InsufficientGasPrice { minimal: 2.into(), got: 1.into() }); assert_eq!(txq.status().pending, 1); assert_eq!(t.len(), 1); assert_eq!(t[0], tx); assert_eq!(txq.last_nonce(&sender), Some(nonce)); } - #[test] - fn should_return_correct_nonces_when_dropped_because_of_limit() { - // given - let mut txq = TransactionQueue::with_limits(2, !U256::zero()); - let tx = new_tx(); - let (tx1, tx2) = new_txs(U256::one()); - let sender = tx1.sender().unwrap(); - let nonce = tx1.nonce; - txq.add(tx1.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - assert_eq!(txq.status().pending, 2); - assert_eq!(txq.last_nonce(&sender), Some(nonce + U256::one())); - - // when - let res = txq.add(tx.clone(), &default_nonce, TransactionOrigin::External); - - // then - assert_eq!(res.unwrap(), TransactionImportResult::Current); - assert_eq!(txq.status().pending, 2); - assert_eq!(txq.last_nonce(&sender), Some(nonce)); - } - #[test] fn should_limit_future_transactions() { 
let mut txq = TransactionQueue::with_limits(1, !U256::zero()); txq.current.set_limit(10); - let (tx1, tx2) = new_txs_with_gas_price_diff(U256::from(4), U256::from(1)); - let (tx3, tx4) = new_txs_with_gas_price_diff(U256::from(4), U256::from(2)); - txq.add(tx1.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx3.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (tx1, tx2) = new_tx_pair_default(4.into(), 1.into()); + let (tx3, tx4) = new_tx_pair_default(4.into(), 2.into()); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx3.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 2); // when - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx4.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx4.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then assert_eq!(txq.status().future, 1); @@ -1378,7 +1499,7 @@ mod test { #[test] fn should_drop_transactions_with_old_nonces() { let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); let last_nonce = tx.nonce + U256::one(); let fetch_last_nonce = |_a: &Address| AccountDetails{ nonce: last_nonce, balance: !U256::zero() }; @@ -1395,11 +1516,11 @@ mod test { #[test] fn should_not_insert_same_transaction_twice() { // given - let nonce = |a: &Address| AccountDetails { nonce: default_nonce(a).nonce + U256::one(), + let nonce = |a: &Address| AccountDetails { nonce: default_account_details(a).nonce + U256::one(), balance: !U256::zero() }; let mut txq = TransactionQueue::new(); - let (_tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (_tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); assert_eq!(txq.status().pending, 0); @@ -1417,16 +1538,16 @@ mod test { fn should_accept_same_transaction_twice_if_removed() { // given let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::from(1)); - txq.add(tx1.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 2); // when - txq.remove_invalid(&tx1.hash(), &default_nonce); + txq.remove_invalid(&tx1.hash(), &default_account_details); assert_eq!(txq.status().pending, 0); assert_eq!(txq.status().future, 1); - txq.add(tx1.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx1.clone(), &default_account_details, TransactionOrigin::External).unwrap(); // then let stats = txq.status(); @@ -1438,17 +1559,17 @@ mod test { fn should_not_move_to_future_if_state_nonce_is_higher() { // given let mut txq = TransactionQueue::new(); - let (tx, tx2) = new_txs(U256::from(1)); - let tx3 = new_tx(); - txq.add(tx2.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + let (tx, tx2) = new_tx_pair_default(1.into(), 0.into()); + let tx3 = new_tx_default(); + 
txq.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx3.clone(), &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx.clone(), &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx3.clone(), &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().pending, 3); // when let sender = tx.sender().unwrap(); - txq.remove_all(sender, default_nonce_val() + U256::one()); + txq.remove_all(sender, default_nonce() + U256::one()); // then let stats = txq.status(); @@ -1462,7 +1583,7 @@ mod test { // given let mut txq = TransactionQueue::new(); let keypair = KeyPair::create().unwrap(); - let tx = new_unsigned_tx(U256::from(123)).sign(keypair.secret()); + let tx = new_unsigned_tx(123.into(), 1.into()).sign(keypair.secret()); let tx2 = { let mut tx2 = (*tx).clone(); tx2.gas_price = U256::from(200); @@ -1470,8 +1591,8 @@ mod test { }; // when - txq.add(tx, &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx, &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2, &default_account_details, TransactionOrigin::External).unwrap(); // then let stats = txq.status(); @@ -1485,7 +1606,7 @@ mod test { // given let mut txq = TransactionQueue::new(); let keypair = KeyPair::create().unwrap(); - let tx0 = new_unsigned_tx(U256::from(123)).sign(keypair.secret()); + let tx0 = new_unsigned_tx(123.into(), 1.into()).sign(keypair.secret()); let tx1 = { let mut tx1 = (*tx0).clone(); tx1.nonce = U256::from(124); @@ -1498,10 +1619,10 @@ mod test { }; // when - txq.add(tx1, &default_nonce, TransactionOrigin::External).unwrap(); - txq.add(tx2, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx1, &default_account_details, TransactionOrigin::External).unwrap(); + txq.add(tx2, &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 1); - txq.add(tx0, &default_nonce, TransactionOrigin::External).unwrap(); + txq.add(tx0, &default_account_details, TransactionOrigin::External).unwrap(); // then let stats = txq.status(); @@ -1513,12 +1634,12 @@ mod test { #[test] fn should_recalculate_height_when_removing_from_future() { // given - let previous_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce - U256::one(), balance: + let previous_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce - U256::one(), balance: !U256::zero() }; - let next_nonce = |a: &Address| AccountDetails{ nonce: default_nonce(a).nonce + U256::one(), balance: + let next_nonce = |a: &Address| AccountDetails{ nonce: default_account_details(a).nonce + U256::one(), balance: !U256::zero() }; let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::one()); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); txq.add(tx1.clone(), &previous_nonce, TransactionOrigin::External).unwrap(); txq.add(tx2, &previous_nonce, TransactionOrigin::External).unwrap(); assert_eq!(txq.status().future, 2); @@ -1545,7 +1666,7 @@ mod test { fn should_return_correct_nonce_when_transactions_from_given_address_exist() { // given let mut txq = TransactionQueue::new(); - let tx = new_tx(); + let tx = new_tx_default(); let from = tx.sender().unwrap(); let nonce = tx.nonce; let details = |_a: &Address| AccountDetails { nonce: nonce, balance: !U256::zero() 
}; @@ -1561,7 +1682,7 @@ mod test { fn should_remove_old_transaction_even_if_newer_transaction_was_not_known() { // given let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::one()); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); let (nonce1, nonce2) = (tx1.nonce, tx2.nonce); let details1 = |_a: &Address| AccountDetails { nonce: nonce1, balance: !U256::zero() }; @@ -1579,7 +1700,7 @@ mod test { fn should_return_valid_last_nonce_after_remove_all() { // given let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::from(4)); + let (tx1, tx2) = new_tx_pair_default(4.into(), 0.into()); let sender = tx1.sender().unwrap(); let (nonce1, nonce2) = (tx1.nonce, tx2.nonce); let details1 = |_a: &Address| AccountDetails { nonce: nonce1, balance: !U256::zero() }; @@ -1603,13 +1724,13 @@ mod test { fn should_return_true_if_there_is_local_transaction_pending() { // given let mut txq = TransactionQueue::new(); - let (tx1, tx2) = new_txs(U256::from(1)); + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); assert_eq!(txq.has_local_pending_transactions(), false); // when - assert_eq!(txq.add(tx1, &default_nonce, TransactionOrigin::External).unwrap(), TransactionImportResult::Current); + assert_eq!(txq.add(tx1, &default_account_details, TransactionOrigin::External).unwrap(), TransactionImportResult::Current); assert_eq!(txq.has_local_pending_transactions(), false); - assert_eq!(txq.add(tx2, &default_nonce, TransactionOrigin::Local).unwrap(), TransactionImportResult::Current); + assert_eq!(txq.add(tx2, &default_account_details, TransactionOrigin::Local).unwrap(), TransactionImportResult::Current); // then assert_eq!(txq.has_local_pending_transactions(), true); @@ -1619,9 +1740,9 @@ mod test { fn should_keep_right_order_in_future() { // given let mut txq = TransactionQueue::with_limits(1, !U256::zero()); - let (tx1, tx2) = new_txs(U256::from(1)); - let prev_nonce = |a: &Address| AccountDetails { nonce: default_nonce(a).nonce - U256::one(), balance: - default_nonce(a).balance }; + let (tx1, tx2) = new_tx_pair_default(1.into(), 0.into()); + let prev_nonce = |a: &Address| AccountDetails { nonce: default_account_details(a).nonce - U256::one(), balance: + default_account_details(a).balance }; // when assert_eq!(txq.add(tx2, &prev_nonce, TransactionOrigin::External).unwrap(), TransactionImportResult::Future); @@ -1639,25 +1760,24 @@ mod test { let (tx1, tx2, tx2_2, tx3) = { let keypair = KeyPair::create().unwrap(); let secret = &keypair.secret(); - let nonce = U256::from(123); - let tx = new_unsigned_tx(nonce); - let tx2 = new_unsigned_tx(nonce + 1.into()); - let mut tx2_2 = new_unsigned_tx(nonce + 1.into()); - tx2_2.gas_price = U256::from(5); - let tx3 = new_unsigned_tx(nonce + 2.into()); + let nonce = 123.into(); + let tx = new_unsigned_tx(nonce, 1.into()); + let tx2 = new_unsigned_tx(nonce + 1.into(), 1.into()); + let tx2_2 = new_unsigned_tx(nonce + 1.into(), 5.into()); + let tx3 = new_unsigned_tx(nonce + 2.into(), 1.into()); (tx.sign(secret), tx2.sign(secret), tx2_2.sign(secret), tx3.sign(secret)) }; let sender = tx1.sender().unwrap(); - txq.add(tx1, &default_nonce, TransactionOrigin::Local).unwrap(); - txq.add(tx2, &default_nonce, TransactionOrigin::Local).unwrap(); - txq.add(tx3, &default_nonce, TransactionOrigin::Local).unwrap(); + txq.add(tx1, &default_account_details, TransactionOrigin::Local).unwrap(); + txq.add(tx2, &default_account_details, TransactionOrigin::Local).unwrap(); + txq.add(tx3, &default_account_details, TransactionOrigin::Local).unwrap(); 
assert_eq!(txq.future.by_priority.len(), 0); assert_eq!(txq.current.by_priority.len(), 3); // when - let res = txq.add(tx2_2, &default_nonce, TransactionOrigin::Local); + let res = txq.add(tx2_2, &default_account_details, TransactionOrigin::Local); // then assert_eq!(txq.last_nonce(&sender).unwrap(), 125.into()); diff --git a/parity/cli.rs b/parity/cli.rs index cb354aff5..5f7448772 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -170,7 +170,7 @@ Sealing/Mining Options: lenient - Same as strict when mining, and cheap when not [default: cheap]. --usd-per-tx USD Amount of USD to be paid for a basic transaction - [default: 0.005]. The minimum gas price is set + [default: 0]. The minimum gas price is set accordingly. --usd-per-eth SOURCE USD value of a single ETH. SOURCE may be either an amount in USD, a web service or 'auto' to use each diff --git a/parity/params.rs b/parity/params.rs index bc58e455d..54a680414 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -179,7 +179,7 @@ pub enum GasPricerConfig { impl Default for GasPricerConfig { fn default() -> Self { GasPricerConfig::Calibrated { - usd_per_tx: 0.005, + usd_per_tx: 0f32, recalibration_period: Duration::from_secs(3600), } } From d4777f9296a10d65ec6b55166540e65a83dbe88d Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Tue, 23 Aug 2016 13:31:03 +0200 Subject: [PATCH 11/29] fixed #1933 (#1979) --- ethcore/src/trace/executive_tracer.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index 5c2e158e9..9963a9f27 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -52,11 +52,12 @@ fn update_trace_address(traces: Vec) -> Vec { let mut subtrace_subtraces_left = 0; traces.into_iter().map(|mut trace| { let is_top_subtrace = trace.trace_address.is_empty(); + let is_subtrace = trace.trace_address.len() == 1; trace.trace_address.push_front(top_subtrace_index); if is_top_subtrace { subtrace_subtraces_left = trace.subtraces; - } else { + } else if is_subtrace { subtrace_subtraces_left -= 1; } From 9fc144cc2fd5f9515161af7bd502020501c8aa52 Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Tue, 23 Aug 2016 18:51:23 +0400 Subject: [PATCH 12/29] timeout multiplied (#1990) --- ipc/nano/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipc/nano/src/lib.rs b/ipc/nano/src/lib.rs index df2e34d04..da48151a6 100644 --- a/ipc/nano/src/lib.rs +++ b/ipc/nano/src/lib.rs @@ -28,7 +28,7 @@ use nanomsg::{Socket, Protocol, Error, Endpoint, PollRequest, PollFd, PollInOut} use std::ops::Deref; const POLL_TIMEOUT: isize = 200; -const CLIENT_CONNECTION_TIMEOUT: isize = 15000; +const CLIENT_CONNECTION_TIMEOUT: isize = 120000; /// Generic worker to handle service (binded) sockets pub struct Worker where S: IpcInterface { From 59ede63edaa452482573e3d1f3fd4fe8527137bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 Aug 2016 16:53:24 +0200 Subject: [PATCH 13/29] CLI for Signer interface (#1980) * # This is a combination of 2 commits. 
# The first commit's message is: CLI to specify signer interface # This is the 2nd commit message: Fixing paths on windows * CLI to specify signer interface --- parity/cli.rs | 6 +++++- parity/configuration.rs | 47 ++++++++++++++++++++++++++++++++++++----- parity/signer.rs | 4 +++- 3 files changed, 50 insertions(+), 7 deletions(-) diff --git a/parity/cli.rs b/parity/cli.rs index 5f7448772..366c73a5b 100644 --- a/parity/cli.rs +++ b/parity/cli.rs @@ -73,6 +73,9 @@ Account Options: Signer UIs. --signer-port PORT Specify the port of Trusted Signer server [default: 8180]. + --signer-interface IP Specify the hostname portion of the Trusted Signer + server, IP should be an interface's IP address, + or local [default: local]. --signer-path PATH Specify directory where Signer UIs tokens should be stored. [default: $HOME/.parity/signer] --signer-no-validation Disable Origin and Host headers validation for @@ -349,6 +352,7 @@ pub struct Args { pub flag_force_signer: bool, pub flag_no_signer: bool, pub flag_signer_port: u16, + pub flag_signer_interface: String, pub flag_signer_path: String, pub flag_signer_no_validation: bool, pub flag_force_sealing: bool, @@ -371,7 +375,7 @@ pub struct Args { pub flag_version: bool, pub flag_from: String, pub flag_to: String, - pub flag_at: String, + pub flag_at: String, pub flag_format: Option, pub flag_jitvm: bool, pub flag_log_file: Option, diff --git a/parity/configuration.rs b/parity/configuration.rs index 5786b10de..b1dbaa3fe 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -345,6 +345,7 @@ impl Configuration { SignerConfiguration { enabled: self.signer_enabled(), port: self.args.flag_signer_port, + interface: self.signer_interface(), signer_path: self.directories().signer, skip_origin_validation: self.args.flag_signer_no_validation, } @@ -573,6 +574,13 @@ impl Configuration { } } + fn signer_interface(&self) -> String { + match self.args.flag_signer_interface.as_str() { + "local" => "127.0.0.1", + x => x, + }.into() + } + fn rpc_interface(&self) -> String { match self.network_settings().rpc_interface.as_str() { "all" => "0.0.0.0", @@ -614,6 +622,7 @@ mod tests { use ethcore::client::{VMType, BlockID}; use helpers::{replace_home, default_network_config}; use run::RunCmd; + use signer::Configuration as SignerConfiguration; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat}; use presale::ImportWallet; use account::{AccountCmd, NewAccount, ImportAccounts}; @@ -876,16 +885,44 @@ mod tests { } #[test] - fn should_parse_signer_allow_all_flag() { + fn should_parse_signer_configration() { // given // when - let conf0 = parse(&["parity", "--signer-no-validation"]); - let conf1 = parse(&["parity"]); + let conf0 = parse(&["parity", "--signer-path", "signer"]); + let conf1 = parse(&["parity", "--signer-path", "signer", "--signer-no-validation"]); + let conf2 = parse(&["parity", "--signer-path", "signer", "--signer-port", "3123"]); + let conf3 = parse(&["parity", "--signer-path", "signer", "--signer-interface", "test"]); // then - assert_eq!(conf0.args.flag_signer_no_validation, true); - assert_eq!(conf1.args.flag_signer_no_validation, false); + assert_eq!(conf0.signer_config(), SignerConfiguration { + enabled: true, + port: 8180, + interface: "127.0.0.1".into(), + signer_path: "signer".into(), + skip_origin_validation: false, + }); + assert_eq!(conf1.signer_config(), SignerConfiguration { + enabled: true, + port: 8180, + interface: "127.0.0.1".into(), + signer_path: "signer".into(), + skip_origin_validation: true, + }); + 
assert_eq!(conf2.signer_config(), SignerConfiguration { + enabled: true, + port: 3123, + interface: "127.0.0.1".into(), + signer_path: "signer".into(), + skip_origin_validation: false, + }); + assert_eq!(conf3.signer_config(), SignerConfiguration { + enabled: true, + port: 8180, + interface: "test".into(), + signer_path: "signer".into(), + skip_origin_validation: false, + }); } #[test] diff --git a/parity/signer.rs b/parity/signer.rs index e61ab8411..e6924dcef 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -31,6 +31,7 @@ const CODES_FILENAME: &'static str = "authcodes"; pub struct Configuration { pub enabled: bool, pub port: u16, + pub interface: String, pub signer_path: String, pub skip_origin_validation: bool, } @@ -40,6 +41,7 @@ impl Default for Configuration { Configuration { enabled: true, port: 8180, + interface: "127.0.0.1".into(), signer_path: replace_home("$HOME/.parity/signer"), skip_origin_validation: false, } @@ -82,7 +84,7 @@ fn generate_new_token(path: String) -> io::Result { } fn do_start(conf: Configuration, deps: Dependencies) -> Result { - let addr = try!(format!("127.0.0.1:{}", conf.port) + let addr = try!(format!("{}:{}", conf.interface, conf.port) .parse() .map_err(|_| format!("Invalid port specified: {}", conf.port))); From 2a550c2adfc64c1639bd8220c05bb548c71b8c5a Mon Sep 17 00:00:00 2001 From: Nipunn Koorapati Date: Tue, 23 Aug 2016 08:07:00 -0700 Subject: [PATCH 14/29] Add timeout for eth_getWork call (#1975) --- Cargo.lock | 1 + ethcore/src/client/test_client.rs | 10 +++++++++- rpc/Cargo.toml | 1 + rpc/src/lib.rs | 1 + rpc/src/v1/helpers/errors.rs | 9 +++++++++ rpc/src/v1/impls/eth.rs | 7 +++++-- rpc/src/v1/tests/mocked/eth.rs | 32 ++++++++++++++++++++++++++++--- 7 files changed, 55 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ea22f715..7a10ec3c8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -426,6 +426,7 @@ dependencies = [ "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", "transient-hashmap 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 212dead9a..be1e9da25 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -73,6 +73,8 @@ pub struct TestBlockChainClient { pub spec: Spec, /// VM Factory pub vm_factory: EvmFactory, + /// Timestamp assigned to latest sealed block + pub latest_block_timestamp: RwLock, } #[derive(Clone)] @@ -114,6 +116,7 @@ impl TestBlockChainClient { miner: Arc::new(Miner::with_spec(&spec)), spec: spec, vm_factory: EvmFactory::new(VMType::Interpreter), + latest_block_timestamp: RwLock::new(10_000_000), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().clone(); @@ -155,6 +158,11 @@ impl TestBlockChainClient { self.queue_size.store(size, AtomicOrder::Relaxed); } + /// Set timestamp assigned to latest sealed block + pub fn set_latest_block_timestamp(&self, ts: u64) { + *self.latest_block_timestamp.write() = ts; + } + /// Add blocks to test client. 
pub fn add_blocks(&self, count: usize, with: EachBlockWith) { let len = self.numbers.read().len(); @@ -279,7 +287,7 @@ impl MiningBlockChainClient for TestBlockChainClient { extra_data ).expect("Opening block for tests will not fail."); // TODO [todr] Override timestamp for predictability (set_timestamp_now kind of sucks) - open_block.set_timestamp(10_000_000); + open_block.set_timestamp(*self.latest_block_timestamp.read()); open_block } diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 6a9478eb3..7a70f52c7 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -28,6 +28,7 @@ serde_macros = { version = "0.7.0", optional = true } clippy = { version = "0.0.85", optional = true} json-ipc-server = { git = "https://github.com/ethcore/json-ipc-server.git" } ethcore-ipc = { path = "../ipc/rpc" } +time = "0.1" [build-dependencies] serde_codegen = { version = "0.7.0", optional = true } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 9322d8dbc..01a901732 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -35,6 +35,7 @@ extern crate ethsync; extern crate transient_hashmap; extern crate json_ipc_server as ipc; extern crate ethcore_ipc; +extern crate time; #[cfg(test)] extern crate ethjson; diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index bb62a80e5..fbd134e33 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -30,6 +30,7 @@ mod codes { pub const UNSUPPORTED_REQUEST: i64 = -32000; pub const NO_WORK: i64 = -32001; pub const NO_AUTHOR: i64 = -32002; + pub const NO_NEW_WORK: i64 = -32003; pub const UNKNOWN_ERROR: i64 = -32009; pub const TRANSACTION_ERROR: i64 = -32010; pub const ACCOUNT_LOCKED: i64 = -32020; @@ -114,6 +115,14 @@ pub fn no_work() -> Error { } } +pub fn no_new_work() -> Error { + Error { + code: ErrorCode::ServerError(codes::NO_NEW_WORK), + message: "Work has not changed.".into(), + data: None + } +} + pub fn no_author() -> Error { Error { code: ErrorCode::ServerError(codes::NO_AUTHOR), diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 64ca059af..3628f99a9 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -23,6 +23,7 @@ use std::process::{Command, Stdio}; use std::thread; use std::time::{Instant, Duration}; use std::sync::{Arc, Weak}; +use time::get_time; use ethsync::{SyncProvider, SyncState}; use ethcore::miner::{MinerService, ExternalMinerService}; use jsonrpc_core::*; @@ -516,7 +517,7 @@ impl Eth for EthClient where fn work(&self, params: Params) -> Result { try!(self.active()); - try!(expect_no_params(params)); + let (no_new_work_timeout,) = from_params::<(u64,)>(params).unwrap_or((0,)); let client = take_weak!(self.client); // check if we're still syncing and return empty strings in that case @@ -545,7 +546,9 @@ impl Eth for EthClient where let target = Ethash::difficulty_to_boundary(b.block().header().difficulty()); let seed_hash = self.seed_compute.lock().get_seedhash(b.block().header().number()); - if self.options.send_block_number_in_get_work { + if no_new_work_timeout > 0 && b.block().header().timestamp() + no_new_work_timeout < get_time().sec as u64 { + Err(errors::no_new_work()) + } else if self.options.send_block_number_in_get_work { let block_number = RpcU256::from(b.block().header().number()); to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number)) } else { diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 20d074ca6..4b880419a 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ 
b/rpc/src/v1/tests/mocked/eth.rs @@ -30,6 +30,7 @@ use ethsync::SyncState; use v1::{Eth, EthClient, EthClientOptions, EthSigning, EthSigningUnsafeClient}; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService}; use rustc_serialize::hex::ToHex; +use time::get_time; fn blockchain_client() -> Arc { let client = TestBlockChainClient::new(); @@ -818,7 +819,7 @@ fn rpc_eth_compile_serpent() { } #[test] -fn returns_no_work_if_cant_mine() { +fn rpc_get_work_returns_no_work_if_cant_mine() { let eth_tester = EthTester::default(); eth_tester.client.set_queue_size(10); @@ -829,7 +830,7 @@ fn returns_no_work_if_cant_mine() { } #[test] -fn returns_correct_work_package() { +fn rpc_get_work_returns_correct_work_package() { let eth_tester = EthTester::default(); eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()); @@ -840,7 +841,7 @@ fn returns_correct_work_package() { } #[test] -fn should_not_return_block_number() { +fn rpc_get_work_should_not_return_block_number() { let eth_tester = EthTester::new_with_options(EthClientOptions { allow_pending_receipt_query: true, send_block_number_in_get_work: false, @@ -852,3 +853,28 @@ fn should_not_return_block_number() { assert_eq!(eth_tester.io.handle_request(request), Some(response.to_owned())); } + +#[test] +fn rpc_get_work_should_timeout() { + let eth_tester = EthTester::default(); + eth_tester.miner.set_author(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()); + eth_tester.client.set_latest_block_timestamp(get_time().sec as u64 - 1000); // Set latest block to 1000 seconds ago + let hash = eth_tester.miner.map_sealing_work(&*eth_tester.client, |b| b.hash()).unwrap(); + + // Request with timeout of 0 seconds. This should work since we're disabling timeout. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; + let work_response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:?}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x01"],"id":1}}"#, + hash, + ); + assert_eq!(eth_tester.io.handle_request(request), Some(work_response.to_owned())); + + // Request with timeout of 10K seconds. This should work. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["10000"], "id": 1}"#; + assert_eq!(eth_tester.io.handle_request(request), Some(work_response.to_owned())); + + // Request with timeout of 10 seconds. This should fail. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["10"], "id": 1}"#; + let err_response = r#"{"jsonrpc":"2.0","error":{"code":-32003,"message":"Work has not changed.","data":null},"id":1}"#; + assert_eq!(eth_tester.io.handle_request(request), Some(err_response.to_owned())); +} From dda57d92944e9aa13477c51263c3f958282803f7 Mon Sep 17 00:00:00 2001 From: "Denis S. 
Soldatov aka General-Beck" Date: Tue, 23 Aug 2016 23:18:13 +0700 Subject: [PATCH 15/29] Update gitlab-ci add allow_failure: true to arm* --- .gitlab-ci.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6beaa2789..d7599f343 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -62,6 +62,7 @@ linux-nightly: paths: - target/release/parity name: "${CI_BUILD_NAME}_parity" + allow_failure: true linux-centos: stage: build image: ethcore/rust-centos:latest @@ -104,6 +105,7 @@ linux-armv7: paths: - target/armv7-unknown-linux-gnueabihf/release/parity name: "${CI_BUILD_NAME}_parity" + allow_failure: true linux-arm: stage: build image: ethcore/rust-arm:latest @@ -126,6 +128,7 @@ linux-arm: paths: - target/arm-unknown-linux-gnueabihf/release/parity name: "${CI_BUILD_NAME}_parity" + allow_failure: true linux-armv6: stage: build image: ethcore/rust-arm:latest @@ -148,6 +151,7 @@ linux-armv6: paths: - target/arm-unknown-linux-gnueabi/release/parity name: "${CI_BUILD_NAME}_parity" + allow_failure: true linux-aarch64: stage: build image: ethcore/rust-arm:latest @@ -170,6 +174,7 @@ linux-aarch64: paths: - target/aarch64-unknown-linux-gnu/release/parity name: "${CI_BUILD_NAME}_parity" + allow_failure: true darwin: stage: build only: From 124a5da75eae85aa8f6470a5cbf9e39fe0001477 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 23 Aug 2016 19:28:21 +0200 Subject: [PATCH 16/29] Resolving URLs from contract (#1964) * Fetching dapp from github. * Unpacking dapp * Removing hardcodes * Proper Host validation * Randomizing paths * Splitting into files * Serving donwloaded apps from different path * Extracting URLHint to separate module * Whitespace and docs * Resolving from URLHint contract * Fixing test * Resolving githubhint url from registrar * Proper redirections * Fixing test * fixing ethstore [ci skip] * Correct version of registrar * Removing superfluous Box --- Cargo.lock | 15 ++ dapps/Cargo.toml | 1 + dapps/src/apps/fetcher.rs | 25 ++-- dapps/src/apps/mod.rs | 9 +- dapps/src/apps/registrar.json | 21 +++ dapps/src/apps/urlhint.json | 6 + dapps/src/apps/urlhint.rs | 253 +++++++++++++++++++++++++++++---- dapps/src/endpoint.rs | 1 + dapps/src/handlers/fetch.rs | 8 +- dapps/src/handlers/redirect.rs | 6 +- dapps/src/lib.rs | 23 ++- dapps/src/page/handler.rs | 3 +- dapps/src/router/mod.rs | 15 +- ethcore/src/miner/miner.rs | 4 +- ethstore/src/bin/ethstore.rs | 5 +- parity/dapps.rs | 139 ++++++++++++------ parity/run.rs | 1 + 17 files changed, 434 insertions(+), 101 deletions(-) create mode 100644 dapps/src/apps/registrar.json create mode 100644 dapps/src/apps/urlhint.json diff --git a/Cargo.lock b/Cargo.lock index 7a10ec3c8..a8c97516d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -229,6 +229,19 @@ dependencies = [ "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethabi" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "docopt 0.6.80 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_codegen 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ethash" version = "1.4.0" @@ 
-275,6 +288,7 @@ name = "ethcore-dapps" version = "1.4.0" dependencies = [ "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", @@ -1642,6 +1656,7 @@ dependencies = [ "checksum elastic-array 0.4.0 (git+https://github.com/ethcore/elastic-array)" = "" "checksum env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "aba65b63ffcc17ffacd6cf5aa843da7c5a25e3bd4bbe0b7def8b214e411250e5" "checksum eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)" = "" +"checksum ethabi 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bc7789d1518abba0c61606826a5229284d47a9d0934feb62a1ee218882780a9b" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" "checksum gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)" = "3da3a2cbaeb01363c8e3704fd9fd0eb2ceb17c6f27abd4c1ef040fb57d20dc79" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 8212538f8..2c7c9db9c 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -21,6 +21,7 @@ serde = "0.7.0" serde_json = "0.7.0" serde_macros = { version = "0.7.0", optional = true } zip = { version = "0.1", default-features = false } +ethabi = "0.2.1" ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } parity-dapps = { git = "https://github.com/ethcore/parity-ui.git", version = "1.4" } diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 2b1b9e658..0119b921e 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -24,6 +24,7 @@ use std::io::{self, Read, Write}; use std::path::PathBuf; use std::sync::Arc; use std::collections::HashMap; +use rustc_serialize::hex::FromHex; use hyper::Control; use hyper::status::StatusCode; @@ -54,12 +55,6 @@ impl Drop for AppFetcher { } } -impl Default for AppFetcher { - fn default() -> Self { - AppFetcher::new(URLHintContract) - } -} - impl AppFetcher { pub fn new(resolver: R) -> Self { @@ -84,7 +79,10 @@ impl AppFetcher { // Check if we already have the app Some(_) => true, // fallback to resolver - None => self.resolver.resolve(app_id).is_some(), + None => match app_id.from_hex() { + Ok(app_id) => self.resolver.resolve(app_id).is_some(), + _ => false, + }, } } @@ -103,16 +101,22 @@ impl AppFetcher { Some(&AppStatus::Fetching) => { (None, Box::new(ContentHandler::html( StatusCode::ServiceUnavailable, - "

This dapp is already being downloaded.

".into() + format!( + "{}{}", + "", + "

This dapp is already being downloaded.

Please wait...

", + ) )) as Box) }, // We need to start fetching app None => { // TODO [todr] Keep only last N dapps available! - let app = self.resolver.resolve(&app_id).expect("to_handler is called only when `contains` returns true."); + let app_hex = app_id.from_hex().expect("to_handler is called only when `contains` returns true."); + let app = self.resolver.resolve(app_hex).expect("to_handler is called only when `contains` returns true."); (Some(AppStatus::Fetching), Box::new(AppFetcherHandler::new( app, control, + path.using_dapps_domains, DappInstaller { dapp_id: app_id.clone(), dapps_path: self.dapps_path.clone(), @@ -265,10 +269,11 @@ mod tests { use apps::urlhint::{GithubApp, URLHint}; use endpoint::EndpointInfo; use page::LocalPageEndpoint; + use util::Bytes; struct FakeResolver; impl URLHint for FakeResolver { - fn resolve(&self, _app_id: &str) -> Option { + fn resolve(&self, _app_id: Bytes) -> Option { None } } diff --git a/dapps/src/apps/mod.rs b/dapps/src/apps/mod.rs index 97b018e68..84a3c5ddf 100644 --- a/dapps/src/apps/mod.rs +++ b/dapps/src/apps/mod.rs @@ -33,7 +33,14 @@ pub const API_PATH : &'static str = "api"; pub const UTILS_PATH : &'static str = "parity-utils"; pub fn main_page() -> &'static str { - "/home/" + "home" +} +pub fn redirection_address(using_dapps_domains: bool, app_id: &str) -> String { + if using_dapps_domains { + format!("http://{}{}/", app_id, DAPPS_DOMAIN) + } else { + format!("/{}/", app_id) + } } pub fn utils() -> Box { diff --git a/dapps/src/apps/registrar.json b/dapps/src/apps/registrar.json new file mode 100644 index 000000000..38edcc787 --- /dev/null +++ b/dapps/src/apps/registrar.json @@ -0,0 +1,21 @@ +[ + {"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"set","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}],"type":"function"}, + {"constant":false,"inputs":[],"name":"drain","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + 
{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"type":"function"}, + {"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"type":"function"}, + {"constant":true,"inputs":[{"name":"","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"type":"function"}, + {"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"bool"}],"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"Drained","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"amount","type":"uint256"}],"name":"FeeChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"}],"name":"Reserved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"oldOwner","type":"address"},{"indexed":true,"name":"newOwner","type":"address"}],"name":"Transferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"}],"name":"Dropped","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"bytes32"},{"indexed":true,"name":"owner","type":"address"},{"indexed":true,"name":"key","type":"string"}],"name":"DataChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseProposed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseConfirmed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"name","type":"string"},{"indexed":true,"name":"reverse","type":"address"}],"name":"ReverseRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"} +] diff --git a/dapps/src/apps/urlhint.json b/dapps/src/apps/urlhint.json new file mode 100644 index 000000000..629f166bb --- /dev/null +++ b/dapps/src/apps/urlhint.json @@ -0,0 +1,6 @@ +[ + {"constant":false,"inputs":[{"name":"_content","type":"bytes32"},{"name":"_url","type":"string"}],"name":"hintURL","outputs":[],"type":"function"}, + {"constant":false,"inputs":[{"name":"_content","type":"bytes32"},{"name":"_accountSlashRepo","type":"string"},{"name":"_commit","type":"bytes20"}],"name":"hint","outputs":[],"type":"function"}, + {"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"entries","outputs":[{"name":"accountSlashRepo","type":"string"},{"name":"commit","type":"bytes20"},{"name":"owner","type":"address"}],"type":"function"}, + 
{"constant":false,"inputs":[{"name":"_content","type":"bytes32"}],"name":"unhint","outputs":[],"type":"function"} +] diff --git a/dapps/src/apps/urlhint.rs b/dapps/src/apps/urlhint.rs index 61dbc0dec..cbf85b10a 100644 --- a/dapps/src/apps/urlhint.rs +++ b/dapps/src/apps/urlhint.rs @@ -14,13 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::fmt; +use std::sync::Arc; use rustc_serialize::hex::ToHex; -use util::{Address, FromHex}; +use ethabi::{Interface, Contract, Token}; +use util::{Address, Bytes, Hashable}; const COMMIT_LEN: usize = 20; -#[derive(Debug)] +#[derive(Debug, PartialEq)] pub struct GithubApp { pub account: String, pub repo: String, @@ -48,42 +51,240 @@ impl GithubApp { } } -pub trait URLHint { - fn resolve(&self, app_id: &str) -> Option; +/// RAW Contract interface. +/// Should execute transaction using current blockchain state. +pub trait ContractClient: Send + Sync { + /// Get registrar address + fn registrar(&self) -> Result; + /// Call Contract + fn call(&self, address: Address, data: Bytes) -> Result; } -pub struct URLHintContract; +/// URLHint Contract interface +pub trait URLHint { + /// Resolves given id to registrar entry. + fn resolve(&self, app_id: Bytes) -> Option; +} -impl URLHint for URLHintContract { - fn resolve(&self, app_id: &str) -> Option { - // TODO [todr] use GithubHint contract to check the details - // For now we are just accepting patterns: ...parity - let mut app_parts = app_id.split('.'); +pub struct URLHintContract { + urlhint: Contract, + registrar: Contract, + client: Arc, +} - let hash = app_parts.next() - .and_then(|h| h.from_hex().ok()) - .and_then(|h| GithubApp::commit(&h)); - let repo = app_parts.next(); - let account = app_parts.next(); +impl URLHintContract { + pub fn new(client: Arc) -> Self { + let urlhint = Interface::load(include_bytes!("./urlhint.json")).expect("urlhint.json is valid ABI"); + let registrar = Interface::load(include_bytes!("./registrar.json")).expect("registrar.json is valid ABI"); - match (hash, repo, account) { - (Some(hash), Some(repo), Some(account)) => { - Some(GithubApp { - account: account.into(), - repo: repo.into(), - commit: hash, - owner: Address::default(), - }) + URLHintContract { + urlhint: Contract::new(urlhint), + registrar: Contract::new(registrar), + client: client, + } + } + + fn urlhint_address(&self) -> Option
{ + let res = || { + let get_address = try!(self.registrar.function("getAddress".into()).map_err(as_string)); + let params = try!(get_address.encode_call( + vec![Token::FixedBytes((*"githubhint".sha3()).to_vec()), Token::String("A".into())] + ).map_err(as_string)); + let output = try!(self.client.call(try!(self.client.registrar()), params)); + let result = try!(get_address.decode_output(output).map_err(as_string)); + + match result.get(0) { + Some(&Token::Address(address)) if address != *Address::default() => Ok(address.into()), + Some(&Token::Address(_)) => Err(format!("Contract not found.")), + e => Err(format!("Invalid result: {:?}", e)), + } + }; + + match res() { + Ok(res) => Some(res), + Err(e) => { + warn!(target: "dapps", "Error while calling registrar: {:?}", e); + None + } + } + } + + fn encode_urlhint_call(&self, app_id: Bytes) -> Option { + let call = self.urlhint + .function("entries".into()) + .and_then(|f| f.encode_call(vec![Token::FixedBytes(app_id)])); + + match call { + Ok(res) => { + Some(res) }, - _ => None, + Err(e) => { + warn!(target: "dapps", "Error while encoding urlhint call: {:?}", e); + None + } + } + } + + fn decode_urlhint_output(&self, output: Bytes) -> Option { + trace!(target: "dapps", "Output: {:?}", output.to_hex()); + let output = self.urlhint + .function("entries".into()) + .and_then(|f| f.decode_output(output)); + + if let Ok(vec) = output { + if vec.len() != 3 { + warn!(target: "dapps", "Invalid contract output: {:?}", vec); + return None; + } + + let mut it = vec.into_iter(); + let account_slash_repo = it.next().unwrap(); + let commit = it.next().unwrap(); + let owner = it.next().unwrap(); + + match (account_slash_repo, commit, owner) { + (Token::String(account_slash_repo), Token::FixedBytes(commit), Token::Address(owner)) => { + let owner = owner.into(); + if owner == Address::default() { + return None; + } + let (account, repo) = { + let mut it = account_slash_repo.split('/'); + match (it.next(), it.next()) { + (Some(account), Some(repo)) => (account.into(), repo.into()), + _ => return None, + } + }; + + GithubApp::commit(&commit).map(|commit| GithubApp { + account: account, + repo: repo, + commit: commit, + owner: owner, + }) + }, + e => { + warn!(target: "dapps", "Invalid contract output parameters: {:?}", e); + None + }, + } + } else { + warn!(target: "dapps", "Invalid contract output: {:?}", output); + None } } } +impl URLHint for URLHintContract { + fn resolve(&self, app_id: Bytes) -> Option { + self.urlhint_address().and_then(|address| { + // Prepare contract call + self.encode_urlhint_call(app_id) + .and_then(|data| { + let call = self.client.call(address, data); + if let Err(ref e) = call { + warn!(target: "dapps", "Error while calling urlhint: {:?}", e); + } + call.ok() + }) + .and_then(|output| self.decode_urlhint_output(output)) + }) + } +} + +fn as_string(e: T) -> String { + format!("{:?}", e) +} + #[cfg(test)] mod tests { - use super::GithubApp; - use util::Address; + use std::sync::Arc; + use std::str::FromStr; + use rustc_serialize::hex::{ToHex, FromHex}; + + use super::*; + use util::{Bytes, Address, Mutex, ToPretty}; + + struct FakeRegistrar { + pub calls: Arc>>, + pub responses: Mutex>>, + } + + const REGISTRAR: &'static str = "8e4e9b13d4b45cb0befc93c3061b1408f67316b2"; + const URLHINT: &'static str = "deadbeefcafe0000000000000000000000000000"; + + impl FakeRegistrar { + fn new() -> Self { + FakeRegistrar { + calls: Arc::new(Mutex::new(Vec::new())), + responses: Mutex::new( + vec![ + Ok(format!("000000000000000000000000{}", 
URLHINT).from_hex().unwrap()), + Ok(Vec::new()) + ] + ), + } + } + } + + impl ContractClient for FakeRegistrar { + + fn registrar(&self) -> Result { + Ok(REGISTRAR.parse().unwrap()) + } + + fn call(&self, address: Address, data: Bytes) -> Result { + self.calls.lock().push((address.to_hex(), data.to_hex())); + self.responses.lock().remove(0) + } + } + + #[test] + fn should_call_registrar_and_urlhint_contracts() { + // given + let registrar = FakeRegistrar::new(); + let calls = registrar.calls.clone(); + let urlhint = URLHintContract::new(Arc::new(registrar)); + + // when + let res = urlhint.resolve("test".bytes().collect()); + let calls = calls.lock(); + let call0 = calls.get(0).expect("Registrar resolve called"); + let call1 = calls.get(1).expect("URLHint Resolve called"); + + // then + assert!(res.is_none()); + assert_eq!(call0.0, REGISTRAR); + assert_eq!(call0.1, + "6795dbcd058740ee9a5a3fb9f1cfa10752baec87e09cc45cd7027fd54708271aca300c75000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000014100000000000000000000000000000000000000000000000000000000000000".to_owned() + ); + assert_eq!(call1.0, URLHINT); + assert_eq!(call1.1, + "267b69227465737400000000000000000000000000000000000000000000000000000000".to_owned() + ); + } + + #[test] + fn should_decode_urlhint_output() { + // given + let mut registrar = FakeRegistrar::new(); + registrar.responses = Mutex::new(vec![ + Ok(format!("000000000000000000000000{}", URLHINT).from_hex().unwrap()), + Ok("0000000000000000000000000000000000000000000000000000000000000060ec4c1fe06c808fe3739858c347109b1f5f1ed4b5000000000000000000000000000000000000000000000000deadcafebeefbeefcafedeaddeedfeedffffffff0000000000000000000000000000000000000000000000000000000000000011657468636f72652f64616f2e636c61696d000000000000000000000000000000".from_hex().unwrap()), + ]); + let urlhint = URLHintContract::new(Arc::new(registrar)); + + // when + let res = urlhint.resolve("test".bytes().collect()); + + // then + assert_eq!(res, Some(GithubApp { + account: "ethcore".into(), + repo: "dao.claim".into(), + commit: GithubApp::commit(&"ec4c1fe06c808fe3739858c347109b1f5f1ed4b5".from_hex().unwrap()).unwrap(), + owner: Address::from_str("deadcafebeefbeefcafedeaddeedfeedffffffff").unwrap(), + })) + } #[test] fn should_return_valid_url() { diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 62816b088..dbb02a3d3 100644 --- a/dapps/src/endpoint.rs +++ b/dapps/src/endpoint.rs @@ -24,6 +24,7 @@ pub struct EndpointPath { pub app_id: String, pub host: String, pub port: u16, + pub using_dapps_domains: bool, } #[derive(Debug, PartialEq, Clone)] diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 3ada8fb82..94bce1492 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -27,7 +27,7 @@ use hyper::status::StatusCode; use handlers::ContentHandler; use handlers::client::{Fetch, FetchResult}; -use apps::DAPPS_DOMAIN; +use apps::redirection_address; use apps::urlhint::GithubApp; use apps::manifest::Manifest; @@ -54,6 +54,7 @@ pub struct AppFetcherHandler { control: Option, status: FetchState, client: Option>, + using_dapps_domains: bool, dapp: H, } @@ -72,6 +73,7 @@ impl AppFetcherHandler { pub fn new( app: GithubApp, control: Control, + using_dapps_domains: bool, handler: H) -> Self { let client = Client::new().expect("Failed to create a Client"); @@ -79,6 +81,7 @@ impl AppFetcherHandler { control: Some(control), client: Some(client), status: FetchState::NotStarted(app), 
+ using_dapps_domains: using_dapps_domains, dapp: handler, } } @@ -207,8 +210,7 @@ impl server::Handler for AppFetcherHandler { FetchState::Done(ref manifest) => { trace!(target: "dapps", "Fetching dapp finished. Redirecting to {}", manifest.id); res.set_status(StatusCode::Found); - // TODO [todr] should detect if its using nice-urls - res.headers_mut().set(header::Location(format!("http://{}{}", manifest.id, DAPPS_DOMAIN))); + res.headers_mut().set(header::Location(redirection_address(self.using_dapps_domains, &manifest.id))); Next::write() }, FetchState::Error(ref mut handler) => handler.on_response(res), diff --git a/dapps/src/handlers/redirect.rs b/dapps/src/handlers/redirect.rs index 6d738115d..dbe5f6e4a 100644 --- a/dapps/src/handlers/redirect.rs +++ b/dapps/src/handlers/redirect.rs @@ -21,13 +21,13 @@ use hyper::net::HttpStream; use hyper::status::StatusCode; pub struct Redirection { - to_url: &'static str + to_url: String } impl Redirection { - pub fn new(url: &'static str) -> Box { + pub fn new(url: &str) -> Box { Box::new(Redirection { - to_url: url + to_url: url.to_owned() }) } } diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 49940080f..3373f5c58 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -52,6 +52,7 @@ extern crate serde; extern crate serde_json; extern crate zip; extern crate rand; +extern crate ethabi; extern crate jsonrpc_core; extern crate jsonrpc_http_server; extern crate parity_dapps; @@ -70,6 +71,8 @@ mod api; mod proxypac; mod url; +pub use self::apps::urlhint::ContractClient; + use std::sync::{Arc, Mutex}; use std::net::SocketAddr; use std::collections::HashMap; @@ -84,6 +87,7 @@ static DAPPS_DOMAIN : &'static str = ".parity"; pub struct ServerBuilder { dapps_path: String, handler: Arc, + registrar: Arc, } impl Extendable for ServerBuilder { @@ -94,23 +98,24 @@ impl Extendable for ServerBuilder { impl ServerBuilder { /// Construct new dapps server - pub fn new(dapps_path: String) -> Self { + pub fn new(dapps_path: String, registrar: Arc) -> Self { ServerBuilder { dapps_path: dapps_path, - handler: Arc::new(IoHandler::new()) + handler: Arc::new(IoHandler::new()), + registrar: registrar, } } /// Asynchronously start server with no authentication, /// returns result with `Server` handle on success or an error. pub fn start_unsecure_http(&self, addr: &SocketAddr) -> Result { - Server::start_http(addr, NoAuth, self.handler.clone(), self.dapps_path.clone()) + Server::start_http(addr, NoAuth, self.handler.clone(), self.dapps_path.clone(), self.registrar.clone()) } /// Asynchronously start server with `HTTP Basic Authentication`, /// return result with `Server` handle on success or an error. 
pub fn start_basic_auth_http(&self, addr: &SocketAddr, username: &str, password: &str) -> Result { - Server::start_http(addr, HttpBasicAuth::single_user(username, password), self.handler.clone(), self.dapps_path.clone()) + Server::start_http(addr, HttpBasicAuth::single_user(username, password), self.handler.clone(), self.dapps_path.clone(), self.registrar.clone()) } } @@ -121,10 +126,16 @@ pub struct Server { } impl Server { - fn start_http(addr: &SocketAddr, authorization: A, handler: Arc, dapps_path: String) -> Result { + fn start_http( + addr: &SocketAddr, + authorization: A, + handler: Arc, + dapps_path: String, + registrar: Arc, + ) -> Result { let panic_handler = Arc::new(Mutex::new(None)); let authorization = Arc::new(authorization); - let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::default()); + let apps_fetcher = Arc::new(apps::fetcher::AppFetcher::new(apps::urlhint::URLHintContract::new(registrar))); let endpoints = Arc::new(apps::all_endpoints(dapps_path)); let special = Arc::new({ let mut special = HashMap::new(); diff --git a/dapps/src/page/handler.rs b/dapps/src/page/handler.rs index c3ec354e0..eca242e7b 100644 --- a/dapps/src/page/handler.rs +++ b/dapps/src/page/handler.rs @@ -187,7 +187,8 @@ fn should_extract_path_with_appid() { path: EndpointPath { app_id: "app".to_owned(), host: "".to_owned(), - port: 8080 + port: 8080, + using_dapps_domains: true, }, file: None, safe_to_embed: true, diff --git a/dapps/src/router/mod.rs b/dapps/src/router/mod.rs index 3dad8250b..568dc00da 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router/mod.rs @@ -86,9 +86,10 @@ impl server::Handler for Router { let control = self.control.take().expect("on_request is called only once, thus control is always defined."); self.fetch.to_handler(path.clone(), control) }, - // Redirection to main page - _ if *req.method() == hyper::method::Method::Get => { - Redirection::new(self.main_page) + // Redirection to main page (maybe 404 instead?) 
+ (Some(ref path), _) if *req.method() == hyper::method::Method::Get => { + let address = apps::redirection_address(path.using_dapps_domains, self.main_page); + Redirection::new(address.as_str()) }, // RPC by default _ => { @@ -165,6 +166,7 @@ fn extract_endpoint(url: &Option) -> (Option, SpecialEndpoint app_id: id, host: domain.clone(), port: url.port, + using_dapps_domains: true, }), special_endpoint(url)) }, _ if url.path.len() > 1 => { @@ -173,6 +175,7 @@ fn extract_endpoint(url: &Option) -> (Option, SpecialEndpoint app_id: id.clone(), host: format!("{}", url.host), port: url.port, + using_dapps_domains: false, }), special_endpoint(url)) }, _ => (None, special_endpoint(url)), @@ -192,6 +195,7 @@ fn should_extract_endpoint() { app_id: "status".to_owned(), host: "localhost".to_owned(), port: 8080, + using_dapps_domains: false, }), SpecialEndpoint::None) ); @@ -202,6 +206,7 @@ fn should_extract_endpoint() { app_id: "rpc".to_owned(), host: "localhost".to_owned(), port: 8080, + using_dapps_domains: false, }), SpecialEndpoint::Rpc) ); @@ -211,6 +216,7 @@ fn should_extract_endpoint() { app_id: "my.status".to_owned(), host: "my.status.parity".to_owned(), port: 80, + using_dapps_domains: true, }), SpecialEndpoint::Utils) ); @@ -221,6 +227,7 @@ fn should_extract_endpoint() { app_id: "my.status".to_owned(), host: "my.status.parity".to_owned(), port: 80, + using_dapps_domains: true, }), SpecialEndpoint::None) ); @@ -231,6 +238,7 @@ fn should_extract_endpoint() { app_id: "my.status".to_owned(), host: "my.status.parity".to_owned(), port: 80, + using_dapps_domains: true, }), SpecialEndpoint::Rpc) ); @@ -241,6 +249,7 @@ fn should_extract_endpoint() { app_id: "my.status".to_owned(), host: "my.status.parity".to_owned(), port: 80, + using_dapps_domains: true, }), SpecialEndpoint::Api) ); } diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index e034c86f2..06879ca81 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -723,8 +723,8 @@ impl MinerService for Miner { .position(|t| t == *hash) .map(|index| { let prev_gas = if index == 0 { Default::default() } else { pending.receipts()[index - 1].gas_used }; - let ref tx = txs[index]; - let ref receipt = pending.receipts()[index]; + let tx = &txs[index]; + let receipt = &pending.receipts()[index]; RichReceipt { transaction_hash: hash.clone(), transaction_index: index, diff --git a/ethstore/src/bin/ethstore.rs b/ethstore/src/bin/ethstore.rs index 71dedee5c..94823dc06 100644 --- a/ethstore/src/bin/ethstore.rs +++ b/ethstore/src/bin/ethstore.rs @@ -20,7 +20,6 @@ extern crate ethstore; use std::{env, process, fs}; use std::io::Read; -use std::ops::Deref; use docopt::Docopt; use ethstore::ethkey::Address; use ethstore::dir::{KeyDirectory, ParityDirectory, DiskDirectory, GethDirectory, DirectoryType}; @@ -142,7 +141,7 @@ fn execute(command: I) -> Result where I: IntoIterator(command: I) -> Result where I: IntoIterator. 
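For reference, the `redirection_address` helper added in `dapps/src/apps/mod.rs` above is what both the fetch handler and the router redirect now use; a minimal, self-contained sketch of its expected output in the two modes (assuming `DAPPS_DOMAIN` is ".parity", as declared in `dapps/src/lib.rs` — this snippet is illustrative and not part of the patch):

    // Sketch only: mirrors the helper from dapps/src/apps/mod.rs,
    // with DAPPS_DOMAIN inlined as ".parity" for a standalone example.
    fn redirection_address(using_dapps_domains: bool, app_id: &str) -> String {
        if using_dapps_domains {
            // dapps-domain style: http://<app>.parity/
            format!("http://{}{}/", app_id, ".parity")
        } else {
            // path style: /<app>/
            format!("/{}/", app_id)
        }
    }

    fn main() {
        assert_eq!(redirection_address(true, "status"), "http://status.parity/");
        assert_eq!(redirection_address(false, "status"), "/status/");
    }
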
use std::sync::Arc; -use std::net::SocketAddr; use io::PanicHandler; use rpc_apis; +use ethcore::client::Client; use helpers::replace_home; -#[cfg(feature = "dapps")] -pub use ethcore_dapps::Server as WebappServer; -#[cfg(not(feature = "dapps"))] -pub struct WebappServer; - #[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, @@ -51,6 +46,7 @@ impl Default for Configuration { pub struct Dependencies { pub panic_handler: Arc, pub apis: Arc, + pub client: Arc, } pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { @@ -75,45 +71,102 @@ pub fn new(configuration: Configuration, deps: Dependencies) -> Result, -) -> Result { - Err("Your Parity version has been compiled without WebApps support.".into()) -} +mod server { + use super::Dependencies; + use std::net::SocketAddr; -#[cfg(feature = "dapps")] -pub fn setup_dapps_server( - deps: Dependencies, - dapps_path: String, - url: &SocketAddr, - auth: Option<(String, String)> -) -> Result { - use ethcore_dapps as dapps; - - let server = dapps::ServerBuilder::new(dapps_path); - let server = rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::UnsafeContext); - let start_result = match auth { - None => { - server.start_unsecure_http(url) - }, - Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password) - }, - }; - - match start_result { - Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)), - Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => { - server.set_panic_handler(move || { - deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned()); - }); - Ok(server) - }, + pub struct WebappServer; + pub fn setup_dapps_server( + _deps: Dependencies, + _dapps_path: String, + _url: &SocketAddr, + _auth: Option<(String, String)>, + ) -> Result { + Err("Your Parity version has been compiled without WebApps support.".into()) } } +#[cfg(feature = "dapps")] +mod server { + use super::Dependencies; + use std::sync::Arc; + use std::net::SocketAddr; + use util::{Bytes, Address, U256}; + + use ethcore::transaction::{Transaction, Action}; + use ethcore::client::{Client, BlockChainClient, BlockID}; + + use rpc_apis; + use ethcore_dapps::ContractClient; + + pub use ethcore_dapps::Server as WebappServer; + + pub fn setup_dapps_server( + deps: Dependencies, + dapps_path: String, + url: &SocketAddr, + auth: Option<(String, String)> + ) -> Result { + use ethcore_dapps as dapps; + + let server = dapps::ServerBuilder::new(dapps_path, Arc::new(Registrar { + client: deps.client.clone(), + })); + let server = rpc_apis::setup_rpc(server, deps.apis.clone(), rpc_apis::ApiSet::UnsafeContext); + let start_result = match auth { + None => { + server.start_unsecure_http(url) + }, + Some((username, password)) => { + server.start_basic_auth_http(url, &username, &password) + }, + }; + + match start_result { + Err(dapps::ServerError::IoError(err)) => Err(format!("WebApps io error: {}", err)), + Err(e) => Err(format!("WebApps error: {:?}", e)), + Ok(server) => { + server.set_panic_handler(move || { + deps.panic_handler.notify_all("Panic in WebApp thread.".to_owned()); + }); + Ok(server) + }, + } + } + + struct Registrar { + client: Arc, + } + + impl ContractClient for Registrar { + fn registrar(&self) -> Result { + self.client.additional_params().get("registrar") + .ok_or_else(|| "Registrar not defined.".into()) + .and_then(|registrar| { + registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) + }) + } + + fn call(&self, 
address: Address, data: Bytes) -> Result { + let from = Address::default(); + let transaction = Transaction { + nonce: self.client.latest_nonce(&from), + action: Action::Call(address), + gas: U256::from(50_000_000), + gas_price: U256::default(), + value: U256::default(), + data: data, + }.fake_sign(from); + + self.client.call(&transaction, BlockID::Latest, Default::default()) + .map_err(|e| format!("{:?}", e)) + .map(|executed| { + executed.output + }) + } + } +} diff --git a/parity/run.rs b/parity/run.rs index 91f8d5bfa..220f77376 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -223,6 +223,7 @@ pub fn execute(cmd: RunCmd) -> Result<(), String> { let dapps_deps = dapps::Dependencies { panic_handler: panic_handler.clone(), apis: deps_for_rpc_apis.clone(), + client: client.clone(), }; // start dapps server From 3dd1bdda50ed01596960b443ddd561cfb980439f Mon Sep 17 00:00:00 2001 From: Nipunn Koorapati Date: Wed, 24 Aug 2016 04:20:15 -0700 Subject: [PATCH 17/29] Improve eth_getWork timeout test rpc_get_work_should_timeout (#1992) --- rpc/src/v1/tests/mocked/eth.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 4b880419a..d7d93137f 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -861,7 +861,7 @@ fn rpc_get_work_should_timeout() { eth_tester.client.set_latest_block_timestamp(get_time().sec as u64 - 1000); // Set latest block to 1000 seconds ago let hash = eth_tester.miner.map_sealing_work(&*eth_tester.client, |b| b.hash()).unwrap(); - // Request with timeout of 0 seconds. This should work since we're disabling timeout. + // Request without providing timeout. This should work since we're disabling timeout. let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": [], "id": 1}"#; let work_response = format!( r#"{{"jsonrpc":"2.0","result":["0x{:?}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x01"],"id":1}}"#, @@ -869,6 +869,14 @@ fn rpc_get_work_should_timeout() { ); assert_eq!(eth_tester.io.handle_request(request), Some(work_response.to_owned())); + // Request with timeout of 0 seconds. This should work since we're disabling timeout. + let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["0"], "id": 1}"#; + let work_response = format!( + r#"{{"jsonrpc":"2.0","result":["0x{:?}","0x0000000000000000000000000000000000000000000000000000000000000000","0x0000800000000000000000000000000000000000000000000000000000000000","0x01"],"id":1}}"#, + hash, + ); + assert_eq!(eth_tester.io.handle_request(request), Some(work_response.to_owned())); + // Request with timeout of 10K seconds. This should work. 
let request = r#"{"jsonrpc": "2.0", "method": "eth_getWork", "params": ["10000"], "id": 1}"#; assert_eq!(eth_tester.io.handle_request(request), Some(work_response.to_owned())); From b96d4eaddb0d15d59d1921b935ab15220176813e Mon Sep 17 00:00:00 2001 From: debris Date: Wed, 24 Aug 2016 13:59:50 +0200 Subject: [PATCH 18/29] bloom optimization --- ethcore/src/blockchain/blockchain.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index 278d50b00..e76cf42c4 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -995,8 +995,13 @@ impl BlockChain { let log_blooms = match info.location { BlockLocation::Branch => HashMap::new(), BlockLocation::CanonChain => { - let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); - chain.insert(info.number as bc::Number, Bloom::from(header.log_bloom()).into()) + let log_bloom = header.log_bloom(); + if log_bloom.is_zero() { + HashMap::new() + } else { + let chain = bc::group::BloomGroupChain::new(self.blooms_config, self); + chain.insert(info.number as bc::Number, Bloom::from(log_bloom).into()) + } }, BlockLocation::BranchBecomingCanonChain(ref data) => { let ancestor_number = self.block_number(&data.ancestor).unwrap(); From 33e0a234f2030f0ed0e9c9d96055906b7223ce6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Wed, 24 Aug 2016 16:53:07 +0200 Subject: [PATCH 19/29] Validating sha3 of a dapp bundle (#1993) * Validating sha3 of a file * sha3 in utils * Removing devtools --- Cargo.lock | 2 ++ dapps/src/apps/fetcher.rs | 20 +++++++++---- dapps/src/lib.rs | 4 +-- devtools/src/random_path.rs | 12 ++++++-- util/Cargo.toml | 1 + util/src/lib.rs | 1 + util/src/sha3.rs | 60 ++++++++++++++++++++++++++++++++----- 7 files changed, 84 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8c97516d..d659c4bd7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -289,6 +289,7 @@ version = "1.4.0" dependencies = [ "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", @@ -502,6 +503,7 @@ dependencies = [ "table 0.1.0", "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "using_queue 0.1.0", "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 0119b921e..347c8da5f 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -30,7 +30,8 @@ use hyper::Control; use hyper::status::StatusCode; use random_filename; -use util::Mutex; +use util::{Mutex, H256}; +use util::sha3::sha3; use page::LocalPageEndpoint; use handlers::{ContentHandler, AppFetcherHandler, DappHandler}; use endpoint::{Endpoint, EndpointPath, Handler}; @@ -137,10 +138,12 @@ impl AppFetcher { #[derive(Debug)] pub enum ValidationError { - ManifestNotFound, - ManifestSerialization(String), Io(io::Error), Zip(zip::result::ZipError), + InvalidDappId, + ManifestNotFound, + ManifestSerialization(String), + HashMismatch { expected: H256, got: H256, }, } impl From for ValidationError { @@ -198,8 +201,15 @@ impl DappHandler for DappInstaller { fn 
validate_and_install(&self, app_path: PathBuf) -> Result { trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path); - // TODO [ToDr] Validate file hash - let file = try!(fs::File::open(app_path)); + let mut file = try!(fs::File::open(app_path)); + let hash = try!(sha3(&mut file)); + let dapp_id = try!(self.dapp_id.as_str().parse().map_err(|_| ValidationError::InvalidDappId)); + if dapp_id != hash { + return Err(ValidationError::HashMismatch { + expected: dapp_id, + got: hash, + }); + } // Unpack archive let mut zip = try!(zip::ZipArchive::new(file)); // First find manifest file diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 3373f5c58..574c38acf 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -55,11 +55,11 @@ extern crate rand; extern crate ethabi; extern crate jsonrpc_core; extern crate jsonrpc_http_server; +extern crate mime_guess; +extern crate rustc_serialize; extern crate parity_dapps; extern crate ethcore_rpc; extern crate ethcore_util as util; -extern crate mime_guess; -extern crate rustc_serialize; mod endpoint; mod apps; diff --git a/devtools/src/random_path.rs b/devtools/src/random_path.rs index f9c454c30..d58042512 100644 --- a/devtools/src/random_path.rs +++ b/devtools/src/random_path.rs @@ -67,10 +67,18 @@ impl RandomTempPath { } } +impl AsRef for RandomTempPath { + fn as_ref(&self) -> &Path { + self.as_path() + } +} + impl Drop for RandomTempPath { fn drop(&mut self) { - if let Err(e) = fs::remove_dir_all(self.as_path()) { - panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); + if let Err(_) = fs::remove_dir_all(&self) { + if let Err(e) = fs::remove_file(&self) { + panic!("Failed to remove temp directory. Here's what prevented this from happening: ({})", e); + } } } } diff --git a/util/Cargo.toml b/util/Cargo.toml index 3a9505e15..719e4c255 100644 --- a/util/Cargo.toml +++ b/util/Cargo.toml @@ -32,6 +32,7 @@ parking_lot = "0.2.6" using_queue = { path = "using_queue" } table = { path = "table" } ansi_term = "0.7" +tiny-keccak= "1.0" [features] default = [] diff --git a/util/src/lib.rs b/util/src/lib.rs index 91c4e7d50..f8dc34af8 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -112,6 +112,7 @@ extern crate parking_lot; pub extern crate using_queue; pub extern crate table; extern crate ansi_term; +extern crate tiny_keccak; pub mod bloom; pub mod standard; diff --git a/util/src/sha3.rs b/util/src/sha3.rs index d2a071759..0dcde2ccb 100644 --- a/util/src/sha3.rs +++ b/util/src/sha3.rs @@ -17,7 +17,9 @@ //! Wrapper around tiny-keccak crate. extern crate sha3 as sha3_ext; +use std::io; use std::mem::uninitialized; +use tiny_keccak::Keccak; use bytes::{BytesConvertable, Populatable}; use hash::{H256, FixedHash}; use self::sha3_ext::*; @@ -64,12 +66,56 @@ impl Hashable for T where T: BytesConvertable { } } -#[test] -fn sha3_empty() { - assert_eq!([0u8; 0].sha3(), SHA3_EMPTY); -} -#[test] -fn sha3_as() { - assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8")); +/// Calculate SHA3 of given stream. 
+pub fn sha3(r: &mut R) -> Result { + let mut output = [0u8; 32]; + let mut input = [0u8; 1024]; + let mut sha3 = Keccak::new_keccak256(); + + // read file + loop { + let some = try!(r.read(&mut input)); + if some == 0 { + break; + } + sha3.update(&input[0..some]); + } + + sha3.finalize(&mut output); + Ok(output.into()) } +#[cfg(test)] +mod tests { + use std::fs; + use std::io::Write; + use super::*; + + #[test] + fn sha3_empty() { + assert_eq!([0u8; 0].sha3(), SHA3_EMPTY); + } + #[test] + fn sha3_as() { + assert_eq!([0x41u8; 32].sha3(), From::from("59cad5948673622c1d64e2322488bf01619f7ff45789741b15a9f782ce9290a8")); + } + + #[test] + fn should_sha3_a_file() { + // given + use devtools::RandomTempPath; + let path = RandomTempPath::new(); + // Prepare file + { + let mut file = fs::File::create(&path).unwrap(); + file.write_all(b"something").unwrap(); + } + + let mut file = fs::File::open(&path).unwrap(); + // when + let hash = sha3(&mut file).unwrap(); + + // then + assert_eq!(format!("{:?}", hash), "68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87"); + } +} From 190e4db266b2731e6c0578f286a80debaaabd9da Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 24 Aug 2016 16:53:36 +0200 Subject: [PATCH 20/29] Trie query recording and AccountDB factory for no mangling (#1944) * optionally use no mangling for accountdb * add the recorder module * get_recorded for tries, no virtual dispatch on readonly tries * add recording test --- ethcore/src/account_db.rs | 107 +++++++++++ ethcore/src/block.rs | 49 ++--- ethcore/src/client/client.rs | 31 ++-- ethcore/src/client/test_client.rs | 1 - ethcore/src/engines/basic_authority.rs | 3 +- ethcore/src/engines/instant_seal.rs | 3 +- ethcore/src/ethereum/ethash.rs | 6 +- ethcore/src/evm/factory.rs | 5 +- ethcore/src/factory.rs | 30 ++++ ethcore/src/json_tests/state.rs | 3 +- ethcore/src/lib.rs | 1 + ethcore/src/miner/miner.rs | 2 +- ethcore/src/snapshot/account.rs | 3 +- ethcore/src/snapshot/mod.rs | 3 +- ethcore/src/snapshot/tests/helpers.rs | 8 +- ethcore/src/state/account.rs | 9 +- ethcore/src/state/mod.rs | 103 +++++------ ethcore/src/tests/helpers.rs | 2 - util/src/hashdb.rs | 2 +- util/src/trie/fatdb.rs | 15 +- util/src/trie/mod.rs | 67 ++++++- util/src/trie/recorder.rs | 236 +++++++++++++++++++++++++ util/src/trie/sectriedb.rs | 8 +- util/src/trie/triedb.rs | 75 ++++---- 24 files changed, 590 insertions(+), 182 deletions(-) create mode 100644 ethcore/src/factory.rs create mode 100644 util/src/trie/recorder.rs diff --git a/ethcore/src/account_db.rs b/ethcore/src/account_db.rs index cee2b4d48..15042403d 100644 --- a/ethcore/src/account_db.rs +++ b/ethcore/src/account_db.rs @@ -35,6 +35,38 @@ fn combine_key<'a>(address_hash: &'a H256, key: &'a H256) -> H256 { dst } +/// A factory for different kinds of account dbs. +#[derive(Debug, Clone)] +pub enum Factory { + /// Mangle hashes based on address. + Mangled, + /// Don't mangle hashes. + Plain, +} + +impl Default for Factory { + fn default() -> Self { Factory::Mangled } +} + +impl Factory { + /// Create a read-only accountdb. + /// This will panic when write operations are called. + pub fn readonly<'db>(&self, db: &'db HashDB, address_hash: H256) -> Box { + match *self { + Factory::Mangled => Box::new(AccountDB::from_hash(db, address_hash)), + Factory::Plain => Box::new(Wrapping(db)), + } + } + + /// Create a new mutable hashdb. 
+ pub fn create<'db>(&self, db: &'db mut HashDB, address_hash: H256) -> Box { + match *self { + Factory::Mangled => Box::new(AccountDBMut::from_hash(db, address_hash)), + Factory::Plain => Box::new(WrappingMut(db)), + } + } +} + // TODO: introduce HashDBMut? /// DB backend wrapper for Account trie /// Transforms trie node keys for the database @@ -162,4 +194,79 @@ impl<'db> HashDB for AccountDBMut<'db>{ } } +struct Wrapping<'db>(&'db HashDB); +impl<'db> HashDB for Wrapping<'db> { + fn keys(&self) -> HashMap { + unimplemented!() + } + + fn get(&self, key: &H256) -> Option<&[u8]> { + if key == &SHA3_NULL_RLP { + return Some(&NULL_RLP_STATIC); + } + self.0.get(key) + } + + fn contains(&self, key: &H256) -> bool { + if key == &SHA3_NULL_RLP { + return true; + } + self.0.contains(key) + } + + fn insert(&mut self, _value: &[u8]) -> H256 { + unimplemented!() + } + + fn emplace(&mut self, _key: H256, _value: Bytes) { + unimplemented!() + } + + fn remove(&mut self, _key: &H256) { + unimplemented!() + } +} + +struct WrappingMut<'db>(&'db mut HashDB); + +impl<'db> HashDB for WrappingMut<'db>{ + fn keys(&self) -> HashMap { + unimplemented!() + } + + fn get(&self, key: &H256) -> Option<&[u8]> { + if key == &SHA3_NULL_RLP { + return Some(&NULL_RLP_STATIC); + } + self.0.get(key) + } + + fn contains(&self, key: &H256) -> bool { + if key == &SHA3_NULL_RLP { + return true; + } + self.0.contains(key) + } + + fn insert(&mut self, value: &[u8]) -> H256 { + if value == &NULL_RLP { + return SHA3_NULL_RLP.clone(); + } + self.0.insert(value) + } + + fn emplace(&mut self, key: H256, value: Bytes) { + if key == SHA3_NULL_RLP { + return; + } + self.0.emplace(key, value) + } + + fn remove(&mut self, key: &H256) { + if key == &SHA3_NULL_RLP { + return; + } + self.0.remove(key) + } +} \ No newline at end of file diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 3591fee05..cd02b9a1b 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -21,7 +21,7 @@ use engines::Engine; use state::*; use verification::PreverifiedBlock; use trace::FlatTrace; -use evm::Factory as EvmFactory; +use factory::Factories; /// A block, encoded as it is on the block chain. #[derive(Default, Debug, Clone, PartialEq)] @@ -192,7 +192,6 @@ impl IsBlock for ExecutedBlock { pub struct OpenBlock<'x> { block: ExecutedBlock, engine: &'x Engine, - vm_factory: &'x EvmFactory, last_hashes: Arc, } @@ -230,8 +229,7 @@ impl<'x> OpenBlock<'x> { /// Create a new `OpenBlock` ready for transaction pushing. 
pub fn new( engine: &'x Engine, - vm_factory: &'x EvmFactory, - trie_factory: TrieFactory, + factories: Factories, tracing: bool, db: Box, parent: &Header, @@ -240,11 +238,10 @@ impl<'x> OpenBlock<'x> { gas_range_target: (U256, U256), extra_data: Bytes, ) -> Result { - let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(), trie_factory)); + let state = try!(State::from_existing(db, parent.state_root().clone(), engine.account_start_nonce(), factories)); let mut r = OpenBlock { block: ExecutedBlock::new(state, tracing), engine: engine, - vm_factory: vm_factory, last_hashes: last_hashes, }; @@ -332,7 +329,7 @@ impl<'x> OpenBlock<'x> { let env_info = self.env_info(); // info!("env_info says gas_used={}", env_info.gas_used); - match self.block.state.apply(&env_info, self.engine, self.vm_factory, &t, self.block.traces.is_some()) { + match self.block.state.apply(&env_info, self.engine, &t, self.block.traces.is_some()) { Ok(outcome) => { self.block.transactions_set.insert(h.unwrap_or_else(||t.hash())); self.block.base.transactions.push(t); @@ -421,14 +418,13 @@ impl ClosedBlock { } /// Given an engine reference, reopen the `ClosedBlock` into an `OpenBlock`. - pub fn reopen<'a>(self, engine: &'a Engine, vm_factory: &'a EvmFactory) -> OpenBlock<'a> { + pub fn reopen<'a>(self, engine: &'a Engine) -> OpenBlock<'a> { // revert rewards (i.e. set state back at last transaction's state). let mut block = self.block; block.state = self.unclosed_state; OpenBlock { block: block, engine: engine, - vm_factory: vm_factory, last_hashes: self.last_hashes, } } @@ -499,17 +495,16 @@ pub fn enact( db: Box, parent: &Header, last_hashes: Arc, - vm_factory: &EvmFactory, - trie_factory: TrieFactory, + factories: Factories, ) -> Result { { if ::log::max_log_level() >= ::log::LogLevel::Trace { - let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(), trie_factory.clone())); + let s = try!(State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(), factories.clone())); trace!("enact(): root={}, author={}, author_balance={}\n", s.root(), header.author(), s.balance(&header.author())); } } - let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, Address::new(), (3141562.into(), 31415620.into()), vec![])); + let mut b = try!(OpenBlock::new(engine, factories, tracing, db, parent, last_hashes, Address::new(), (3141562.into(), 31415620.into()), vec![])); b.set_difficulty(*header.difficulty()); b.set_gas_limit(*header.gas_limit()); b.set_timestamp(header.timestamp()); @@ -532,12 +527,11 @@ pub fn enact_bytes( db: Box, parent: &Header, last_hashes: Arc, - vm_factory: &EvmFactory, - trie_factory: TrieFactory, + factories: Factories, ) -> Result { let block = BlockView::new(block_bytes); let header = block.header(); - enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory, trie_factory) + enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, factories) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header @@ -549,11 +543,10 @@ pub fn enact_verified( db: Box, parent: &Header, last_hashes: Arc, - vm_factory: &EvmFactory, - trie_factory: TrieFactory, + factories: Factories, ) -> Result { let view = BlockView::new(&block.bytes); - enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, 
db, parent, last_hashes, vm_factory, trie_factory) + enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, factories) } /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards @@ -565,11 +558,10 @@ pub fn enact_and_seal( db: Box, parent: &Header, last_hashes: Arc, - vm_factory: &EvmFactory, - trie_factory: TrieFactory, + factories: Factories, ) -> Result { let header = BlockView::new(block_bytes).header_view(); - Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)).seal(engine, header.seal()))) + Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)).seal(engine, header.seal()))) } #[cfg(test)] @@ -587,8 +579,7 @@ mod tests { let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let vm_factory = Default::default(); - let b = OpenBlock::new(&*spec.engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); + let b = OpenBlock::new(&*spec.engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); let _ = b.seal(&*spec.engine, vec![]); } @@ -603,9 +594,8 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let vm_factory = Default::default(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap() + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap() .close_and_lock().seal(engine, vec![]).unwrap(); let orig_bytes = b.rlp_bytes(); let orig_db = b.drain(); @@ -613,7 +603,7 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, &Default::default(), Default::default()).unwrap(); + let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); @@ -632,9 +622,8 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let vm_factory = Default::default(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let mut open_block = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); + let mut open_block = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes.clone(), Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle1_header = Header::new(); uncle1_header.extra_data = b"uncle1".to_vec(); let mut uncle2_header = Header::new(); @@ -649,7 +638,7 @@ mod tests { let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let e = 
enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, &Default::default(), Default::default()).unwrap(); + let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); let bytes = e.rlp_bytes(); assert_eq!(bytes, orig_bytes); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index aced57e4c..5e0a4b9f8 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -65,6 +65,7 @@ use evm::Factory as EvmFactory; use miner::{Miner, MinerService}; use util::TrieFactory; use snapshot::{self, io as snapshot_io}; +use factory::Factories; // re-export pub use types::blockchain_info::BlockChainInfo; @@ -130,8 +131,6 @@ pub struct Client { import_lock: Mutex<()>, panic_handler: Arc, verifier: Box, - vm_factory: Arc, - trie_factory: TrieFactory, miner: Arc, sleep_state: Mutex, liveness: AtomicBool, @@ -139,6 +138,7 @@ pub struct Client { notify: RwLock>>, queue_transactions: AtomicUsize, last_hashes: RwLock>, + factories: Factories, } const HISTORY: u64 = 1200; @@ -188,6 +188,13 @@ impl Client { panic_handler.forward_from(&block_queue); let awake = match config.mode { Mode::Dark(..) => false, _ => true }; + + let factories = Factories { + vm: EvmFactory::new(config.vm_type), + trie: TrieFactory::new(config.trie_spec), + accountdb: Default::default(), + }; + let client = Client { sleep_state: Mutex::new(SleepState::new(awake)), liveness: AtomicBool::new(awake), @@ -202,13 +209,12 @@ impl Client { import_lock: Mutex::new(()), panic_handler: panic_handler, verifier: verification::new(config.verifier_type), - vm_factory: Arc::new(EvmFactory::new(config.vm_type)), - trie_factory: TrieFactory::new(config.trie_spec), miner: miner, io_channel: message_channel, notify: RwLock::new(Vec::new()), queue_transactions: AtomicUsize::new(0), last_hashes: RwLock::new(VecDeque::new()), + factories: factories, }; Ok(Arc::new(client)) } @@ -289,7 +295,7 @@ impl Client { let last_hashes = self.build_last_hashes(header.parent_hash.clone()); let db = self.state_db.lock().boxed_clone(); - let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone()); + let enact_result = enact_verified(block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.factories.clone()); if let Err(e) = enact_result { warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); return Err(()); @@ -493,7 +499,7 @@ impl Client { let root = HeaderView::new(&header).state_root(); - State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok() + State::from_existing(db, root, self.engine.account_start_nonce(), self.factories.clone()).ok() }) } @@ -518,7 +524,7 @@ impl Client { self.state_db.lock().boxed_clone(), HeaderView::new(&self.best_block_header()).state_root(), self.engine.account_start_nonce(), - self.trie_factory.clone()) + self.factories.clone()) .expect("State root of best block header always valid.") } @@ -688,7 +694,7 @@ impl BlockChainClient for Client { state.add_balance(&sender, &(needed_balance - balance)); } let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false }; - let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, options)); + let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, 
&self.factories.vm).transact(t, options)); // TODO gav move this into Executive. ret.state_diff = original_state.map(|original| state.diff_from(original)); @@ -720,7 +726,7 @@ impl BlockChainClient for Client { gas_limit: view.gas_limit(), }; for t in txs.iter().take(address.index) { - match Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, Default::default()) { + match Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, Default::default()) { Ok(x) => { env_info.gas_used = env_info.gas_used + x.gas_used; } Err(ee) => { return Err(CallError::Execution(ee)) } } @@ -728,7 +734,7 @@ impl BlockChainClient for Client { let t = &txs[address.index]; let original_state = if analytics.state_diffing { Some(state.clone()) } else { None }; - let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, &self.vm_factory).transact(t, options)); + let mut ret = try!(Executive::new(&mut state, &env_info, &*self.engine, &self.factories.vm).transact(t, options)); ret.state_diff = original_state.map(|original| state.diff_from(original)); Ok(ret) @@ -1029,8 +1035,7 @@ impl MiningBlockChainClient for Client { let mut open_block = OpenBlock::new( engine, - &self.vm_factory, - self.trie_factory.clone(), + self.factories.clone(), false, // TODO: this will need to be parameterised once we want to do immediate mining insertion. self.state_db.lock().boxed_clone(), &self.chain.block_header(&h).expect("h is best block hash: so its header must exist: qed"), @@ -1054,7 +1059,7 @@ impl MiningBlockChainClient for Client { } fn vm_factory(&self) -> &EvmFactory { - &self.vm_factory + &self.factories.vm } fn import_sealed_block(&self, block: SealedBlock) -> ImportResult { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index be1e9da25..410a90347 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -276,7 +276,6 @@ impl MiningBlockChainClient for TestBlockChainClient { let last_hashes = vec![genesis_header.hash()]; let mut open_block = OpenBlock::new( engine, - self.vm_factory(), Default::default(), false, db, diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 9a5a97337..7ec5a66a4 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -252,8 +252,7 @@ mod tests { let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let vm_factory = Default::default(); - let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap(); assert!(b.try_seal(engine, seal).is_ok()); diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index ae1aa20cd..6a3d3c700 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -86,8 +86,7 @@ mod tests { let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let vm_factory = Default::default(); - let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, 
last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); // Seal with empty AccountProvider. let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap(); diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs index d1b2082bf..c658432a2 100644 --- a/ethcore/src/ethereum/ethash.rs +++ b/ethcore/src/ethereum/ethash.rs @@ -357,8 +357,7 @@ mod tests { let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let vm_factory = Default::default(); - let b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); + let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close(); assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap()); } @@ -372,8 +371,7 @@ mod tests { let mut db = db_result.take(); spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); let last_hashes = Arc::new(vec![genesis_header.hash()]); - let vm_factory = Default::default(); - let mut b = OpenBlock::new(engine, &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); + let mut b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let mut uncle = Header::new(); let uncle_author: Address = "ef2d6d194084c2de36e0dabfce45d046b37d1106".into(); uncle.author = uncle_author.clone(); diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 50c384a99..94800c7de 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -80,6 +80,7 @@ impl VMType { } /// Evm factory. Creates appropriate Evm. +#[derive(Clone)] pub struct Factory { evm: VMType } @@ -128,7 +129,7 @@ impl Factory { impl Default for Factory { /// Returns jitvm factory - #[cfg(feature = "jit")] + #[cfg(all(feature = "jit", not(test)))] fn default() -> Factory { Factory { evm: VMType::Jit @@ -136,7 +137,7 @@ impl Default for Factory { } /// Returns native rust evm factory - #[cfg(not(feature = "jit"))] + #[cfg(any(not(feature = "jit"), test))] fn default() -> Factory { Factory { evm: VMType::Interpreter diff --git a/ethcore/src/factory.rs b/ethcore/src/factory.rs new file mode 100644 index 000000000..dec341820 --- /dev/null +++ b/ethcore/src/factory.rs @@ -0,0 +1,30 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
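The `Factories` struct defined in the new file just below replaces the separate `vm_factory`/`trie_factory` arguments previously threaded through `OpenBlock::new` and `enact`. A hedged sketch of the wiring, following the `Client::new` hunk earlier in this patch (assumes `config`, `engine`, `db`, `parent`, etc. are in scope as they are there; not runnable standalone):

    // Sketch only; mirrors ethcore/src/client/client.rs in this patch.
    let factories = Factories {
        vm: EvmFactory::new(config.vm_type),
        trie: TrieFactory::new(config.trie_spec),
        accountdb: Default::default(), // account_db::Factory::Mangled by default
    };
    // Callers that have no special needs pass Default::default() instead;
    // the collection is then cloned into each OpenBlock::new / enact call.
    let block = OpenBlock::new(engine, factories.clone(), tracing, db, &parent,
        last_hashes, author, gas_range_target, extra_data);
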
+ +use util::trie::TrieFactory; +use evm::Factory as EvmFactory; +use account_db::Factory as AccountFactory; + +/// Collection of factories. +#[derive(Default, Clone)] +pub struct Factories { + /// factory for evm. + pub vm: EvmFactory, + /// factory for tries. + pub trie: TrieFactory, + /// factory for account databases. + pub accountdb: AccountFactory, +} \ No newline at end of file diff --git a/ethcore/src/json_tests/state.rs b/ethcore/src/json_tests/state.rs index 97f9f70f0..28aaa62ec 100644 --- a/ethcore/src/json_tests/state.rs +++ b/ethcore/src/json_tests/state.rs @@ -64,8 +64,7 @@ pub fn json_chain_test(json_data: &[u8], era: ChainEra) -> Vec { state.populate_from(pre); state.commit() .expect(&format!("State test {} failed due to internal error.", name)); - let vm_factory = Default::default(); - let res = state.apply(&env, &*engine, &vm_factory, &transaction, false); + let res = state.apply(&env, &*engine, &transaction, false); if fail_unless(state.root() == &post_state_root) { println!("!!! {}: State mismatch (got: {}, expect: {}):", name, state.root(), post_state_root); diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index c21720640..2da6abe3d 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -139,6 +139,7 @@ mod externalities; mod verification; mod blockchain; mod types; +mod factory; #[cfg(test)] mod tests; diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 06879ca81..3ca3f0d74 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -270,7 +270,7 @@ impl Miner { Some(old_block) => { trace!(target: "miner", "Already have previous work; updating and returning"); // add transactions to old_block - old_block.reopen(&*self.engine, chain.vm_factory()) + old_block.reopen(&*self.engine) } None => { // block not found - create it. diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index c1c4ac251..3c31bab0d 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -17,8 +17,9 @@ //! Account state encoding and decoding use account_db::{AccountDB, AccountDBMut}; -use util::{U256, FixedHash, H256, Bytes, HashDB, SHA3_EMPTY, TrieDB}; +use util::{U256, FixedHash, H256, Bytes, HashDB, SHA3_EMPTY}; use util::rlp::{Rlp, RlpStream, Stream, UntrustedRlp, View}; +use util::trie::{TrieDB, Trie}; use snapshot::Error; // An alternate account structure from ::account::Account. 
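Note on the new `Factories` bundle added above: it gathers the EVM, trie and account-db factories that the earlier hunks stop passing around as separate `vm_factory`/`trie_factory` arguments to `Client`, `OpenBlock` and `State`. A rough, in-crate sketch of the intended usage follows; it is illustrative only and not part of the patch (it leans on the test-only `State::new` constructor, and the zero start nonce is arbitrary):

    use util::U256;
    use util::journaldb::JournalDB;
    use factory::Factories;
    use state::State;

    // One bundle holds the EVM, trie and account-db factories; `Default`
    // selects the interpreter EVM and the plain trie/account-db variants.
    fn fresh_state(db: Box<JournalDB>) -> State {
        let factories = Factories::default();
        // Callers such as OpenBlock::new now receive `factories.clone()`
        // instead of `&vm_factory` plus `trie_factory.clone()`.
        State::new(db, U256::zero(), factories)
    }
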
diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 118b0988a..4e33c9ebc 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -26,13 +26,14 @@ use engines::Engine; use ids::BlockID; use views::BlockView; -use util::{Bytes, Hashable, HashDB, snappy, TrieDB, TrieDBMut, TrieMut}; +use util::{Bytes, Hashable, HashDB, snappy}; use util::Mutex; use util::hash::{FixedHash, H256}; use util::journaldb::{self, Algorithm, JournalDB}; use util::kvdb::Database; use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View, Compressible, RlpType}; use util::rlp::SHA3_NULL_RLP; +use util::trie::{TrieDB, TrieDBMut, Trie, TrieMut}; use self::account::Account; use self::block::AbridgedBlock; diff --git a/ethcore/src/snapshot/tests/helpers.rs b/ethcore/src/snapshot/tests/helpers.rs index e6a92642e..aa055e8fb 100644 --- a/ethcore/src/snapshot/tests/helpers.rs +++ b/ethcore/src/snapshot/tests/helpers.rs @@ -24,7 +24,7 @@ use snapshot::account::Account; use util::hash::{FixedHash, H256}; use util::hashdb::HashDB; use util::trie::{Alphabet, StandardMap, SecTrieDBMut, TrieMut, ValueMode}; -use util::trie::{TrieDB, TrieDBMut}; +use util::trie::{TrieDB, TrieDBMut, Trie}; use util::rlp::SHA3_NULL_RLP; // the proportion of accounts we will alter each tick. @@ -51,10 +51,12 @@ impl StateProducer { // modify existing accounts. let mut accounts_to_modify: Vec<_> = { let trie = TrieDB::new(&*db, &self.state_root).unwrap(); - trie.iter() + let temp = trie.iter() // binding required due to complicated lifetime stuff .filter(|_| rng.gen::() < ACCOUNT_CHURN) .map(|(k, v)| (H256::from_slice(&k), v.to_owned())) - .collect() + .collect(); + + temp }; // sweep once to alter storage tries. diff --git a/ethcore/src/state/account.rs b/ethcore/src/state/account.rs index 829198910..72e2bb0ed 100644 --- a/ethcore/src/state/account.rs +++ b/ethcore/src/state/account.rs @@ -19,7 +19,6 @@ use std::collections::hash_map::Entry; use util::*; use pod_account::*; -use account_db::*; use std::cell::{Ref, RefCell, Cell}; @@ -148,7 +147,7 @@ impl Account { } /// Get (and cache) the contents of the trie's storage at `key`. - pub fn storage_at(&self, db: &AccountDB, key: &H256) -> H256 { + pub fn storage_at(&self, db: &HashDB, key: &H256) -> H256 { self.storage_overlay.borrow_mut().entry(key.clone()).or_insert_with(||{ let db = SecTrieDB::new(db, &self.storage_root) .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ @@ -225,7 +224,7 @@ impl Account { } /// Provide a database to get `code_hash`. Should not be called if it is a contract without code. - pub fn cache_code(&mut self, db: &AccountDB) -> bool { + pub fn cache_code(&mut self, db: &HashDB) -> bool { // TODO: fill out self.code_cache; trace!("Account::cache_code: ic={}; self.code_hash={:?}, self.code_cache={}", self.is_cached(), self.code_hash, self.code_cache.pretty()); self.is_cached() || @@ -277,7 +276,7 @@ impl Account { } /// Commit the `storage_overlay` to the backing DB and update `storage_root`. - pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut AccountDBMut) { + pub fn commit_storage(&mut self, trie_factory: &TrieFactory, db: &mut HashDB) { let mut t = trie_factory.from_existing(db, &mut self.storage_root) .expect("Account storage_root initially set to zero (valid) and only altered by SecTrieDBMut. \ SecTrieDBMut would not set it to an invalid state root. 
Therefore the root is valid and DB creation \ @@ -300,7 +299,7 @@ impl Account { } /// Commit any unsaved code. `code_hash` will always return the hash of the `code_cache` after this. - pub fn commit_code(&mut self, db: &mut AccountDBMut) { + pub fn commit_code(&mut self, db: &mut HashDB) { trace!("Commiting code of {:?} - {:?}, {:?}", self, self.code_hash.is_none(), self.code_cache.is_empty()); match (self.code_hash.is_none(), self.code_cache.is_empty()) { (true, true) => self.code_hash = Some(SHA3_EMPTY), diff --git a/ethcore/src/state/mod.rs b/ethcore/src/state/mod.rs index c62a77d25..46e77cd34 100644 --- a/ethcore/src/state/mod.rs +++ b/ethcore/src/state/mod.rs @@ -18,8 +18,7 @@ use std::cell::{RefCell, RefMut}; use common::*; use engines::Engine; use executive::{Executive, TransactOptions}; -use evm::Factory as EvmFactory; -use account_db::*; +use factory::Factories; use trace::FlatTrace; use pod_account::*; use pod_state::{self, PodState}; @@ -49,7 +48,7 @@ pub struct State { cache: RefCell>>, snapshots: RefCell>>>>, account_start_nonce: U256, - trie_factory: TrieFactory, + factories: Factories, } const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with valid root. Creating a SecTrieDB with a valid root will not fail. \ @@ -58,11 +57,11 @@ const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with v impl State { /// Creates new state with empty state root #[cfg(test)] - pub fn new(mut db: Box, account_start_nonce: U256, trie_factory: TrieFactory) -> State { + pub fn new(mut db: Box, account_start_nonce: U256, factories: Factories) -> State { let mut root = H256::new(); { // init trie and reset root too null - let _ = trie_factory.create(db.as_hashdb_mut(), &mut root); + let _ = factories.trie.create(db.as_hashdb_mut(), &mut root); } State { @@ -71,12 +70,12 @@ impl State { cache: RefCell::new(HashMap::new()), snapshots: RefCell::new(Vec::new()), account_start_nonce: account_start_nonce, - trie_factory: trie_factory, + factories: factories, } } /// Creates new state with existing state root - pub fn from_existing(db: Box, root: H256, account_start_nonce: U256, trie_factory: TrieFactory) -> Result { + pub fn from_existing(db: Box, root: H256, account_start_nonce: U256, factories: Factories) -> Result { if !db.as_hashdb().contains(&root) { return Err(TrieError::InvalidStateRoot(root)); } @@ -87,7 +86,7 @@ impl State { cache: RefCell::new(HashMap::new()), snapshots: RefCell::new(Vec::new()), account_start_nonce: account_start_nonce, - trie_factory: trie_factory, + factories: factories }; Ok(state) @@ -185,8 +184,11 @@ impl State { /// Mutate storage of account `address` so that it is `value` for `key`. pub fn storage_at(&self, address: &Address, key: &H256) -> H256 { - self.ensure_cached(address, false, - |a| a.as_ref().map_or(H256::new(), |a|a.storage_at(&AccountDB::from_hash(self.db.as_hashdb(), a.address_hash(address)), key))) + self.ensure_cached(address, false, |a| a.as_ref().map_or(H256::new(), |a| { + let addr_hash = a.address_hash(address); + let db = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); + a.storage_at(db.as_hashdb(), key) + })) } /// Mutate storage of account `a` so that it is `value` for `key`. @@ -236,11 +238,12 @@ impl State { /// Execute a given transaction. /// This will change the state accordingly. 
- pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, vm_factory: &EvmFactory, t: &SignedTransaction, tracing: bool) -> ApplyResult { + pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult { // let old = self.to_pod(); let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true }; - let e = try!(Executive::new(self, env_info, engine, vm_factory).transact(t, options)); + let vm_factory = self.factories.vm.clone(); + let e = try!(Executive::new(self, env_info, engine, &vm_factory).transact(t, options)); // TODO uncomment once to_pod() works correctly. // trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod())); @@ -254,27 +257,27 @@ impl State { /// `accounts` is mutable because we may need to commit the code or storage and record that. #[cfg_attr(feature="dev", allow(match_ref_pats))] pub fn commit_into( - trie_factory: &TrieFactory, + factories: &Factories, db: &mut HashDB, root: &mut H256, - accounts: &mut HashMap> + accounts: &mut HashMap> ) -> Result<(), Error> { // first, commit the sub trees. // TODO: is this necessary or can we dispense with the `ref mut a` for just `a`? for (address, ref mut a) in accounts.iter_mut() { match a { &mut&mut Some(ref mut account) if account.is_dirty() => { - let mut account_db = AccountDBMut::from_hash(db, account.address_hash(address)); - account.commit_storage(trie_factory, &mut account_db); - account.commit_code(&mut account_db); + let addr_hash = account.address_hash(address); + let mut account_db = factories.accountdb.create(db, addr_hash); + account.commit_storage(&factories.trie, account_db.as_hashdb_mut()); + account.commit_code(account_db.as_hashdb_mut()); } _ => {} } } { - let mut trie = trie_factory.from_existing(db, root).unwrap(); + let mut trie = factories.trie.from_existing(db, root).unwrap(); for (address, ref mut a) in accounts.iter_mut() { match **a { Some(ref mut account) if account.is_dirty() => { @@ -293,7 +296,7 @@ impl State { /// Commits our cached account changes into the trie. 
pub fn commit(&mut self) -> Result<(), Error> { assert!(self.snapshots.borrow().is_empty()); - Self::commit_into(&self.trie_factory, self.db.as_hashdb_mut(), &mut self.root, &mut *self.cache.borrow_mut()) + Self::commit_into(&self.factories, self.db.as_hashdb_mut(), &mut self.root, &mut *self.cache.borrow_mut()) } /// Clear state cache @@ -351,7 +354,7 @@ impl State { where F: FnOnce(&Option) -> U { let have_key = self.cache.borrow().contains_key(a); if !have_key { - let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let maybe_acc = match db.get(a) { Ok(acc) => acc.map(Account::from_rlp), Err(e) => panic!("Potential DB corruption encountered: {}", e), @@ -361,7 +364,8 @@ impl State { if require_code { if let Some(ref mut account) = self.cache.borrow_mut().get_mut(a).unwrap().as_mut() { let addr_hash = account.address_hash(a); - account.cache_code(&AccountDB::from_hash(self.db.as_hashdb(), addr_hash)); + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); + account.cache_code(accountdb.as_hashdb()); } } @@ -380,7 +384,7 @@ impl State { { let contains_key = self.cache.borrow().contains_key(a); if !contains_key { - let db = self.trie_factory.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); + let db = self.factories.trie.readonly(self.db.as_hashdb(), &self.root).expect(SEC_TRIE_DB_UNWRAP_STR); let maybe_acc = match db.get(a) { Ok(acc) => acc.map(Account::from_rlp), Err(e) => panic!("Potential DB corruption encountered: {}", e), @@ -400,7 +404,8 @@ impl State { let account = c.get_mut(a).unwrap().as_mut().unwrap(); if require_code { let addr_hash = account.address_hash(a); - account.cache_code(&AccountDB::from_hash(self.db.as_hashdb(), addr_hash)); + let accountdb = self.factories.accountdb.readonly(self.db.as_hashdb(), addr_hash); + account.cache_code(accountdb.as_hashdb()); } account }) @@ -421,7 +426,7 @@ impl Clone for State { cache: RefCell::new(self.cache.borrow().clone()), snapshots: RefCell::new(self.snapshots.borrow().clone()), account_start_nonce: self.account_start_nonce.clone(), - trie_factory: self.trie_factory.clone(), + factories: self.factories.clone(), } } } @@ -464,8 +469,7 @@ fn should_apply_create_transaction() { }.sign(&"".sha3()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 0, @@ -525,8 +529,7 @@ fn should_trace_failed_create_transaction() { }.sign(&"".sha3()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), action: trace::Action::Create(trace::Create { @@ -564,8 +567,7 @@ fn should_trace_call_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("6000").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let 
expected_trace = vec![FlatTrace { trace_address: Default::default(), action: trace::Action::Call(trace::Call { @@ -607,8 +609,7 @@ fn should_trace_basic_call_transaction() { }.sign(&"".sha3()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), action: trace::Action::Call(trace::Call { @@ -649,8 +650,7 @@ fn should_trace_call_transaction_to_builtin() { data: vec![], }.sign(&"".sha3()); - let vm_factory = Default::default(); - let result = state.apply(&info, engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -693,8 +693,7 @@ fn should_not_trace_subcall_transaction_to_builtin() { }.sign(&"".sha3()); state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060006001610be0f1").unwrap()); - let vm_factory = Default::default(); - let result = state.apply(&info, engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -738,8 +737,7 @@ fn should_not_trace_callcode() { state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b611000f2").unwrap()); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); - let vm_factory = Default::default(); - let result = state.apply(&info, engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -801,8 +799,7 @@ fn should_not_trace_delegatecall() { state.init_code(&0xa.into(), FromHex::from_hex("6000600060006000600b618000f4").unwrap()); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); - let vm_factory = Default::default(); - let result = state.apply(&info, engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -861,8 +858,7 @@ fn should_trace_failed_call_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("5b600056").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), action: trace::Action::Call(trace::Call { @@ -903,8 +899,7 @@ fn should_trace_call_with_subcall_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); state.init_code(&0xb.into(), FromHex::from_hex("6000").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -963,8 +958,7 @@ fn should_trace_call_with_basic_subcall_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006045600b6000f1").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = 
Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 1, @@ -1019,8 +1013,7 @@ fn should_not_trace_call_with_invalid_basic_subcall_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("600060006000600060ff600b6000f1").unwrap()); // not enough funds. state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 0, @@ -1064,8 +1057,7 @@ fn should_trace_failed_subcall_transaction() { state.init_code(&0xa.into(), FromHex::from_hex("60006000600060006000600b602b5a03f1").unwrap()); state.init_code(&0xb.into(), FromHex::from_hex("5b600056").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 1, @@ -1122,8 +1114,7 @@ fn should_trace_call_with_subcall_with_subcall_transaction() { state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1").unwrap()); state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 1, @@ -1198,8 +1189,7 @@ fn should_trace_failed_subcall_with_subcall_transaction() { state.init_code(&0xb.into(), FromHex::from_hex("60006000600060006000600c602b5a03f1505b601256").unwrap()); state.init_code(&0xc.into(), FromHex::from_hex("6000").unwrap()); state.add_balance(t.sender().as_ref().unwrap(), &(100.into())); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), @@ -1271,8 +1261,7 @@ fn should_trace_suicide() { state.init_code(&0xa.into(), FromHex::from_hex("73000000000000000000000000000000000000000bff").unwrap()); state.add_balance(&0xa.into(), &50.into()); state.add_balance(t.sender().as_ref().unwrap(), &100.into()); - let vm_factory = Default::default(); - let result = state.apply(&info, &engine, &vm_factory, &t, true).unwrap(); + let result = state.apply(&info, &engine, &t, true).unwrap(); let expected_trace = vec![FlatTrace { trace_address: Default::default(), subtraces: 1, diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index bc671b717..ff35e7023 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -139,7 +139,6 @@ pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_numbe let mut db_result = get_temp_journal_db(); let mut db = db_result.take(); test_spec.ensure_db_good(db.as_hashdb_mut()).unwrap(); - let vm_factory = Default::default(); let genesis_header = test_spec.genesis_header(); let mut rolling_timestamp = 40; @@ -156,7 +155,6 
@@ pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_numbe // forge block. let mut b = OpenBlock::new( test_engine, - &vm_factory, Default::default(), false, db, diff --git a/util/src/hashdb.rs b/util/src/hashdb.rs index 52b126ac7..55cc2a89e 100644 --- a/util/src/hashdb.rs +++ b/util/src/hashdb.rs @@ -136,4 +136,4 @@ impl AsHashDB for T { fn as_hashdb_mut(&mut self) -> &mut HashDB { self } -} +} \ No newline at end of file diff --git a/util/src/trie/fatdb.rs b/util/src/trie/fatdb.rs index 244662670..bb35bd467 100644 --- a/util/src/trie/fatdb.rs +++ b/util/src/trie/fatdb.rs @@ -17,7 +17,7 @@ use hash::H256; use sha3::Hashable; use hashdb::HashDB; -use super::{TrieDB, Trie, TrieDBIterator, TrieItem}; +use super::{TrieDB, Trie, TrieDBIterator, TrieItem, Recorder}; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// Additionaly it stores inserted hash-key mappings for later retrieval. @@ -43,16 +43,11 @@ impl<'db> FatDB<'db> { pub fn db(&self) -> &HashDB { self.raw.db() } - - /// Iterator over all key / vlaues in the trie. - pub fn iter(&self) -> FatDBIterator { - FatDBIterator::new(&self.raw) - } } impl<'db> Trie for FatDB<'db> { fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(FatDB::iter(self)) + Box::new(FatDBIterator::new(&self.raw)) } fn root(&self) -> &H256 { @@ -63,10 +58,10 @@ impl<'db> Trie for FatDB<'db> { self.raw.contains(&key.sha3()) } - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> - where 'a: 'key + fn get_recorded<'a, 'b, R: 'b>(&'a self, key: &'b [u8], rec: &'b mut R) -> super::Result> + where 'a: 'b, R: Recorder { - self.raw.get(&key.sha3()) + self.raw.get_recorded(&key.sha3(), rec) } } diff --git a/util/src/trie/mod.rs b/util/src/trie/mod.rs index 6500059b9..886ccd724 100644 --- a/util/src/trie/mod.rs +++ b/util/src/trie/mod.rs @@ -34,6 +34,9 @@ pub mod triedbmut; pub mod sectriedb; /// Export the sectriedbmut module. pub mod sectriedbmut; +/// Trie query recording. +pub mod recorder; + mod fatdb; mod fatdbmut; @@ -45,6 +48,7 @@ pub use self::sectriedbmut::SecTrieDBMut; pub use self::sectriedb::SecTrieDB; pub use self::fatdb::{FatDB, FatDBIterator}; pub use self::fatdbmut::FatDBMut; +pub use self::recorder::Recorder; /// Trie Errors. /// @@ -88,7 +92,14 @@ pub trait Trie { } /// What is the value of the given key in this trie? - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result> where 'a: 'key; + fn get<'a, 'key>(&'a self, key: &'key [u8]) -> Result> where 'a: 'key { + self.get_recorded(key, &mut recorder::NoOp) + } + + /// Query the value of the given key in this trie while recording visited nodes + /// to the given recorder. If the query fails, the nodes passed to the recorder are unspecified. + fn get_recorded<'a, 'b, R: 'b>(&'a self, key: &'b [u8], rec: &'b mut R) -> Result> + where 'a: 'b, R: Recorder; /// Returns an iterator over elements of trie. fn iter<'a>(&'a self) -> Box + 'a>; @@ -119,7 +130,6 @@ pub trait TrieMut { fn remove(&mut self, key: &[u8]) -> Result<()>; } - /// Trie types #[derive(Debug, PartialEq, Clone)] pub enum TrieSpec { @@ -143,6 +153,51 @@ pub struct TrieFactory { spec: TrieSpec, } +/// All different kinds of tries. +/// This is used to prevent a heap allocation for every created trie. +pub enum TrieKinds<'db> { + /// A generic trie db. + Generic(TrieDB<'db>), + /// A secure trie db. + Secure(SecTrieDB<'db>), + /// A fat trie db. + Fat(FatDB<'db>), +} + +// wrapper macro for making the match easier to deal with. +macro_rules! 
wrapper { + ($me: ident, $f_name: ident, $($param: ident),*) => { + match *$me { + TrieKinds::Generic(ref t) => t.$f_name($($param),*), + TrieKinds::Secure(ref t) => t.$f_name($($param),*), + TrieKinds::Fat(ref t) => t.$f_name($($param),*), + } + } +} + +impl<'db> Trie for TrieKinds<'db> { + fn root(&self) -> &H256 { + wrapper!(self, root,) + } + + fn is_empty(&self) -> bool { + wrapper!(self, is_empty,) + } + + fn contains(&self, key: &[u8]) -> Result { + wrapper!(self, contains, key) + } + + fn get_recorded<'a, 'b, R: 'b>(&'a self, key: &'b [u8], r: &'b mut R) -> Result> + where 'a: 'b, R: Recorder { + wrapper!(self, get_recorded, key, r) + } + + fn iter<'a>(&'a self) -> Box + 'a> { + wrapper!(self, iter,) + } +} + #[cfg_attr(feature="dev", allow(wrong_self_convention))] impl TrieFactory { /// Creates new factory. @@ -153,11 +208,11 @@ impl TrieFactory { } /// Create new immutable instance of Trie. - pub fn readonly<'db>(&self, db: &'db HashDB, root: &'db H256) -> Result> { + pub fn readonly<'db>(&self, db: &'db HashDB, root: &'db H256) -> Result> { match self.spec { - TrieSpec::Generic => Ok(Box::new(try!(TrieDB::new(db, root)))), - TrieSpec::Secure => Ok(Box::new(try!(SecTrieDB::new(db, root)))), - TrieSpec::Fat => Ok(Box::new(try!(FatDB::new(db, root)))), + TrieSpec::Generic => Ok(TrieKinds::Generic(try!(TrieDB::new(db, root)))), + TrieSpec::Secure => Ok(TrieKinds::Secure(try!(SecTrieDB::new(db, root)))), + TrieSpec::Fat => Ok(TrieKinds::Fat(try!(FatDB::new(db, root)))), } } diff --git a/util/src/trie/recorder.rs b/util/src/trie/recorder.rs new file mode 100644 index 000000000..a48f277b4 --- /dev/null +++ b/util/src/trie/recorder.rs @@ -0,0 +1,236 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use sha3::Hashable; +use {Bytes, H256}; + +/// A record of a visited node. +#[derive(PartialEq, Eq, Debug, Clone)] +pub struct Record { + /// The depth of this node. + pub depth: u32, + + /// The raw data of the node. + pub data: Bytes, + + /// The hash of the data. + pub hash: H256, +} + +/// Trie node recorder. +/// +/// These are used to record which nodes are visited during a trie query. +/// Inline nodes are not to be recorded, as they are contained within their parent. +pub trait Recorder { + + /// Record that the given node has been visited. + /// + /// The depth parameter is the depth of the visited node, with the root node having depth 0. + fn record(&mut self, hash: &H256, data: &[u8], depth: u32); + + /// Drain all accepted records from the recorder in ascending order by depth. + fn drain(&mut self) -> Vec where Self: Sized; +} + +/// A no-op trie recorder. This ignores everything which is thrown at it. +pub struct NoOp; + +impl Recorder for NoOp { + #[inline] + fn record(&mut self, _hash: &H256, _data: &[u8], _depth: u32) {} + + #[inline] + fn drain(&mut self) -> Vec { Vec::new() } +} + +/// A simple recorder. 
Does nothing fancy but fulfills the `Recorder` interface +/// properly. +pub struct BasicRecorder { + nodes: Vec, + min_depth: u32, +} + +impl BasicRecorder { + /// Create a new `BasicRecorder` which records all given nodes. + #[inline] + pub fn new() -> Self { + BasicRecorder::with_depth(0) + } + + /// Create a `BasicRecorder` which only records nodes beyond a given depth. + pub fn with_depth(depth: u32) -> Self { + BasicRecorder { + nodes: Vec::new(), + min_depth: depth, + } + } +} + +impl Recorder for BasicRecorder { + fn record(&mut self, hash: &H256, data: &[u8], depth: u32) { + debug_assert_eq!(data.sha3(), *hash); + + if depth >= self.min_depth { + self.nodes.push(Record { + depth: depth, + data: data.into(), + hash: *hash, + }) + } + } + + fn drain(&mut self) -> Vec { + ::std::mem::replace(&mut self.nodes, Vec::new()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sha3::Hashable; + use ::H256; + + #[test] + fn no_op_does_nothing() { + let mut no_op = NoOp; + let (node1, node2) = (&[1], &[2]); + let (hash1, hash2) = (node1.sha3(), node2.sha3()); + no_op.record(&hash1, node1, 1); + no_op.record(&hash2, node2, 2); + + assert_eq!(no_op.drain(), Vec::new()); + } + + #[test] + fn basic_recorder() { + let mut basic = BasicRecorder::new(); + + let node1 = vec![1, 2, 3, 4]; + let node2 = vec![4, 5, 6, 7, 8, 9, 10]; + + let (hash1, hash2) = (node1.sha3(), node2.sha3()); + basic.record(&hash1, &node1, 0); + basic.record(&hash2, &node2, 456); + + let record1 = Record { + data: node1, + hash: hash1, + depth: 0, + }; + + let record2 = Record { + data: node2, + hash: hash2, + depth: 456 + }; + + assert_eq!(basic.drain(), vec![record1, record2]); + } + + #[test] + fn basic_recorder_min_depth() { + let mut basic = BasicRecorder::with_depth(400); + + let node1 = vec![1, 2, 3, 4]; + let node2 = vec![4, 5, 6, 7, 8, 9, 10]; + + let hash1 = node1.sha3(); + let hash2 = node2.sha3(); + basic.record(&hash1, &node1, 0); + basic.record(&hash2, &node2, 456); + + let records = basic.drain(); + + assert_eq!(records.len(), 1); + + assert_eq!(records[0].clone(), Record { + data: node2, + hash: hash2, + depth: 456, + }); + } + + #[test] + fn trie_record() { + use trie::{TrieDB, TrieDBMut, Trie, TrieMut}; + use memorydb::MemoryDB; + + let mut db = MemoryDB::new(); + + let mut root = H256::default(); + + { + let mut x = TrieDBMut::new(&mut db, &mut root); + + x.insert(b"dog", b"cat").unwrap(); + x.insert(b"lunch", b"time").unwrap(); + x.insert(b"notdog", b"notcat").unwrap(); + x.insert(b"hotdog", b"hotcat").unwrap(); + x.insert(b"letter", b"confusion").unwrap(); + x.insert(b"insert", b"remove").unwrap(); + x.insert(b"pirate", b"aargh!").unwrap(); + x.insert(b"yo ho ho", b"and a bottle of rum").unwrap(); + } + + let trie = TrieDB::new(&db, &root).unwrap(); + let mut recorder = BasicRecorder::new(); + + trie.get_recorded(b"pirate", &mut recorder).unwrap().unwrap(); + + let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect(); + assert_eq!(nodes, vec![ + vec![ + 248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149, + 92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118, + 215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, 59, + 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, 0, 236, + 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128 + ], + vec![ + 248, 60, 206, 134, 32, 105, 114, 97, 116, 101, 134, 97, 97, 114, 103, 104, 33, + 128, 128, 128, 128, 128, 128, 128, 128, 221, 136, 
32, 111, 32, 104, 111, 32, 104, + 111, 147, 97, 110, 100, 32, 97, 32, 98, 111, 116, 116, 108, 101, 32, 111, 102, + 32, 114, 117, 109, 128, 128, 128, 128, 128, 128, 128 + ] + ]); + + trie.get_recorded(b"letter", &mut recorder).unwrap().unwrap(); + + let nodes: Vec<_> = recorder.drain().into_iter().map(|r| r.data).collect(); + assert_eq!(nodes, vec![ + vec![ + 248, 81, 128, 128, 128, 128, 128, 128, 160, 50, 19, 71, 57, 213, 63, 125, 149, + 92, 119, 88, 96, 80, 126, 59, 11, 160, 142, 98, 229, 237, 200, 231, 224, 79, 118, + 215, 93, 144, 246, 179, 176, 160, 118, 211, 171, 199, 172, 136, 136, 240, 221, + 59, 110, 82, 86, 54, 23, 95, 48, 108, 71, 125, 59, 51, 253, 210, 18, 116, 79, + 0, 236, 102, 142, 48, 128, 128, 128, 128, 128, 128, 128, 128, 128 + ], + vec![ + 248, 99, 128, 128, 128, 128, 200, 131, 32, 111, 103, 131, 99, 97, 116, 128, 128, + 128, 206, 134, 32, 111, 116, 100, 111, 103, 134, 104, 111, 116, 99, 97, 116, 206, + 134, 32, 110, 115, 101, 114, 116, 134, 114, 101, 109, 111, 118, 101, 128, 128, + 160, 202, 250, 252, 153, 229, 63, 255, 13, 100, 197, 80, 120, 190, 186, 92, 5, + 255, 135, 245, 205, 180, 213, 161, 8, 47, 107, 13, 105, 218, 1, 9, 5, 128, + 206, 134, 32, 111, 116, 100, 111, 103, 134, 110, 111, 116, 99, 97, 116, 128, 128 + ], + vec![ + 235, 128, 128, 128, 128, 128, 128, 208, 133, 53, 116, 116, 101, 114, 137, 99, + 111, 110, 102, 117, 115, 105, 111, 110, 202, 132, 53, 110, 99, 104, 132, 116, + 105, 109, 101, 128, 128, 128, 128, 128, 128, 128, 128, 128 + ] + ]); + } +} \ No newline at end of file diff --git a/util/src/trie/sectriedb.rs b/util/src/trie/sectriedb.rs index 7869439a7..9e807884c 100644 --- a/util/src/trie/sectriedb.rs +++ b/util/src/trie/sectriedb.rs @@ -18,7 +18,7 @@ use hash::H256; use sha3::Hashable; use hashdb::HashDB; use super::triedb::TrieDB; -use super::{Trie, TrieItem}; +use super::{Trie, TrieItem, Recorder}; /// A `Trie` implementation which hashes keys and uses a generic `HashDB` backing database. /// @@ -59,10 +59,10 @@ impl<'db> Trie for SecTrieDB<'db> { self.raw.contains(&key.sha3()) } - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> - where 'a: 'key + fn get_recorded<'a, 'b, R: 'b>(&'a self, key: &'b [u8], rec: &'b mut R) -> super::Result> + where 'a: 'b, R: Recorder { - self.raw.get(&key.sha3()) + self.raw.get_recorded(&key.sha3(), rec) } } diff --git a/util/src/trie/triedb.rs b/util/src/trie/triedb.rs index 33f2cc4fc..8e6cb98fa 100644 --- a/util/src/trie/triedb.rs +++ b/util/src/trie/triedb.rs @@ -19,6 +19,7 @@ use hashdb::*; use nibbleslice::*; use rlp::*; use super::node::Node; +use super::recorder::{Recorder, NoOp}; use super::{Trie, TrieItem, TrieError}; /// A `Trie` implementation using a generic `HashDB` backing database. @@ -79,7 +80,7 @@ impl<'db> TrieDB<'db> { pub fn keys(&self) -> super::Result> { let mut ret: Vec = Vec::new(); ret.push(self.root.clone()); - try!(self.accumulate_keys(try!(self.root_node()), &mut ret)); + try!(self.accumulate_keys(try!(self.root_node(&mut NoOp)), &mut ret)); Ok(ret) } @@ -114,7 +115,7 @@ impl<'db> TrieDB<'db> { acc.push(p.as_val()); } - self.accumulate_keys(try!(self.get_node(payload)), acc) + self.accumulate_keys(try!(self.get_node(payload, &mut NoOp, 0)), acc) }; match node { @@ -127,18 +128,19 @@ impl<'db> TrieDB<'db> { } /// Get the root node's RLP. - fn root_node(&self) -> super::Result { - self.root_data().map(Node::decoded) + fn root_node<'a, R: 'a + Recorder>(&self, r: &'a mut R) -> super::Result { + self.root_data(r).map(Node::decoded) } /// Get the data of the root node. 
- fn root_data(&self) -> super::Result<&[u8]> { + fn root_data<'a, R: 'a + Recorder>(&self, r: &'a mut R) -> super::Result<&[u8]> { self.db.get(self.root).ok_or_else(|| Box::new(TrieError::InvalidStateRoot(*self.root))) + .map(|node| { r.record(self.root, node, 0); node }) } /// Get the root node as a `Node`. - fn get_node(&'db self, node: &'db [u8]) -> super::Result { - self.get_raw_or_lookup(node).map(Node::decoded) + fn get_node<'a, R: 'a + Recorder>(&'db self, node: &'db [u8], r: &'a mut R, depth: u32) -> super::Result { + self.get_raw_or_lookup(node, r, depth).map(Node::decoded) } /// Indentation helper for `formal_all`. @@ -155,7 +157,7 @@ impl<'db> TrieDB<'db> { Node::Leaf(slice, value) => try!(writeln!(f, "'{:?}: {:?}.", slice, value.pretty())), Node::Extension(ref slice, item) => { try!(write!(f, "'{:?} ", slice)); - if let Ok(node) = self.get_node(item) { + if let Ok(node) = self.get_node(item, &mut NoOp, 0) { try!(self.fmt_all(node, f, deepness)); } }, @@ -166,7 +168,7 @@ impl<'db> TrieDB<'db> { try!(writeln!(f, "=: {:?}", v.pretty())) } for i in 0..16 { - match self.get_node(nodes[i]) { + match self.get_node(nodes[i], &mut NoOp, 0) { Ok(Node::Empty) => {}, Ok(n) => { try!(self.fmt_indent(f, deepness + 1)); @@ -188,29 +190,36 @@ impl<'db> TrieDB<'db> { } /// Return optional data for a key given as a `NibbleSlice`. Returns `None` if no data exists. - fn do_lookup<'key>(&'db self, key: &NibbleSlice<'key>) -> super::Result> - where 'db: 'key + fn do_lookup<'key, R: 'key>(&'db self, key: &NibbleSlice<'key>, r: &'key mut R) -> super::Result> + where 'db: 'key, R: Recorder { - let root_rlp = try!(self.root_data()); - self.get_from_node(root_rlp, key) + let root_rlp = try!(self.root_data(r)); + self.get_from_node(root_rlp, key, r, 1) } /// Recursible function to retrieve the value given a `node` and a partial `key`. `None` if no /// value exists for the key. /// /// Note: Not a public API; use Trie trait functions. - fn get_from_node<'key>(&'db self, node: &'db [u8], key: &NibbleSlice<'key>) -> super::Result> - where 'db: 'key - { + fn get_from_node<'key, R: 'key>( + &'db self, + node: &'db [u8], + key: &NibbleSlice<'key>, + r: &'key mut R, + d: u32 + ) -> super::Result> where 'db: 'key, R: Recorder { match Node::decoded(node) { Node::Leaf(ref slice, value) if key == slice => Ok(Some(value)), Node::Extension(ref slice, item) if key.starts_with(slice) => { - let data = try!(self.get_raw_or_lookup(item)); - self.get_from_node(data, &key.mid(slice.len())) + let data = try!(self.get_raw_or_lookup(item, r, d)); + self.get_from_node(data, &key.mid(slice.len()), r, d + 1) }, Node::Branch(ref nodes, value) => match key.is_empty() { true => Ok(value), - false => self.get_from_node(try!(self.get_raw_or_lookup(nodes[key.at(0) as usize])), &key.mid(1)) + false => { + let node = try!(self.get_raw_or_lookup(nodes[key.at(0) as usize], r, d)); + self.get_from_node(node, &key.mid(1), r, d + 1) + } }, _ => Ok(None) } @@ -219,13 +228,14 @@ impl<'db> TrieDB<'db> { /// Given some node-describing data `node`, return the actual node RLP. /// This could be a simple identity operation in the case that the node is sufficiently small, but /// may require a database lookup. 
- fn get_raw_or_lookup(&'db self, node: &'db [u8]) -> super::Result<&'db [u8]> { + fn get_raw_or_lookup(&'db self, node: &'db [u8], rec: &mut R, d: u32) -> super::Result<&'db [u8]> { // check if its sha3 + len let r = Rlp::new(node); match r.is_data() && r.size() == 32 { true => { let key = r.as_val::(); self.db.get(&key).ok_or_else(|| Box::new(TrieError::IncompleteDatabase(key))) + .map(|raw| { rec.record(&key, raw, d); raw }) } false => Ok(node) } @@ -275,7 +285,7 @@ impl<'a> TrieDBIterator<'a> { trail: vec![], key_nibbles: Vec::new(), }; - r.descend(db.root_data().unwrap()); + r.descend(db.root_data(&mut NoOp).unwrap()); r } @@ -283,7 +293,7 @@ impl<'a> TrieDBIterator<'a> { fn descend(&mut self, d: &'a [u8]) { self.trail.push(Crumb { status: Status::Entering, - node: self.db.get_node(d).unwrap(), + node: self.db.get_node(d, &mut NoOp, 0).unwrap(), }); match self.trail.last().unwrap().node { Node::Leaf(n, _) | Node::Extension(n, _) => { self.key_nibbles.extend(n.iter()); }, @@ -341,24 +351,17 @@ impl<'a> Iterator for TrieDBIterator<'a> { } } -impl<'db> TrieDB<'db> { - /// Get all keys/values stored in the trie. - pub fn iter(&self) -> TrieDBIterator { - TrieDBIterator::new(self) - } -} - impl<'db> Trie for TrieDB<'db> { fn iter<'a>(&'a self) -> Box + 'a> { - Box::new(TrieDB::iter(self)) + Box::new(TrieDBIterator::new(self)) } fn root(&self) -> &H256 { self.root } - fn get<'a, 'key>(&'a self, key: &'key [u8]) -> super::Result> - where 'a: 'key + fn get_recorded<'a, 'b, R: 'b>(&'a self, key: &'b [u8], rec: &'b mut R) -> super::Result> + where 'a: 'b, R: Recorder { - self.do_lookup(&NibbleSlice::new(key)) + self.do_lookup(&NibbleSlice::new(key), rec) } } @@ -387,6 +390,8 @@ fn iterator() { t.insert(x, x).unwrap(); } } - assert_eq!(d.iter().map(|i|i.to_vec()).collect::>(), TrieDB::new(&memdb, &root).unwrap().iter().map(|x|x.0).collect::>()); - assert_eq!(d, TrieDB::new(&memdb, &root).unwrap().iter().map(|x|x.1).collect::>()); + + let t = TrieDB::new(&memdb, &root).unwrap(); + assert_eq!(d.iter().map(|i|i.to_vec()).collect::>(), t.iter().map(|x|x.0).collect::>()); + assert_eq!(d, t.iter().map(|x|x.1).collect::>()); } From f07a1e6bafc3d1ab7e2e5aef6756bfafcd913d68 Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Wed, 24 Aug 2016 17:00:14 +0200 Subject: [PATCH 21/29] Fixed neighbours collection (#1996) --- util/network/src/discovery.rs | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/util/network/src/discovery.rs b/util/network/src/discovery.rs index 2b98f3230..a78f05804 100644 --- a/util/network/src/discovery.rs +++ b/util/network/src/discovery.rs @@ -59,7 +59,7 @@ pub struct BucketEntry { pub timeout: Option, } -struct NodeBucket { +pub struct NodeBucket { nodes: VecDeque, //sorted by last active } @@ -281,12 +281,12 @@ impl Discovery { if count == BUCKET_SIZE { // delete the most distant element let remove = { - let (_, last) = found.iter_mut().next_back().unwrap(); + let (key, last) = found.iter_mut().next_back().unwrap(); last.pop(); - last.is_empty() + if last.is_empty() { Some(key.clone()) } else { None } }; - if remove { - found.remove(&distance); + if let Some(remove) = remove { + found.remove(&remove); } } else { @@ -605,6 +605,21 @@ mod tests { assert!(removed > 0); } + #[test] + fn find_nearest_saturated() { + use super::*; + let mut buckets: Vec<_> = (0..256).map(|_| NodeBucket::new()).collect(); + let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40447").unwrap(), udp_port: 40447 }; + for _ in 0..(16 + 10) { + 
buckets[0].nodes.push_back(BucketEntry { + address: NodeEntry { id: NodeId::new(), endpoint: ep.clone() }, + timeout: None + }); + } + let nearest = Discovery::nearest_node_entries(&NodeId::new(), &buckets); + assert_eq!(nearest.len(), 16) + } + #[test] fn packets() { let key = KeyPair::create().unwrap(); From b0d462c6c95f4ad0afe73115837a8d37f08162d5 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 24 Aug 2016 18:35:21 +0200 Subject: [PATCH 22/29] Signature cleanup (#1921) * Address renamed to H160 at bigint library level * moved uint specific test from util to bigint library * naming * unifing hashes in progress * unifing hashes * cleanup redundant unwraps in tests * Removing util/crypto in progress. * fixed compiling * signature cleanup in progress * new module - ethcrypto used by ethstore and ethcore-network * fixed compiling * fixed compiling * fixed merge --- Cargo.lock | 16 + ethcore/Cargo.toml | 1 + ethcore/src/builtin.rs | 13 +- ethcore/src/client/test_client.rs | 3 +- ethcore/src/engines/basic_authority.rs | 12 +- ethcore/src/engines/instant_seal.rs | 2 +- ethcore/src/error.rs | 16 +- ethcore/src/executive.rs | 9 +- ethcore/src/lib.rs | 1 + ethcore/src/miner/miner.rs | 5 +- ethcore/src/miner/transaction_queue.rs | 26 +- ethcore/src/tests/helpers.rs | 3 +- ethcore/src/types/transaction.rs | 42 +- ethcore/src/verification/verification.rs | 3 +- ethcrypto/Cargo.toml | 12 + ethcrypto/src/lib.rs | 246 ++++++++++ ethkey/src/lib.rs | 2 +- ethkey/src/primitive.rs | 17 - ethkey/src/signature.rs | 25 +- ethstore/Cargo.toml | 1 + ethstore/src/crypto.rs | 95 ---- ethstore/src/lib.rs | 2 +- rpc/Cargo.toml | 1 + rpc/src/lib.rs | 1 + rpc/src/v1/impls/ethcore.rs | 5 +- rpc/src/v1/impls/personal.rs | 5 +- rpc/src/v1/tests/eth.rs | 5 +- sync/src/api.rs | 4 +- util/network/Cargo.toml | 2 + util/network/src/connection.rs | 2 +- util/network/src/discovery.rs | 18 +- util/network/src/error.rs | 11 +- util/network/src/handshake.rs | 60 ++- util/network/src/host.rs | 5 +- util/network/src/lib.rs | 2 + util/network/src/tests.rs | 6 +- util/src/crypto.rs | 560 ----------------------- util/src/error.rs | 9 - util/src/lib.rs | 4 +- 39 files changed, 444 insertions(+), 808 deletions(-) create mode 100644 ethcrypto/Cargo.toml create mode 100644 ethcrypto/src/lib.rs delete mode 100644 ethkey/src/primitive.rs delete mode 100644 ethstore/src/crypto.rs delete mode 100644 util/src/crypto.rs diff --git a/Cargo.lock b/Cargo.lock index d659c4bd7..5bd5e8f32 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -269,6 +269,7 @@ dependencies = [ "ethcore-ipc-nano 1.4.0", "ethcore-util 1.4.0", "ethjson 0.1.0", + "ethkey 0.2.0", "ethstore 0.1.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", @@ -406,6 +407,8 @@ dependencies = [ "ethcore-devtools 1.4.0", "ethcore-io 1.4.0", "ethcore-util 1.4.0", + "ethcrypto 0.1.0", + "ethkey 0.2.0", "igd 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -431,6 +434,7 @@ dependencies = [ "ethcore-ipc 1.4.0", "ethcore-util 1.4.0", "ethjson 0.1.0", + "ethkey 0.2.0", "ethstore 0.1.0", "ethsync 1.4.0", "json-ipc-server 0.2.4 (git+https://github.com/ethcore/json-ipc-server.git)", @@ -508,6 +512,17 @@ dependencies = [ "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "ethcrypto" +version = "0.1.0" +dependencies = [ + 
"bigint 0.1.0", + "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)", + "ethkey 0.2.0", + "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", + "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ethjson" version = "0.1.0" @@ -535,6 +550,7 @@ dependencies = [ name = "ethstore" version = "0.1.0" dependencies = [ + "ethcrypto 0.1.0", "ethkey 0.2.0", "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 729f9c268..a34116df5 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -33,6 +33,7 @@ ethcore-devtools = { path = "../devtools" } ethjson = { path = "../json" } ethcore-ipc = { path = "../ipc/rpc" } ethstore = { path = "../ethstore" } +ethkey = { path = "../ethkey" } ethcore-ipc-nano = { path = "../ipc/nano" } rand = "0.3" diff --git a/ethcore/src/builtin.rs b/ethcore/src/builtin.rs index 891f321a1..d4ea5e30e 100644 --- a/ethcore/src/builtin.rs +++ b/ethcore/src/builtin.rs @@ -14,10 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use util::*; use crypto::sha2::Sha256; use crypto::ripemd160::Ripemd160; use crypto::digest::Digest; +use util::*; +use ethkey::{Signature, recover}; use ethjson; /// Definition of a contract whose implementation is built-in. @@ -92,19 +93,19 @@ pub fn new_builtin_exec(name: &str) -> Box { }), "ecrecover" => Box::new(move|input: &[u8], output: &mut[u8]| { #[repr(packed)] - #[derive(Debug)] + #[derive(Debug, Default)] struct InType { hash: H256, v: H256, r: H256, s: H256, } - let mut it: InType = InType { hash: H256::new(), v: H256::new(), r: H256::new(), s: H256::new() }; + let mut it = InType::default(); it.copy_raw(input); if it.v == H256::from(&U256::from(27)) || it.v == H256::from(&U256::from(28)) { - let s = signature_from_rsv(&it.r, &it.s, it.v[31] - 27); - if ec::is_valid(&s) { - if let Ok(p) = ec::recover(&s, &it.hash) { + let s = Signature::from_rsv(&it.r, &it.s, it.v[31] - 27); + if s.is_valid() { + if let Ok(p) = recover(&s, &it.hash) { let r = p.as_slice().sha3(); // NICE: optimise and separate out into populate-like function for i in 0..min(32, output.len()) { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 410a90347..8e26a6b0c 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -18,6 +18,7 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder}; use util::*; +use ethkey::{Generator, Random}; use devtools::*; use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action}; use blockchain::TreeRoute; @@ -188,7 +189,7 @@ impl TestBlockChainClient { let txs = match with { EachBlockWith::Transaction | EachBlockWith::UncleAndTransaction => { let mut txs = RlpStream::new_list(1); - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); // Update nonces value self.nonces.write().insert(keypair.address(), U256::one()); let tx = Transaction { diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 7ec5a66a4..926399d7b 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -17,6 +17,7 @@ //! A blockchain engine that supports a basic, non-BFT proof-of-authority. 
use common::*; +use ethkey::{recover, public_to_address}; use account_provider::AccountProvider; use block::*; use spec::CommonParams; @@ -133,7 +134,7 @@ impl Engine for BasicAuthority { fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { // check the signature is legit. let sig = try!(UntrustedRlp::new(&header.seal[0]).as_val::()); - let signer = Address::from(try!(ec::recover(&sig, &header.bare_hash())).sha3()); + let signer = public_to_address(&try!(recover(&sig.into(), &header.bare_hash()))); if !self.our_params.authorities.contains(&signer) { return try!(Err(BlockError::InvalidSeal)); } @@ -228,15 +229,10 @@ mod tests { fn can_do_signature_verification_fail() { let engine = new_test_authority().engine; let mut header: Header = Header::default(); - header.set_seal(vec![rlp::encode(&Signature::zero()).to_vec()]); + header.set_seal(vec![rlp::encode(&H520::default()).to_vec()]); let verify_result = engine.verify_block_unordered(&header, None); - - match verify_result { - Err(Error::Util(UtilError::Crypto(CryptoError::InvalidSignature))) => {}, - Err(_) => { panic!("should be block difficulty error (got {:?})", verify_result); }, - _ => { panic!("Should be error, got Ok"); }, - } + assert!(verify_result.is_err()); } #[test] diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index 6a3d3c700..e98e87bf5 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -100,7 +100,7 @@ mod tests { assert!(engine.verify_block_basic(&header, None).is_ok()); - header.set_seal(vec![rlp::encode(&Signature::zero()).to_vec()]); + header.set_seal(vec![rlp::encode(&H520::default()).to_vec()]); assert!(engine.verify_block_unordered(&header, None).is_ok()); } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 449303732..5c26e2f78 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -24,6 +24,7 @@ use client::Error as ClientError; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use types::block_import_error::BlockImportError; use snapshot::Error as SnapshotError; +use ethkey::Error as EthkeyError; pub use types::executed::{ExecutionError, CallError}; @@ -238,6 +239,8 @@ pub enum Error { Snappy(::util::snappy::InvalidInput), /// Snapshot error. Snapshot(SnapshotError), + /// Ethkey error. 
+ Ethkey(EthkeyError), } impl fmt::Display for Error { @@ -258,6 +261,7 @@ impl fmt::Display for Error { Error::StdIo(ref err) => err.fmt(f), Error::Snappy(ref err) => err.fmt(f), Error::Snapshot(ref err) => err.fmt(f), + Error::Ethkey(ref err) => err.fmt(f), } } } @@ -298,12 +302,6 @@ impl From for Error { } } -impl From for Error { - fn from(err: CryptoError) -> Error { - Error::Util(UtilError::Crypto(err)) - } -} - impl From for Error { fn from(err: DecoderError) -> Error { Error::Util(UtilError::Decoder(err)) @@ -361,6 +359,12 @@ impl From for Error { } } +impl From for Error { + fn from(err: EthkeyError) -> Error { + Error::Ethkey(err) + } +} + impl From> for Error where Error: From { fn from(err: Box) -> Error { Error::from(*err) diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 4cceb137b..53d5460ad 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -483,6 +483,7 @@ impl<'a> Executive<'a> { #[cfg(test)] #[allow(dead_code)] mod tests { + use ethkey::{Generator, Random}; use super::*; use common::*; use evm::{Factory, VMType}; @@ -1002,7 +1003,7 @@ mod tests { // TODO: fix (preferred) or remove evm_test_ignore!{test_transact_simple: test_transact_simple_jit, test_transact_simple_int} fn test_transact_simple(factory: Factory) { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let t = Transaction { action: Action::Create, value: U256::from(17), @@ -1069,7 +1070,7 @@ mod tests { evm_test!{test_transact_invalid_nonce: test_transact_invalid_nonce_jit, test_transact_invalid_nonce_int} fn test_transact_invalid_nonce(factory: Factory) { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let t = Transaction { action: Action::Create, value: U256::from(17), @@ -1102,7 +1103,7 @@ mod tests { evm_test!{test_transact_gas_limit_reached: test_transact_gas_limit_reached_jit, test_transact_gas_limit_reached_int} fn test_transact_gas_limit_reached(factory: Factory) { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let t = Transaction { action: Action::Create, value: U256::from(17), @@ -1137,7 +1138,7 @@ mod tests { evm_test!{test_not_enough_cash: test_not_enough_cash_jit, test_not_enough_cash_int} fn test_not_enough_cash(factory: Factory) { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let t = Transaction { action: Action::Create, value: U256::from(18), diff --git a/ethcore/src/lib.rs b/ethcore/src/lib.rs index 2da6abe3d..b8233ea26 100644 --- a/ethcore/src/lib.rs +++ b/ethcore/src/lib.rs @@ -96,6 +96,7 @@ extern crate bloomchain; extern crate rayon; extern crate hyper; extern crate ethash; +extern crate ethkey; pub extern crate ethstore; extern crate semver; extern crate ethcore_ipc_nano as nanoipc; diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 3ca3f0d74..a2533ecde 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -911,6 +911,7 @@ mod tests { use super::super::MinerService; use super::*; use util::*; + use ethkey::{Generator, Random}; use client::{TestBlockChainClient, EachBlockWith}; use client::{TransactionImportResult}; use types::transaction::{Transaction, Action}; @@ -975,7 +976,7 @@ mod tests { let client = TestBlockChainClient::default(); let miner = miner(); let transaction = { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); Transaction { action: Action::Create, value: U256::zero(), @@ -1005,7 +1006,7 @@ 
mod tests { let client = TestBlockChainClient::default(); let miner = miner(); let transaction = { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); Transaction { action: Action::Create, value: U256::zero(), diff --git a/ethcore/src/miner/transaction_queue.rs b/ethcore/src/miner/transaction_queue.rs index 8a2a37145..5e610da24 100644 --- a/ethcore/src/miner/transaction_queue.rs +++ b/ethcore/src/miner/transaction_queue.rs @@ -26,16 +26,17 @@ //! ```rust //! extern crate ethcore_util as util; //! extern crate ethcore; +//! extern crate ethkey; //! extern crate rustc_serialize; //! -//! use util::crypto::KeyPair; //! use util::{Uint, U256, Address}; +//! use ethkey::{Random, Generator}; //! use ethcore::miner::{TransactionQueue, AccountDetails, TransactionOrigin}; //! use ethcore::transaction::*; //! use rustc_serialize::hex::FromHex; //! //! fn main() { -//! let key = KeyPair::create().unwrap(); +//! let key = Random.generate().unwrap(); //! let t1 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), //! gas: U256::from(100_000), gas_price: U256::one(), nonce: U256::from(10) }; //! let t2 = Transaction { action: Action::Create, value: U256::from(100), data: "3331600055".from_hex().unwrap(), @@ -233,7 +234,7 @@ struct TransactionSet { impl TransactionSet { /// Inserts `TransactionOrder` to this set. Transaction does not need to be unique - /// the same transaction may be validly inserted twice. Any previous transaction that - /// it replaces (i.e. with the same `sender` and `nonce`) should be returned. + /// it replaces (i.e. with the same `sender` and `nonce`) should be returned. fn insert(&mut self, sender: Address, nonce: U256, order: TransactionOrder) -> Option { if !self.by_priority.insert(order.clone()) { return Some(order.clone()); @@ -313,7 +314,7 @@ impl TransactionSet { } /// Get the minimum gas price that we can accept into this queue that wouldn't cause the transaction to - /// immediately be dropped. 0 if the queue isn't at capacity; 1 plus the lowest if it is. + /// immediately be dropped. 0 if the queue isn't at capacity; 1 plus the lowest if it is. fn gas_price_entry_limit(&self) -> U256 { match self.by_gas_price.keys().next() { Some(k) if self.by_priority.len() >= self.limit => *k + 1.into(), @@ -340,7 +341,7 @@ impl TransactionSet { return false; } } else { - // Operation failed: gas-price not found in Map. + // Operation failed: gas-price not found in Map. return false; } // Operation maybe ok: only if hash not found in gas-price Set. 
@@ -869,6 +870,7 @@ mod test { extern crate rustc_serialize; use util::table::*; use util::*; + use ethkey::{Random, Generator}; use transaction::*; use error::{Error, TransactionError}; use super::*; @@ -897,7 +899,7 @@ mod test { } fn new_tx(nonce: U256, gas_price: U256) -> SignedTransaction { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); new_unsigned_tx(nonce, gas_price).sign(keypair.secret()) } @@ -916,7 +918,7 @@ mod test { let tx1 = new_unsigned_tx(nonce, gas_price); let tx2 = new_unsigned_tx(nonce + nonce_increment, gas_price + gas_price_increment); - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let secret = &keypair.secret(); (tx1.sign(secret), tx2.sign(secret)) } @@ -1373,7 +1375,7 @@ mod test { fn should_move_transactions_if_gap_filled() { // given let mut txq = TransactionQueue::new(); - let kp = KeyPair::create().unwrap(); + let kp = Random.generate().unwrap(); let secret = kp.secret(); let tx = new_unsigned_tx(123.into(), 1.into()).sign(secret); let tx1 = new_unsigned_tx(124.into(), 1.into()).sign(secret); @@ -1397,7 +1399,7 @@ mod test { fn should_remove_transaction() { // given let mut txq2 = TransactionQueue::new(); - let (tx, tx2) = new_tx_pair_default(3.into(), 0.into()); + let (tx, tx2) = new_tx_pair_default(3.into(), 0.into()); txq2.add(tx.clone(), &default_account_details, TransactionOrigin::External).unwrap(); txq2.add(tx2.clone(), &default_account_details, TransactionOrigin::External).unwrap(); assert_eq!(txq2.status().pending, 1); @@ -1582,7 +1584,7 @@ mod test { init_log(); // given let mut txq = TransactionQueue::new(); - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let tx = new_unsigned_tx(123.into(), 1.into()).sign(keypair.secret()); let tx2 = { let mut tx2 = (*tx).clone(); @@ -1605,7 +1607,7 @@ mod test { fn should_replace_same_transaction_when_importing_to_futures() { // given let mut txq = TransactionQueue::new(); - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let tx0 = new_unsigned_tx(123.into(), 1.into()).sign(keypair.secret()); let tx1 = { let mut tx1 = (*tx0).clone(); @@ -1758,7 +1760,7 @@ mod test { // given let mut txq = TransactionQueue::new(); let (tx1, tx2, tx2_2, tx3) = { - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let secret = &keypair.secret(); let nonce = 123.into(); let tx = new_unsigned_tx(nonce, 1.into()); diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index ff35e7023..03a1a2232 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use ethkey::KeyPair; use io::*; use client::{BlockChainClient, Client, ClientConfig}; use common::*; @@ -145,7 +146,7 @@ pub fn generate_dummy_client_with_spec_and_data(get_test_spec: F, block_numbe let mut last_hashes = vec![]; let mut last_header = genesis_header.clone(); - let kp = KeyPair::from_secret("".sha3()).unwrap() ; + let kp = KeyPair::from_secret("".sha3()).unwrap(); let author = kp.address(); let mut n = 0; diff --git a/ethcore/src/types/transaction.rs b/ethcore/src/types/transaction.rs index 4b08a5118..02d3da30a 100644 --- a/ethcore/src/types/transaction.rs +++ b/ethcore/src/types/transaction.rs @@ -16,18 +16,16 @@ //! Transaction data structure. 
-use util::{H256, Address, U256, H520}; use std::ops::Deref; -use util::rlp::*; -use util::sha3::*; -use util::{UtilError, CryptoError, Bytes, Signature, Secret, ec}; -use util::crypto::{signature_from_rsv, signature_to_rsv}; use std::cell::*; +use util::rlp::*; +use util::sha3::Hashable; +use util::{H256, Address, U256, Bytes}; +use ethkey::{Signature, sign, Secret, recover, public_to_address, Error as EthkeyError}; use error::*; use evm::Schedule; use header::BlockNumber; use ethjson; -use ethstore::ethkey::Signature as EthkeySignature; #[derive(Debug, Clone, PartialEq, Eq, Binary)] /// Transaction action type. @@ -139,19 +137,17 @@ impl Transaction { /// Signs the transaction as coming from `sender`. pub fn sign(self, secret: &Secret) -> SignedTransaction { - let sig = ec::sign(secret, &self.hash()).unwrap(); - self.with_signature(sig.into()) + let sig = sign(secret, &self.hash()).unwrap(); + self.with_signature(sig) } /// Signs the transaction with signature. - pub fn with_signature(self, sig: EthkeySignature) -> SignedTransaction { - let sig: H520 = sig.into(); - let (r, s, v) = signature_to_rsv(&sig); + pub fn with_signature(self, sig: Signature) -> SignedTransaction { SignedTransaction { unsigned: self, - r: r, - s: s, - v: v + 27, + r: sig.r().into(), + s: sig.s().into(), + v: sig.v() + 27, hash: Cell::new(None), sender: Cell::new(None), } @@ -290,12 +286,14 @@ impl SignedTransaction { pub fn standard_v(&self) -> u8 { match self.v { 27 => 0, 28 => 1, _ => 4 } } /// Construct a signature object from the sig. - pub fn signature(&self) -> Signature { signature_from_rsv(&From::from(&self.r), &From::from(&self.s), self.standard_v()) } + pub fn signature(&self) -> Signature { + Signature::from_rsv(&self.r.into(), &self.s.into(), self.standard_v()) + } /// Checks whether the signature has a low 's' value. 
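`with_signature` now unpacks `r`, `s` and `v` straight from the `ethkey::Signature` accessors, and `signature()` rebuilds the 65-byte value with `Signature::from_rsv` (added later in this patch). The byte layout involved is simple enough to show with plain arrays:

    // The 65-byte layout that from_rsv() writes and r()/s()/v() read back
    // (plain arrays here; the real code wraps this in H256/H520 types).
    fn from_rsv(r: &[u8; 32], s: &[u8; 32], v: u8) -> [u8; 65] {
        let mut sig = [0u8; 65];
        sig[0..32].copy_from_slice(r);
        sig[32..64].copy_from_slice(s);
        sig[64] = v;
        sig
    }

    fn main() {
        let sig = from_rsv(&[1u8; 32], &[2u8; 32], 0);
        assert_eq!(&sig[0..32], &[1u8; 32][..]);
        assert_eq!(sig[64], 0);
        // SignedTransaction stores v as sig.v() + 27; standard_v() maps
        // 27/28 back to 0/1 before handing it to from_rsv().
    }
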
pub fn check_low_s(&self) -> Result<(), Error> { - if !ec::is_low_s(&self.s) { - Err(Error::Util(UtilError::Crypto(CryptoError::InvalidSignature))) + if !self.signature().is_low_s() { + Err(EthkeyError::InvalidSignature.into()) } else { Ok(()) } @@ -307,7 +305,7 @@ impl SignedTransaction { match sender { Some(s) => Ok(s), None => { - let s = Address::from(try!(ec::recover(&self.signature(), &self.unsigned.hash())).sha3()); + let s = public_to_address(&try!(recover(&self.signature(), &self.unsigned.hash()))); self.sender.set(Some(s)); Ok(s) } @@ -319,8 +317,8 @@ impl SignedTransaction { #[cfg(test)] #[cfg(feature = "json-tests")] pub fn validate(self, schedule: &Schedule, require_low: bool) -> Result { - if require_low && !ec::is_low_s(&self.s) { - return Err(Error::Util(UtilError::Crypto(CryptoError::InvalidSignature))); + if require_low && !self.signature().is_low_s() { + return Err(EthkeyError::InvalidSignature.into()) } try!(self.sender()); if self.gas < U256::from(self.gas_required(&schedule)) { @@ -368,7 +366,9 @@ fn sender_test() { #[test] fn signing() { - let key = ::util::crypto::KeyPair::create().unwrap(); + use ethkey::{Random, Generator}; + + let key = Random.generate().unwrap(); let t = Transaction { action: Action::Create, nonce: U256::from(42), diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index ae7a141b4..9cea3bede 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -228,6 +228,7 @@ fn verify_block_integrity(block: &[u8], transactions_root: &H256, uncles_hash: & #[cfg(test)] mod tests { use util::*; + use ethkey::{Random, Generator}; use header::*; use verification::*; use blockchain::extras::*; @@ -355,7 +356,7 @@ mod tests { good.timestamp = 40; good.number = 10; - let keypair = KeyPair::create().unwrap(); + let keypair = Random.generate().unwrap(); let tr1 = Transaction { action: Action::Create, diff --git a/ethcrypto/Cargo.toml b/ethcrypto/Cargo.toml new file mode 100644 index 000000000..85298266d --- /dev/null +++ b/ethcrypto/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "ethcrypto" +version = "0.1.0" +authors = ["debris "] + +[dependencies] +rust-crypto = "0.2.36" +tiny-keccak = "1.0" +eth-secp256k1 = { git = "https://github.com/ethcore/rust-secp256k1" } +ethkey = { path = "../ethkey" } +bigint = { path = "../util/bigint" } + diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs new file mode 100644 index 000000000..4e3c3c1fc --- /dev/null +++ b/ethcrypto/src/lib.rs @@ -0,0 +1,246 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Crypto utils used ethstore and network. 
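Sender recovery now goes through `ethkey::recover` plus `public_to_address` instead of `ec::recover` followed by an explicit `sha3()` of the public key. A sketch of that path, assuming `ethkey` also exposes the `Address`, `Error` and `Message` names that its own modules use elsewhere in this patch:

    extern crate ethkey;

    use ethkey::{recover, public_to_address, Address, Error, Message, Signature};

    // Sketch of the new recovery path used by SignedTransaction::sender().
    fn sender(sig: &Signature, tx_hash: &Message) -> Result<Address, Error> {
        // recover() yields the signer's public key; public_to_address()
        // hashes it down to the 160-bit account address.
        let public = try!(recover(sig, tx_hash));
        Ok(public_to_address(&public))
    }
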
+ +extern crate bigint; +extern crate tiny_keccak; +extern crate crypto as rcrypto; +extern crate secp256k1; +extern crate ethkey; + +use tiny_keccak::Keccak; +use rcrypto::pbkdf2::pbkdf2; +use rcrypto::scrypt::{scrypt, ScryptParams}; +use rcrypto::sha2::Sha256; +use rcrypto::hmac::Hmac; +use secp256k1::Error as SecpError; + +pub const KEY_LENGTH: usize = 32; +pub const KEY_ITERATIONS: usize = 10240; +pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; + +pub enum Error { + Secp(SecpError), + InvalidMessage, +} + +impl From for Error { + fn from(e: SecpError) -> Self { + Error::Secp(e) + } +} + +pub trait Keccak256 { + fn keccak256(&self) -> T where T: Sized; +} + +impl Keccak256<[u8; 32]> for [u8] { + fn keccak256(&self) -> [u8; 32] { + let mut keccak = Keccak::new_keccak256(); + let mut result = [0u8; 32]; + keccak.update(self); + keccak.finalize(&mut result); + result + } +} + +pub fn derive_key_iterations(password: &str, salt: &[u8; 32], c: u32) -> (Vec, Vec) { + let mut h_mac = Hmac::new(Sha256::new(), password.as_bytes()); + let mut derived_key = vec![0u8; KEY_LENGTH]; + pbkdf2(&mut h_mac, salt, c, &mut derived_key); + let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; + let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; + (derived_right_bits.to_vec(), derived_left_bits.to_vec()) +} + +pub fn derive_key_scrypt(password: &str, salt: &[u8; 32], n: u32, p: u32, r: u32) -> (Vec, Vec) { + let mut derived_key = vec![0u8; KEY_LENGTH]; + let scrypt_params = ScryptParams::new(n.trailing_zeros() as u8, r, p); + scrypt(password.as_bytes(), salt, &scrypt_params, &mut derived_key); + let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; + let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; + (derived_right_bits.to_vec(), derived_left_bits.to_vec()) +} + +pub fn derive_mac(derived_left_bits: &[u8], cipher_text: &[u8]) -> Vec { + let mut mac = vec![0u8; KEY_LENGTH_AES + cipher_text.len()]; + mac[0..KEY_LENGTH_AES].copy_from_slice(derived_left_bits); + mac[KEY_LENGTH_AES..cipher_text.len() + KEY_LENGTH_AES].copy_from_slice(cipher_text); + mac +} + +/// AES encryption +pub mod aes { + use rcrypto::blockmodes::{CtrMode, CbcDecryptor, PkcsPadding}; + use rcrypto::aessafe::{AesSafe128Encryptor, AesSafe128Decryptor}; + use rcrypto::symmetriccipher::{Encryptor, Decryptor, SymmetricCipherError}; + use rcrypto::buffer::{RefReadBuffer, RefWriteBuffer, WriteBuffer}; + + /// Encrypt a message + pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) { + let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); + encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); + } + + /// Decrypt a message + pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) { + let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); + encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); + } + + + /// Decrypt a message using cbc mode + pub fn decrypt_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result { + let mut encryptor = CbcDecryptor::new(AesSafe128Decryptor::new(k), PkcsPadding, iv.to_vec()); + let len = dest.len(); + let mut buffer = RefWriteBuffer::new(dest); + try!(encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut buffer, true)); + Ok(len - buffer.remaining()) + } +} + +/// ECDH functions +#[cfg_attr(feature="dev", allow(similar_names))] +pub mod ecdh { + 
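The helpers above reproduce what `ethstore/src/crypto.rs` used to provide: PBKDF2/scrypt key derivation that splits the 32-byte result into two 16-byte halves, plus a MAC input built from the second half and the ciphertext. A sketch of how a keystore consumer would typically combine them (the exact ethstore call sites are not shown in this hunk):

    extern crate ethcrypto;

    use ethcrypto::{derive_key_iterations, derive_mac, Keccak256, KEY_ITERATIONS};

    // Sketch: verify a keystore MAC. derive_key_iterations() returns
    // (derived_right_bits, derived_left_bits) = (key[0..16], key[16..32]);
    // the first half is the AES key, the second half feeds the MAC.
    fn mac_matches(password: &str, salt: &[u8; 32], cipher_text: &[u8], expected_mac: &[u8; 32]) -> bool {
        let (_aes_key, mac_key) = derive_key_iterations(password, salt, KEY_ITERATIONS as u32);
        // MAC = keccak256(mac_key || cipher_text), via derive_mac + Keccak256.
        let mac: [u8; 32] = derive_mac(&mac_key, cipher_text).keccak256();
        &mac == expected_mac
    }
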
use secp256k1::{ecdh, key}; + use ethkey::{Secret, Public, SECP256K1}; + use Error; + + /// Agree on a shared secret + pub fn agree(secret: &Secret, public: &Public) -> Result { + let context = &SECP256K1; + let pdata = { + let mut temp = [4u8; 65]; + (&mut temp[1..65]).copy_from_slice(&public[0..64]); + temp + }; + + let publ = try!(key::PublicKey::from_slice(context, &pdata)); + // no way to create SecretKey from raw byte array. + let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) }; + let shared = ecdh::SharedSecret::new_raw(context, &publ, sec); + + let mut s = Secret::default(); + s.copy_from_slice(&shared[0..32]); + Ok(s) + } +} + +/// ECIES function +#[cfg_attr(feature="dev", allow(similar_names))] +pub mod ecies { + use rcrypto::digest::Digest; + use rcrypto::sha2::Sha256; + use rcrypto::hmac::Hmac; + use rcrypto::mac::Mac; + use bigint::hash::FixedHash; + use ethkey::{Random, Generator, Public, Secret}; + use {Error, ecdh, aes}; + + /// Encrypt a message with a public key + pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result, Error> { + let r = Random.generate().unwrap(); + let z = try!(ecdh::agree(r.secret(), public)); + let mut key = [0u8; 32]; + let mut mkey = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + let mut hasher = Sha256::new(); + let mkey_material = &key[16..32]; + hasher.input(mkey_material); + hasher.result(&mut mkey); + let ekey = &key[0..16]; + + let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)]; + msg[0] = 0x04u8; + { + let msgd = &mut msg[1..]; + msgd[0..64].copy_from_slice(r.public()); + { + let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())]; + aes::encrypt(ekey, &[0u8; 16], plain, cipher); + } + let mut hmac = Hmac::new(Sha256::new(), &mkey); + { + let cipher_iv = &msgd[64..(64 + 16 + plain.len())]; + hmac.input(cipher_iv); + } + hmac.input(shared_mac); + hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]); + } + Ok(msg) + } + + /// Decrypt a message with a secret key + pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result, Error> { + let meta_len = 1 + 64 + 16 + 32; + if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 { + return Err(Error::InvalidMessage); //invalid message: publickey + } + + let e = &encrypted[1..]; + let p = Public::from_slice(&e[0..64]); + let z = try!(ecdh::agree(secret, &p)); + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + let ekey = &key[0..16]; + let mkey_material = &key[16..32]; + let mut hasher = Sha256::new(); + let mut mkey = [0u8; 32]; + hasher.input(mkey_material); + hasher.result(&mut mkey); + + let clen = encrypted.len() - meta_len; + let cipher_with_iv = &e[64..(64+16+clen)]; + let cipher_iv = &cipher_with_iv[0..16]; + let cipher_no_iv = &cipher_with_iv[16..]; + let msg_mac = &e[(64+16+clen)..]; + + // Verify tag + let mut hmac = Hmac::new(Sha256::new(), &mkey); + hmac.input(cipher_with_iv); + hmac.input(shared_mac); + let mut mac = [0u8; 32]; + hmac.raw_result(&mut mac); + if &mac[..] != msg_mac { + return Err(Error::InvalidMessage); + } + + let mut msg = vec![0u8; clen]; + aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]); + Ok(msg) + } + + fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { + let mut hasher = Sha256::new(); + // SEC/ISO/Shoup specify counter size SHOULD be equivalent + // to size of hash output, however, it also notes that + // the 4 bytes is okay. NIST specifies 4 bytes. 
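`encrypt` above assembles the ECIES message as a type byte, the ephemeral public key, a 16-byte IV slot, the ciphertext and an HMAC-SHA256 tag, and `decrypt` slices it apart using the same offsets (`meta_len = 1 + 64 + 16 + 32`). The offsets alone, as a runnable sketch:

    // Offsets into an ECIES message as laid out by encrypt()/decrypt() above:
    //   0x04 | ephemeral public key (64) | IV (16) | ciphertext | HMAC tag (32)
    fn ecies_offsets(plain_len: usize) -> (usize, usize, usize, usize) {
        let iv_start = 1 + 64;
        let cipher_start = iv_start + 16;
        let mac_start = cipher_start + plain_len;
        let total = mac_start + 32;
        (iv_start, cipher_start, mac_start, total)
    }

    fn main() {
        // decrypt() rejects anything shorter than meta_len = 1 + 64 + 16 + 32.
        let (_, _, _, total) = ecies_offsets(0);
        assert_eq!(total, 1 + 64 + 16 + 32);
    }
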
+ let mut ctr = 1u32; + let mut written = 0usize; + while written < dest.len() { + let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8]; + hasher.input(&ctrs); + hasher.input(secret); + hasher.input(s1); + hasher.result(&mut dest[written..(written + 32)]); + hasher.reset(); + written += 32; + ctr += 1; + } + } +} + diff --git a/ethkey/src/lib.rs b/ethkey/src/lib.rs index 41f53de69..0bce090a5 100644 --- a/ethkey/src/lib.rs +++ b/ethkey/src/lib.rs @@ -31,7 +31,7 @@ mod random; mod signature; lazy_static! { - static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); + pub static ref SECP256K1: secp256k1::Secp256k1 = secp256k1::Secp256k1::new(); } /// Generates new keypair. diff --git a/ethkey/src/primitive.rs b/ethkey/src/primitive.rs deleted file mode 100644 index 05ceaf3d3..000000000 --- a/ethkey/src/primitive.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - - diff --git a/ethkey/src/signature.rs b/ethkey/src/signature.rs index 7d652172c..eec0fbf47 100644 --- a/ethkey/src/signature.rs +++ b/ethkey/src/signature.rs @@ -21,7 +21,7 @@ use std::str::FromStr; use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError}; use secp256k1::key::{SecretKey, PublicKey}; use rustc_serialize::hex::{ToHex, FromHex}; -use bigint::hash::H520; +use bigint::hash::{H520, H256, FixedHash}; use {Secret, Public, SECP256K1, Error, Message, public_to_address, Address}; #[repr(C)] @@ -43,6 +43,29 @@ impl Signature { pub fn v(&self) -> u8 { self.0[64] } + + /// Create a signature object from the sig. + pub fn from_rsv(r: &H256, s: &H256, v: u8) -> Signature { + let mut sig = [0u8; 65]; + sig[0..32].copy_from_slice(&r); + sig[32..64].copy_from_slice(&s); + sig[64] = v; + Signature(sig) + } + + /// Check if this is a "low" signature. + pub fn is_low_s(&self) -> bool { + H256::from_slice(self.s()) <= "7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0".into() + } + + /// Check if each component of the signature is in range. + pub fn is_valid(&self) -> bool { + self.v() <= 1 && + H256::from_slice(self.r()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && + H256::from_slice(self.r()) >= 1.into() && + H256::from_slice(self.s()) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && + H256::from_slice(self.s()) >= 1.into() + } } // manual implementation large arrays don't have trait impls by default. 
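The `is_low_s` method added to `Signature` above encodes the usual malleability rule: for any valid `(r, s)` the pair `(r, n - s)` also verifies, so only signatures with `s` at most `n/2` are accepted. Because `s` is a 32-byte big-endian value, the check reduces to a byte-wise comparison against the half order, which can be shown without any crypto dependencies:

    // secp256k1 half order n/2, big-endian (the constant used by is_low_s()).
    const HALF_N: [u8; 32] = [
        0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
        0x5d, 0x57, 0x6e, 0x73, 0x57, 0xa4, 0x50, 0x1d,
        0xdf, 0xe9, 0x2f, 0x46, 0x68, 0x1b, 0x20, 0xa0,
    ];

    // For equal-length big-endian encodings, comparing the bytes
    // lexicographically is the same as comparing the numbers,
    // which is what the H256 comparison in is_low_s() does.
    fn is_low_s(s: &[u8; 32]) -> bool {
        *s <= HALF_N
    }

    fn main() {
        assert!(is_low_s(&[0u8; 32]));
        assert!(!is_low_s(&[0xffu8; 32]));
    }
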
diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index 691cfd969..1842456fd 100644 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -18,6 +18,7 @@ docopt = { version = "0.6", optional = true } time = "0.1.34" lazy_static = "0.2" itertools = "0.4" +ethcrypto = { path = "../ethcrypto" } [build-dependencies] serde_codegen = { version = "0.7", optional = true } diff --git a/ethstore/src/crypto.rs b/ethstore/src/crypto.rs deleted file mode 100644 index a220a7ca1..000000000 --- a/ethstore/src/crypto.rs +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -use tiny_keccak::Keccak; -use rcrypto::pbkdf2::pbkdf2; -use rcrypto::scrypt::{scrypt, ScryptParams}; -use rcrypto::sha2::Sha256; -use rcrypto::hmac::Hmac; - -pub const KEY_LENGTH: usize = 32; -pub const KEY_ITERATIONS: usize = 10240; -pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; - -pub fn derive_key_iterations(password: &str, salt: &[u8; 32], c: u32) -> (Vec, Vec) { - let mut h_mac = Hmac::new(Sha256::new(), password.as_bytes()); - let mut derived_key = vec![0u8; KEY_LENGTH]; - pbkdf2(&mut h_mac, salt, c, &mut derived_key); - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - (derived_right_bits.to_vec(), derived_left_bits.to_vec()) -} - -pub fn derive_key_scrypt(password: &str, salt: &[u8; 32], n: u32, p: u32, r: u32) -> (Vec, Vec) { - let mut derived_key = vec![0u8; KEY_LENGTH]; - let scrypt_params = ScryptParams::new(n.trailing_zeros() as u8, r, p); - scrypt(password.as_bytes(), salt, &scrypt_params, &mut derived_key); - let derived_right_bits = &derived_key[0..KEY_LENGTH_AES]; - let derived_left_bits = &derived_key[KEY_LENGTH_AES..KEY_LENGTH]; - (derived_right_bits.to_vec(), derived_left_bits.to_vec()) -} - -pub fn derive_mac(derived_left_bits: &[u8], cipher_text: &[u8]) -> Vec { - let mut mac = vec![0u8; KEY_LENGTH_AES + cipher_text.len()]; - mac[0..KEY_LENGTH_AES].copy_from_slice(derived_left_bits); - mac[KEY_LENGTH_AES..cipher_text.len() + KEY_LENGTH_AES].copy_from_slice(cipher_text); - mac -} - -pub trait Keccak256 { - fn keccak256(&self) -> T where T: Sized; -} - -impl Keccak256<[u8; 32]> for [u8] { - fn keccak256(&self) -> [u8; 32] { - let mut keccak = Keccak::new_keccak256(); - let mut result = [0u8; 32]; - keccak.update(self); - keccak.finalize(&mut result); - result - } -} - -/// AES encryption -pub mod aes { - use rcrypto::blockmodes::{CtrMode, CbcDecryptor, PkcsPadding}; - use rcrypto::aessafe::{AesSafe128Encryptor, AesSafe128Decryptor}; - use rcrypto::symmetriccipher::{Encryptor, Decryptor, SymmetricCipherError}; - use rcrypto::buffer::{RefReadBuffer, RefWriteBuffer, WriteBuffer}; - - /// Encrypt a message - pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) { - let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); - encryptor.encrypt(&mut 
RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); - } - - /// Decrypt a message - pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) { - let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); - encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); - } - - /// Decrypt a message using cbc mode - pub fn decrypt_cbc(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) -> Result { - let mut encryptor = CbcDecryptor::new(AesSafe128Decryptor::new(k), PkcsPadding, iv.to_vec()); - let len = dest.len(); - let mut buffer = RefWriteBuffer::new(dest); - try!(encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut buffer, true)); - Ok(len - buffer.remaining()) - } - -} - diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index 982a47c5a..302e165cf 100644 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -30,13 +30,13 @@ extern crate tiny_keccak; extern crate lazy_static; // reexport it nicely extern crate ethkey as _ethkey; +extern crate ethcrypto as crypto; pub mod dir; pub mod ethkey; mod account; mod json; -mod crypto; mod error; mod ethstore; diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 7a70f52c7..ac1fd96c5 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -17,6 +17,7 @@ jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc-http-server.gi ethcore-io = { path = "../util/io" } ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } +ethkey = { path = "../ethkey" } ethstore = { path = "../ethstore" } ethash = { path = "../ethash" } ethsync = { path = "../sync" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index 01a901732..6edc72d41 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -30,6 +30,7 @@ extern crate jsonrpc_http_server; extern crate ethcore_util as util; extern crate ethcore_io as io; extern crate ethcore; +extern crate ethkey; extern crate ethstore; extern crate ethsync; extern crate transient_hashmap; diff --git a/rpc/src/v1/impls/ethcore.rs b/rpc/src/v1/impls/ethcore.rs index 6a5b2e629..16b0f1931 100644 --- a/rpc/src/v1/impls/ethcore.rs +++ b/rpc/src/v1/impls/ethcore.rs @@ -18,9 +18,10 @@ use std::sync::{Arc, Weak}; use std::str::FromStr; use std::collections::{BTreeMap}; -use util::{RotatingLogger, KeyPair, Address}; +use util::{RotatingLogger, Address}; use util::misc::version_data; +use ethkey::{Brain, Generator}; use ethstore::random_phrase; use ethsync::{SyncProvider, ManageNetwork}; use ethcore::miner::MinerService; @@ -213,7 +214,7 @@ impl Ethcore for EthcoreClient where M: MinerService + fn phrase_to_address(&self, params: Params) -> Result { try!(self.active()); from_params::<(String,)>(params).and_then(|(phrase,)| - to_value(&H160::from(KeyPair::from_phrase(&phrase).address())) + to_value(&H160::from(Brain::new(phrase).generate().unwrap().address())) ) } } diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index c0a44e437..4d7008e09 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -17,14 +17,15 @@ //! 
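`phrase_to_address` now derives the account through `ethkey::Brain`, the deterministic brain-wallet generator, instead of the removed `KeyPair::from_phrase`. A minimal sketch of that call shape (the phrase here is an arbitrary example, not a recommendation):

    extern crate ethkey;

    use ethkey::{Brain, Generator};

    fn main() {
        // Brain is deterministic: the same phrase always yields the same keypair.
        let phrase = "correct horse battery staple".to_owned();
        let keypair = Brain::new(phrase).generate().unwrap();
        let _address = keypair.address();
    }
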
Account management (personal) rpc implementation use std::sync::{Arc, Weak}; use std::collections::{BTreeMap}; +use util::{Address}; use jsonrpc_core::*; +use ethkey::{Brain, Generator}; use v1::traits::Personal; use v1::types::{H160 as RpcH160, TransactionRequest}; use v1::helpers::{errors, TransactionRequest as TRequest}; use v1::helpers::params::expect_no_params; use v1::helpers::dispatch::unlock_sign_and_dispatch; use ethcore::account_provider::AccountProvider; -use util::{Address, KeyPair}; use ethcore::client::MiningBlockChainClient; use ethcore::miner::MinerService; @@ -94,7 +95,7 @@ impl Personal for PersonalClient where C: MiningBl from_params::<(String, String, )>(params).and_then( |(phrase, pass, )| { let store = take_weak!(self.accounts); - match store.insert_account(*KeyPair::from_phrase(&phrase).secret(), &pass) { + match store.insert_account(*Brain::new(phrase).generate().unwrap().secret(), &pass) { Ok(address) => to_value(&RpcH160::from(address)), Err(e) => Err(errors::account("Could not create account.", e)), } diff --git a/rpc/src/v1/tests/eth.rs b/rpc/src/v1/tests/eth.rs index 6ae923829..301492540 100644 --- a/rpc/src/v1/tests/eth.rs +++ b/rpc/src/v1/tests/eth.rs @@ -16,7 +16,6 @@ //! rpc integration tests. use std::sync::Arc; -use std::str::FromStr; use std::time::Duration; use ethcore::client::{BlockChainClient, Client, ClientConfig}; @@ -286,9 +285,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{ #[test] fn eth_transaction_count() { - use util::crypto::Secret; - - let secret = Secret::from_str("8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2").unwrap(); + let secret = "8a283037bb19c4fed7b1c569e40c7dcff366165eb869110a1b11532963eb9cb2".into(); let tester = EthTester::from_spec(Spec::load(TRANSACTION_COUNT_SPEC)); let address = tester.accounts.insert_account(secret, "").unwrap(); tester.accounts.unlock_account_permanently(address, "".into()).unwrap(); diff --git a/sync/src/api.rs b/sync/src/api.rs index 1ded32367..921eb1007 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -17,7 +17,7 @@ use std::sync::Arc; use network::{NetworkProtocolHandler, NetworkService, NetworkContext, PeerId, NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, NetworkError}; -use util::{U256, H256, Secret, Populatable}; +use util::{U256, H256, Populatable}; use io::{TimerToken}; use ethcore::client::{BlockChainClient, ChainNotify}; use ethcore::header::BlockNumber; @@ -232,7 +232,7 @@ pub struct NetworkConfiguration { /// List of initial node addresses pub boot_nodes: Vec, /// Use provided node key instead of default - pub use_secret: Option, + pub use_secret: Option, /// Max number of connected peers to maintain pub max_peers: u32, /// Min number of connected peers to maintain diff --git a/util/network/Cargo.toml b/util/network/Cargo.toml index 661d25c51..caad5c0d5 100644 --- a/util/network/Cargo.toml +++ b/util/network/Cargo.toml @@ -23,6 +23,8 @@ rustc-serialize = "0.3" ethcore-io = { path = "../io" } ethcore-util = { path = ".." 
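`NetworkConfiguration::use_secret` keeps its shape, but the `Secret` type now comes from `ethkey` rather than `util`. A sketch of how a caller fills it, mirroring the network tests later in this patch:

    extern crate ethkey;

    use ethkey::{Generator, Random, Secret};

    // Generate a fresh node key and hand its secret to the network config
    // (the NetworkConfiguration struct itself is elided here).
    fn node_key() -> Option<Secret> {
        let key = Random.generate().unwrap();
        Some(key.secret().clone())
    }

    fn main() {
        assert!(node_key().is_some());
    }
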
} ethcore-devtools = { path = "../../devtools" } +ethkey = { path = "../../ethkey" } +ethcrypto = { path = "../../ethcrypto" } [features] default = [] diff --git a/util/network/src/connection.rs b/util/network/src/connection.rs index 8fd0d0948..d3e54393b 100644 --- a/util/network/src/connection.rs +++ b/util/network/src/connection.rs @@ -29,12 +29,12 @@ use error::*; use io::{IoContext, StreamToken}; use handshake::Handshake; use stats::NetworkStats; -use util::crypto; use rcrypto::blockmodes::*; use rcrypto::aessafe::*; use rcrypto::symmetriccipher::*; use rcrypto::buffer::*; use tiny_keccak::Keccak; +use crypto; const ENCRYPTED_HEADER_LEN: usize = 32; const RECIEVE_PAYLOAD_TIMEOUT: u64 = 30000; diff --git a/util/network/src/discovery.rs b/util/network/src/discovery.rs index a78f05804..48d16be48 100644 --- a/util/network/src/discovery.rs +++ b/util/network/src/discovery.rs @@ -24,11 +24,11 @@ use mio::udp::*; use util::sha3::*; use time; use util::hash::*; -use util::crypto::*; use util::rlp::*; use node_table::*; use error::NetworkError; use io::{StreamToken, IoContext}; +use ethkey::{Secret, KeyPair, sign, recover}; use PROTOCOL_VERSION; @@ -252,7 +252,7 @@ impl Discovery { let bytes = rlp.drain(); let hash = bytes.as_ref().sha3(); - let signature = match ec::sign(&self.secret, &hash) { + let signature = match sign(&self.secret, &hash) { Ok(s) => s, Err(_) => { warn!("Error signing UDP packet"); @@ -361,8 +361,8 @@ impl Discovery { } let signed = &packet[(32 + 65)..]; - let signature = Signature::from_slice(&packet[32..(32 + 65)]); - let node_id = try!(ec::recover(&signature, &signed.sha3())); + let signature = H520::from_slice(&packet[32..(32 + 65)]); + let node_id = try!(recover(&signature.into(), &signed.sha3())); let packet_id = signed[0]; let rlp = UntrustedRlp::new(&signed[1..]); @@ -536,9 +536,9 @@ mod tests { use util::hash::*; use std::net::*; use node_table::*; - use util::crypto::KeyPair; use std::str::FromStr; use rustc_serialize::hex::FromHex; + use ethkey::{Random, Generator}; #[test] fn find_node() { @@ -559,8 +559,8 @@ mod tests { #[test] fn discovery() { - let key1 = KeyPair::create().unwrap(); - let key2 = KeyPair::create().unwrap(); + let key1 = Random.generate().unwrap(); + let key2 = Random.generate().unwrap(); let ep1 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40444").unwrap(), udp_port: 40444 }; let ep2 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40445").unwrap(), udp_port: 40445 }; let mut discovery1 = Discovery::new(&key1, ep1.address.clone(), ep1.clone(), 0); @@ -594,7 +594,7 @@ mod tests { #[test] fn removes_expired() { - let key = KeyPair::create().unwrap(); + let key = Random.generate().unwrap(); let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40446").unwrap(), udp_port: 40447 }; let mut discovery = Discovery::new(&key, ep.address.clone(), ep.clone(), 0); for _ in 0..1200 { @@ -622,7 +622,7 @@ mod tests { #[test] fn packets() { - let key = KeyPair::create().unwrap(); + let key = Random.generate().unwrap(); let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40447").unwrap(), udp_port: 40447 }; let mut discovery = Discovery::new(&key, ep.address.clone(), ep.clone(), 0); discovery.check_timestamps = false; diff --git a/util/network/src/error.rs b/util/network/src/error.rs index cbe1638ea..99bfb4500 100644 --- a/util/network/src/error.rs +++ b/util/network/src/error.rs @@ -15,10 +15,11 @@ // along with Parity. If not, see . 
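In `discovery.rs` the datagram is still signed and verified the same way, only through `ethkey::sign`/`recover`: the packet is `hash(32) || signature(65) || signed payload`, and the node id is recovered from the signature over everything after it. The slicing, as a standalone sketch (the length check is added here; the real code works with `H520` and `sha3()`):

    // Split a discovery datagram into its hash, signature and signed payload.
    fn split_packet(packet: &[u8]) -> Option<(&[u8], &[u8], &[u8])> {
        if packet.len() < 32 + 65 + 1 {
            return None;
        }
        let hash = &packet[0..32];
        let signature = &packet[32..32 + 65];
        // The first byte of the signed payload is the packet id, the rest is RLP.
        let signed = &packet[32 + 65..];
        Some((hash, signature, signed))
    }

    fn main() {
        let packet = vec![0u8; 120];
        let (hash, sig, signed) = split_packet(&packet).unwrap();
        assert_eq!((hash.len(), sig.len(), signed.len()), (32, 65, 23));
    }
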
use io::IoError; -use util::crypto::CryptoError; use util::rlp::*; use util::UtilError; use std::fmt; +use ethkey::Error as KeyError; +use crypto::Error as CryptoError; #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum DisconnectReason @@ -153,6 +154,12 @@ impl From for NetworkError { } } +impl From for NetworkError { + fn from(_err: KeyError) -> Self { + NetworkError::Auth + } +} + impl From for NetworkError { fn from(_err: CryptoError) -> NetworkError { NetworkError::Auth @@ -179,7 +186,7 @@ fn test_errors() { _ => panic!("Unexpeceted error"), } - match >::from(CryptoError::InvalidSecret) { + match >::from(CryptoError::InvalidMessage) { NetworkError::Auth => {}, _ => panic!("Unexpeceted error"), } diff --git a/util/network/src/handshake.rs b/util/network/src/handshake.rs index e87197683..403079de4 100644 --- a/util/network/src/handshake.rs +++ b/util/network/src/handshake.rs @@ -21,14 +21,14 @@ use util::hash::*; use util::rlp::*; use util::sha3::Hashable; use util::bytes::Bytes; -use util::crypto::*; -use util::crypto; use connection::{Connection}; use host::{HostInfo}; use node_table::NodeId; use error::*; use stats::NetworkStats; use io::{IoContext, StreamToken}; +use ethkey::{KeyPair, Public, Secret, recover, sign, Generator, Random}; +use crypto::{ecdh, ecies}; #[derive(PartialEq, Eq, Debug)] enum HandshakeState { @@ -89,7 +89,7 @@ impl Handshake { connection: Connection::new(token, socket, stats), originated: false, state: HandshakeState::New, - ecdhe: try!(KeyPair::create()), + ecdhe: try!(Random.generate()), nonce: nonce.clone(), remote_ephemeral: Public::new(), remote_nonce: H256::new(), @@ -166,8 +166,8 @@ impl Handshake { self.remote_nonce.clone_from_slice(remote_nonce); self.remote_version = remote_version; let shared = try!(ecdh::agree(host_secret, &self.id)); - let signature = Signature::from_slice(sig); - self.remote_ephemeral = try!(ec::recover(&signature, &(&shared ^ &self.remote_nonce))); + let signature = H520::from_slice(sig); + self.remote_ephemeral = try!(recover(&signature.into(), &(&shared ^ &self.remote_nonce))); Ok(()) } @@ -208,7 +208,7 @@ impl Handshake { self.auth_cipher.extend_from_slice(data); let auth = try!(ecies::decrypt(secret, &self.auth_cipher[0..2], &self.auth_cipher[2..])); let rlp = UntrustedRlp::new(&auth); - let signature: Signature = try!(rlp.val_at(0)); + let signature: H520 = try!(rlp.val_at(0)); let remote_public: Public = try!(rlp.val_at(1)); let remote_nonce: H256 = try!(rlp.val_at(2)); let remote_version: u64 = try!(rlp.val_at(3)); @@ -271,13 +271,13 @@ impl Handshake { let (nonce, _) = rest.split_at_mut(32); // E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0) - let shared = try!(crypto::ecdh::agree(secret, &self.id)); - try!(crypto::ec::sign(self.ecdhe.secret(), &(&shared ^ &self.nonce))).copy_to(sig); + let shared = try!(ecdh::agree(secret, &self.id)); + sig.copy_from_slice(&*try!(sign(self.ecdhe.secret(), &(&shared ^ &self.nonce)))); self.ecdhe.public().sha3_into(hepubk); - public.copy_to(pubk); - self.nonce.copy_to(nonce); + pubk.copy_from_slice(public); + nonce.copy_from_slice(&self.nonce); } - let message = try!(crypto::ecies::encrypt(&self.id, &[], &data)); + let message = try!(ecies::encrypt(&self.id, &[], &data)); self.auth_cipher = message.clone(); self.connection.send(io, message); self.connection.expect(V4_ACK_PACKET_SIZE); @@ -297,7 +297,7 @@ impl Handshake { self.ecdhe.public().copy_to(epubk); self.nonce.copy_to(nonce); } - let message = 
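Both `ethkey::Error` and `ethcrypto::Error` now collapse into `NetworkError::Auth`, so handshake code can keep using `try!` on either kind of failure. The shape of those conversions, with hypothetical stand-in types (the real ones live in ethkey, ethcrypto and util/network/src/error.rs):

    enum NetworkError { Auth }

    struct KeyError;
    struct CryptoError;

    impl From<KeyError> for NetworkError {
        fn from(_: KeyError) -> Self { NetworkError::Auth }
    }

    impl From<CryptoError> for NetworkError {
        fn from(_: CryptoError) -> Self { NetworkError::Auth }
    }

    fn agree() -> Result<(), CryptoError> { Ok(()) }

    fn handshake_step() -> Result<(), NetworkError> {
        // try! converts the CryptoError via the From impl above.
        try!(agree());
        Ok(())
    }

    fn main() {
        assert!(handshake_step().is_ok());
    }
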
try!(crypto::ecies::encrypt(&self.id, &[], &data)); + let message = try!(ecies::encrypt(&self.id, &[], &data)); self.ack_cipher = message.clone(); self.connection.send(io, message); self.state = HandshakeState::StartSession; @@ -319,7 +319,7 @@ impl Handshake { let encoded = rlp.drain(); let len = (encoded.len() + ECIES_OVERHEAD) as u16; let prefix = [ (len >> 8) as u8, (len & 0xff) as u8 ]; - let message = try!(crypto::ecies::encrypt(&self.id, &prefix, &encoded)); + let message = try!(ecies::encrypt(&self.id, &prefix, &encoded)); self.ack_cipher.extend_from_slice(&prefix); self.ack_cipher.extend_from_slice(&message); self.connection.send(io, self.ack_cipher.clone()); @@ -331,31 +331,29 @@ impl Handshake { #[cfg(test)] mod test { use std::sync::Arc; - use std::str::FromStr; use rustc_serialize::hex::FromHex; use super::*; - use util::crypto::*; use util::hash::*; use io::*; - use std::net::SocketAddr; use mio::tcp::TcpStream; use stats::NetworkStats; + use ethkey::Public; fn check_auth(h: &Handshake, version: u64) { - assert_eq!(h.id, Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap()); - assert_eq!(h.remote_nonce, H256::from_str("7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6").unwrap()); - assert_eq!(h.remote_ephemeral, Public::from_str("654d1044b69c577a44e5f01a1209523adb4026e70c62d1c13a067acabc09d2667a49821a0ad4b634554d330a15a58fe61f8a8e0544b310c6de7b0c8da7528a8d").unwrap()); + assert_eq!(h.id, "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into()); + assert_eq!(h.remote_nonce, "7e968bba13b6c50e2c4cd7f241cc0d64d1ac25c7f5952df231ac6a2bda8ee5d6".into()); + assert_eq!(h.remote_ephemeral, "654d1044b69c577a44e5f01a1209523adb4026e70c62d1c13a067acabc09d2667a49821a0ad4b634554d330a15a58fe61f8a8e0544b310c6de7b0c8da7528a8d".into()); assert_eq!(h.remote_version, version); } fn check_ack(h: &Handshake, version: u64) { - assert_eq!(h.remote_nonce, H256::from_str("559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd").unwrap()); - assert_eq!(h.remote_ephemeral, Public::from_str("b6d82fa3409da933dbf9cb0140c5dde89f4e64aec88d476af648880f4a10e1e49fe35ef3e69e93dd300b4797765a747c6384a6ecf5db9c2690398607a86181e4").unwrap()); + assert_eq!(h.remote_nonce, "559aead08264d5795d3909718cdd05abd49572e84fe55590eef31a88a08fdffd".into()); + assert_eq!(h.remote_ephemeral, "b6d82fa3409da933dbf9cb0140c5dde89f4e64aec88d476af648880f4a10e1e49fe35ef3e69e93dd300b4797765a747c6384a6ecf5db9c2690398607a86181e4".into()); assert_eq!(h.remote_version, version); } fn create_handshake(to: Option<&Public>) -> Handshake { - let addr = SocketAddr::from_str("127.0.0.1:50556").unwrap(); + let addr = "127.0.0.1:50556".parse().unwrap(); let socket = TcpStream::connect(&addr).unwrap(); let nonce = H256::new(); Handshake::new(0, to, socket, &nonce, Arc::new(NetworkStats::new())).unwrap() @@ -368,7 +366,7 @@ mod test { #[test] fn test_handshake_auth_plain() { let mut h = create_handshake(None); - let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap(); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".into(); let auth = "\ 048ca79ad18e4b0659fab4853fe5bc58eb83992980f4c9cc147d2aa31532efd29a3d3dc6a3d89eaf\ @@ -389,7 +387,7 @@ mod test { #[test] fn test_handshake_auth_eip8() { let mut h = create_handshake(None); - let secret = 
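The layout comment in `write_auth` above describes the pre-EIP-8 auth body that gets ECIES-encrypted: a recoverable signature, the hash of the ephemeral public key, the static public key, the nonce and a trailing zero byte. Its size follows directly from the `split_at_mut` calls:

    fn main() {
        // sig(65) || sha3(ephemeral-pubk)(32) || static-pubk(64) || nonce(32) || 0x00
        let auth_plain_len = 65 + 32 + 64 + 32 + 1;
        assert_eq!(auth_plain_len, 194);
        // ecies::encrypt() then adds 1 + 64 + 16 + 32 bytes of overhead
        // (type byte, ephemeral key, IV slot, HMAC tag) on the wire.
    }
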
Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap(); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".into(); let auth = "\ 01b304ab7578555167be8154d5cc456f567d5ba302662433674222360f08d5f1534499d3678b513b\ @@ -415,7 +413,7 @@ mod test { #[test] fn test_handshake_auth_eip8_2() { let mut h = create_handshake(None); - let secret = Secret::from_str("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291").unwrap(); + let secret = "b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291".into(); let auth = "\ 01b8044c6c312173685d1edd268aa95e1d495474c6959bcdd10067ba4c9013df9e40ff45f5bfd6f7\ @@ -444,9 +442,9 @@ mod test { #[test] fn test_handshake_ack_plain() { - let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap(); + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); let mut h = create_handshake(Some(&remote)); - let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap(); + let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".into(); let ack = "\ 049f8abcfa9c0dc65b982e98af921bc0ba6e4243169348a236abe9df5f93aa69d99cadddaa387662\ @@ -464,9 +462,9 @@ mod test { #[test] fn test_handshake_ack_eip8() { - let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap(); + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); let mut h = create_handshake(Some(&remote)); - let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap(); + let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".into(); let ack = "\ 01ea0451958701280a56482929d3b0757da8f7fbe5286784beead59d95089c217c9b917788989470\ @@ -493,9 +491,9 @@ mod test { #[test] fn test_handshake_ack_eip8_2() { - let remote = Public::from_str("fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877").unwrap(); + let remote = "fda1cff674c90c9a197539fe3dfb53086ace64f83ed7c6eabec741f7f381cc803e52ab2cd55d5569bce4347107a310dfd5f88a010cd2ffd1005ca406f1842877".into(); let mut h = create_handshake(Some(&remote)); - let secret = Secret::from_str("49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee").unwrap(); + let secret = "49a7b37aa6f6645917e7b807e9d1c00d4fa71f18343b0d4122a4d2df64dd6fee".into(); let ack = "\ 01f004076e58aae772bb101ab1a8e64e01ee96e64857ce82b1113817c6cdd52c09d26f7b90981cd7\ diff --git a/util/network/src/host.rs b/util/network/src/host.rs index a414ade40..a1d320c58 100644 --- a/util/network/src/host.rs +++ b/util/network/src/host.rs @@ -23,12 +23,11 @@ use std::ops::*; use std::cmp::min; use std::path::{Path, PathBuf}; use std::io::{Read, Write}; -use std::default::Default; use std::fs; +use ethkey::{KeyPair, Secret, Random, Generator}; use mio::*; use mio::tcp::*; use util::hash::*; -use util::crypto::*; use util::Hashable; use util::rlp::*; use util::version; @@ -362,7 +361,7 @@ impl Host { } else { config.config_path.clone().and_then(|ref p| load_key(Path::new(&p))) .map_or_else(|| { - let key = 
KeyPair::create().unwrap(); + let key = Random.generate().unwrap(); if let Some(path) = config.config_path.clone() { save_key(Path::new(&path), key.secret()); } diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index 47e3a1256..e36861f37 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -70,6 +70,8 @@ extern crate rustc_serialize; extern crate igd; extern crate libc; extern crate slab; +extern crate ethkey; +extern crate ethcrypto as crypto; #[cfg(test)] extern crate ethcore_devtools as devtools; diff --git a/util/network/src/tests.rs b/util/network/src/tests.rs index 3a19cbbab..4186e549a 100644 --- a/util/network/src/tests.rs +++ b/util/network/src/tests.rs @@ -20,7 +20,7 @@ use std::thread; use std::time::*; use util::common::*; use io::TimerToken; -use util::crypto::KeyPair; +use ethkey::{Random, Generator}; pub struct TestProtocol { drop_session: bool, @@ -99,7 +99,7 @@ fn net_service() { #[test] fn net_connect() { ::util::log::init_log(); - let key1 = KeyPair::create().unwrap(); + let key1 = Random.generate().unwrap(); let mut config1 = NetworkConfiguration::new_local(); config1.use_secret = Some(key1.secret().clone()); config1.boot_nodes = vec![ ]; @@ -130,7 +130,7 @@ fn net_start_stop() { #[test] fn net_disconnect() { - let key1 = KeyPair::create().unwrap(); + let key1 = Random.generate().unwrap(); let mut config1 = NetworkConfiguration::new_local(); config1.use_secret = Some(key1.secret().clone()); config1.boot_nodes = vec![ ]; diff --git a/util/src/crypto.rs b/util/src/crypto.rs deleted file mode 100644 index 298bc205e..000000000 --- a/util/src/crypto.rs +++ /dev/null @@ -1,560 +0,0 @@ -// Copyright 2015, 2016 Ethcore (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Ethcore crypto. - -use bigint::uint::*; -use bigint::hash::*; -use bytes::*; -use secp256k1::{key, Secp256k1}; -use rand::os::OsRng; -use sha3::Hashable; -use std::fmt; -use Address; - -/// Secret key for secp256k1 EC operations. 256 bit generic "hash" data. -pub type Secret = H256; -/// Public key for secp256k1 EC operations. 512 bit generic "hash" data. -pub type Public = H512; -/// Signature for secp256k1 EC operations; encodes two 256-bit curve points -/// and a third sign bit. 520 bit generic "hash" data. -pub type Signature = H520; - -lazy_static! { - static ref SECP256K1: Secp256k1 = Secp256k1::new(); -} - -/// Create a new signature from the R, S and V componenets. -pub fn signature_from_rsv(r: &H256, s: &H256, v: u8) -> Signature { - let mut ret: Signature = Signature::new(); - (&mut ret[0..32]).copy_from_slice(r); - (&mut ret[32..64]).copy_from_slice(s); - - ret[64] = v; - ret -} - -/// Convert transaction to R, S and V components. 
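`Host` keeps its node-key bootstrap logic, now expressed with `ethkey`: reuse a key persisted under the config path if one loads, otherwise generate a fresh one and save it. A compact sketch of that flow, with hypothetical stand-ins for the host's `load_key`/`save_key` helpers:

    extern crate ethkey;

    use std::path::Path;
    use ethkey::{Generator, Random, Secret};

    // Hypothetical stand-ins for the persisted-key helpers used by Host::new().
    fn load_key(_path: &Path) -> Option<Secret> { None }
    fn save_key(_path: &Path, _key: &Secret) {}

    fn node_key(config_path: Option<String>) -> Secret {
        match config_path.as_ref().and_then(|p| load_key(Path::new(p))) {
            Some(secret) => secret,
            None => {
                let key = Random.generate().unwrap();
                if let Some(ref path) = config_path {
                    save_key(Path::new(path), key.secret());
                }
                key.secret().clone()
            }
        }
    }

    fn main() {
        let _ = node_key(None);
    }
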
-pub fn signature_to_rsv(s: &Signature) -> (U256, U256, u8) { - (U256::from(&s.as_slice()[0..32]), U256::from(&s.as_slice()[32..64]), s[64]) -} - -#[derive(Debug)] -/// Crypto error -pub enum CryptoError { - /// Invalid secret key - InvalidSecret, - /// Invalid public key - InvalidPublic, - /// Invalid EC signature - InvalidSignature, - /// Invalid AES message - InvalidMessage, - /// IO Error - Io(::std::io::Error), -} - -impl fmt::Display for CryptoError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let msg = match *self { - CryptoError::InvalidSecret => "Invalid secret key".into(), - CryptoError::InvalidPublic => "Invalid public key".into(), - CryptoError::InvalidSignature => "Invalid EC signature".into(), - CryptoError::InvalidMessage => "Invalid AES message".into(), - CryptoError::Io(ref err) => format!("I/O error: {}", err), - }; - - f.write_fmt(format_args!("Crypto error ({})", msg)) - } -} - -impl From<::secp256k1::Error> for CryptoError { - fn from(e: ::secp256k1::Error) -> CryptoError { - match e { - ::secp256k1::Error::InvalidMessage => CryptoError::InvalidMessage, - ::secp256k1::Error::InvalidPublicKey => CryptoError::InvalidPublic, - ::secp256k1::Error::InvalidSecretKey => CryptoError::InvalidSecret, - _ => CryptoError::InvalidSignature, - } - } -} - -impl From<::std::io::Error> for CryptoError { - fn from(err: ::std::io::Error) -> CryptoError { - CryptoError::Io(err) - } -} - -#[derive(Debug, PartialEq, Eq)] -/// secp256k1 Key pair -/// -/// Use `create()` to create a new random key pair. -/// -/// # Example -/// ```rust -/// extern crate ethcore_util; -/// use ethcore_util::crypto::*; -/// use ethcore_util::hash::*; -/// fn main() { -/// let pair = KeyPair::create().unwrap(); -/// let message = H256::random(); -/// let signature = ec::sign(pair.secret(), &message).unwrap(); -/// -/// assert!(ec::verify(pair.public(), &signature, &message).unwrap()); -/// assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public()); -/// } -/// ``` -pub struct KeyPair { - secret: Secret, - public: Public, -} - -impl KeyPair { - /// Create a pair from secret key - pub fn from_secret(secret: Secret) -> Result { - let context = &SECP256K1; - let s: key::SecretKey = try!(key::SecretKey::from_slice(context, &secret)); - let pub_key = try!(key::PublicKey::from_secret_key(context, &s)); - let serialized = pub_key.serialize_vec(context, false); - let p: Public = Public::from_slice(&serialized[1..65]); - Ok(KeyPair { - secret: secret, - public: p, - }) - } - - - // TODO: move to ethstore/secret.rs once @debris has refactored necessary dependencies into own crate - /// Convert the given phrase into a secret as per brain-wallet spec. - /// Taken from https://github.com/ethereum/wiki/wiki/Brain-Wallet - /// Note particularly secure for low-entropy keys. 
- pub fn from_phrase(phrase: &str) -> KeyPair { - let mut h = phrase.as_bytes().sha3(); - for _ in 0..16384 { - h = h.sha3(); - } - loop { - let r = KeyPair::from_secret(h); - if r.is_ok() { - let r = r.unwrap(); - if r.address()[0] == 0 { - return r; - } - } - h = h.sha3(); - } - } - - /// Create a new random key pair - pub fn create() -> Result { - let context = &SECP256K1; - let mut rng = try!(OsRng::new()); - let (sec, publ) = try!(context.generate_keypair(&mut rng)); - let serialized = publ.serialize_vec(context, false); - let p: Public = Public::from_slice(&serialized[1..65]); - - let mut s = Secret::new(); - s.copy_from_slice(&sec[0..32]); - - Ok(KeyPair { - secret: s, - public: p, - }) - } - - /// Returns public key - pub fn public(&self) -> &Public { - &self.public - } - - /// Returns private key - pub fn secret(&self) -> &Secret { - &self.secret - } - - /// Returns address. - pub fn address(&self) -> Address { - Address::from(self.public.sha3()) - } - - /// Sign a message with our secret key. - pub fn sign(&self, message: &H256) -> Result { ec::sign(&self.secret, message) } -} - -/// EC functions -#[cfg_attr(feature="dev", allow(similar_names))] -pub mod ec { - use bigint::hash::*; - use bigint::uint::*; - use standard::*; - use crypto::*; - use crypto::{self}; - - /// Recovers Public key from signed message hash. - pub fn recover(signature: &Signature, message: &H256) -> Result { - use secp256k1::*; - let context = &crypto::SECP256K1; - let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32)))); - let publ = try!(context.recover(&try!(Message::from_slice(&message)), &rsig)); - let serialized = publ.serialize_vec(context, false); - let p: Public = Public::from_slice(&serialized[1..65]); - //TODO: check if it's the zero key and fail if so. - Ok(p) - } - /// Returns siganture of message hash. - pub fn sign(secret: &Secret, message: &H256) -> Result { - // TODO: allow creation of only low-s signatures. - use secp256k1::{Message, key}; - - let context = &crypto::SECP256K1; - // no way to create from raw byte array. - let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) }; - let s = try!(context.sign_recoverable(&try!(Message::from_slice(&message)), sec)); - let (rec_id, data) = s.serialize_compact(context); - let mut signature = crypto::Signature::new(); - signature.clone_from_slice(&data); - signature[64] = rec_id.to_i32() as u8; - - let (_, s, v) = signature_to_rsv(&signature); - let secp256k1n = U256::from_str("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141").unwrap(); - if !is_low_s(&s) { - signature = super::signature_from_rsv(&H256::from_slice(&signature[0..32]), &H256::from(secp256k1n - s), v ^ 1); - } - Ok(signature) - } - - /// Verify signature. - pub fn verify(public: &Public, signature: &Signature, message: &H256) -> Result { - use secp256k1::*; - let context = &crypto::SECP256K1; - let rsig = try!(RecoverableSignature::from_compact(context, &signature[0..64], try!(RecoveryId::from_i32(signature[64] as i32)))); - let sig = rsig.to_standard(context); - - let pdata: [u8; 65] = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(public); - temp - }; - - let publ = try!(key::PublicKey::from_slice(context, &pdata)); - match context.verify(&try!(Message::from_slice(&message)), &sig, &publ) { - Ok(_) => Ok(true), - Err(Error::IncorrectSignature) => Ok(false), - Err(x) => Err(CryptoError::from(x)) - } - } - - /// Check if this is a "low" signature. 
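The removed `from_phrase` documents the brain-wallet derivation that `ethkey::Brain` (used earlier in this patch) is expected to reproduce: keccak-hash the phrase, rehash it 16384 times, then keep rehashing until the candidate is a valid secp256k1 secret whose address begins with a zero byte. The hashing part of that loop, sketched with the same `tiny_keccak` API the new ethcrypto crate uses (key-validity and address checks elided):

    extern crate tiny_keccak;

    use tiny_keccak::Keccak;

    fn keccak256(data: &[u8]) -> [u8; 32] {
        let mut keccak = Keccak::new_keccak256();
        let mut out = [0u8; 32];
        keccak.update(data);
        keccak.finalize(&mut out);
        out
    }

    // First candidate secret for a brain-wallet phrase, per the removed code.
    fn brain_candidate(phrase: &str) -> [u8; 32] {
        let mut h = keccak256(phrase.as_bytes());
        for _ in 0..16384 {
            h = keccak256(&h);
        }
        h
    }

    fn main() {
        let _ = brain_candidate("correct horse battery staple");
    }
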
- pub fn is_low(sig: &Signature) -> bool { - H256::from_slice(&sig[32..64]) <= "7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0".into() - } - - /// Check if this is a "low" signature. - pub fn is_low_s(s: &U256) -> bool { - s <= &U256::from_str("7FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF5D576E7357A4501DDFE92F46681B20A0").unwrap() - } - - /// Check if each component of the signature is in range. - pub fn is_valid(sig: &Signature) -> bool { - sig[64] <= 1 && - H256::from_slice(&sig[0..32]) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && - H256::from_slice(&sig[32..64]) < "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into() && - H256::from_slice(&sig[32..64]) >= 1.into() && - H256::from_slice(&sig[0..32]) >= 1.into() - } -} - -/// ECDH functions -#[cfg_attr(feature="dev", allow(similar_names))] -pub mod ecdh { - use hash::FixedHash; - use crypto::{self, Secret, Public, CryptoError}; - - /// Agree on a shared secret - pub fn agree(secret: &Secret, public: &Public) -> Result { - use secp256k1::{ecdh, key}; - - let context = &crypto::SECP256K1; - let pdata = { - let mut temp = [4u8; 65]; - (&mut temp[1..65]).copy_from_slice(&public[0..64]); - temp - }; - - let publ = try!(key::PublicKey::from_slice(context, &pdata)); - // no way to create SecretKey from raw byte array. - let sec: &key::SecretKey = unsafe { ::std::mem::transmute(secret) }; - let shared = ecdh::SharedSecret::new_raw(context, &publ, sec); - - let mut s = crypto::Secret::new(); - s.copy_from_slice(&shared[0..32]); - Ok(s) - } -} - -/// ECIES function -#[cfg_attr(feature="dev", allow(similar_names))] -pub mod ecies { - use hash::*; - use bytes::*; - use crypto::*; - use sha3::Hashable; - - /// Encrypt a message with a public key - pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result { - use ::rcrypto::digest::Digest; - use ::rcrypto::sha2::Sha256; - use ::rcrypto::hmac::Hmac; - use ::rcrypto::mac::Mac; - let r = try!(KeyPair::create()); - let z = try!(ecdh::agree(r.secret(), public)); - let mut key = [0u8; 32]; - let mut mkey = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - let mut hasher = Sha256::new(); - let mkey_material = &key[16..32]; - hasher.input(mkey_material); - hasher.result(&mut mkey); - let ekey = &key[0..16]; - - let mut msg = vec![0u8; (1 + 64 + 16 + plain.len() + 32)]; - msg[0] = 0x04u8; - { - let msgd = &mut msg[1..]; - r.public().copy_to(&mut msgd[0..64]); - let iv = H128::random(); - iv.copy_to(&mut msgd[64..(64+16)]); - { - let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())]; - aes::encrypt(ekey, &iv, plain, cipher); - } - let mut hmac = Hmac::new(Sha256::new(), &mkey); - { - let cipher_iv = &msgd[64..(64 + 16 + plain.len())]; - hmac.input(cipher_iv); - } - hmac.input(shared_mac); - hmac.raw_result(&mut msgd[(64 + 16 + plain.len())..]); - } - Ok(msg) - } - - /// Encrypt a message with a public key - pub fn encrypt_single_message(public: &Public, plain: &[u8]) -> Result { - use ::rcrypto::digest::Digest; - use ::rcrypto::sha2::Sha256; - let r = try!(KeyPair::create()); - let z = try!(ecdh::agree(r.secret(), public)); - let mut key = [0u8; 32]; - let mut mkey = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - let mut hasher = Sha256::new(); - let mkey_material = &key[16..32]; - hasher.input(mkey_material); - hasher.result(&mut mkey); - let ekey = &key[0..16]; - - let mut msgd = vec![0u8; (64 + plain.len())]; - { - r.public().copy_to(&mut msgd[0..64]); - let iv = H128::from_slice(&z.sha3()[0..16]); - { - let cipher = &mut msgd[64..(64 
+ plain.len())]; - aes::encrypt(ekey, &iv, plain, cipher); - } - } - Ok(msgd) - } - - /// Decrypt a message with a secret key - pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result { - use ::rcrypto::digest::Digest; - use ::rcrypto::sha2::Sha256; - use ::rcrypto::hmac::Hmac; - use ::rcrypto::mac::Mac; - - let meta_len = 1 + 64 + 16 + 32; - if encrypted.len() < meta_len || encrypted[0] < 2 || encrypted[0] > 4 { - return Err(CryptoError::InvalidMessage); //invalid message: publickey - } - - let e = &encrypted[1..]; - let p = Public::from_slice(&e[0..64]); - let z = try!(ecdh::agree(secret, &p)); - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - let ekey = &key[0..16]; - let mkey_material = &key[16..32]; - let mut hasher = Sha256::new(); - let mut mkey = [0u8; 32]; - hasher.input(mkey_material); - hasher.result(&mut mkey); - - let clen = encrypted.len() - meta_len; - let cipher_with_iv = &e[64..(64+16+clen)]; - let cipher_iv = &cipher_with_iv[0..16]; - let cipher_no_iv = &cipher_with_iv[16..]; - let msg_mac = &e[(64+16+clen)..]; - - // Verify tag - let mut hmac = Hmac::new(Sha256::new(), &mkey); - hmac.input(cipher_with_iv); - hmac.input(shared_mac); - let mut mac = H256::new(); - hmac.raw_result(&mut mac); - if &mac[..] != msg_mac { - return Err(CryptoError::InvalidMessage); - } - - let mut msg = vec![0u8; clen]; - aes::decrypt(ekey, cipher_iv, cipher_no_iv, &mut msg[..]); - Ok(msg) - } - - /// Decrypt single message with a secret key - pub fn decrypt_single_message(secret: &Secret, encrypted: &[u8]) -> Result { - use ::rcrypto::digest::Digest; - use ::rcrypto::sha2::Sha256; - - let meta_len = 64; - if encrypted.len() < meta_len { - return Err(CryptoError::InvalidMessage); //invalid message: publickey - } - - let e = encrypted; - let p = Public::from_slice(&e[0..64]); - let z = try!(ecdh::agree(secret, &p)); - let mut key = [0u8; 32]; - kdf(&z, &[0u8; 0], &mut key); - let ekey = &key[0..16]; - let mkey_material = &key[16..32]; - let mut hasher = Sha256::new(); - let mut mkey = [0u8; 32]; - hasher.input(mkey_material); - hasher.result(&mut mkey); - - let clen = encrypted.len() - meta_len; - let cipher = &e[64..(64+clen)]; - let mut msg = vec![0u8; clen]; - let iv = H128::from_slice(&z.sha3()[0..16]); - aes::decrypt(ekey, &iv, cipher, &mut msg[..]); - Ok(msg) - } - - fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { - use ::rcrypto::digest::Digest; - use ::rcrypto::sha2::Sha256; - let mut hasher = Sha256::new(); - // SEC/ISO/Shoup specify counter size SHOULD be equivalent - // to size of hash output, however, it also notes that - // the 4 bytes is okay. NIST specifies 4 bytes. 
- let mut ctr = 1u32; - let mut written = 0usize; - while written < dest.len() { - let ctrs = [(ctr >> 24) as u8, (ctr >> 16) as u8, (ctr >> 8) as u8, ctr as u8]; - hasher.input(&ctrs); - hasher.input(secret); - hasher.input(s1); - hasher.result(&mut dest[written..(written + 32)]); - hasher.reset(); - written += 32; - ctr += 1; - } - } -} - -/// AES encryption -pub mod aes { - use ::rcrypto::blockmodes::*; - use ::rcrypto::aessafe::*; - use ::rcrypto::symmetriccipher::*; - use ::rcrypto::buffer::*; - - /// Encrypt a message - pub fn encrypt(k: &[u8], iv: &[u8], plain: &[u8], dest: &mut [u8]) { - let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); - encryptor.encrypt(&mut RefReadBuffer::new(plain), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); - } - - /// Decrypt a message - pub fn decrypt(k: &[u8], iv: &[u8], encrypted: &[u8], dest: &mut [u8]) { - let mut encryptor = CtrMode::new(AesSafe128Encryptor::new(k), iv.to_vec()); - encryptor.decrypt(&mut RefReadBuffer::new(encrypted), &mut RefWriteBuffer::new(dest), true).expect("Invalid length or padding"); - } -} - - -#[cfg(test)] -mod tests { - use hash::*; - use crypto::*; - - // TODO: tests for sign/recover roundtrip, at least. - - #[test] - fn test_signature() { - let pair = KeyPair::create().unwrap(); - let message = H256::random(); - let signature = ec::sign(pair.secret(), &message).unwrap(); - - assert!(ec::verify(pair.public(), &signature, &message).unwrap()); - assert_eq!(ec::recover(&signature, &message).unwrap(), *pair.public()); - } - - #[test] - fn test_invalid_key() { - assert!(KeyPair::from_secret("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff".into()).is_err()); - assert!(KeyPair::from_secret("0000000000000000000000000000000000000000000000000000000000000000".into()).is_err()); - assert!(KeyPair::from_secret("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141".into()).is_err()); - } - - #[test] - fn test_key() { - let pair = KeyPair::from_secret("6f7b0d801bc7b5ce7bbd930b84fd0369b3eb25d09be58d64ba811091046f3aa2".into()).unwrap(); - assert_eq!(pair.public().hex(), "101b3ef5a4ea7a1c7928e24c4c75fd053c235d7b80c22ae5c03d145d0ac7396e2a4ffff9adee3133a7b05044a5cee08115fd65145e5165d646bde371010d803c"); - } - - #[test] - fn test_key_from_phrase() { - assert_eq!(KeyPair::from_phrase("correct horse battery staple").address(), "0021f80b7f29b9c84e8099c2c6c74a46ed2268c4".into()); - } - - #[test] - fn ecies_shared() { - let kp = KeyPair::create().unwrap(); - let message = b"So many books, so little time"; - - let shared = b"shared"; - let wrong_shared = b"incorrect"; - let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); - assert!(encrypted[..] != message[..]); - assert_eq!(encrypted[0], 0x04); - - assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); - let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); - assert_eq!(decrypted[..message.len()], message[..]); - } - - #[test] - fn ecies_shared_single() { - let kp = KeyPair::create().unwrap(); - let message = b"So many books, so little time"; - let encrypted = ecies::encrypt_single_message(kp.public(), message).unwrap(); - assert!(encrypted[..] 
!= message[..]); - let decrypted = ecies::decrypt_single_message(kp.secret(), &encrypted).unwrap(); - assert_eq!(decrypted[..message.len()], message[..]); - } -} diff --git a/util/src/error.rs b/util/src/error.rs index afc49b27c..e1bf7b8a7 100644 --- a/util/src/error.rs +++ b/util/src/error.rs @@ -44,8 +44,6 @@ impl fmt::Display for BaseDataError { #[derive(Debug)] /// General error type which should be capable of representing all errors in ethcore. pub enum UtilError { - /// Error concerning the crypto utility subsystem. - Crypto(::crypto::CryptoError), /// Error concerning the Rust standard library's IO subsystem. StdIo(::std::io::Error), /// Error concerning the hex conversion logic. @@ -65,7 +63,6 @@ pub enum UtilError { impl fmt::Display for UtilError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { - UtilError::Crypto(ref err) => f.write_fmt(format_args!("{}", err)), UtilError::StdIo(ref err) => f.write_fmt(format_args!("{}", err)), UtilError::FromHex(ref err) => f.write_fmt(format_args!("{}", err)), UtilError::BaseData(ref err) => f.write_fmt(format_args!("{}", err)), @@ -134,12 +131,6 @@ impl From<::std::io::Error> for UtilError { } } -impl From<::crypto::CryptoError> for UtilError { - fn from(err: ::crypto::CryptoError) -> UtilError { - UtilError::Crypto(err) - } -} - impl From<::rlp::DecoderError> for UtilError { fn from(err: ::rlp::DecoderError) -> UtilError { UtilError::Decoder(err) diff --git a/util/src/lib.rs b/util/src/lib.rs index f8dc34af8..f7459615e 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -132,7 +132,6 @@ pub mod migration; pub mod overlaydb; pub mod journaldb; pub mod kvdb; -pub mod crypto; pub mod triehash; pub mod trie; pub mod nibbleslice; @@ -150,7 +149,6 @@ pub use hashdb::*; pub use memorydb::*; pub use overlaydb::*; pub use journaldb::JournalDB; -pub use crypto::*; pub use triehash::*; pub use trie::{Trie, TrieMut, TrieDB, TrieDBMut, TrieFactory, TrieError, SecTrieDB, SecTrieDBMut}; pub use nibbleslice::*; @@ -162,3 +160,5 @@ pub use timer::*; /// 160-bit integer representing account address pub type Address = H160; +/// Secret +pub type Secret = H256; From d6311624405b151ed817501d9dd6df8c27bac65d Mon Sep 17 00:00:00 2001 From: Nikolay Volf Date: Wed, 24 Aug 2016 20:35:38 +0400 Subject: [PATCH 23/29] Stratum IPC service (#1959) * boot binaries reorg & helpers * stratum ipc service * spaces in cli --- Cargo.lock | 4 ++ Cargo.toml | 1 + parity/boot.rs | 123 ++++++++++++++++++++++++++++++++++++++++++ parity/main.rs | 40 ++++++++++++++ parity/modules.rs | 5 ++ parity/stratum.rs | 57 ++++++++++++++++++++ parity/sync.rs | 97 ++++++--------------------------- stratum/Cargo.toml | 7 +++ stratum/build.rs | 21 ++++++++ stratum/src/lib.rs | 17 ++++-- stratum/src/traits.rs | 21 ++++++-- 11 files changed, 304 insertions(+), 89 deletions(-) create mode 100644 parity/boot.rs create mode 100644 parity/stratum.rs create mode 100644 stratum/build.rs diff --git a/Cargo.lock b/Cargo.lock index 5bd5e8f32..5c06c5a62 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -472,12 +472,16 @@ version = "1.4.0" dependencies = [ "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-devtools 1.4.0", + "ethcore-ipc 1.4.0", + "ethcore-ipc-codegen 1.4.0", + "ethcore-ipc-nano 1.4.0", "ethcore-util 1.4.0", "json-tcp-server 0.1.0 (git+https://github.com/ethcore/json-tcp-server)", "jsonrpc-core 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 
(registry+https://github.com/rust-lang/crates.io-index)", "mio 0.5.1 (git+https://github.com/ethcore/mio?branch=v0.5.x)", + "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 6ef8bcc7e..661544465 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -61,6 +61,7 @@ dapps = ["ethcore-dapps"] ipc = ["ethcore/ipc"] dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] +stratum = ["ipc"] [[bin]] path = "parity/main.rs" diff --git a/parity/boot.rs b/parity/boot.rs new file mode 100644 index 000000000..ddc05437c --- /dev/null +++ b/parity/boot.rs @@ -0,0 +1,123 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Parity micro-service helpers + +use nanoipc; +use ipc; +use std; +use std::sync::Arc; +use hypervisor::HypervisorServiceClient; +use hypervisor::service::IpcModuleId; +use ctrlc::CtrlC; +use std::sync::atomic::{AtomicBool, Ordering}; +use nanoipc::{IpcInterface, GuardedSocket, NanoSocket}; +use ipc::WithSocket; +use ethcore_logger::{Config as LogConfig, setup_log}; +use docopt::Docopt; + +#[derive(Debug)] +pub enum BootError { + ReadArgs(std::io::Error), + DecodeArgs(ipc::binary::BinaryError), + DependencyConnect(nanoipc::SocketError), +} + +pub fn host_service(addr: &str, stop_guard: Arc, service: Arc) where T: IpcInterface { + let socket_url = addr.to_owned(); + std::thread::spawn(move || { + let mut worker = nanoipc::Worker::::new(&service); + worker.add_reqrep(&socket_url).unwrap(); + + while !stop_guard.load(Ordering::Relaxed) { + worker.poll(); + } + }); +} + +pub fn payload() -> Result { + use std::io; + use std::io::Read; + + let mut buffer = Vec::new(); + try!( + io::stdin().read_to_end(&mut buffer) + .map_err(|io_err| BootError::ReadArgs(io_err)) + ); + + ipc::binary::deserialize::(&buffer) + .map_err(|binary_error| BootError::DecodeArgs(binary_error)) +} + +pub fn register(hv_url: &str, module_id: IpcModuleId) -> GuardedSocket>{ + let hypervisor_client = nanoipc::init_client::>(hv_url).unwrap(); + hypervisor_client.handshake().unwrap(); + hypervisor_client.module_ready(module_id); + + hypervisor_client +} + +pub fn dependency>(url: &str) + -> Result, BootError> +{ + nanoipc::init_client::(url).map_err(|socket_err| BootError::DependencyConnect(socket_err)) +} + +pub fn main_thread() -> Arc { + let stop = Arc::new(AtomicBool::new(false)); + let ctrc_stop = stop.clone(); + CtrlC::set_handler(move || { + ctrc_stop.store(true, Ordering::Relaxed); + }); + stop +} + +pub fn setup_cli_logger(svc_name: &str) { + let usage = format!(" +Ethcore {} service +Usage: + parity {} [options] + + Options: + -l --logging LOGGING Specify the logging level. Must conform to the same + format as RUST_LOG. + --log-file FILENAME Specify a filename into which logging should be + directed. 
+ --no-color Don't use terminal color codes in output. +", svc_name, svc_name); + + #[derive(Debug, RustcDecodable)] + struct Args { + flag_logging: Option, + flag_log_file: Option, + flag_no_color: bool, + } + + impl Args { + pub fn log_settings(&self) -> LogConfig { + LogConfig { + color: self.flag_no_color || cfg!(windows), + mode: self.flag_logging.clone(), + file: self.flag_log_file.clone(), + } + } + } + + let args: Args = Docopt::new(usage) + .and_then(|d| d.decode()) + .unwrap_or_else(|e| e.exit()); + setup_log(&args.log_settings()).expect("Log initialization failure"); +} diff --git a/parity/main.rs b/parity/main.rs index bb9f5e743..406645f06 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -57,9 +57,24 @@ extern crate lazy_static; extern crate regex; extern crate isatty; +#[cfg(feature="stratum")] +extern crate ethcore_stratum; + #[cfg(feature = "dapps")] extern crate ethcore_dapps; +macro_rules! dependency { + ($dep_ty:ident, $url:expr) => { + { + let dep = boot::dependency::<$dep_ty<_>>($url) + .unwrap_or_else(|e| panic!("Fatal: error connecting service ({:?})", e)); + dep.handshake() + .unwrap_or_else(|e| panic!("Fatal: error in connected service ({:?})", e)); + dep + } + } +} + mod cache; mod upgrade; mod rpc; @@ -83,6 +98,10 @@ mod presale; mod run; mod sync; mod snapshot; +mod boot; + +#[cfg(feature="stratum")] +mod stratum; use std::{process, env}; use cli::print_version; @@ -116,6 +135,25 @@ fn start() -> Result { execute(cmd) } +#[cfg(feature="stratum")] +mod stratum_optional { + pub fn probably_run() -> bool { + // just redirect to the stratum::main() + if ::std::env::args().nth(1).map_or(false, |arg| arg == "stratum") { + super::stratum::main(); + true + } + else { false } + } +} + +#[cfg(not(feature="stratum"))] +mod stratum_optional { + pub fn probably_run() -> bool { + false + } +} + fn main() { // just redirect to the sync::main() if std::env::args().nth(1).map_or(false, |arg| arg == "sync") { @@ -123,6 +161,8 @@ fn main() { return; } + if stratum_optional::probably_run() { return; } + match start() { Ok(result) => { println!("{}", result); diff --git a/parity/modules.rs b/parity/modules.rs index 20f2567ce..83ae44802 100644 --- a/parity/modules.rs +++ b/parity/modules.rs @@ -32,6 +32,11 @@ pub mod service_urls { pub const SYNC: &'static str = "parity-sync.ipc"; pub const SYNC_NOTIFY: &'static str = "parity-sync-notify.ipc"; pub const NETWORK_MANAGER: &'static str = "parity-manage-net.ipc"; + #[cfg(feature="stratum")] + pub const STRATUM: &'static str = "parity-stratum.ipc"; + #[cfg(feature="stratum")] + pub const MINING_JOB_DISPATCHER: &'static str = "parity-mining-jobs.ipc"; + pub fn with_base(data_dir: &str, service_path: &str) -> String { let mut path = PathBuf::from(data_dir); diff --git a/parity/stratum.rs b/parity/stratum.rs new file mode 100644 index 000000000..32c7b8a50 --- /dev/null +++ b/parity/stratum.rs @@ -0,0 +1,57 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Parity sync service + +use std; +use std::sync::Arc; +use ethcore_stratum::{Stratum as StratumServer, PushWorkHandler, RemoteJobDispatcher, ServiceConfiguration}; +use std::thread; +use modules::service_urls; +use boot; +use hypervisor::service::IpcModuleId; +use std::net::SocketAddr; +use std::str::FromStr; + +const STRATUM_MODULE_ID: IpcModuleId = 8000; + +pub fn main() { + boot::setup_cli_logger("stratum"); + + let service_config: ServiceConfiguration = boot::payload() + .unwrap_or_else(|e| panic!("Fatal: error reading boot arguments ({:?})", e)); + + let job_dispatcher = dependency!(RemoteJobDispatcher, service_urls::MINING_JOB_DISPATCHER); + + let stop = boot::main_thread(); + let server = + StratumServer::start( + &SocketAddr::from_str(&service_config.listen_addr) + .unwrap_or_else(|e| panic!("Fatal: invalid listen address ({:?})", e)), + job_dispatcher.service().clone(), + service_config.secret + ).unwrap_or_else( + |e| panic!("Fatal: cannot start stratum server({:?})", e) + ); + + boot::host_service(service_urls::STRATUM, stop.clone(), server.clone() as Arc); + + let _ = boot::register(STRATUM_MODULE_ID); + + while !stop.load(::std::sync::atomic::Ordering::Relaxed) { + thread::park_timeout(std::time::Duration::from_millis(1000)); + } +} diff --git a/parity/sync.rs b/parity/sync.rs index 382c1806d..5d3056acd 100644 --- a/parity/sync.rs +++ b/parity/sync.rs @@ -16,113 +16,48 @@ //! Parity sync service -use nanoipc; -use ipc; use std; use std::sync::Arc; -use hypervisor::{HypervisorServiceClient, SYNC_MODULE_ID, HYPERVISOR_IPC_URL}; -use ctrlc::CtrlC; -use std::sync::atomic::{AtomicBool, Ordering}; -use docopt::Docopt; +use hypervisor::{SYNC_MODULE_ID, HYPERVISOR_IPC_URL}; use ethcore::client::{RemoteClient, ChainNotify}; use ethsync::{SyncProvider, EthSync, ManageNetwork, ServiceConfiguration}; use std::thread; -use nanoipc::IpcInterface; use modules::service_urls; -use ethcore_logger::{Config as LogConfig, setup_log}; - -const USAGE: &'static str = " -Ethcore sync service -Usage: - parity sync [options] - - Options: - -l --logging LOGGING Specify the logging level. Must conform to the same - format as RUST_LOG. - --log-file FILENAME Specify a filename into which logging should be - directed. - --no-color Don't use terminal color codes in output. 
-"; - -#[derive(Debug, RustcDecodable)] -struct Args { - flag_logging: Option, - flag_log_file: Option, - flag_no_color: bool, -} - -impl Args { - pub fn log_settings(&self) -> LogConfig { - LogConfig { - color: self.flag_no_color || cfg!(windows), - mode: self.flag_logging.clone(), - file: self.flag_log_file.clone(), - } - } -} - -fn run_service(addr: &str, stop_guard: Arc, service: Arc) where T: IpcInterface { - let socket_url = addr.to_owned(); - std::thread::spawn(move || { - let mut worker = nanoipc::Worker::::new(&service); - worker.add_reqrep(&socket_url).unwrap(); - - while !stop_guard.load(Ordering::Relaxed) { - worker.poll(); - } - }); -} +use boot; pub fn main() { - use std::io::{self, Read}; + boot::setup_cli_logger("sync"); - let args: Args = Docopt::new(USAGE) - .and_then(|d| d.decode()) - .unwrap_or_else(|e| e.exit()); + let service_config: ServiceConfiguration = boot::payload() + .unwrap_or_else(|e| panic!("Fatal: error reading boot arguments ({:?})", e)); - setup_log(&args.log_settings()).expect("Log initialization failure"); + let remote_client = dependency!(RemoteClient, &service_urls::with_base(&service_config.io_path, service_urls::CLIENT)); - let mut buffer = Vec::new(); - io::stdin().read_to_end(&mut buffer).expect("Failed to read initialisation payload"); - let service_config = ipc::binary::deserialize::(&buffer).expect("Failed deserializing initialisation payload"); - - let remote_client = nanoipc::init_client::>( - &service_urls::with_base(&service_config.io_path, service_urls::CLIENT), - ).unwrap(); - - remote_client.handshake().unwrap(); - - let stop = Arc::new(AtomicBool::new(false)); + let stop = boot::main_thread(); let sync = EthSync::new(service_config.sync, remote_client.service().clone(), service_config.net).unwrap(); - run_service( + let _ = boot::register( + &service_urls::with_base(&service_config.io_path, HYPERVISOR_IPC_URL), + SYNC_MODULE_ID + ); + + boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::SYNC), stop.clone(), sync.clone() as Arc ); - run_service( + boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::NETWORK_MANAGER), stop.clone(), sync.clone() as Arc ); - run_service( + boot::host_service( &service_urls::with_base(&service_config.io_path, service_urls::SYNC_NOTIFY), stop.clone(), sync.clone() as Arc ); - let hypervisor_client = nanoipc::init_client::>( - &service_urls::with_base(&service_config.io_path, HYPERVISOR_IPC_URL), - ).unwrap(); - hypervisor_client.handshake().unwrap(); - hypervisor_client.module_ready(SYNC_MODULE_ID); - - let terminate_stop = stop.clone(); - CtrlC::set_handler(move || { - terminate_stop.store(true, Ordering::Relaxed); - }); - - while !stop.load(Ordering::Relaxed) { + while !stop.load(::std::sync::atomic::Ordering::Relaxed) { thread::park_timeout(std::time::Duration::from_millis(1000)); } } diff --git a/stratum/Cargo.toml b/stratum/Cargo.toml index 7fc8fa6c3..958e807be 100644 --- a/stratum/Cargo.toml +++ b/stratum/Cargo.toml @@ -4,6 +4,10 @@ name = "ethcore-stratum" version = "1.4.0" license = "GPL-3.0" authors = ["Ethcore "] +build = "build.rs" + +[build-dependencies] +ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] log = "0.3" @@ -14,6 +18,9 @@ ethcore-util = { path = "../util" } ethcore-devtools = { path = "../devtools" } lazy_static = "0.2" env_logger = "0.3" +ethcore-ipc = { path = "../ipc/rpc" } +semver = "0.2" +ethcore-ipc-nano = { path = "../ipc/nano" } [profile.release] debug = true diff --git a/stratum/build.rs 
b/stratum/build.rs new file mode 100644 index 000000000..61fa5098f --- /dev/null +++ b/stratum/build.rs @@ -0,0 +1,21 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +extern crate ethcore_ipc_codegen; + +fn main() { + ethcore_ipc_codegen::derive_ipc("src/traits.rs").unwrap(); +} diff --git a/stratum/src/lib.rs b/stratum/src/lib.rs index ccbfa6b57..c54eeea62 100644 --- a/stratum/src/lib.rs +++ b/stratum/src/lib.rs @@ -20,6 +20,8 @@ extern crate json_tcp_server; extern crate jsonrpc_core; #[macro_use] extern crate log; extern crate ethcore_util as util; +extern crate ethcore_ipc as ipc; +extern crate semver; #[cfg(test)] extern crate mio; @@ -31,9 +33,16 @@ extern crate env_logger; #[macro_use] extern crate lazy_static; -mod traits; +mod traits { + //! Stratum ipc interfaces specification + #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues + include!(concat!(env!("OUT_DIR"), "/traits.rs")); +} -pub use traits::{JobDispatcher, PushWorkHandler, Error}; +pub use traits::{ + JobDispatcher, PushWorkHandler, Error, ServiceConfiguration, + RemoteWorkHandler, RemoteJobDispatcher, +}; use json_tcp_server::Server as JsonRpcServer; use jsonrpc_core::{IoHandler, Params, IoDelegate, to_value, from_params}; @@ -133,8 +142,8 @@ impl Stratum { let mut job_que = self.job_que.write(); let workers = self.workers.read(); for socket_addr in job_que.drain() { - if let Some(ref worker_id) = workers.get(&socket_addr) { - let job_payload = self.dispatcher.job(worker_id); + if let Some(worker_id) = workers.get(&socket_addr) { + let job_payload = self.dispatcher.job(worker_id.to_owned()); job_payload.map( |json| self.rpc_server.push_message(&socket_addr, json.as_bytes()) ); diff --git a/stratum/src/traits.rs b/stratum/src/traits.rs index 8bd169ad6..339f753b5 100644 --- a/stratum/src/traits.rs +++ b/stratum/src/traits.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Stratum ipc interfaces specification - use std; use std::error::Error as StdError; +use util::H256; +use ipc::IpcConfig; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Binary)] pub enum Error { NoWork, NoWorkers, @@ -32,6 +32,8 @@ impl From for Error { } } +#[derive(Ipc)] +#[ipc(client_ident="RemoteJobDispatcher")] /// Interface that can provide pow/blockchain-specific responses for the clients pub trait JobDispatcher: Send + Sync { // json for initial client handshake @@ -39,9 +41,11 @@ pub trait JobDispatcher: Send + Sync { // json for difficulty dispatch fn difficulty(&self) -> Option { None } // json for job update given worker_id (payload manager should split job!) 
- fn job(&self, _worker_id: &str) -> Option { None } + fn job(&self, _worker_id: String) -> Option { None } } +#[derive(Ipc)] +#[ipc(client_ident="RemoteWorkHandler")] /// Interface that can handle requests to push job for workers pub trait PushWorkHandler: Send + Sync { /// push the same work package for all workers (`payload`: json of pow-specific set of work specification) @@ -50,3 +54,12 @@ pub trait PushWorkHandler: Send + Sync { /// push the work packages worker-wise (`payload`: json of pow-specific set of work specification) fn push_work(&self, payloads: Vec) -> Result<(), Error>; } + +#[derive(Binary)] +pub struct ServiceConfiguration { + pub listen_addr: String, + pub secret: Option, +} + +impl IpcConfig for PushWorkHandler { } +impl IpcConfig for JobDispatcher { } From f6b8dd0e781c1de37656cada098c156e65a01569 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 24 Aug 2016 18:35:53 +0200 Subject: [PATCH 24/29] protection from adding empty traces && assertion in traces db (#1994) * protection from readding empty traces && assertion in traces db * simplified assert --- ethcore/src/trace/db.rs | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 26fb0b1ff..684c12a70 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -262,6 +262,13 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { /// Traces of import request's enacted blocks are expected to be already in database /// or to be the currently inserted trace. fn import(&self, batch: &DBTransaction, request: ImportRequest) { + // valid (canon): retracted 0, enacted 1 => false, true, + // valid (branch): retracted 0, enacted 0 => false, false, + // valid (bbcc): retracted 1, enacted 1 => true, true, + // invalid: retracted 1, enacted 0 => true, false, + let ret = request.retracted != 0; + let ena = !request.enacted.is_empty(); + assert!(!(ret && !ena)); // fast return if tracing is disabled if !self.tracing_enabled() { return; @@ -278,7 +285,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { } // now let's rebuild the blooms - { + if !request.enacted.is_empty() { let range_start = request.block_number as Number + 1 - request.enacted.len(); let range_end = range_start + request.retracted; let replaced_range = range_start..range_end; From d0a5e9f148239a3a0fd7b9c447dedb787b14d8cf Mon Sep 17 00:00:00 2001 From: "Denis S. Soldatov aka General-Beck" Date: Thu, 25 Aug 2016 02:10:02 +0700 Subject: [PATCH 25/29] Update gitlab-ci remove failure from armv7 (pi2) --- .gitlab-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d7599f343..2b2d07a0f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -105,7 +105,6 @@ linux-armv7: paths: - target/armv7-unknown-linux-gnueabihf/release/parity name: "${CI_BUILD_NAME}_parity" - allow_failure: true linux-arm: stage: build image: ethcore/rust-arm:latest From 3e07135df30ffdf09baab298ab71690c7a96362d Mon Sep 17 00:00:00 2001 From: "Denis S. 
Soldatov aka General-Beck" Date: Thu, 25 Aug 2016 02:54:23 +0700 Subject: [PATCH 26/29] Update gitlab-ci remove linker from arm* target --- .gitlab-ci.yml | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2b2d07a0f..e950996ac 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -92,10 +92,6 @@ linux-armv7: - tags - stable script: - - mkdir -p .cargo - - echo "[target.armv7-unknown-linux-gnueabihf]" >> .cargo/config - - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config - - cat .cargo/config - cargo build --target armv7-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/armv7-unknown-linux-gnueabihf/release/parity tags: @@ -114,10 +110,6 @@ linux-arm: - tags - stable script: - - mkdir -p .cargo - - echo "[target.arm-unknown-linux-gnueabihf]" >> .cargo/config - - echo "linker= \"arm-linux-gnueabihf-gcc\"" >> .cargo/config - - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabihf --release --verbose - arm-linux-gnueabihf-strip target/arm-unknown-linux-gnueabihf/release/parity tags: @@ -137,10 +129,6 @@ linux-armv6: - tags - stable script: - - mkdir -p .cargo - - echo "[target.arm-unknown-linux-gnueabi]" >> .cargo/config - - echo "linker= \"arm-linux-gnueabi-gcc\"" >> .cargo/config - - cat .cargo/config - cargo build --target arm-unknown-linux-gnueabi --release --verbose - arm-linux-gnueabi-strip target/arm-unknown-linux-gnueabi/release/parity tags: @@ -160,10 +148,6 @@ linux-aarch64: - tags - stable script: - - mkdir -p .cargo - - echo "[target.aarch64-unknown-linux-gnu]" >> .cargo/config - - echo "linker= \"aarch64-linux-gnu-gcc\"" >> .cargo/config - - cat .cargo/config - cargo build --target aarch64-unknown-linux-gnu --release --verbose - aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/parity tags: From 09e0842f56fa15b510b53797436973da8e8c4f42 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Thu, 25 Aug 2016 12:11:06 +0200 Subject: [PATCH 27/29] Revert removing ecies (#2009) * revert removing ECIES without MAC * removed reverted change in encrypt --- Cargo.lock | 1 - ethcrypto/src/lib.rs | 93 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 90 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5c06c5a62..119e87fdf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -290,7 +290,6 @@ version = "1.4.0" dependencies = [ "clippy 0.0.85 (registry+https://github.com/rust-lang/crates.io-index)", "ethabi 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.4.0", "ethcore-rpc 1.4.0", "ethcore-util 1.4.0", "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index 4e3c3c1fc..9263e32c9 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -33,6 +33,7 @@ pub const KEY_LENGTH: usize = 32; pub const KEY_ITERATIONS: usize = 10240; pub const KEY_LENGTH_AES: usize = KEY_LENGTH / 2; +#[derive(PartialEq, Debug)] pub enum Error { Secp(SecpError), InvalidMessage, @@ -147,9 +148,9 @@ pub mod ecies { use rcrypto::sha2::Sha256; use rcrypto::hmac::Hmac; use rcrypto::mac::Mac; - use bigint::hash::FixedHash; + use bigint::hash::{FixedHash, H128}; use ethkey::{Random, Generator, Public, Secret}; - use {Error, ecdh, aes}; + use {Error, ecdh, aes, Keccak256}; /// Encrypt a message with a public key pub fn encrypt(public: &Public, shared_mac: &[u8], plain: &[u8]) -> Result, Error> { @@ -169,9 +170,11 @@ pub mod ecies { { let msgd = &mut msg[1..]; 
msgd[0..64].copy_from_slice(r.public()); + let iv = H128::random(); + msgd[64..80].copy_from_slice(&iv); { let cipher = &mut msgd[(64 + 16)..(64 + 16 + plain.len())]; - aes::encrypt(ekey, &[0u8; 16], plain, cipher); + aes::encrypt(ekey, &iv, plain, cipher); } let mut hmac = Hmac::new(Sha256::new(), &mkey); { @@ -184,6 +187,31 @@ pub mod ecies { Ok(msg) } + /// Encrypt a message with a public key + pub fn encrypt_single_message(public: &Public, plain: &[u8]) -> Result, Error> { + let r = Random.generate().unwrap(); + let z = try!(ecdh::agree(r.secret(), public)); + let mut key = [0u8; 32]; + let mut mkey = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + let mut hasher = Sha256::new(); + let mkey_material = &key[16..32]; + hasher.input(mkey_material); + hasher.result(&mut mkey); + let ekey = &key[0..16]; + + let mut msgd = vec![0u8; (64 + plain.len())]; + { + r.public().copy_to(&mut msgd[0..64]); + let iv = H128::from_slice(&z.keccak256()[0..16]); + { + let cipher = &mut msgd[64..(64 + plain.len())]; + aes::encrypt(ekey, &iv, plain, cipher); + } + } + Ok(msgd) + } + /// Decrypt a message with a secret key pub fn decrypt(secret: &Secret, shared_mac: &[u8], encrypted: &[u8]) -> Result, Error> { let meta_len = 1 + 64 + 16 + 32; @@ -224,6 +252,33 @@ pub mod ecies { Ok(msg) } + /// Decrypt single message with a secret key + pub fn decrypt_single_message(secret: &Secret, encrypted: &[u8]) -> Result, Error> { + let meta_len = 64; + if encrypted.len() < meta_len { + return Err(Error::InvalidMessage); //invalid message: publickey + } + + let e = encrypted; + let p = Public::from_slice(&e[0..64]); + let z = try!(ecdh::agree(secret, &p)); + let mut key = [0u8; 32]; + kdf(&z, &[0u8; 0], &mut key); + let ekey = &key[0..16]; + let mkey_material = &key[16..32]; + let mut hasher = Sha256::new(); + let mut mkey = [0u8; 32]; + hasher.input(mkey_material); + hasher.result(&mut mkey); + + let clen = encrypted.len() - meta_len; + let cipher = &e[64..(64+clen)]; + let mut msg = vec![0u8; clen]; + let iv = H128::from_slice(&z.keccak256()[0..16]); + aes::decrypt(ekey, &iv, cipher, &mut msg[..]); + Ok(msg) + } + fn kdf(secret: &Secret, s1: &[u8], dest: &mut [u8]) { let mut hasher = Sha256::new(); // SEC/ISO/Shoup specify counter size SHOULD be equivalent @@ -244,3 +299,35 @@ pub mod ecies { } } +#[cfg(test)] +mod tests { + use ethkey::{Random, Generator}; + use ecies; + + #[test] + fn ecies_shared() { + let kp = Random.generate().unwrap(); + let message = b"So many books, so little time"; + + let shared = b"shared"; + let wrong_shared = b"incorrect"; + let encrypted = ecies::encrypt(kp.public(), shared, message).unwrap(); + assert!(encrypted[..] != message[..]); + assert_eq!(encrypted[0], 0x04); + + assert!(ecies::decrypt(kp.secret(), wrong_shared, &encrypted).is_err()); + let decrypted = ecies::decrypt(kp.secret(), shared, &encrypted).unwrap(); + assert_eq!(decrypted[..message.len()], message[..]); + } + + #[test] + fn ecies_shared_single() { + let kp = Random.generate().unwrap(); + let message = b"So many books, so little time"; + let encrypted = ecies::encrypt_single_message(kp.public(), message).unwrap(); + assert!(encrypted[..] 
!= message[..]); + let decrypted = ecies::decrypt_single_message(kp.secret(), &encrypted).unwrap(); + assert_eq!(decrypted[..message.len()], message[..]); + } +} + From b18407b9e383ff04b889f15d8fad8fc36033f401 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 25 Aug 2016 14:28:45 +0200 Subject: [PATCH 28/29] Snapshot optimizations (#1991) * apply RLP compression to abridged blocks * add memorydb consolidate * code hash optimization * add warning to snapshot restoration CLI --- ethcore/src/snapshot/account.rs | 147 +++++++++++++++++++++++--- ethcore/src/snapshot/block.rs | 12 ++- ethcore/src/snapshot/error.rs | 6 ++ ethcore/src/snapshot/mod.rs | 91 +++++++++++++--- ethcore/src/snapshot/service.rs | 2 + ethcore/src/snapshot/tests/state.rs | 1 + parity/snapshot.rs | 1 + util/src/journaldb/archivedb.rs | 4 + util/src/journaldb/earlymergedb.rs | 4 + util/src/journaldb/overlayrecentdb.rs | 4 + util/src/journaldb/refcounteddb.rs | 13 +++ util/src/journaldb/traits.rs | 3 + util/src/memorydb.rs | 36 +++++++ 13 files changed, 287 insertions(+), 37 deletions(-) diff --git a/ethcore/src/snapshot/account.rs b/ethcore/src/snapshot/account.rs index 3c31bab0d..fcd7b6abc 100644 --- a/ethcore/src/snapshot/account.rs +++ b/ethcore/src/snapshot/account.rs @@ -22,6 +22,34 @@ use util::rlp::{Rlp, RlpStream, Stream, UntrustedRlp, View}; use util::trie::{TrieDB, Trie}; use snapshot::Error; +use std::collections::{HashMap, HashSet}; + +// whether an encoded account has code and how it is referred to. +#[repr(u8)] +enum CodeState { + // the account has no code. + Empty = 0, + // raw code is encoded. + Inline = 1, + // the code is referred to by hash. + Hash = 2, +} + +impl CodeState { + fn from(x: u8) -> Result { + match x { + 0 => Ok(CodeState::Empty), + 1 => Ok(CodeState::Inline), + 2 => Ok(CodeState::Hash), + _ => Err(Error::UnrecognizedCodeState(x)) + } + } + + fn raw(self) -> u8 { + self as u8 + } +} + // An alternate account structure from ::account::Account. #[derive(PartialEq, Clone, Debug)] pub struct Account { @@ -58,7 +86,7 @@ impl Account { // walk the account's storage trie, returning an RLP item containing the // account properties and the storage. - pub fn to_fat_rlp(&self, acct_db: &AccountDB) -> Result { + pub fn to_fat_rlp(&self, acct_db: &AccountDB, used_code: &mut HashSet) -> Result { let db = try!(TrieDB::new(acct_db, &self.storage_root)); let mut pairs = Vec::new(); @@ -81,11 +109,14 @@ impl Account { // [has_code, code_hash]. if self.code_hash == SHA3_EMPTY { - account_stream.append(&false).append_empty_data(); + account_stream.append(&CodeState::Empty.raw()).append_empty_data(); + } else if used_code.contains(&self.code_hash) { + account_stream.append(&CodeState::Hash.raw()).append(&self.code_hash); } else { match acct_db.get(&self.code_hash) { Some(c) => { - account_stream.append(&true).append(&c); + used_code.insert(self.code_hash.clone()); + account_stream.append(&CodeState::Inline.raw()).append(&c); } None => { warn!("code lookup failed during snapshot"); @@ -100,16 +131,39 @@ impl Account { } // decode a fat rlp, and rebuild the storage trie as we go. - pub fn from_fat_rlp(acct_db: &mut AccountDBMut, rlp: UntrustedRlp) -> Result { + // returns the account structure along with its newly recovered code, + // if it exists. 
+ pub fn from_fat_rlp( + acct_db: &mut AccountDBMut, + rlp: UntrustedRlp, + code_map: &HashMap, + ) -> Result<(Self, Option), Error> { use util::{TrieDBMut, TrieMut}; let nonce = try!(rlp.val_at(0)); let balance = try!(rlp.val_at(1)); - let code_hash = if try!(rlp.val_at(2)) { - let code: Bytes = try!(rlp.val_at(3)); - acct_db.insert(&code) - } else { - SHA3_EMPTY + let code_state: CodeState = { + let raw: u8 = try!(rlp.val_at(2)); + try!(CodeState::from(raw)) + }; + + // load the code if it exists. + let (code_hash, new_code) = match code_state { + CodeState::Empty => (SHA3_EMPTY, None), + CodeState::Inline => { + let code: Bytes = try!(rlp.val_at(3)); + let code_hash = acct_db.insert(&code); + + (code_hash, Some(code)) + } + CodeState::Hash => { + let code_hash = try!(rlp.val_at(3)); + if let Some(code) = code_map.get(&code_hash) { + acct_db.emplace(code_hash.clone(), code.clone()); + } + + (code_hash, None) + } }; let mut storage_root = H256::zero(); @@ -124,12 +178,20 @@ impl Account { try!(storage_trie.insert(&k, &v)); } } - Ok(Account { + + let acc = Account { nonce: nonce, balance: balance, storage_root: storage_root, code_hash: code_hash, - }) + }; + + Ok((acc, new_code)) + } + + /// Get the account's code hash. + pub fn code_hash(&self) -> &H256 { + &self.code_hash } #[cfg(test)] @@ -145,9 +207,11 @@ mod tests { use snapshot::tests::helpers::fill_storage; use util::{SHA3_NULL_RLP, SHA3_EMPTY}; - use util::{Address, FixedHash, H256}; + use util::{Address, FixedHash, H256, HashDB}; use util::rlp::{UntrustedRlp, View}; + use std::collections::{HashSet, HashMap}; + use super::Account; #[test] @@ -166,9 +230,9 @@ mod tests { let thin_rlp = account.to_thin_rlp(); assert_eq!(Account::from_thin_rlp(&thin_rlp), account); - let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr)).unwrap(); + let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr), &mut Default::default()).unwrap(); let fat_rlp = UntrustedRlp::new(&fat_rlp); - assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp).unwrap(), account); + assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, &Default::default()).unwrap().0, account); } #[test] @@ -192,8 +256,59 @@ mod tests { let thin_rlp = account.to_thin_rlp(); assert_eq!(Account::from_thin_rlp(&thin_rlp), account); - let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr)).unwrap(); + let fat_rlp = account.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr), &mut Default::default()).unwrap(); let fat_rlp = UntrustedRlp::new(&fat_rlp); - assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp).unwrap(), account); + assert_eq!(Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr), fat_rlp, &Default::default()).unwrap().0, account); + } + + #[test] + fn encoding_code() { + let mut db = get_temp_journal_db(); + let mut db = &mut **db; + + let addr1 = Address::random(); + let addr2 = Address::random(); + + let code_hash = { + let mut acct_db = AccountDBMut::new(db.as_hashdb_mut(), &addr1); + acct_db.insert(b"this is definitely code") + }; + + { + let mut acct_db = AccountDBMut::new(db.as_hashdb_mut(), &addr2); + acct_db.emplace(code_hash.clone(), b"this is definitely code".to_vec()); + } + + let account1 = Account { + nonce: 50.into(), + balance: 123456789.into(), + storage_root: SHA3_NULL_RLP, + code_hash: code_hash, + }; + + let account2 = Account { + nonce: 400.into(), + balance: 
98765432123456789usize.into(), + storage_root: SHA3_NULL_RLP, + code_hash: code_hash, + }; + + let mut used_code = HashSet::new(); + + let fat_rlp1 = account1.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr1), &mut used_code).unwrap(); + let fat_rlp2 = account2.to_fat_rlp(&AccountDB::new(db.as_hashdb(), &addr2), &mut used_code).unwrap(); + assert_eq!(used_code.len(), 1); + + let fat_rlp1 = UntrustedRlp::new(&fat_rlp1); + let fat_rlp2 = UntrustedRlp::new(&fat_rlp2); + + let code_map = HashMap::new(); + let (acc, maybe_code) = Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr2), fat_rlp2, &code_map).unwrap(); + assert!(maybe_code.is_none()); + assert_eq!(acc, account2); + + let (acc, maybe_code) = Account::from_fat_rlp(&mut AccountDBMut::new(db.as_hashdb_mut(), &addr1), fat_rlp1, &code_map).unwrap(); + assert_eq!(maybe_code, Some(b"this is definitely code".to_vec())); + assert_eq!(acc, account1); } } diff --git a/ethcore/src/snapshot/block.rs b/ethcore/src/snapshot/block.rs index 5cb1ed640..f317cf54e 100644 --- a/ethcore/src/snapshot/block.rs +++ b/ethcore/src/snapshot/block.rs @@ -21,6 +21,7 @@ use header::Header; use views::BlockView; use util::rlp::{DecoderError, RlpStream, Stream, UntrustedRlp, View}; +use util::rlp::{Compressible, RlpType}; use util::{Bytes, Hashable, H256}; const HEADER_FIELDS: usize = 10; @@ -31,10 +32,10 @@ pub struct AbridgedBlock { } impl AbridgedBlock { - /// Create from a vector of bytes. Does no verification. - pub fn from_raw(rlp: Bytes) -> Self { + /// Create from rlp-compressed bytes. Does no verification. + pub fn from_raw(compressed: Bytes) -> Self { AbridgedBlock { - rlp: rlp, + rlp: compressed, } } @@ -78,7 +79,7 @@ impl AbridgedBlock { } AbridgedBlock { - rlp: stream.out(), + rlp: UntrustedRlp::new(stream.as_raw()).compress(RlpType::Blocks).to_vec(), } } @@ -86,7 +87,8 @@ impl AbridgedBlock { /// /// Will fail if contains invalid rlp. pub fn to_block(&self, parent_hash: H256, number: u64) -> Result { - let rlp = UntrustedRlp::new(&self.rlp); + let rlp = UntrustedRlp::new(&self.rlp).decompress(RlpType::Blocks); + let rlp = UntrustedRlp::new(&rlp); let mut header = Header { parent_hash: parent_hash, diff --git a/ethcore/src/snapshot/error.rs b/ethcore/src/snapshot/error.rs index d41d7cd2f..d4587fdba 100644 --- a/ethcore/src/snapshot/error.rs +++ b/ethcore/src/snapshot/error.rs @@ -35,6 +35,10 @@ pub enum Error { IncompleteChain, /// Old starting block in a pruned database. OldBlockPrunedDB, + /// Missing code. + MissingCode(Vec), + /// Unrecognized code encoding. + UnrecognizedCodeState(u8), /// Trie error. Trie(TrieError), /// Decoder error. @@ -51,6 +55,8 @@ impl fmt::Display for Error { Error::IncompleteChain => write!(f, "Cannot create snapshot due to incomplete chain."), Error::OldBlockPrunedDB => write!(f, "Attempted to create a snapshot at an old block while using \ a pruned database. Please re-run with the --pruning archive flag."), + Error::MissingCode(ref missing) => write!(f, "Incomplete snapshot: {} contract codes not found.", missing.len()), + Error::UnrecognizedCodeState(state) => write!(f, "Unrecognized code encoding ({})", state), Error::Io(ref err) => err.fmt(f), Error::Decoder(ref err) => err.fmt(f), Error::Trie(ref err) => err.fmt(f), diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 4e33c9ebc..84afb7cc1 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -16,7 +16,7 @@ //! Snapshot creation, restoration, and network service. 
-use std::collections::VecDeque; +use std::collections::{HashMap, HashSet, VecDeque}; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering}; @@ -27,6 +27,7 @@ use ids::BlockID; use views::BlockView; use util::{Bytes, Hashable, HashDB, snappy}; +use util::memorydb::MemoryDB; use util::Mutex; use util::hash::{FixedHash, H256}; use util::journaldb::{self, Algorithm, JournalDB}; @@ -332,6 +333,8 @@ pub fn chunk_state<'a>(db: &HashDB, root: &H256, writer: &Mutex(db: &HashDB, root: &H256, writer: &Mutex, state_root: H256, + code_map: HashMap, // maps code hashes to code itself. + missing_code: HashMap>, // maps code hashes to lists of accounts missing that code. } impl StateRebuilder { @@ -411,6 +416,8 @@ impl StateRebuilder { StateRebuilder { db: journaldb::new(db.clone(), pruning, ::db::COL_STATE), state_root: SHA3_NULL_RLP, + code_map: HashMap::new(), + missing_code: HashMap::new(), } } @@ -419,41 +426,57 @@ impl StateRebuilder { let rlp = UntrustedRlp::new(chunk); let account_fat_rlps: Vec<_> = rlp.iter().map(|r| r.as_raw()).collect(); let mut pairs = Vec::with_capacity(rlp.item_count()); - let backing = self.db.backing().clone(); // initialize the pairs vector with empty values so we have slots to write into. pairs.resize(rlp.item_count(), (H256::new(), Vec::new())); let chunk_size = account_fat_rlps.len() / ::num_cpus::get() + 1; + // new code contained within this chunk. + let mut chunk_code = HashMap::new(); + // build account tries in parallel. // Todo [rob] keep a thread pool around so we don't do this per-chunk. try!(scope(|scope| { let mut handles = Vec::new(); for (account_chunk, out_pairs_chunk) in account_fat_rlps.chunks(chunk_size).zip(pairs.chunks_mut(chunk_size)) { - let mut db = self.db.boxed_clone(); - let handle: ScopedJoinHandle, ::error::Error>> = scope.spawn(move || { - try!(rebuild_account_trie(db.as_hashdb_mut(), account_chunk, out_pairs_chunk)); + let code_map = &self.code_map; + let handle: ScopedJoinHandle> = scope.spawn(move || { + let mut db = MemoryDB::new(); + let status = try!(rebuild_accounts(&mut db, account_chunk, out_pairs_chunk, code_map)); trace!(target: "snapshot", "thread rebuilt {} account tries", account_chunk.len()); - Ok(db) + Ok((db, status)) }); handles.push(handle); } - // commit all account tries to the db, but only in this thread. - let batch = backing.transaction(); + // consolidate all edits into the main overlay. for handle in handles { - let mut thread_db = try!(handle.join()); - try!(thread_db.inject(&batch)); - } - try!(backing.write(batch).map_err(::util::UtilError::SimpleString)); + let (thread_db, status): (MemoryDB, _) = try!(handle.join()); + self.db.consolidate(thread_db); + chunk_code.extend(status.new_code); + + for (addr_hash, code_hash) in status.missing_code { + self.missing_code.entry(code_hash).or_insert_with(Vec::new).push(addr_hash); + } + } Ok::<_, ::error::Error>(()) })); + // patch up all missing code. must be done after collecting all new missing code entries. 
+ for (code_hash, code) in chunk_code { + for addr_hash in self.missing_code.remove(&code_hash).unwrap_or_else(Vec::new) { + let mut db = AccountDBMut::from_hash(self.db.as_hashdb_mut(), addr_hash); + db.emplace(code_hash, code.clone()); + } + + self.code_map.insert(code_hash, code); + } + // batch trie writes { @@ -468,6 +491,7 @@ impl StateRebuilder { } } + let backing = self.db.backing().clone(); let batch = backing.transaction(); try!(self.db.inject(&batch)); try!(backing.write(batch).map_err(::util::UtilError::SimpleString)); @@ -475,11 +499,36 @@ impl StateRebuilder { Ok(()) } + /// Check for accounts missing code. Once all chunks have been fed, there should + /// be none. + pub fn check_missing(&self) -> Result<(), Error> { + let missing = self.missing_code.keys().cloned().collect::>(); + match missing.is_empty() { + true => Ok(()), + false => Err(Error::MissingCode(missing)), + } + } + /// Get the state root of the rebuilder. pub fn state_root(&self) -> H256 { self.state_root } } -fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mut [(H256, Bytes)]) -> Result<(), ::error::Error> { +#[derive(Default)] +struct RebuiltStatus { + new_code: Vec<(H256, Bytes)>, // new code that's become available. + missing_code: Vec<(H256, H256)>, // accounts that are missing code. +} + +// rebuild a set of accounts and their storage. +// returns +fn rebuild_accounts( + db: &mut HashDB, + account_chunk: &[&[u8]], + out_chunk: &mut [(H256, Bytes)], + code_map: &HashMap +) -> Result +{ + let mut status = RebuiltStatus::default(); for (account_pair, out) in account_chunk.into_iter().zip(out_chunk) { let account_rlp = UntrustedRlp::new(account_pair); @@ -491,14 +540,24 @@ fn rebuild_account_trie(db: &mut HashDB, account_chunk: &[&[u8]], out_chunk: &mu let mut acct_db = AccountDBMut::from_hash(db, hash); // fill out the storage trie and code while decoding. - let acc = try!(Account::from_fat_rlp(&mut acct_db, fat_rlp)); + let (acc, maybe_code) = try!(Account::from_fat_rlp(&mut acct_db, fat_rlp, code_map)); + + let code_hash = acc.code_hash().clone(); + match maybe_code { + Some(code) => status.new_code.push((code_hash, code)), + None => { + if code_hash != ::util::SHA3_EMPTY && !code_map.contains_key(&code_hash) { + status.missing_code.push((hash, code_hash)); + } + } + } acc.to_thin_rlp() }; *out = (hash, thin_rlp); } - Ok(()) + Ok(status) } /// Proportion of blocks which we will verify `PoW` for. 
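The snapshot changes above deduplicate contract code across account chunks: the first account carrying a given piece of code embeds it inline (CodeState::Inline), later accounts only reference its hash (CodeState::Hash), and restoration parks accounts whose code has not arrived yet until check_missing() confirms nothing is left unresolved. A minimal, self-contained sketch of that bookkeeping — hypothetical names and a toy hash standing in for keccak/H256, not the crate's actual API — might look like this:

use std::collections::{HashMap, HashSet};

type Hash = u64;      // toy stand-in for H256
type Bytes = Vec<u8>;

// How an account's code is written into a chunk: nothing, the raw bytes
// (first occurrence), or just the hash (subsequent occurrences).
enum CodeRef {
    Empty,
    Inline(Bytes),
    ByHash(Hash),
}

// Toy hash, standing in for keccak-256.
fn toy_hash(code: &[u8]) -> Hash {
    code.iter().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64))
}

// Writer side: inline a contract's code the first time its hash is seen,
// refer to it by hash afterwards.
fn encode_code(code: Option<&[u8]>, used_code: &mut HashSet<Hash>) -> CodeRef {
    match code {
        None => CodeRef::Empty,
        Some(c) => {
            let h = toy_hash(c);
            if used_code.insert(h) {
                CodeRef::Inline(c.to_vec())
            } else {
                CodeRef::ByHash(h)
            }
        }
    }
}

// Reader side: accounts referencing code that has not arrived yet are parked in
// `missing` and patched up when the inline copy shows up; a final check reports
// anything still unresolved, analogous to StateRebuilder::check_missing above.
#[derive(Default)]
struct CodeTracker {
    known: HashMap<Hash, Bytes>,
    missing: HashMap<Hash, Vec<u64>>, // code hash -> account ids still waiting
}

impl CodeTracker {
    fn feed(&mut self, account_id: u64, code: CodeRef) {
        match code {
            CodeRef::Empty => {}
            CodeRef::Inline(c) => {
                let h = toy_hash(&c);
                self.missing.remove(&h); // everyone waiting can be patched up now
                self.known.insert(h, c);
            }
            CodeRef::ByHash(h) => {
                if let Some(c) = self.known.get(&h) {
                    let _ = (account_id, c); // would emplace `c` for `account_id` here
                } else {
                    self.missing.entry(h).or_insert_with(Vec::new).push(account_id);
                }
            }
        }
    }

    fn check_missing(&self) -> Result<(), Vec<Hash>> {
        if self.missing.is_empty() {
            Ok(())
        } else {
            Err(self.missing.keys().cloned().collect())
        }
    }
}

fn main() {
    let mut used = HashSet::new();
    let code = b"some evm bytecode".to_vec();

    // two accounts share the same code: only the first occurrence is inlined
    let first = encode_code(Some(&code[..]), &mut used);
    let second = encode_code(Some(&code[..]), &mut used);

    let mut tracker = CodeTracker::default();
    tracker.feed(2, second); // hash-only reference arrives before the inline copy
    assert!(tracker.check_missing().is_err());
    tracker.feed(1, first);  // inline copy resolves the outstanding reference
    assert!(tracker.check_missing().is_ok());
}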
diff --git a/ethcore/src/snapshot/service.rs b/ethcore/src/snapshot/service.rs index 576e32c67..45e1184b4 100644 --- a/ethcore/src/snapshot/service.rs +++ b/ethcore/src/snapshot/service.rs @@ -125,6 +125,8 @@ impl Restoration { try!(self.state.feed(&self.snappy_buffer[..len])); if self.state_chunks_left.is_empty() { + try!(self.state.check_missing()); + let root = self.state.state_root(); if root != self.final_state_root { warn!("Final restored state has wrong state root: expected {:?}, got {:?}", root, self.final_state_root); diff --git a/ethcore/src/snapshot/tests/state.rs b/ethcore/src/snapshot/tests/state.rs index 96cb88106..a293cdb44 100644 --- a/ethcore/src/snapshot/tests/state.rs +++ b/ethcore/src/snapshot/tests/state.rs @@ -72,6 +72,7 @@ fn snap_and_restore() { rebuilder.feed(&chunk).unwrap(); } + rebuilder.check_missing().unwrap(); assert_eq!(rebuilder.state_root(), state_root); new_db }; diff --git a/parity/snapshot.rs b/parity/snapshot.rs index c3e43e89f..650123d73 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -108,6 +108,7 @@ impl SnapshotCommand { let (service, _panic_handler) = try!(self.start_service()); warn!("Snapshot restoration is experimental and the format may be subject to change."); + warn!("On encountering an unexpected error, please ensure that you have a recent snapshot."); let snapshot = service.snapshot_service(); let reader = PackedReader::new(Path::new(&file)) diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 620728cd6..417d5b865 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -228,6 +228,10 @@ impl JournalDB for ArchiveDB { fn backing(&self) -> &Arc { &self.backing } + + fn consolidate(&mut self, with: MemoryDB) { + self.overlay.consolidate(with); + } } #[cfg(test)] diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index 4f52abcce..e2543d11c 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -539,6 +539,10 @@ impl JournalDB for EarlyMergeDB { Ok(ops) } + + fn consolidate(&mut self, with: MemoryDB) { + self.overlay.consolidate(with); + } } #[cfg(test)] diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 6e1068fb0..3d1d7e143 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -339,6 +339,10 @@ impl JournalDB for OverlayRecentDB { Ok(ops) } + + fn consolidate(&mut self, with: MemoryDB) { + self.transaction_overlay.consolidate(with); + } } impl HashDB for OverlayRecentDB { diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 5a2d85c1c..6b37e451f 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -20,6 +20,7 @@ use common::*; use rlp::*; use hashdb::*; use overlaydb::OverlayDB; +use memorydb::MemoryDB; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY}; use super::traits::JournalDB; use kvdb::{Database, DBTransaction}; @@ -192,6 +193,18 @@ impl JournalDB for RefCountedDB { } self.forward.commit_to_batch(batch) } + + fn consolidate(&mut self, mut with: MemoryDB) { + for (key, (value, rc)) in with.drain() { + for _ in 0..rc { + self.emplace(key.clone(), value.clone()); + } + + for _ in rc..0 { + self.remove(&key); + } + } + } } #[cfg(test)] diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs index 96715604e..1a00da1e4 100644 --- a/util/src/journaldb/traits.rs +++ b/util/src/journaldb/traits.rs @@ -61,6 +61,9 @@ pub trait 
JournalDB: HashDB { /// to the backing strage fn flush(&self) {} + /// Consolidate all the insertions and deletions in the given memory overlay. + fn consolidate(&mut self, overlay: ::memorydb::MemoryDB); + /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { diff --git a/util/src/memorydb.rs b/util/src/memorydb.rs index 4376d173c..468ad2ec3 100644 --- a/util/src/memorydb.rs +++ b/util/src/memorydb.rs @@ -174,6 +174,24 @@ impl MemoryDB { } } } + + /// Consolidate all the entries of `other` into `self`. + pub fn consolidate(&mut self, mut other: Self) { + for (key, (value, rc)) in other.drain() { + match self.data.entry(key) { + Entry::Occupied(mut entry) => { + if entry.get().1 < 0 { + entry.get_mut().0 = value; + } + + entry.get_mut().1 += rc; + } + Entry::Vacant(entry) => { + entry.insert((value, rc)); + } + } + } + } } static NULL_RLP_STATIC: [u8; 1] = [0x80; 1]; @@ -310,3 +328,21 @@ fn memorydb_remove_and_purge() { m.remove_and_purge(&hello_key); assert_eq!(m.raw(&hello_key), None); } + +#[test] +fn consolidate() { + let mut main = MemoryDB::new(); + let mut other = MemoryDB::new(); + let remove_key = other.insert(b"doggo"); + main.remove(&remove_key); + + let insert_key = other.insert(b"arf"); + main.emplace(insert_key, b"arf".to_vec()); + + main.consolidate(other); + + let overlay = main.drain(); + + assert_eq!(overlay.get(&remove_key).unwrap(), &(b"doggo".to_vec(), 0)); + assert_eq!(overlay.get(&insert_key).unwrap(), &(b"arf".to_vec(), 2)); +} \ No newline at end of file From 2aef81cf90e4ed61b9587a72e3a9209ec150cd28 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 25 Aug 2016 16:43:56 +0200 Subject: [PATCH 29/29] remove internal locking from DBTransaction (#2003) --- ethcore/src/blockchain/blockchain.rs | 88 +++++++++++++-------------- ethcore/src/client/client.rs | 12 ++-- ethcore/src/db.rs | 8 +-- ethcore/src/snapshot/mod.rs | 4 +- ethcore/src/snapshot/tests/blocks.rs | 8 ++- ethcore/src/tests/helpers.rs | 8 +-- ethcore/src/trace/db.rs | 16 ++--- ethcore/src/trace/mod.rs | 2 +- util/src/journaldb/archivedb.rs | 4 +- util/src/journaldb/earlymergedb.rs | 12 ++-- util/src/journaldb/overlayrecentdb.rs | 4 +- util/src/journaldb/refcounteddb.rs | 6 +- util/src/journaldb/traits.rs | 12 ++-- util/src/kvdb.rs | 36 +++++------ util/src/migration/mod.rs | 2 +- util/src/migration/tests.rs | 2 +- util/src/overlaydb.rs | 16 ++--- 17 files changed, 121 insertions(+), 119 deletions(-) diff --git a/ethcore/src/blockchain/blockchain.rs b/ethcore/src/blockchain/blockchain.rs index e76cf42c4..379d77407 100644 --- a/ethcore/src/blockchain/blockchain.rs +++ b/ethcore/src/blockchain/blockchain.rs @@ -380,7 +380,7 @@ impl BlockChain { children: vec![] }; - let batch = DBTransaction::new(&db); + let mut batch = DBTransaction::new(&db); batch.put(db::COL_HEADERS, &hash, block.header_rlp().as_raw()); batch.put(db::COL_BODIES, &hash, &Self::block_to_body(genesis)); @@ -419,7 +419,7 @@ impl BlockChain { } } - let batch = db.transaction(); + let mut batch = db.transaction(); batch.put(db::COL_EXTRA, b"first", &hash); db.write(batch).expect("Low level database error."); @@ -451,7 +451,7 @@ impl BlockChain { #[cfg(test)] fn rewind(&self) -> Option { use db::Key; - let batch = self.db.transaction(); + let mut batch =self.db.transaction(); // track back to the best block we have in the blocks database if let Some(best_block_hash) = self.db.get(db::COL_EXTRA, b"best").unwrap() { let best_block_hash = 
H256::from_slice(&best_block_hash); @@ -604,7 +604,7 @@ impl BlockChain { assert!(self.pending_best_block.read().is_none()); - let batch = self.db.transaction(); + let mut batch = self.db.transaction(); let block_rlp = UntrustedRlp::new(bytes); let compressed_header = block_rlp.at(0).unwrap().compress(RlpType::Blocks); @@ -625,7 +625,7 @@ impl BlockChain { location: BlockLocation::CanonChain, }; - self.prepare_update(&batch, ExtrasUpdate { + self.prepare_update(&mut batch, ExtrasUpdate { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: self.prepare_block_details_update(bytes, &info), block_receipts: self.prepare_block_receipts_update(receipts, &info), @@ -659,7 +659,7 @@ impl BlockChain { let mut update = HashMap::new(); update.insert(hash, block_details); - self.prepare_update(&batch, ExtrasUpdate { + self.prepare_update(&mut batch, ExtrasUpdate { block_hashes: self.prepare_block_hashes_update(bytes, &info), block_details: update, block_receipts: self.prepare_block_receipts_update(receipts, &info), @@ -682,7 +682,7 @@ impl BlockChain { let mut parent_details = self.block_details(&block_hash) .unwrap_or_else(|| panic!("Invalid block hash: {:?}", block_hash)); - let batch = self.db.transaction(); + let mut batch = self.db.transaction(); parent_details.children.push(child_hash); let mut update = HashMap::new(); @@ -701,7 +701,7 @@ impl BlockChain { /// Inserts the block into backing cache database. /// Expects the block to be valid and already verified. /// If the block is already known, does nothing. - pub fn insert_block(&self, batch: &DBTransaction, bytes: &[u8], receipts: Vec) -> ImportRoute { + pub fn insert_block(&self, batch: &mut DBTransaction, bytes: &[u8], receipts: Vec) -> ImportRoute { // create views onto rlp let block = BlockView::new(bytes); let header = block.header_view(); @@ -782,7 +782,7 @@ impl BlockChain { } /// Prepares extras update. 
- fn prepare_update(&self, batch: &DBTransaction, update: ExtrasUpdate, is_best: bool) { + fn prepare_update(&self, batch: &mut DBTransaction, update: ExtrasUpdate, is_best: bool) { { let block_hashes: Vec<_> = update.block_details.keys().cloned().collect(); @@ -1147,8 +1147,8 @@ mod tests { assert_eq!(bc.best_block_number(), 0); // when - let batch = db.transaction(); - bc.insert_block(&batch, &first, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &first, vec![]); assert_eq!(bc.best_block_number(), 0); bc.commit(); // NOTE no db.write here (we want to check if best block is cached) @@ -1177,8 +1177,8 @@ mod tests { assert_eq!(bc.block_hash(1), None); assert_eq!(bc.block_details(&genesis_hash).unwrap().children, vec![]); - let batch = db.transaction(); - bc.insert_block(&batch, &first, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &first, vec![]); db.write(batch).unwrap(); bc.commit(); @@ -1203,11 +1203,11 @@ mod tests { let bc = BlockChain::new(Config::default(), &genesis, db.clone()); let mut block_hashes = vec![genesis_hash.clone()]; - let batch = db.transaction(); + let mut batch = db.transaction(); for _ in 0..10 { let block = canon_chain.generate(&mut finalizer).unwrap(); block_hashes.push(BlockView::new(&block).header_view().sha3()); - bc.insert_block(&batch, &block, vec![]); + bc.insert_block(&mut batch, &block, vec![]); bc.commit(); } db.write(batch).unwrap(); @@ -1238,20 +1238,20 @@ mod tests { let db = new_db(temp.as_str()); let bc = BlockChain::new(Config::default(), &genesis, db.clone()); - let batch = db.transaction(); + let mut batch = db.transaction(); for b in &[&b1a, &b1b, &b2a, &b2b, &b3a, &b3b, &b4a, &b4b, &b5a, &b5b] { - bc.insert_block(&batch, b, vec![]); + bc.insert_block(&mut batch, b, vec![]); bc.commit(); } - bc.insert_block(&batch, &b1b, vec![]); - bc.insert_block(&batch, &b2a, vec![]); - bc.insert_block(&batch, &b2b, vec![]); - bc.insert_block(&batch, &b3a, vec![]); - bc.insert_block(&batch, &b3b, vec![]); - bc.insert_block(&batch, &b4a, vec![]); - bc.insert_block(&batch, &b4b, vec![]); - bc.insert_block(&batch, &b5a, vec![]); - bc.insert_block(&batch, &b5b, vec![]); + bc.insert_block(&mut batch, &b1b, vec![]); + bc.insert_block(&mut batch, &b2a, vec![]); + bc.insert_block(&mut batch, &b2b, vec![]); + bc.insert_block(&mut batch, &b3a, vec![]); + bc.insert_block(&mut batch, &b3b, vec![]); + bc.insert_block(&mut batch, &b4a, vec![]); + bc.insert_block(&mut batch, &b4b, vec![]); + bc.insert_block(&mut batch, &b5a, vec![]); + bc.insert_block(&mut batch, &b5b, vec![]); db.write(batch).unwrap(); assert_eq!( @@ -1286,17 +1286,17 @@ mod tests { let db = new_db(temp.as_str()); let bc = BlockChain::new(Config::default(), &genesis, db.clone()); - let batch = db.transaction(); - let ir1 = bc.insert_block(&batch, &b1, vec![]); + let mut batch = db.transaction(); + let ir1 = bc.insert_block(&mut batch, &b1, vec![]); bc.commit(); - let ir2 = bc.insert_block(&batch, &b2, vec![]); + let ir2 = bc.insert_block(&mut batch, &b2, vec![]); bc.commit(); - let ir3b = bc.insert_block(&batch, &b3b, vec![]); + let ir3b = bc.insert_block(&mut batch, &b3b, vec![]); bc.commit(); db.write(batch).unwrap(); assert_eq!(bc.block_hash(3).unwrap(), b3b_hash); - let batch = db.transaction(); - let ir3a = bc.insert_block(&batch, &b3a, vec![]); + let mut batch = db.transaction(); + let ir3a = bc.insert_block(&mut batch, &b3a, vec![]); bc.commit(); db.write(batch).unwrap(); @@ -1402,8 +1402,8 @@ mod tests { let db = new_db(temp.as_str()); let bc =
BlockChain::new(Config::default(), &genesis, db.clone()); assert_eq!(bc.best_block_hash(), genesis_hash); - let batch = db.transaction(); - bc.insert_block(&batch, &first, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &first, vec![]); db.write(batch).unwrap(); bc.commit(); assert_eq!(bc.best_block_hash(), first_hash); @@ -1467,8 +1467,8 @@ mod tests { let temp = RandomTempPath::new(); let db = new_db(temp.as_str()); let bc = BlockChain::new(Config::default(), &genesis, db.clone()); - let batch = db.transaction(); - bc.insert_block(&batch, &b1, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &b1, vec![]); db.write(batch).unwrap(); bc.commit(); @@ -1480,8 +1480,8 @@ } fn insert_block(db: &Arc, bc: &BlockChain, bytes: &[u8], receipts: Vec) -> ImportRoute { - let batch = db.transaction(); - let res = bc.insert_block(&batch, bytes, receipts); + let mut batch = db.transaction(); + let res = bc.insert_block(&mut batch, bytes, receipts); db.write(batch).unwrap(); bc.commit(); res @@ -1569,16 +1569,16 @@ mod tests { let bc = BlockChain::new(Config::default(), &genesis, db.clone()); let uncle = canon_chain.fork(1).generate(&mut finalizer.fork()).unwrap(); - let batch = db.transaction(); + let mut batch = db.transaction(); // create a longer fork for _ in 0..5 { let canon_block = canon_chain.generate(&mut finalizer).unwrap(); - bc.insert_block(&batch, &canon_block, vec![]); + bc.insert_block(&mut batch, &canon_block, vec![]); bc.commit(); } assert_eq!(bc.best_block_number(), 5); - bc.insert_block(&batch, &uncle, vec![]); + bc.insert_block(&mut batch, &uncle, vec![]); db.write(batch).unwrap(); bc.commit(); } @@ -1604,10 +1604,10 @@ mod tests { let db = new_db(temp.as_str()); let bc = BlockChain::new(Config::default(), &genesis, db.clone()); - let batch = db.transaction(); - bc.insert_block(&batch, &first, vec![]); + let mut batch = db.transaction(); + bc.insert_block(&mut batch, &first, vec![]); bc.commit(); - bc.insert_block(&batch, &second, vec![]); + bc.insert_block(&mut batch, &second, vec![]); bc.commit(); db.write(batch).unwrap(); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 5e0a4b9f8..78669912c 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -172,8 +172,8 @@ impl Client { let mut state_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); if state_db.is_empty() && try!(spec.ensure_db_good(state_db.as_hashdb_mut())) { - let batch = DBTransaction::new(&db); - try!(state_db.commit(&batch, 0, &spec.genesis_header().hash(), None)); + let mut batch = DBTransaction::new(&db); + try!(state_db.commit(&mut batch, 0, &spec.genesis_header().hash(), None)); try!(db.write(batch).map_err(ClientError::Database)); } @@ -431,14 +431,14 @@ impl Client { //let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new)); - let batch = DBTransaction::new(&self.db); + let mut batch = DBTransaction::new(&self.db); // CHECK! I *think* this is fine, even if the state_root is equal to another // already-imported block of the same number. // TODO: Prove it with a test.
- block.drain().commit(&batch, number, hash, ancient).expect("DB commit failed."); + block.drain().commit(&mut batch, number, hash, ancient).expect("DB commit failed."); - let route = self.chain.insert_block(&batch, block_data, receipts); - self.tracedb.import(&batch, TraceImportRequest { + let route = self.chain.insert_block(&mut batch, block_data, receipts); + self.tracedb.import(&mut batch, TraceImportRequest { traces: traces.into(), block_hash: hash.clone(), block_number: number, diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index e7a6425e3..7fcf30de3 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ -83,10 +83,10 @@ pub trait Key { /// Should be used to write value into database. pub trait Writable { /// Writes the value into the database. - fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref; + fn write(&mut self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref; /// Writes the value into the database and updates the cache. - fn write_with_cache(&self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where + fn write_with_cache(&mut self, col: Option, cache: &mut Cache, key: K, value: T, policy: CacheUpdatePolicy) where K: Key + Hash + Eq, T: Encodable, R: Deref { @@ -102,7 +102,7 @@ pub trait Writable { } /// Writes the values into the database and updates the cache. - fn extend_with_cache(&self, col: Option, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where + fn extend_with_cache(&mut self, col: Option, cache: &mut Cache, values: HashMap, policy: CacheUpdatePolicy) where K: Key + Hash + Eq, T: Encodable, R: Deref { @@ -169,7 +169,7 @@ pub trait Readable { } impl Writable for DBTransaction { - fn write(&self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref { + fn write(&mut self, col: Option, key: &Key, value: &T) where T: Encodable, R: Deref { self.put(col, &key.key(), &encode(value)); } } diff --git a/ethcore/src/snapshot/mod.rs b/ethcore/src/snapshot/mod.rs index 84afb7cc1..a1f9812d5 100644 --- a/ethcore/src/snapshot/mod.rs +++ b/ethcore/src/snapshot/mod.rs @@ -492,8 +492,8 @@ impl StateRebuilder { } let backing = self.db.backing().clone(); - let batch = backing.transaction(); - try!(self.db.inject(&batch)); + let mut batch = backing.transaction(); + try!(self.db.inject(&mut batch)); try!(backing.write(batch).map_err(::util::UtilError::SimpleString)); trace!(target: "snapshot", "current state root: {:?}", self.state_root); Ok(()) diff --git a/ethcore/src/snapshot/tests/blocks.rs b/ethcore/src/snapshot/tests/blocks.rs index a7a6d5b7e..ac9880263 100644 --- a/ethcore/src/snapshot/tests/blocks.rs +++ b/ethcore/src/snapshot/tests/blocks.rs @@ -43,14 +43,16 @@ fn chunk_and_restore(amount: u64) { let bc = BlockChain::new(Default::default(), &genesis, old_db.clone()); // build the blockchain. + let mut batch = old_db.transaction(); for _ in 0..amount { let block = canon_chain.generate(&mut finalizer).unwrap(); - let batch = old_db.transaction(); - bc.insert_block(&batch, &block, vec![]); + bc.insert_block(&mut batch, &block, vec![]); bc.commit(); - old_db.write(batch).unwrap(); } + old_db.write(batch).unwrap(); + + let best_hash = bc.best_block_hash(); // snapshot it. 
diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index 03a1a2232..4942ace5a 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -259,9 +259,9 @@ pub fn generate_dummy_blockchain(block_number: u32) -> GuardedTempResult GuardedTempRes let bc = BlockChain::new(BlockChainConfig::default(), &create_unverifiable_block(0, H256::zero()), db.clone()); - let batch = db.transaction(); + let mut batch = db.transaction(); for block_order in 1..block_number { - bc.insert_block(&batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]); + bc.insert_block(&mut batch, &create_unverifiable_block_with_extra(block_order, bc.best_block_hash(), None), vec![]); bc.commit(); } db.write(batch).unwrap(); diff --git a/ethcore/src/trace/db.rs b/ethcore/src/trace/db.rs index 684c12a70..d295e084c 100644 --- a/ethcore/src/trace/db.rs +++ b/ethcore/src/trace/db.rs @@ -142,7 +142,7 @@ impl TraceDB where T: DatabaseExtras { false => [0x0] }; - let batch = DBTransaction::new(&tracesdb); + let mut batch = DBTransaction::new(&tracesdb); batch.put(db::COL_TRACE, b"enabled", &encoded_tracing); batch.put(db::COL_TRACE, b"version", TRACE_DB_VER); tracesdb.write(batch).unwrap(); @@ -261,7 +261,7 @@ impl TraceDatabase for TraceDB where T: DatabaseExtras { /// Traces of import request's enacted blocks are expected to be already in database /// or to be the currently inserted trace. - fn import(&self, batch: &DBTransaction, request: ImportRequest) { + fn import(&self, batch: &mut DBTransaction, request: ImportRequest) { // valid (canon): retracted 0, enacted 1 => false, true, // valid (branch): retracted 0, enacted 0 => false, false, // valid (bbcc): retracted 1, enacted 1 => true, true, @@ -611,8 +611,8 @@ mod tests { // import block 0 let request = create_simple_import_request(0, block_0.clone()); - let batch = DBTransaction::new(&db); - tracedb.import(&batch, request); + let mut batch = DBTransaction::new(&db); + tracedb.import(&mut batch, request); db.write(batch).unwrap(); let filter = Filter { @@ -627,8 +627,8 @@ mod tests { // import block 1 let request = create_simple_import_request(1, block_1.clone()); - let batch = DBTransaction::new(&db); - tracedb.import(&batch, request); + let mut batch = DBTransaction::new(&db); + tracedb.import(&mut batch, request); db.write(batch).unwrap(); let filter = Filter { @@ -686,8 +686,8 @@ mod tests { // import block 0 let request = create_simple_import_request(0, block_0.clone()); - let batch = DBTransaction::new(&db); - tracedb.import(&batch, request); + let mut batch = DBTransaction::new(&db); + tracedb.import(&mut batch, request); db.write(batch).unwrap(); } diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 277227729..4b6b4ad92 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -121,7 +121,7 @@ pub trait Database { fn tracing_enabled(&self) -> bool; /// Imports new block traces. - fn import(&self, batch: &DBTransaction, request: ImportRequest); + fn import(&self, batch: &mut DBTransaction, request: ImportRequest); /// Returns localized trace at given position. 
fn trace(&self, block_number: BlockNumber, tx_position: usize, trace_position: Vec) -> Option; diff --git a/util/src/journaldb/archivedb.rs b/util/src/journaldb/archivedb.rs index 417d5b865..863b490a6 100644 --- a/util/src/journaldb/archivedb.rs +++ b/util/src/journaldb/archivedb.rs @@ -156,7 +156,7 @@ impl JournalDB for ArchiveDB { self.latest_era.is_none() } - fn commit(&mut self, batch: &DBTransaction, now: u64, _id: &H256, _end: Option<(u64, H256)>) -> Result { + fn commit(&mut self, batch: &mut DBTransaction, now: u64, _id: &H256, _end: Option<(u64, H256)>) -> Result { let mut inserts = 0usize; let mut deletes = 0usize; @@ -185,7 +185,7 @@ impl JournalDB for ArchiveDB { Ok((inserts + deletes) as u32) } - fn inject(&mut self, batch: &DBTransaction) -> Result { + fn inject(&mut self, batch: &mut DBTransaction) -> Result { let mut inserts = 0usize; let mut deletes = 0usize; diff --git a/util/src/journaldb/earlymergedb.rs b/util/src/journaldb/earlymergedb.rs index e2543d11c..4e53202c1 100644 --- a/util/src/journaldb/earlymergedb.rs +++ b/util/src/journaldb/earlymergedb.rs @@ -101,13 +101,13 @@ impl EarlyMergeDB { } // The next three are valid only as long as there is an insert operation of `key` in the journal. - fn set_already_in(batch: &DBTransaction, col: Option, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); } - fn reset_already_in(batch: &DBTransaction, col: Option, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); } + fn set_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { batch.put(col, &Self::morph_key(key, 0), &[1u8]); } + fn reset_already_in(batch: &mut DBTransaction, col: Option, key: &H256) { batch.delete(col, &Self::morph_key(key, 0)); } fn is_already_in(backing: &Database, col: Option, key: &H256) -> bool { backing.get(col, &Self::morph_key(key, 0)).expect("Low-level database error. Some issue with your hard disk?").is_some() } - fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, col: Option, refs: &mut HashMap, batch: &DBTransaction, trace: bool) { + fn insert_keys(inserts: &[(H256, Bytes)], backing: &Database, col: Option, refs: &mut HashMap, batch: &mut DBTransaction, trace: bool) { for &(ref h, ref d) in inserts { if let Some(c) = refs.get_mut(h) { // already counting. increment. @@ -156,7 +156,7 @@ impl EarlyMergeDB { trace!(target: "jdb.fine", "replay_keys: (end) refs={:?}", refs); } - fn remove_keys(deletes: &[H256], refs: &mut HashMap, batch: &DBTransaction, col: Option, from: RemoveFrom, trace: bool) { + fn remove_keys(deletes: &[H256], refs: &mut HashMap, batch: &mut DBTransaction, col: Option, from: RemoveFrom, trace: bool) { // with a remove on {queue_refs: 1, in_archive: true}, we have two options: // - convert to {queue_refs: 1, in_archive: false} (i.e. remove it from the conceptual archive) // - convert to {queue_refs: 0, in_archive: true} (i.e. remove it from the conceptual queue) @@ -337,7 +337,7 @@ impl JournalDB for EarlyMergeDB { } #[cfg_attr(feature="dev", allow(cyclomatic_complexity))] - fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] 
] @@ -514,7 +514,7 @@ impl JournalDB for EarlyMergeDB { Ok(0) } - fn inject(&mut self, batch: &DBTransaction) -> Result { + fn inject(&mut self, batch: &mut DBTransaction) -> Result { let mut ops = 0; for (key, (value, rc)) in self.overlay.drain() { if rc != 0 { ops += 1 } diff --git a/util/src/journaldb/overlayrecentdb.rs b/util/src/journaldb/overlayrecentdb.rs index 3d1d7e143..bd14eb161 100644 --- a/util/src/journaldb/overlayrecentdb.rs +++ b/util/src/journaldb/overlayrecentdb.rs @@ -222,7 +222,7 @@ impl JournalDB for OverlayRecentDB { .or_else(|| self.backing.get_by_prefix(self.column, &key[0..DB_PREFIX_LEN]).map(|b| b.to_vec())) } - fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // record new commit's details. trace!("commit: #{} ({}), end era: {:?}", now, id, end); let mut journal_overlay = self.journal_overlay.write(); @@ -314,7 +314,7 @@ impl JournalDB for OverlayRecentDB { self.journal_overlay.write().pending_overlay.clear(); } - fn inject(&mut self, batch: &DBTransaction) -> Result { + fn inject(&mut self, batch: &mut DBTransaction) -> Result { let mut ops = 0; for (key, (value, rc)) in self.transaction_overlay.drain() { if rc != 0 { ops += 1 } diff --git a/util/src/journaldb/refcounteddb.rs b/util/src/journaldb/refcounteddb.rs index 6b37e451f..5e3f09606 100644 --- a/util/src/journaldb/refcounteddb.rs +++ b/util/src/journaldb/refcounteddb.rs @@ -109,7 +109,7 @@ impl JournalDB for RefCountedDB { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.to_vec()) } - fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { + fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { // journal format: // [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] // [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] @@ -182,11 +182,11 @@ impl JournalDB for RefCountedDB { } } - let r = try!(self.forward.commit_to_batch(&batch)); + let r = try!(self.forward.commit_to_batch(batch)); Ok(r) } - fn inject(&mut self, batch: &DBTransaction) -> Result { + fn inject(&mut self, batch: &mut DBTransaction) -> Result { self.inserts.clear(); for remove in self.removes.drain(..) { self.forward.remove(&remove); diff --git a/util/src/journaldb/traits.rs b/util/src/journaldb/traits.rs index 1a00da1e4..85cc7fe58 100644 --- a/util/src/journaldb/traits.rs +++ b/util/src/journaldb/traits.rs @@ -37,7 +37,7 @@ pub trait JournalDB: HashDB { /// Commit all recent insert operations and canonical historical commits' removals from the /// old era to the backing database, reverting any non-canonical historical commit's inserts. - fn commit(&mut self, batch: &DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; + fn commit(&mut self, batch: &mut DBTransaction, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result; /// Commit all queued insert and delete operations without affecting any journalling -- this requires that all insertions /// and deletions are indeed canonical and will likely lead to an invalid database if that assumption is violated. @@ -46,7 +46,7 @@ pub trait JournalDB: HashDB { /// by any previous `commit` operations. Essentially, this means that `inject` can be used /// either to restore a state to a fresh database, or to insert data which may only be journalled /// from this point onwards. 
- fn inject(&mut self, batch: &DBTransaction) -> Result; + fn inject(&mut self, batch: &mut DBTransaction) -> Result; /// State data query fn state(&self, _id: &H256) -> Option; @@ -67,8 +67,8 @@ pub trait JournalDB: HashDB { /// Commit all changes in a single batch #[cfg(test)] fn commit_batch(&mut self, now: u64, id: &H256, end: Option<(u64, H256)>) -> Result { - let batch = self.backing().transaction(); - let res = try!(self.commit(&batch, now, id, end)); + let mut batch = self.backing().transaction(); + let res = try!(self.commit(&mut batch, now, id, end)); let result = self.backing().write(batch).map(|_| res).map_err(Into::into); self.flush(); result @@ -77,8 +77,8 @@ pub trait JournalDB: HashDB { /// Inject all changes in a single batch. #[cfg(test)] fn inject_batch(&mut self) -> Result { - let batch = self.backing().transaction(); - let res = try!(self.inject(&batch)); + let mut batch = self.backing().transaction(); + let res = try!(self.inject(&mut batch)); self.backing().write(batch).map(|_| res).map_err(Into::into) } } diff --git a/util/src/kvdb.rs b/util/src/kvdb.rs index 586e4b9a9..5db7801a1 100644 --- a/util/src/kvdb.rs +++ b/util/src/kvdb.rs @@ -28,7 +28,7 @@ const DB_BACKGROUND_COMPACTIONS: i32 = 2; /// Write transaction. Batches a sequence of put/delete operations for efficiency. pub struct DBTransaction { - ops: Mutex>, + ops: Vec, } enum DBOp { @@ -52,15 +52,15 @@ impl DBTransaction { /// Create new transaction. pub fn new(_db: &Database) -> DBTransaction { DBTransaction { - ops: Mutex::new(Vec::with_capacity(256)), + ops: Vec::with_capacity(256), } } /// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write. - pub fn put(&self, col: Option, key: &[u8], value: &[u8]) { + pub fn put(&mut self, col: Option, key: &[u8], value: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.lock().push(DBOp::Insert { + self.ops.push(DBOp::Insert { col: col, key: ekey, value: value.to_vec(), @@ -68,10 +68,10 @@ impl DBTransaction { } /// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write. - pub fn put_vec(&self, col: Option, key: &[u8], value: Bytes) { + pub fn put_vec(&mut self, col: Option, key: &[u8], value: Bytes) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.lock().push(DBOp::Insert { + self.ops.push(DBOp::Insert { col: col, key: ekey, value: value, @@ -79,11 +79,11 @@ impl DBTransaction { } /// Insert a key-value pair in the transaction. Any existing value value will be overwritten upon write. - /// Value will be RLP-compressed on flush - pub fn put_compressed(&self, col: Option, key: &[u8], value: Bytes) { + /// Value will be RLP-compressed on flush + pub fn put_compressed(&mut self, col: Option, key: &[u8], value: Bytes) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.lock().push(DBOp::InsertCompressed { + self.ops.push(DBOp::InsertCompressed { col: col, key: ekey, value: value, @@ -91,10 +91,10 @@ impl DBTransaction { } /// Delete value by key. - pub fn delete(&self, col: Option, key: &[u8]) { + pub fn delete(&mut self, col: Option, key: &[u8]) { let mut ekey = ElasticArray32::new(); ekey.append_slice(key); - self.ops.lock().push(DBOp::Delete { + self.ops.push(DBOp::Delete { col: col, key: ekey, }); @@ -299,7 +299,7 @@ impl Database { /// Commit transaction to database. 
pub fn write_buffered(&self, tr: DBTransaction) { let mut overlay = self.overlay.write(); - let ops = tr.ops.into_inner(); + let ops = tr.ops; for op in ops { match op { DBOp::Insert { col, key, value } => { @@ -359,7 +359,7 @@ impl Database { /// Commit transaction to database. pub fn write(&self, tr: DBTransaction) -> Result<(), String> { let batch = WriteBatch::new(); - let ops = tr.ops.into_inner(); + let ops = tr.ops; for op in ops { match op { DBOp::Insert { col, key, value } => { @@ -425,7 +425,7 @@ mod tests { let key2 = H256::from_str("03c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); let key3 = H256::from_str("01c69be41d0b7e40352fc85be1cd65eb03d40ef8427a0ca4596b1ead9a00e9fc").unwrap(); - let batch = db.transaction(); + let mut batch = db.transaction(); batch.put(None, &key1, b"cat"); batch.put(None, &key2, b"dog"); db.write(batch).unwrap(); @@ -439,17 +439,17 @@ mod tests { assert_eq!(&*contents[1].0, &*key2); assert_eq!(&*contents[1].1, b"dog"); - let batch = db.transaction(); + let mut batch = db.transaction(); batch.delete(None, &key1); db.write(batch).unwrap(); assert!(db.get(None, &key1).unwrap().is_none()); - let batch = db.transaction(); + let mut batch = db.transaction(); batch.put(None, &key1, b"cat"); db.write(batch).unwrap(); - let transaction = db.transaction(); + let mut transaction = db.transaction(); transaction.put(None, &key3, b"elephant"); transaction.delete(None, &key1); db.write(transaction).unwrap(); @@ -459,7 +459,7 @@ mod tests { assert_eq!(&*db.get_by_prefix(None, &key3).unwrap(), b"elephant"); assert_eq!(&*db.get_by_prefix(None, &key2).unwrap(), b"dog"); - let transaction = db.transaction(); + let mut transaction = db.transaction(); transaction.put(None, &key1, b"horse"); transaction.delete(None, &key3); db.write_buffered(transaction); diff --git a/util/src/migration/mod.rs b/util/src/migration/mod.rs index 0cc5436a0..cfd828086 100644 --- a/util/src/migration/mod.rs +++ b/util/src/migration/mod.rs @@ -72,7 +72,7 @@ impl Batch { pub fn commit(&mut self, dest: &mut Database) -> Result<(), Error> { if self.inner.is_empty() { return Ok(()) } - let transaction = DBTransaction::new(dest); + let mut transaction = DBTransaction::new(dest); for keypair in &self.inner { transaction.put(self.column, &keypair.0, &keypair.1); diff --git a/util/src/migration/tests.rs b/util/src/migration/tests.rs index b21f3344f..ee5ff574e 100644 --- a/util/src/migration/tests.rs +++ b/util/src/migration/tests.rs @@ -35,7 +35,7 @@ fn db_path(path: &Path) -> PathBuf { fn make_db(path: &Path, pairs: BTreeMap, Vec>) { let db = Database::open_default(path.to_str().unwrap()).expect("failed to open temp database"); { - let transaction = db.transaction(); + let mut transaction = db.transaction(); for (k, v) in pairs { transaction.put(None, &k, &v); } diff --git a/util/src/overlaydb.rs b/util/src/overlaydb.rs index a68c9a5ed..4a11961b6 100644 --- a/util/src/overlaydb.rs +++ b/util/src/overlaydb.rs @@ -58,13 +58,13 @@ impl OverlayDB { /// Commit all operations in a single batch. #[cfg(test)] pub fn commit(&mut self) -> Result { - let batch = self.backing.transaction(); - let res = try!(self.commit_to_batch(&batch)); + let mut batch = self.backing.transaction(); + let res = try!(self.commit_to_batch(&mut batch)); self.backing.write(batch).map(|_| res).map_err(|e| e.into()) } /// Commit all operations to given batch. 
- pub fn commit_to_batch(&mut self, batch: &DBTransaction) -> Result { + pub fn commit_to_batch(&mut self, batch: &mut DBTransaction) -> Result { let mut ret = 0u32; let mut deletes = 0usize; for i in self.overlay.drain().into_iter() { @@ -111,7 +111,7 @@ impl OverlayDB { } /// Put the refs and value of the given key, possibly deleting it from the db. - fn put_payload_in_batch(&self, batch: &DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { + fn put_payload_in_batch(&self, batch: &mut DBTransaction, key: &H256, payload: (Bytes, u32)) -> bool { if payload.1 > 0 { let mut s = RlpStream::new_list(2); s.append(&payload.1); @@ -195,8 +195,8 @@ impl HashDB for OverlayDB { fn overlaydb_revert() { let mut m = OverlayDB::new_temp(); let foo = m.insert(b"foo"); // insert foo. - let batch = m.backing.transaction(); - m.commit_to_batch(&batch).unwrap(); // commit - new operations begin here... + let mut batch = m.backing.transaction(); + m.commit_to_batch(&mut batch).unwrap(); // commit - new operations begin here... m.backing.write(batch).unwrap(); let bar = m.insert(b"bar"); // insert bar. m.remove(&foo); // remove foo. @@ -300,7 +300,7 @@ fn playpen() { use std::fs; { let db = Database::open_default("/tmp/test").unwrap(); - let batch = db.transaction(); + let mut batch = db.transaction(); batch.put(None, b"test", b"test2"); db.write(batch).unwrap(); match db.get(None, b"test") { @@ -308,7 +308,7 @@ fn playpen() { Ok(None) => println!("No value for that key"), Err(..) => println!("Gah"), } - let batch = db.transaction(); + let mut batch = db.transaction(); batch.delete(None, b"test"); db.write(batch).unwrap(); }
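
The journaldb and overlaydb changes follow the same discipline: `commit`, `inject` and `commit_to_batch` now take `&mut DBTransaction`, so an era is committed by building one mutable batch, letting the journal record into it, and writing once. The following is a sketch of that flow mirroring the commit_batch() test helper in util/src/journaldb/traits.rs, not part of the patch; it assumes the `JournalDB` trait, `H256` and `UtilError` exported by this tree's util crate, and the era number and id are the caller's own.

// Sketch only: commit one era of a JournalDB through an explicit, mutable batch.
extern crate ethcore_util;
use ethcore_util::journaldb::JournalDB;
use ethcore_util::hash::H256;
use ethcore_util::UtilError;

fn commit_era(jdb: &mut Box<JournalDB>, now: u64, id: &H256) -> Result<u32, UtilError> {
	// One mutable batch for the whole era.
	let mut batch = jdb.backing().transaction();
	// The journal records its inserts and removals into the batch.
	let ops = try!(jdb.commit(&mut batch, now, id, None));
	// Single atomic write of everything that was queued.
	try!(jdb.backing().write(batch).map_err(UtilError::SimpleString));
	jdb.flush();
	Ok(ops)
}

Nothing in this flow needs a lock: the batch is never shared until it is handed to `write`, which is exactly the invariant the removal of the internal Mutex relies on.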