From 0cfc6bf2a61937e639a94dbe036ebb108d329fd3 Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Thu, 2 Aug 2018 23:18:49 +0200 Subject: [PATCH 01/48] Fix path to parity.h (#9274) * Fix path to parity.h * Fix other paths as well --- parity-clib-examples/cpp/CMakeLists.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/parity-clib-examples/cpp/CMakeLists.txt b/parity-clib-examples/cpp/CMakeLists.txt index 143d014e3..ed6229061 100644 --- a/parity-clib-examples/cpp/CMakeLists.txt +++ b/parity-clib-examples/cpp/CMakeLists.txt @@ -1,7 +1,7 @@ cmake_minimum_required(VERSION 3.5) include(ExternalProject) -include_directories("${CMAKE_SOURCE_DIR}/../parity-clib") +include_directories("${CMAKE_SOURCE_DIR}/../../parity-clib") add_executable(parity-example main.cpp) @@ -11,9 +11,9 @@ ExternalProject_Add( CONFIGURE_COMMAND "" BUILD_COMMAND "" COMMAND cargo build -p parity-clib # Note: use --release in a real project - BINARY_DIR "${CMAKE_SOURCE_DIR}/../target" + BINARY_DIR "${CMAKE_SOURCE_DIR}/../../target" INSTALL_COMMAND "" LOG_BUILD ON) add_dependencies(parity-example libparity) -target_link_libraries(parity-example "${CMAKE_SOURCE_DIR}/../target/debug/libparity.so") +target_link_libraries(parity-example "${CMAKE_SOURCE_DIR}/../../target/debug/libparity.so") From 25604dc5772f649b3054b887313e01b7673f460a Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 3 Aug 2018 09:58:59 +0200 Subject: [PATCH 02/48] Avoid using $HOME if not necessary (#9273) * Avoid using $HOME if not necessary * Fix concerns and issues --- parity/helpers.rs | 2 +- parity/upgrade.rs | 14 +++++++------- util/dir/src/helpers.rs | 7 ++++++- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/parity/helpers.rs b/parity/helpers.rs index 342306c15..52291ce9d 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -261,7 +261,7 @@ pub fn execute_upgrades( upgrade_data_paths(base_path, dirs, pruning); - match upgrade(Some(&dirs.path)) { + match upgrade(&dirs.path) { Ok(upgrades_applied) if upgrades_applied > 0 => { debug!("Executed {} upgrade scripts - ok", upgrades_applied); }, diff --git a/parity/upgrade.rs b/parity/upgrade.rs index d98123ce1..e81c1cbee 100644 --- a/parity/upgrade.rs +++ b/parity/upgrade.rs @@ -102,14 +102,10 @@ fn upgrade_from_version(previous_version: &Version) -> Result { Ok(count) } -fn with_locked_version(db_path: Option<&str>, script: F) -> Result +fn with_locked_version(db_path: &str, script: F) -> Result where F: Fn(&Version) -> Result { - let mut path = db_path.map_or({ - let mut path = env::home_dir().expect("Applications should have a home dir"); - path.push(".parity"); - path - }, PathBuf::from); + let mut path = PathBuf::from(db_path); create_dir_all(&path).map_err(|_| Error::CannotCreateConfigPath)?; path.push("ver.lock"); @@ -131,7 +127,7 @@ fn with_locked_version(db_path: Option<&str>, script: F) -> Result) -> Result { +pub fn upgrade(db_path: &str) -> Result { with_locked_version(db_path, |ver| { upgrade_from_version(ver) }) @@ -205,6 +201,10 @@ fn upgrade_user_defaults(dirs: &DatabaseDirectories) { } pub fn upgrade_data_paths(base_path: &str, dirs: &DatabaseDirectories, pruning: Algorithm) { + if env::home_dir().is_none() { + return; + } + let legacy_root_path = replace_home("", "$HOME/.parity"); let default_path = default_data_path(); if legacy_root_path != base_path && base_path == default_path { diff --git a/util/dir/src/helpers.rs b/util/dir/src/helpers.rs index 820b9dc5a..24faaff6e 100644 --- a/util/dir/src/helpers.rs +++ b/util/dir/src/helpers.rs @@ 
-20,7 +20,12 @@ use std::env; /// Replaces `$HOME` str with home directory path. pub fn replace_home(base: &str, arg: &str) -> String { // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` - let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()); + // We use an `if` so that we don't need to call `home_dir()` if not necessary. + let r = if arg.contains("$HOME") { + arg.replace("$HOME", env::home_dir().expect("$HOME isn't defined").to_str().unwrap()) + } else { + arg.to_owned() + }; let r = r.replace("$BASE", base); r.replace("/", &::std::path::MAIN_SEPARATOR.to_string()) } From 3f2fd610d9108da8bbccad85ebcea655ffce537e Mon Sep 17 00:00:00 2001 From: Jongsic Choi Date: Mon, 6 Aug 2018 20:04:28 +0900 Subject: [PATCH 03/48] Fix loop start value (#9285) --- ethcore/src/client/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3a7fd0d0c..efc8b3f2e 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1843,7 +1843,7 @@ impl BlockChainClient for Client { // Otherwise, we use a slower version that finds a link between from_block and to_block. let from_hash = Self::block_hash(&chain, filter.from_block)?; let from_number = chain.block_number(&from_hash)?; - let to_hash = Self::block_hash(&chain, filter.from_block)?; + let to_hash = Self::block_hash(&chain, filter.to_block)?; let blooms = filter.bloom_possibilities(); let bloom_match = |header: &encoded::Header| { From e8b13cb77e30f60677d09b1b4eab1a4998dde28e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 6 Aug 2018 23:15:52 +0800 Subject: [PATCH 04/48] Implement KIP4: create2 for wasm (#9277) * Basic implementation for kip4 * Add KIP-4 config flags * typo: docs fix * Fix args offset * Add tests for create2 * tests: evm * Update wasm-tests and fix all gas costs * Update wasm-tests * Update wasm-tests and fix gas costs --- ethcore/evm/src/tests.rs | 2 + ethcore/res/wasm-tests | 2 +- ethcore/src/spec/spec.rs | 12 +++++- ethcore/vm/src/ext.rs | 2 +- ethcore/vm/src/schedule.rs | 3 ++ ethcore/vm/src/tests.rs | 5 ++- ethcore/wasm/src/env.rs | 15 ++++++- ethcore/wasm/src/lib.rs | 2 +- ethcore/wasm/src/runtime.rs | 82 ++++++++++++++++++++++++++----------- ethcore/wasm/src/tests.rs | 73 +++++++++++++++++++++------------ json/src/spec/params.rs | 3 ++ 11 files changed, 143 insertions(+), 58 deletions(-) diff --git a/ethcore/evm/src/tests.rs b/ethcore/evm/src/tests.rs index b62faf87d..b8f0df363 100644 --- a/ethcore/evm/src/tests.rs +++ b/ethcore/evm/src/tests.rs @@ -746,6 +746,7 @@ fn test_calls(factory: super::Factory) { assert_set_contains(&ext.calls, &FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(code_address.clone()), @@ -755,6 +756,7 @@ fn test_calls(factory: super::Factory) { }); assert_set_contains(&ext.calls, &FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: U256::from(2556), sender_address: Some(address.clone()), receive_address: Some(address.clone()), diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index 474110de5..986a6fb94 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit 474110de59a0f632b20615256c913b144c49354c +Subproject commit 986a6fb94673ba270f8f7ef1fff521ba33d427c2 diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index d79775f78..45f9a6c95 100644 --- a/ethcore/src/spec/spec.rs 
+++ b/ethcore/src/spec/spec.rs @@ -125,6 +125,8 @@ pub struct CommonParams { pub remove_dust_contracts: bool, /// Wasm activation blocknumber, if any disabled initially. pub wasm_activation_transition: BlockNumber, + /// Number of first block where KIP-4 rules begin. Only has effect if Wasm is activated. + pub kip4_transition: BlockNumber, /// Gas limit bound divisor (how much gas limit can change per block) pub gas_limit_bound_divisor: U256, /// Registrar contract address. @@ -187,7 +189,11 @@ impl CommonParams { }; } if block_number >= self.wasm_activation_transition { - schedule.wasm = Some(Default::default()); + let mut wasm = ::vm::WasmCosts::default(); + if block_number >= self.kip4_transition { + wasm.have_create2 = true; + } + schedule.wasm = Some(wasm); } } @@ -294,6 +300,10 @@ impl From for CommonParams { BlockNumber::max_value, Into::into ), + kip4_transition: p.kip4_transition.map_or_else( + BlockNumber::max_value, + Into::into + ), } } } diff --git a/ethcore/vm/src/ext.rs b/ethcore/vm/src/ext.rs index 3e6ee1e02..c1ce1b79f 100644 --- a/ethcore/vm/src/ext.rs +++ b/ethcore/vm/src/ext.rs @@ -51,7 +51,7 @@ pub enum MessageCallResult { } /// Specifies how an address is calculated for a new contract. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)] pub enum CreateContractAddress { /// Address is calculated from sender and nonce. Pre EIP-86 (Metropolis) FromSenderAndNonce, diff --git a/ethcore/vm/src/schedule.rs b/ethcore/vm/src/schedule.rs index 757d8a16d..ec72c4683 100644 --- a/ethcore/vm/src/schedule.rs +++ b/ethcore/vm/src/schedule.rs @@ -149,6 +149,8 @@ pub struct WasmCosts { pub opcodes_mul: u32, /// Cost of wasm opcode is calculated as TABLE_ENTRY_COST * `opcodes_mul` / `opcodes_div` pub opcodes_div: u32, + /// Whether create2 extern function is activated. + pub have_create2: bool, } impl Default for WasmCosts { @@ -166,6 +168,7 @@ impl Default for WasmCosts { max_stack_height: 64*1024, opcodes_mul: 3, opcodes_div: 8, + have_create2: false, } } } diff --git a/ethcore/vm/src/tests.rs b/ethcore/vm/src/tests.rs index d83e6881a..4930e4219 100644 --- a/ethcore/vm/src/tests.rs +++ b/ethcore/vm/src/tests.rs @@ -39,6 +39,7 @@ pub enum FakeCallType { #[derive(PartialEq, Eq, Hash, Debug)] pub struct FakeCall { pub call_type: FakeCallType, + pub create_scheme: Option, pub gas: U256, pub sender_address: Option
<Address>, pub receive_address: Option<Address>
, @@ -133,9 +134,10 @@ impl Ext for FakeExt { self.blockhashes.get(number).unwrap_or(&H256::new()).clone() } - fn create(&mut self, gas: &U256, value: &U256, code: &[u8], _address: CreateContractAddress) -> ContractCreateResult { + fn create(&mut self, gas: &U256, value: &U256, code: &[u8], address: CreateContractAddress) -> ContractCreateResult { self.calls.insert(FakeCall { call_type: FakeCallType::Create, + create_scheme: Some(address), gas: *gas, sender_address: None, receive_address: None, @@ -159,6 +161,7 @@ impl Ext for FakeExt { self.calls.insert(FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: *gas, sender_address: Some(sender_address.clone()), receive_address: Some(receive_address.clone()), diff --git a/ethcore/wasm/src/env.rs b/ethcore/wasm/src/env.rs index 9bcbee63f..a9e536f5f 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -17,6 +17,7 @@ //! Env module glue for wasmi interpreter use std::cell::RefCell; +use vm::WasmCosts; use wasmi::{ self, Signature, Error, FuncRef, FuncInstance, MemoryDescriptor, MemoryRef, MemoryInstance, memory_units, @@ -47,6 +48,7 @@ pub mod ids { pub const SENDER_FUNC: usize = 190; pub const ORIGIN_FUNC: usize = 200; pub const ELOG_FUNC: usize = 210; + pub const CREATE2_FUNC: usize = 220; pub const PANIC_FUNC: usize = 1000; pub const DEBUG_FUNC: usize = 1010; @@ -125,6 +127,11 @@ pub mod signatures { Some(I32), ); + pub const CREATE2: StaticSignature = StaticSignature( + &[I32, I32, I32, I32, I32], + Some(I32), + ); + pub const SUICIDE: StaticSignature = StaticSignature( &[I32], None, @@ -195,18 +202,21 @@ fn host(signature: signatures::StaticSignature, idx: usize) -> FuncRef { /// Maps all functions that runtime support to the corresponding contract import /// entries. /// Also manages initial memory request from the runtime. 
-#[derive(Default)] pub struct ImportResolver { max_memory: u32, memory: RefCell>, + + have_create2: bool, } impl ImportResolver { /// New import resolver with specifed maximum amount of inital memory (in wasm pages = 64kb) - pub fn with_limit(max_memory: u32) -> ImportResolver { + pub fn with_limit(max_memory: u32, schedule: &WasmCosts) -> ImportResolver { ImportResolver { max_memory: max_memory, memory: RefCell::new(None), + + have_create2: schedule.have_create2, } } @@ -263,6 +273,7 @@ impl wasmi::ModuleImportResolver for ImportResolver { "sender" => host(signatures::SENDER, ids::SENDER_FUNC), "origin" => host(signatures::ORIGIN, ids::ORIGIN_FUNC), "elog" => host(signatures::ELOG, ids::ELOG_FUNC), + "create2" if self.have_create2 => host(signatures::CREATE2, ids::CREATE2_FUNC), _ => { return Err(wasmi::Error::Instantiation( format!("Export {} not found", field_name), diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index f1290318e..1fcfe9371 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -90,7 +90,7 @@ impl vm::Vm for WasmInterpreter { let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?; - let instantiation_resolver = env::ImportResolver::with_limit(16); + let instantiation_resolver = env::ImportResolver::with_limit(16, ext.schedule().wasm()); let module_instance = wasmi::ModuleInstance::new( &loaded_module, diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index d99ab8645..347023dd9 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -321,7 +321,7 @@ impl<'a> Runtime<'a> { if self.gas_counter > self.gas_limit { return Err(Error::InvalidGasState); } Ok(self.gas_limit - self.gas_counter) } - + /// General gas charging extern. 
fn gas(&mut self, args: RuntimeArgs) -> Result<()> { let amount: u32 = args.nth_checked(0)?; @@ -511,29 +511,7 @@ impl<'a> Runtime<'a> { self.return_u256_ptr(args.nth_checked(0)?, val) } - /// Creates a new contract - /// - /// Arguments: - /// * endowment - how much value (in Wei) transfer to the newly created contract - /// * code_ptr - pointer to the code data - /// * code_len - lenght of the code data - /// * result_ptr - pointer to write an address of the newly created contract - pub fn create(&mut self, args: RuntimeArgs) -> Result - { - // - // method signature: - // fn create(endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; - // - trace!(target: "wasm", "runtime: CREATE"); - let endowment = self.u256_at(args.nth_checked(0)?)?; - trace!(target: "wasm", " val: {:?}", endowment); - let code_ptr: u32 = args.nth_checked(1)?; - trace!(target: "wasm", " code_ptr: {:?}", code_ptr); - let code_len: u32 = args.nth_checked(2)?; - trace!(target: "wasm", " code_len: {:?}", code_len); - let result_ptr: u32 = args.nth_checked(3)?; - trace!(target: "wasm", "result_ptr: {:?}", result_ptr); - + fn do_create(&mut self, endowment: U256, code_ptr: u32, code_len: u32, result_ptr: u32, scheme: vm::CreateContractAddress) -> Result { let code = self.memory.get(code_ptr, code_len as usize)?; self.adjusted_charge(|schedule| schedule.create_gas as u64)?; @@ -543,7 +521,7 @@ impl<'a> Runtime<'a> { * U256::from(self.ext.schedule().wasm().opcodes_mul) / U256::from(self.ext.schedule().wasm().opcodes_div); - match self.ext.create(&gas_left, &endowment, &code, vm::CreateContractAddress::FromSenderAndCodeHash) { + match self.ext.create(&gas_left, &endowment, &code, scheme) { vm::ContractCreateResult::Created(address, gas_left) => { self.memory.set(result_ptr, &*address)?; self.gas_counter = self.gas_limit - @@ -571,6 +549,59 @@ impl<'a> Runtime<'a> { } } + /// Creates a new contract + /// + /// Arguments: + /// * endowment - how much value (in Wei) transfer to the newly created contract + /// * code_ptr - pointer to the code data + /// * code_len - lenght of the code data + /// * result_ptr - pointer to write an address of the newly created contract + pub fn create(&mut self, args: RuntimeArgs) -> Result { + // + // method signature: + // fn create(endowment: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: *mut u8) -> i32; + // + trace!(target: "wasm", "runtime: CREATE"); + let endowment = self.u256_at(args.nth_checked(0)?)?; + trace!(target: "wasm", " val: {:?}", endowment); + let code_ptr: u32 = args.nth_checked(1)?; + trace!(target: "wasm", " code_ptr: {:?}", code_ptr); + let code_len: u32 = args.nth_checked(2)?; + trace!(target: "wasm", " code_len: {:?}", code_len); + let result_ptr: u32 = args.nth_checked(3)?; + trace!(target: "wasm", "result_ptr: {:?}", result_ptr); + + self.do_create(endowment, code_ptr, code_len, result_ptr, vm::CreateContractAddress::FromSenderAndCodeHash) + } + + /// Creates a new contract using FromSenderSaltAndCodeHash scheme + /// + /// Arguments: + /// * endowment - how much value (in Wei) transfer to the newly created contract + /// * salt - salt to be used in contract creation address + /// * code_ptr - pointer to the code data + /// * code_len - lenght of the code data + /// * result_ptr - pointer to write an address of the newly created contract + pub fn create2(&mut self, args: RuntimeArgs) -> Result { + // + // method signature: + // fn create2(endowment: *const u8, salt: *const u8, code_ptr: *const u8, code_len: u32, result_ptr: 
*mut u8) -> i32; + // + trace!(target: "wasm", "runtime: CREATE2"); + let endowment = self.u256_at(args.nth_checked(0)?)?; + trace!(target: "wasm", " val: {:?}", endowment); + let salt: H256 = self.u256_at(args.nth_checked(1)?)?.into(); + trace!(target: "wasm", " salt: {:?}", salt); + let code_ptr: u32 = args.nth_checked(2)?; + trace!(target: "wasm", " code_ptr: {:?}", code_ptr); + let code_len: u32 = args.nth_checked(3)?; + trace!(target: "wasm", " code_len: {:?}", code_len); + let result_ptr: u32 = args.nth_checked(4)?; + trace!(target: "wasm", "result_ptr: {:?}", result_ptr); + + self.do_create(endowment, code_ptr, code_len, result_ptr, vm::CreateContractAddress::FromSenderSaltAndCodeHash(salt)) + } + fn debug(&mut self, args: RuntimeArgs) -> Result<()> { trace!(target: "wasm", "Contract debug message: {}", { @@ -744,6 +775,7 @@ mod ext_impl { SENDER_FUNC => void!(self.sender(args)), ORIGIN_FUNC => void!(self.origin(args)), ELOG_FUNC => void!(self.elog(args)), + CREATE2_FUNC => some!(self.create2(args)), _ => panic!("env module doesn't provide function at index {}", index), } } diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index 726b9ebab..e72cc15c8 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -20,7 +20,7 @@ use byteorder::{LittleEndian, ByteOrder}; use ethereum_types::{H256, U256, Address}; use super::WasmInterpreter; -use vm::{self, Vm, GasLeft, ActionParams, ActionValue}; +use vm::{self, Vm, GasLeft, ActionParams, ActionValue, CreateContractAddress}; use vm::tests::{FakeCall, FakeExt, FakeCallType}; macro_rules! load_sample { @@ -138,7 +138,7 @@ fn logger() { U256::from(1_000_000_000), "Logger sets 0x04 key to the trasferred value" ); - assert_eq!(gas_left, U256::from(16_181)); + assert_eq!(gas_left, U256::from(17_716)); } // This test checks if the contract can allocate memory and pass pointer to the result stream properly. @@ -173,7 +173,7 @@ fn identity() { sender, "Idenity test contract does not return the sender passed" ); - assert_eq!(gas_left, U256::from(96_883)); + assert_eq!(gas_left, U256::from(98_419)); } // Dispersion test sends byte array and expect the contract to 'disperse' the original elements with @@ -207,7 +207,7 @@ fn dispersion() { result, vec![0u8, 0, 125, 11, 197, 7, 255, 8, 19, 0] ); - assert_eq!(gas_left, U256::from(92_371)); + assert_eq!(gas_left, U256::from(92_377)); } #[test] @@ -267,7 +267,7 @@ fn suicide() { }; assert!(ext.suicides.contains(&refund)); - assert_eq!(gas_left, U256::from(93_348)); + assert_eq!(gas_left, U256::from(93_346)); } #[test] @@ -281,14 +281,19 @@ fn create() { params.value = ActionValue::transfer(1_000_000_000); let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_create2 = true; let gas_left = { let mut interpreter = wasm_interpreter(); let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); match result { - GasLeft::Known(gas) => gas, - GasLeft::NeedsReturn { .. } => { - panic!("Create contract should not return anthing because ext always fails on creation"); + GasLeft::Known(_) => { + panic!("Create contract always return 40 bytes of the creation address, or in the case where it fails, return 40 bytes of zero."); + }, + GasLeft::NeedsReturn { gas_left, data, apply_state } => { + assert!(apply_state); + assert_eq!(data.as_ref(), [0u8; 40].as_ref()); // FakeExt never succeeds in create. 
+ gas_left }, } }; @@ -297,15 +302,28 @@ fn create() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Create, - gas: U256::from(59_269), + create_scheme: Some(CreateContractAddress::FromSenderAndCodeHash), + gas: U256::from(52_017), sender_address: None, receive_address: None, - value: Some(1_000_000_000.into()), + value: Some((1_000_000_000 / 2).into()), data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], code_address: None, } )); - assert_eq!(gas_left, U256::from(59_212)); + assert!(ext.calls.contains( + &FakeCall { + call_type: FakeCallType::Create, + create_scheme: Some(CreateContractAddress::FromSenderSaltAndCodeHash(H256::from([5u8].as_ref()))), + gas: U256::from(10_740), + sender_address: None, + receive_address: None, + value: Some((1_000_000_000 / 2).into()), + data: vec![0u8, 2, 4, 8, 16, 32, 64, 128], + code_address: None, + } + )); + assert_eq!(gas_left, U256::from(10_675)); } #[test] @@ -340,6 +358,7 @@ fn call_msg() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: U256::from(33_000), sender_address: Some(receiver), receive_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), @@ -382,6 +401,7 @@ fn call_code() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: U256::from(20_000), sender_address: Some(sender), receive_address: Some(receiver), @@ -394,7 +414,7 @@ fn call_code() { // siphash result let res = LittleEndian::read_u32(&result[..]); assert_eq!(res, 4198595614); - assert_eq!(gas_left, U256::from(90_038)); + assert_eq!(gas_left, U256::from(90_037)); } #[test] @@ -429,6 +449,7 @@ fn call_static() { assert!(ext.calls.contains( &FakeCall { call_type: FakeCallType::Call, + create_scheme: None, gas: U256::from(20_000), sender_address: Some(receiver), receive_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()), @@ -442,7 +463,7 @@ fn call_static() { let res = LittleEndian::read_u32(&result[..]); assert_eq!(res, 317632590); - assert_eq!(gas_left, U256::from(90_043)); + assert_eq!(gas_left, U256::from(90_042)); } // Realloc test @@ -465,7 +486,7 @@ fn realloc() { } }; assert_eq!(result, vec![0u8; 2]); - assert_eq!(gas_left, U256::from(92_842)); + assert_eq!(gas_left, U256::from(92_848)); } #[test] @@ -487,7 +508,7 @@ fn alloc() { } }; assert_eq!(result, vec![5u8; 1024*400]); - assert_eq!(gas_left, U256::from(6_893_883)); + assert_eq!(gas_left, U256::from(6_893_881)); } // Tests that contract's ability to read from a storage @@ -515,7 +536,7 @@ fn storage_read() { }; assert_eq!(Address::from(&result[12..32]), address); - assert_eq!(gas_left, U256::from(96_833)); + assert_eq!(gas_left, U256::from(98_369)); } // Tests keccak calculation @@ -541,7 +562,7 @@ fn keccak() { }; assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(gas_left, U256::from(84_134)); + assert_eq!(gas_left, U256::from(85_949)); } // math_* tests check the ability of wasm contract to perform big integer operations @@ -570,7 +591,7 @@ fn math_add() { U256::from_dec_str("1888888888888888888888888888887").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(92_086)); + assert_eq!(gas_left, U256::from(92_095)); } // multiplication @@ -592,7 +613,7 @@ fn math_mul() { U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(91_414)); + 
assert_eq!(gas_left, U256::from(91_423)); } // subtraction @@ -614,7 +635,7 @@ fn math_sub() { U256::from_dec_str("111111111111111111111111111111").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(92_086)); + assert_eq!(gas_left, U256::from(92_095)); } // subtraction with overflow @@ -656,7 +677,7 @@ fn math_div() { U256::from_dec_str("1125000").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(87_376)); + assert_eq!(gas_left, U256::from(87_379)); } #[test] @@ -684,7 +705,7 @@ fn storage_metering() { }; // 0 -> not 0 - assert_eq!(gas_left, U256::from(72_399)); + assert_eq!(gas_left, U256::from(72_395)); // #2 @@ -703,7 +724,7 @@ fn storage_metering() { }; // not 0 -> not 0 - assert_eq!(gas_left, U256::from(87_399)); + assert_eq!(gas_left, U256::from(87_395)); } // This test checks the ability of wasm contract to invoke @@ -791,7 +812,7 @@ fn externs() { "Gas limit requested and returned does not match" ); - assert_eq!(gas_left, U256::from(90_435)); + assert_eq!(gas_left, U256::from(90_428)); } #[test] @@ -817,7 +838,7 @@ fn embedded_keccak() { }; assert_eq!(H256::from_slice(&result), H256::from("68371d7e884c168ae2022c82bd837d51837718a7f7dfb7aa3f753074a35e1d87")); - assert_eq!(gas_left, U256::from(84_134)); + assert_eq!(gas_left, U256::from(85_949)); } /// This test checks the correctness of log extern @@ -852,7 +873,7 @@ fn events() { assert_eq!(&log_entry.data, b"gnihtemos"); assert_eq!(&result, b"gnihtemos"); - assert_eq!(gas_left, U256::from(81_351)); + assert_eq!(gas_left, U256::from(83_158)); } #[test] diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index a37e4f23b..0fab68198 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -146,6 +146,9 @@ pub struct Params { /// Wasm activation block height, if not activated from start #[serde(rename="wasmActivationTransition")] pub wasm_activation_transition: Option, + /// KIP4 activiation block height. 
+ #[serde(rename="kip4Transition")] + pub kip4_transition: Option, } #[cfg(test)] From 1e44a624947ca304bb6c824ec7b529fc600c0878 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 7 Aug 2018 17:36:56 +0800 Subject: [PATCH 05/48] Update wasm-tests hash (#9295) --- ethcore/res/wasm-tests | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index 986a6fb94..242b8d8a8 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit 986a6fb94673ba270f8f7ef1fff521ba33d427c2 +Subproject commit 242b8d8a89ecb3e11277f0beb8180c95792aac6b From 0d8001adea18e927289340c67332c1dd2b474c79 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 7 Aug 2018 17:48:25 +0800 Subject: [PATCH 06/48] Improve return data truncate logic (#9254) * Improve return data truncate logic * fix: size -> offset + size --- ethcore/evm/src/interpreter/memory.rs | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/ethcore/evm/src/interpreter/memory.rs b/ethcore/evm/src/interpreter/memory.rs index 843aeef3b..313d1b48f 100644 --- a/ethcore/evm/src/interpreter/memory.rs +++ b/ethcore/evm/src/interpreter/memory.rs @@ -119,14 +119,19 @@ impl Memory for Vec { fn into_return_data(mut self, offset: U256, size: U256) -> ReturnData { let mut offset = offset.low_u64() as usize; let size = size.low_u64() as usize; + if !is_valid_range(offset, size) { - return ReturnData::empty() + return ReturnData::empty(); } + if self.len() - size > MAX_RETURN_WASTE_BYTES { - { let _ = self.drain(..offset); } - self.truncate(size); - self.shrink_to_fit(); - offset = 0; + if offset == 0 { + self.truncate(size); + self.shrink_to_fit(); + } else { + self = self[offset..(offset + size)].to_vec(); + offset = 0; + } } ReturnData::new(self, offset, size) } From 1f18dbb17c5366af0cb324bf19ade483ed760266 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 7 Aug 2018 20:52:23 +0800 Subject: [PATCH 07/48] Remove all dapp permissions related settings (#9120) * Completely remove all dapps struct from rpc * Remove unused pub use * Remove dapp policy/permission func in ethcore * Remove all dapps settings from rpc * Fix rpc tests * Use both origin and user_agent * Address grumbles * Address grumbles * Fix tests --- ethcore/src/account_provider/mod.rs | 294 +-------------------- ethcore/src/account_provider/stores.rs | 281 +------------------- json/src/misc/dapps_settings.rs | 53 ---- json/src/misc/mod.rs | 2 - parity/lib.rs | 3 - rpc/src/http_common.rs | 5 +- rpc/src/tests/rpc.rs | 59 +---- rpc/src/v1/extractors.rs | 45 ++-- rpc/src/v1/helpers/fake_sign.rs | 10 +- rpc/src/v1/helpers/signing_queue.rs | 11 +- rpc/src/v1/impls/eth.rs | 32 +-- rpc/src/v1/impls/light/eth.rs | 16 +- rpc/src/v1/impls/light/parity.rs | 16 +- rpc/src/v1/impls/light/trace.rs | 4 +- rpc/src/v1/impls/parity.rs | 21 +- rpc/src/v1/impls/parity_accounts.rs | 57 +--- rpc/src/v1/impls/personal.rs | 4 +- rpc/src/v1/impls/private.rs | 4 +- rpc/src/v1/impls/signing.rs | 11 +- rpc/src/v1/impls/signing_unsafe.rs | 13 +- rpc/src/v1/impls/traces.rs | 8 +- rpc/src/v1/metadata.rs | 24 +- rpc/src/v1/tests/mocked/eth.rs | 16 -- rpc/src/v1/tests/mocked/parity.rs | 7 - rpc/src/v1/tests/mocked/parity_accounts.rs | 100 ------- rpc/src/v1/tests/mocked/signer.rs | 4 +- rpc/src/v1/traits/eth.rs | 16 +- rpc/src/v1/traits/parity.rs | 12 +- rpc/src/v1/traits/parity_accounts.rs | 53 +--- rpc/src/v1/traits/private.rs | 4 +- rpc/src/v1/traits/traces.rs | 8 +- rpc/src/v1/types/confirmations.rs | 7 +- 
rpc/src/v1/types/mod.rs | 2 +- rpc/src/v1/types/provenance.rs | 88 +----- 34 files changed, 122 insertions(+), 1168 deletions(-) delete mode 100644 json/src/misc/dapps_settings.rs diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index e4289c60a..04bc4f409 100644 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -18,9 +18,9 @@ mod stores; -use self::stores::{AddressBook, DappsSettingsStore, NewDappsPolicy}; +use self::stores::AddressBook; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::fmt; use std::time::{Instant, Duration}; @@ -96,20 +96,6 @@ impl From for SignError { /// `AccountProvider` errors. pub type Error = SSError; -/// Dapp identifier -#[derive(Default, Debug, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)] -pub struct DappId(String); - -impl From for String { - fn from(id: DappId) -> String { id.0 } -} -impl From for DappId { - fn from(id: String) -> DappId { DappId(id) } -} -impl<'a> From<&'a str> for DappId { - fn from(id: &'a str) -> DappId { DappId(id.to_owned()) } -} - fn transient_sstore() -> EthMultiStore { EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed") } @@ -125,8 +111,6 @@ pub struct AccountProvider { unlocked: RwLock>, /// Address book. address_book: RwLock, - /// Dapps settings. - dapps_settings: RwLock, /// Accounts on disk sstore: Box, /// Accounts unlocked with rolling tokens @@ -167,7 +151,7 @@ impl AccountProvider { /// Creates new account provider. pub fn new(sstore: Box, settings: AccountProviderSettings) -> Self { let mut hardware_store = None; - + if settings.enable_hardware_wallets { match HardwareWalletManager::new() { Ok(manager) => { @@ -195,7 +179,6 @@ impl AccountProvider { unlocked_secrets: RwLock::new(HashMap::new()), unlocked: RwLock::new(HashMap::new()), address_book: RwLock::new(address_book), - dapps_settings: RwLock::new(DappsSettingsStore::new(&sstore.local_path())), sstore: sstore, transient_sstore: transient_sstore(), hardware_store: hardware_store, @@ -210,7 +193,6 @@ impl AccountProvider { unlocked_secrets: RwLock::new(HashMap::new()), unlocked: RwLock::new(HashMap::new()), address_book: RwLock::new(AddressBook::transient()), - dapps_settings: RwLock::new(DappsSettingsStore::transient()), sstore: Box::new(EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed")), transient_sstore: transient_sstore(), hardware_store: None, @@ -290,9 +272,14 @@ impl AccountProvider { ) } + /// Returns the address of default account. + pub fn default_account(&self) -> Result { + Ok(self.accounts()?.first().cloned().unwrap_or_default()) + } + /// Returns addresses of hardware accounts. pub fn hardware_accounts(&self) -> Result, Error> { - if let Some(accounts) = self.hardware_store.as_ref().map(|h| h.list_wallets()) { + if let Some(accounts) = self.hardware_store.as_ref().map(|h| h.list_wallets()) { if !accounts.is_empty() { return Ok(accounts.into_iter().map(|a| a.address).collect()); } @@ -308,7 +295,7 @@ impl AccountProvider { Some(Ok(s)) => Ok(s), } } - + /// Provide a pin to a locked hardware wallet on USB path to unlock it pub fn hardware_pin_matrix_ack(&self, path: &str, pin: &str) -> Result { match self.hardware_store.as_ref().map(|h| h.pin_matrix_ack(path, pin)) { @@ -318,175 +305,6 @@ impl AccountProvider { } } - /// Sets addresses of accounts exposed for unknown dapps. - /// `None` means that all accounts will be visible. 
- /// If not `None` or empty it will also override default account. - pub fn set_new_dapps_addresses(&self, accounts: Option>) -> Result<(), Error> { - let current_default = self.new_dapps_default_address()?; - - self.dapps_settings.write().set_policy(match accounts { - None => NewDappsPolicy::AllAccounts { - default: current_default, - }, - Some(accounts) => NewDappsPolicy::Whitelist(accounts), - }); - Ok(()) - } - - /// Gets addresses of accounts exposed for unknown dapps. - /// `None` means that all accounts will be visible. - pub fn new_dapps_addresses(&self) -> Result>, Error> { - Ok(match self.dapps_settings.read().policy() { - NewDappsPolicy::AllAccounts { .. } => None, - NewDappsPolicy::Whitelist(accounts) => Some(accounts), - }) - } - - /// Sets a default account for unknown dapps. - /// This account will always be returned as the first one. - pub fn set_new_dapps_default_address(&self, address: Address) -> Result<(), Error> { - if !self.valid_addresses()?.contains(&address) { - return Err(SSError::InvalidAccount.into()); - } - - let mut settings = self.dapps_settings.write(); - let new_policy = match settings.policy() { - NewDappsPolicy::AllAccounts { .. } => NewDappsPolicy::AllAccounts { default: address }, - NewDappsPolicy::Whitelist(list) => NewDappsPolicy::Whitelist(Self::insert_default(list, address)), - }; - settings.set_policy(new_policy); - - Ok(()) - } - - /// Inserts given address as first in the vector, preventing duplicates. - fn insert_default(mut addresses: Vec
<Address>, default: Address) -> Vec<Address>
{ - if let Some(position) = addresses.iter().position(|address| address == &default) { - addresses.swap(0, position); - } else { - addresses.insert(0, default); - } - - addresses - } - - /// Returns a list of accounts that new dapp should see. - /// First account is always the default account. - fn new_dapps_addresses_list(&self) -> Result, Error> { - match self.dapps_settings.read().policy() { - NewDappsPolicy::AllAccounts { default } => if default.is_zero() { - self.accounts() - } else { - Ok(Self::insert_default(self.accounts()?, default)) - }, - NewDappsPolicy::Whitelist(accounts) => { - let addresses = self.filter_addresses(accounts)?; - if addresses.is_empty() { - Ok(vec![self.accounts()?.get(0).cloned().unwrap_or(0.into())]) - } else { - Ok(addresses) - } - }, - } - } - - /// Gets a default account for new dapps - /// Will return zero address in case the default is not set and there are no accounts configured. - pub fn new_dapps_default_address(&self) -> Result { - Ok(self.new_dapps_addresses_list()? - .get(0) - .cloned() - .unwrap_or(0.into()) - ) - } - - /// Gets a list of dapps recently requesting accounts. - pub fn recent_dapps(&self) -> Result, Error> { - Ok(self.dapps_settings.read().recent_dapps()) - } - - /// Marks dapp as recently used. - pub fn note_dapp_used(&self, dapp: DappId) -> Result<(), Error> { - let mut dapps = self.dapps_settings.write(); - dapps.mark_dapp_used(dapp.clone()); - Ok(()) - } - - /// Gets addresses visible for given dapp. - pub fn dapp_addresses(&self, dapp: DappId) -> Result, Error> { - let accounts = self.dapps_settings.read().settings().get(&dapp).map(|settings| { - (settings.accounts.clone(), settings.default.clone()) - }); - - match accounts { - Some((Some(accounts), Some(default))) => self.filter_addresses(Self::insert_default(accounts, default)), - Some((Some(accounts), None)) => self.filter_addresses(accounts), - Some((None, Some(default))) => self.filter_addresses(Self::insert_default(self.new_dapps_addresses_list()?, default)), - _ => self.new_dapps_addresses_list(), - } - } - - /// Returns default account for particular dapp falling back to other allowed accounts if necessary. - pub fn dapp_default_address(&self, dapp: DappId) -> Result { - let dapp_default = self.dapp_addresses(dapp)? - .get(0) - .cloned(); - - match dapp_default { - Some(default) => Ok(default), - None => self.new_dapps_default_address(), - } - } - - /// Sets default address for given dapp. - /// Does not alter dapp addresses, but this account will always be returned as the first one. - pub fn set_dapp_default_address(&self, dapp: DappId, address: Address) -> Result<(), Error> { - if !self.valid_addresses()?.contains(&address) { - return Err(SSError::InvalidAccount.into()); - } - - self.dapps_settings.write().set_default(dapp, address); - Ok(()) - } - - /// Sets addresses visible for given dapp. - /// If `None` - falls back to dapps addresses - /// If not `None` and not empty it will also override default account. 
- pub fn set_dapp_addresses(&self, dapp: DappId, addresses: Option>) -> Result<(), Error> { - let (addresses, default) = match addresses { - Some(addresses) => { - let addresses = self.filter_addresses(addresses)?; - let default = addresses.get(0).cloned(); - (Some(addresses), default) - }, - None => (None, None), - }; - - let mut settings = self.dapps_settings.write(); - if let Some(default) = default { - settings.set_default(dapp.clone(), default); - } - settings.set_accounts(dapp, addresses); - Ok(()) - } - - fn valid_addresses(&self) -> Result, Error> { - Ok(self.addresses_info().into_iter() - .map(|(address, _)| address) - .chain(self.accounts()?) - .collect()) - } - - /// Removes addresses that are neither accounts nor in address book. - fn filter_addresses(&self, addresses: Vec
) -> Result, Error> { - let valid = self.valid_addresses()?; - - Ok(addresses.into_iter() - .filter(|a| valid.contains(&a)) - .collect() - ) - } - /// Returns each address along with metadata. pub fn addresses_info(&self) -> HashMap { self.address_book.read().get() @@ -849,7 +667,7 @@ impl AccountProvider { #[cfg(test)] mod tests { - use super::{AccountProvider, Unlock, DappId}; + use super::{AccountProvider, Unlock}; use std::time::{Duration, Instant}; use ethstore::ethkey::{Generator, Random, Address}; use ethstore::{StoreAccountRef, Derivation}; @@ -977,96 +795,6 @@ mod tests { assert!(ap.sign_with_token(kp.address(), token, Default::default()).is_err(), "Second usage of the same token should fail."); } - #[test] - fn should_reset_dapp_addresses_to_default() { - // given - let ap = AccountProvider::transient_provider(); - let app = DappId("app1".into()); - // add accounts to address book - ap.set_address_name(1.into(), "1".into()); - ap.set_address_name(2.into(), "2".into()); - // set `AllAccounts` policy - ap.set_new_dapps_addresses(Some(vec![1.into(), 2.into()])).unwrap(); - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![1.into(), 2.into()]); - - // Alter and check - ap.set_dapp_addresses(app.clone(), Some(vec![1.into(), 3.into()])).unwrap(); - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![1.into()]); - - // Reset back to default - ap.set_dapp_addresses(app.clone(), None).unwrap(); - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![1.into(), 2.into()]); - } - - #[test] - fn should_set_dapps_default_address() { - // given - let ap = AccountProvider::transient_provider(); - let app = DappId("app1".into()); - // set `AllAccounts` policy - ap.set_new_dapps_addresses(None).unwrap(); - // add accounts to address book - ap.set_address_name(1.into(), "1".into()); - ap.set_address_name(2.into(), "2".into()); - - ap.set_dapp_addresses(app.clone(), Some(vec![1.into(), 2.into(), 3.into()])).unwrap(); - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![1.into(), 2.into()]); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), 1.into()); - - // when setting empty list - ap.set_dapp_addresses(app.clone(), Some(vec![])).unwrap(); - - // then default account is intact - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![1.into()]); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), 1.into()); - - // alter default account - ap.set_dapp_default_address("app1".into(), 2.into()).unwrap(); - assert_eq!(ap.dapp_addresses(app.clone()).unwrap(), vec![2.into()]); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), 2.into()); - } - - #[test] - fn should_set_dapps_policy_and_default_account() { - // given - let ap = AccountProvider::transient_provider(); - - // default_account should be always available - assert_eq!(ap.new_dapps_default_address().unwrap(), 0.into()); - - let address = ap.new_account(&"test".into()).unwrap(); - ap.set_address_name(1.into(), "1".into()); - - // Default account set to first account by default - assert_eq!(ap.new_dapps_default_address().unwrap(), address); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), address); - - // Even when returning nothing - ap.set_new_dapps_addresses(Some(vec![])).unwrap(); - // Default account is still returned - assert_eq!(ap.dapp_addresses("app1".into()).unwrap(), vec![address]); - - // change to all - ap.set_new_dapps_addresses(None).unwrap(); - assert_eq!(ap.dapp_addresses("app1".into()).unwrap(), vec![address]); - - // change to non-existent account - 
ap.set_new_dapps_addresses(Some(vec![2.into()])).unwrap(); - assert_eq!(ap.dapp_addresses("app1".into()).unwrap(), vec![address]); - - // change to a addresses - ap.set_new_dapps_addresses(Some(vec![1.into()])).unwrap(); - assert_eq!(ap.dapp_addresses("app1".into()).unwrap(), vec![1.into()]); - - // it overrides default account - assert_eq!(ap.new_dapps_default_address().unwrap(), 1.into()); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), 1.into()); - - ap.set_new_dapps_default_address(address).unwrap(); - assert_eq!(ap.new_dapps_default_address().unwrap(), address); - assert_eq!(ap.dapp_default_address("app1".into()).unwrap(), address); - } - #[test] fn should_not_return_blacklisted_account() { // given diff --git a/ethcore/src/account_provider/stores.rs b/ethcore/src/account_provider/stores.rs index d7725deb7..7124e91e2 100644 --- a/ethcore/src/account_provider/stores.rs +++ b/ethcore/src/account_provider/stores.rs @@ -14,21 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Address Book and Dapps Settings Store +//! Address Book Store use std::{fs, fmt, hash, ops}; -use std::sync::atomic::{self, AtomicUsize}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use ethstore::ethkey::Address; -use ethjson::misc::{ - AccountMeta, - DappsSettings as JsonSettings, - DappsHistory as JsonDappsHistory, - NewDappsPolicy as JsonNewDappsPolicy, -}; -use account_provider::DappId; +use ethjson::misc::AccountMeta; /// Disk-backed map from Address to String. Uses JSON. pub struct AddressBook { @@ -88,214 +81,6 @@ impl AddressBook { } } -/// Dapps user settings -#[derive(Debug, Default, Clone, Eq, PartialEq)] -pub struct DappsSettings { - /// A list of visible accounts - pub accounts: Option>, - /// Default account - pub default: Option
, -} - -impl From for DappsSettings { - fn from(s: JsonSettings) -> Self { - DappsSettings { - accounts: s.accounts.map(|accounts| accounts.into_iter().map(Into::into).collect()), - default: s.default.map(Into::into), - } - } -} - -impl From for JsonSettings { - fn from(s: DappsSettings) -> Self { - JsonSettings { - accounts: s.accounts.map(|accounts| accounts.into_iter().map(Into::into).collect()), - default: s.default.map(Into::into), - } - } -} - -/// Dapps user settings -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum NewDappsPolicy { - AllAccounts { - default: Address, - }, - Whitelist(Vec
), -} - -impl From for NewDappsPolicy { - fn from(s: JsonNewDappsPolicy) -> Self { - match s { - JsonNewDappsPolicy::AllAccounts { default } => NewDappsPolicy::AllAccounts { - default: default.into(), - }, - JsonNewDappsPolicy::Whitelist(accounts) => NewDappsPolicy::Whitelist( - accounts.into_iter().map(Into::into).collect() - ), - } - } -} - -impl From for JsonNewDappsPolicy { - fn from(s: NewDappsPolicy) -> Self { - match s { - NewDappsPolicy::AllAccounts { default } => JsonNewDappsPolicy::AllAccounts { - default: default.into(), - }, - NewDappsPolicy::Whitelist(accounts) => JsonNewDappsPolicy::Whitelist( - accounts.into_iter().map(Into::into).collect() - ), - } - } -} - -/// Transient dapps data -#[derive(Default, Debug, Clone, Eq, PartialEq)] -pub struct TransientDappsData { - /// Timestamp of last access - pub last_accessed: u64, -} - -impl From for TransientDappsData { - fn from(s: JsonDappsHistory) -> Self { - TransientDappsData { - last_accessed: s.last_accessed, - } - } -} - -impl From for JsonDappsHistory { - fn from(s: TransientDappsData) -> Self { - JsonDappsHistory { - last_accessed: s.last_accessed, - } - } -} - -enum TimeProvider { - Clock, - Incremenetal(AtomicUsize) -} - -impl TimeProvider { - fn get(&self) -> u64 { - match *self { - TimeProvider::Clock => { - ::std::time::UNIX_EPOCH.elapsed() - .expect("Correct time is required to be set") - .as_secs() - - }, - TimeProvider::Incremenetal(ref time) => { - time.fetch_add(1, atomic::Ordering::SeqCst) as u64 - }, - } - } -} - -const MAX_RECENT_DAPPS: usize = 50; - -/// Disk-backed map from DappId to Settings. Uses JSON. -pub struct DappsSettingsStore { - /// Dapps Settings - settings: DiskMap, - /// New Dapps Policy - policy: DiskMap, - /// Transient Data of recently Accessed Dapps - history: DiskMap, - /// Time - time: TimeProvider, -} - -impl DappsSettingsStore { - /// Creates new store at given directory path. - pub fn new(path: &Path) -> Self { - let mut r = DappsSettingsStore { - settings: DiskMap::new(path, "dapps_accounts.json".into()), - policy: DiskMap::new(path, "dapps_policy.json".into()), - history: DiskMap::new(path, "dapps_history.json".into()), - time: TimeProvider::Clock, - }; - r.settings.revert(JsonSettings::read); - r.policy.revert(JsonNewDappsPolicy::read); - r.history.revert(JsonDappsHistory::read); - r - } - - /// Creates transient store (no changes are saved to disk). 
- pub fn transient() -> Self { - DappsSettingsStore { - settings: DiskMap::transient(), - policy: DiskMap::transient(), - history: DiskMap::transient(), - time: TimeProvider::Incremenetal(AtomicUsize::new(1)), - } - } - - /// Get copy of the dapps settings - pub fn settings(&self) -> HashMap { - self.settings.clone() - } - - /// Returns current new dapps policy - pub fn policy(&self) -> NewDappsPolicy { - self.policy.get("default").cloned().unwrap_or(NewDappsPolicy::AllAccounts { - default: 0.into(), - }) - } - - /// Returns recent dapps with last accessed timestamp - pub fn recent_dapps(&self) -> HashMap { - self.history.iter().map(|(k, v)| (k.clone(), v.last_accessed)).collect() - } - - /// Marks recent dapp as used - pub fn mark_dapp_used(&mut self, dapp: DappId) { - { - let entry = self.history.entry(dapp).or_insert_with(|| Default::default()); - entry.last_accessed = self.time.get(); - } - // Clear extraneous entries - while self.history.len() > MAX_RECENT_DAPPS { - let min = self.history.iter() - .min_by_key(|&(_, ref v)| v.last_accessed) - .map(|(ref k, _)| k.clone()) - .cloned(); - - match min { - Some(k) => self.history.remove(&k), - None => break, - }; - } - self.history.save(JsonDappsHistory::write); - } - - /// Sets current new dapps policy - pub fn set_policy(&mut self, policy: NewDappsPolicy) { - self.policy.insert("default".into(), policy); - self.policy.save(JsonNewDappsPolicy::write); - } - - /// Sets accounts for specific dapp. - pub fn set_accounts(&mut self, id: DappId, accounts: Option>) { - { - let settings = self.settings.entry(id).or_insert_with(DappsSettings::default); - settings.accounts = accounts; - } - self.settings.save(JsonSettings::write); - } - - /// Sets a default account for specific dapp. - pub fn set_default(&mut self, id: DappId, default: Address) { - { - let settings = self.settings.entry(id).or_insert_with(DappsSettings::default); - settings.default = Some(default); - } - self.settings.save(JsonSettings::write); - } -} - /// Disk-serializable HashMap #[derive(Debug)] struct DiskMap { @@ -366,8 +151,7 @@ impl DiskMap { #[cfg(test)] mod tests { - use super::{AddressBook, DappsSettingsStore, DappsSettings, NewDappsPolicy}; - use account_provider::DappId; + use super::AddressBook; use std::collections::HashMap; use ethjson::misc::AccountMeta; use tempdir::TempDir; @@ -398,63 +182,4 @@ mod tests { 3.into() => AccountMeta{name: "Three".to_owned(), meta: "{}".to_owned(), uuid: None} ]); } - - #[test] - fn should_save_and_reload_dapps_settings() { - // given - let tempdir = TempDir::new("").unwrap(); - let mut b = DappsSettingsStore::new(tempdir.path()); - - // when - b.set_accounts("dappOne".into(), Some(vec![1.into(), 2.into()])); - - // then - let b = DappsSettingsStore::new(tempdir.path()); - assert_eq!(b.settings(), hash_map![ - "dappOne".into() => DappsSettings { - accounts: Some(vec![1.into(), 2.into()]), - default: None, - } - ]); - } - - #[test] - fn should_maintain_a_map_of_recent_dapps() { - let mut store = DappsSettingsStore::transient(); - assert!(store.recent_dapps().is_empty(), "Initially recent dapps should be empty."); - - let dapp1: DappId = "dapp1".into(); - let dapp2: DappId = "dapp2".into(); - store.mark_dapp_used(dapp1.clone()); - let recent = store.recent_dapps(); - assert_eq!(recent.len(), 1); - assert_eq!(recent.get(&dapp1), Some(&1)); - - store.mark_dapp_used(dapp2.clone()); - let recent = store.recent_dapps(); - assert_eq!(recent.len(), 2); - assert_eq!(recent.get(&dapp1), Some(&1)); - assert_eq!(recent.get(&dapp2), Some(&2)); - } 
- - #[test] - fn should_store_dapps_policy() { - // given - let tempdir = TempDir::new("").unwrap(); - let mut store = DappsSettingsStore::new(tempdir.path()); - - // Test default policy - assert_eq!(store.policy(), NewDappsPolicy::AllAccounts { - default: 0.into(), - }); - - // when - store.set_policy(NewDappsPolicy::Whitelist(vec![1.into(), 2.into()])); - - // then - let store = DappsSettingsStore::new(tempdir.path()); - assert_eq!(store.policy.clone(), hash_map![ - "default".into() => NewDappsPolicy::Whitelist(vec![1.into(), 2.into()]) - ]); - } } diff --git a/json/src/misc/dapps_settings.rs b/json/src/misc/dapps_settings.rs deleted file mode 100644 index f59f5f1cf..000000000 --- a/json/src/misc/dapps_settings.rs +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2015-2018 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Dapps settings de/serialization. - -use hash; - -/// Settings for specific dapp. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct DappsSettings { - /// A list of accounts this Dapp can see. - pub accounts: Option>, - /// Default account - pub default: Option, -} - -impl_serialization!(String => DappsSettings); - -/// History for specific dapp. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct DappsHistory { - /// Last accessed timestamp - pub last_accessed: u64, -} - -impl_serialization!(String => DappsHistory); - -/// Accounts policy for new dapps. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub enum NewDappsPolicy { - /// All accounts are exposed by default. - AllAccounts { - /// Default account, which should be returned as the first one. - default: hash::Address, - }, - /// Only accounts listed here are exposed by default for new dapps. - Whitelist(Vec), -} - -impl_serialization!(String => NewDappsPolicy); diff --git a/json/src/misc/mod.rs b/json/src/misc/mod.rs index 836094f0c..ae7dd80e1 100644 --- a/json/src/misc/mod.rs +++ b/json/src/misc/mod.rs @@ -48,7 +48,5 @@ macro_rules! impl_serialization { } mod account_meta; -mod dapps_settings; -pub use self::dapps_settings::{DappsSettings, DappsHistory, NewDappsPolicy}; pub use self::account_meta::AccountMeta; diff --git a/parity/lib.rs b/parity/lib.rs index 93edd7498..84cacf07e 100644 --- a/parity/lib.rs +++ b/parity/lib.rs @@ -79,9 +79,6 @@ extern crate log as rlog; #[cfg(feature = "secretstore")] extern crate ethcore_secretstore; -#[cfg(feature = "dapps")] -extern crate parity_dapps; - #[cfg(test)] #[macro_use] extern crate pretty_assertions; diff --git a/rpc/src/http_common.rs b/rpc/src/http_common.rs index 8296720b2..47717f313 100644 --- a/rpc/src/http_common.rs +++ b/rpc/src/http_common.rs @@ -25,7 +25,7 @@ pub trait HttpMetaExtractor: Send + Sync + 'static { /// Type of Metadata type Metadata: jsonrpc_core::Metadata; /// Extracts metadata from given params. 
- fn read_metadata(&self, origin: Option, user_agent: Option, dapps_origin: Option) -> Self::Metadata; + fn read_metadata(&self, origin: Option, user_agent: Option) -> Self::Metadata; } pub struct MetaExtractor { @@ -49,7 +49,6 @@ impl http::MetaExtractor for MetaExtractor where let origin = as_string(req.headers().get_raw("origin")); let user_agent = as_string(req.headers().get_raw("user-agent")); - let dapps_origin = as_string(req.headers().get_raw("x-parity-origin")); - self.extractor.read_metadata(origin, user_agent, dapps_origin) + self.extractor.read_metadata(origin, user_agent) } } diff --git a/rpc/src/tests/rpc.rs b/rpc/src/tests/rpc.rs index d15aeca6c..fd515ea3a 100644 --- a/rpc/src/tests/rpc.rs +++ b/rpc/src/tests/rpc.rs @@ -73,7 +73,7 @@ mod testsing { // when let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "34\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown via RPC\",\"id\":1}\n\n0\n\n"; + let expected = "4B\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / unknown agent via RPC\",\"id\":1}\n\n0\n\n"; let res = request(server, &format!("\ POST / HTTP/1.1\r\n\ @@ -98,7 +98,7 @@ mod testsing { // when let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "38\n{\"jsonrpc\":\"2.0\",\"result\":\"curl/7.16.3 via RPC\",\"id\":1}\n\n0\n\n"; + let expected = "49\n{\"jsonrpc\":\"2.0\",\"result\":\"unknown origin / curl/7.16.3 via RPC\",\"id\":1}\n\n0\n\n"; let res = request(server, &format!("\ POST / HTTP/1.1\r\n\ @@ -116,59 +116,4 @@ mod testsing { res.assert_status("HTTP/1.1 200 OK"); assert_eq!(res.body, expected); } - - #[test] - fn should_extract_dapp_origin() { - // given - let (server, address) = serve(); - - // when - let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "3A\n{\"jsonrpc\":\"2.0\",\"result\":\"Dapp http://parity.io\",\"id\":1}\n\n0\n\n"; - let res = request(server, - &format!("\ - POST / HTTP/1.1\r\n\ - Host: {}\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - Origin: http://parity.io\r\n\ - Connection: close\r\n\ - User-Agent: curl/7.16.3\r\n\ - \r\n\ - {} - ", address, req.len(), req) - ); - - // then - res.assert_status("HTTP/1.1 200 OK"); - assert_eq!(res.body, expected); - } - - #[test] - fn should_extract_dapp_origin_from_extension() { - // given - let (server, address) = serve(); - - // when - let req = r#"{"method":"hello","params":[],"jsonrpc":"2.0","id":1}"#; - let expected = "44\n{\"jsonrpc\":\"2.0\",\"result\":\"Dapp http://wallet.ethereum.org\",\"id\":1}\n\n0\n\n"; - let res = request(server, - &format!("\ - POST / HTTP/1.1\r\n\ - Host: {}\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - Origin: null\r\n\ - X-Parity-Origin: http://wallet.ethereum.org\r\n\ - Connection: close\r\n\ - User-Agent: curl/7.16.3\r\n\ - \r\n\ - {} - ", address, req.len(), req) - ); - - // then - res.assert_status("HTTP/1.1 200 OK"); - assert_eq!(res.body, expected); - } } diff --git a/rpc/src/v1/extractors.rs b/rpc/src/v1/extractors.rs index c69c41ddd..3406bb031 100644 --- a/rpc/src/v1/extractors.rs +++ b/rpc/src/v1/extractors.rs @@ -36,14 +36,13 @@ pub struct RpcExtractor; impl HttpMetaExtractor for RpcExtractor { type Metadata = Metadata; - fn read_metadata(&self, origin: Option, user_agent: Option, dapps_origin: Option) -> Metadata { + fn read_metadata(&self, origin: Option, user_agent: Option) -> Metadata { Metadata { - origin: match (origin.as_ref().map(|s| s.as_str()), user_agent, dapps_origin) { - (Some("null"), _, 
Some(dapp)) => Origin::Dapps(dapp.into()), - (Some(dapp), _, _) => Origin::Dapps(dapp.to_owned().into()), - (None, Some(service), _) => Origin::Rpc(service.into()), - (None, _, _) => Origin::Rpc("unknown".into()), - }, + origin: Origin::Rpc( + format!("{} / {}", + origin.unwrap_or("unknown origin".to_string()), + user_agent.unwrap_or("unknown agent".to_string())) + ), session: None, } } @@ -76,16 +75,15 @@ impl ws::MetaExtractor for WsExtractor { fn extract(&self, req: &ws::RequestContext) -> Metadata { let id = req.session_id as u64; - let dapp = req.origin.as_ref().map(|origin| (&**origin).into()).unwrap_or_default(); let origin = match self.authcodes_path { Some(ref path) => { let authorization = req.protocols.get(0).and_then(|p| auth_token_hash(&path, p, true)); match authorization { - Some(id) => Origin::Signer { session: id.into(), dapp: dapp }, - None => Origin::Ws { session: id.into(), dapp: dapp }, + Some(id) => Origin::Signer { session: id.into() }, + None => Origin::Ws { session: id.into() }, } }, - None => Origin::Ws { session: id.into(), dapp: dapp }, + None => Origin::Ws { session: id.into() }, }; let session = Some(Arc::new(Session::new(req.sender()))); Metadata { @@ -253,26 +251,13 @@ mod tests { let extractor = RpcExtractor; // when - let meta1 = extractor.read_metadata(None, None, None); - let meta2 = extractor.read_metadata(None, Some("http://parity.io".to_owned()), None); - let meta3 = extractor.read_metadata(None, Some("http://parity.io".to_owned()), Some("ignored".into())); + let meta1 = extractor.read_metadata(None, None); + let meta2 = extractor.read_metadata(None, Some("http://parity.io".to_owned())); + let meta3 = extractor.read_metadata(None, Some("http://parity.io".to_owned())); // then - assert_eq!(meta1.origin, Origin::Rpc("unknown".into())); - assert_eq!(meta2.origin, Origin::Rpc("http://parity.io".into())); - assert_eq!(meta3.origin, Origin::Rpc("http://parity.io".into())); - } - - #[test] - fn should_dapps_origin() { - // given - let extractor = RpcExtractor; - let dapp = "https://wallet.ethereum.org".to_owned(); - - // when - let meta = extractor.read_metadata(Some("null".into()), None, Some(dapp.clone())); - - // then - assert_eq!(meta.origin, Origin::Dapps(dapp.into())); + assert_eq!(meta1.origin, Origin::Rpc("unknown origin / unknown agent".into())); + assert_eq!(meta2.origin, Origin::Rpc("unknown origin / http://parity.io".into())); + assert_eq!(meta3.origin, Origin::Rpc("unknown origin / http://parity.io".into())); } } diff --git a/rpc/src/v1/helpers/fake_sign.rs b/rpc/src/v1/helpers/fake_sign.rs index fc6aaccdd..2ff54e481 100644 --- a/rpc/src/v1/helpers/fake_sign.rs +++ b/rpc/src/v1/helpers/fake_sign.rs @@ -16,18 +16,14 @@ use transaction::{Transaction, SignedTransaction, Action}; +use ethereum_types::U256; use jsonrpc_core::Error; use v1::helpers::CallRequest; -pub fn sign_call(request: CallRequest, gas_cap: bool) -> Result { - let max_gas = 50_000_000.into(); +pub fn sign_call(request: CallRequest) -> Result { + let max_gas = U256::from(50_000_000); let gas = match request.gas { - Some(gas) if gas_cap && gas > max_gas => { - warn!("Gas limit capped to {} (from {})", max_gas, gas); - max_gas - } Some(gas) => gas, - None if gas_cap => max_gas, None => max_gas * 10, }; let from = request.from.unwrap_or(0.into()); diff --git a/rpc/src/v1/helpers/signing_queue.rs b/rpc/src/v1/helpers/signing_queue.rs index 17b26b01e..9f31628a3 100644 --- a/rpc/src/v1/helpers/signing_queue.rs +++ b/rpc/src/v1/helpers/signing_queue.rs @@ -17,9 +17,8 @@ use 
std::collections::BTreeMap; use ethereum_types::{U256, Address}; use parking_lot::{Mutex, RwLock}; -use ethcore::account_provider::DappId; use v1::helpers::{ConfirmationRequest, ConfirmationPayload, oneshot, errors}; -use v1::types::{ConfirmationResponse, H160 as RpcH160, Origin, DappId as RpcDappId}; +use v1::types::{ConfirmationResponse, H160 as RpcH160, Origin}; use jsonrpc_core::Error; @@ -30,14 +29,6 @@ pub type ConfirmationResult = Result; pub enum DefaultAccount { /// Default account is known Provided(Address), - /// Should use default account for dapp - ForDapp(DappId), -} - -impl From for DefaultAccount { - fn from(dapp_id: RpcDappId) -> Self { - DefaultAccount::ForDapp(dapp_id.into()) - } } impl From for DefaultAccount { diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 84c0aa6ed..1e4ad87a5 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -21,11 +21,11 @@ use std::time::{Instant, Duration, SystemTime, UNIX_EPOCH}; use std::sync::Arc; use rlp::{self, Rlp}; -use ethereum_types::{U256, H64, H160, H256, Address}; +use ethereum_types::{U256, H64, H256, Address}; use parking_lot::Mutex; use ethash::SeedHashCompute; -use ethcore::account_provider::{AccountProvider, DappId}; +use ethcore::account_provider::AccountProvider; use ethcore::client::{BlockChainClient, BlockId, TransactionId, UncleId, StateOrBlock, StateClient, StateInfo, Call, EngineInfo}; use ethcore::ethereum::Ethash; use ethcore::filter::Filter as EthcoreFilter; @@ -398,13 +398,6 @@ impl EthClient Result> { - self.accounts - .note_dapp_used(dapp.clone()) - .and_then(|_| self.accounts.dapp_addresses(dapp)) - .map_err(|e| errors::account("Could not fetch accounts.", e)) - } - fn get_state(&self, number: BlockNumber) -> StateOrBlock { match number { BlockNumber::Num(num) => BlockId::Number(num).into(), @@ -507,12 +500,10 @@ impl Eth for EthClient< } } - fn author(&self, meta: Metadata) -> Result { - let dapp = meta.dapp_id(); - + fn author(&self) -> Result { let mut miner = self.miner.authoring_params().author; if miner == 0.into() { - miner = self.dapp_accounts(dapp.into())?.get(0).cloned().unwrap_or_default(); + miner = self.accounts.accounts().ok().and_then(|a| a.get(0).cloned()).unwrap_or_default(); } Ok(RpcH160::from(miner)) @@ -530,10 +521,9 @@ impl Eth for EthClient< Ok(RpcU256::from(default_gas_price(&*self.client, &*self.miner, self.options.gas_price_percentile))) } - fn accounts(&self, meta: Metadata) -> Result> { - let dapp = meta.dapp_id(); - - let accounts = self.dapp_accounts(dapp.into())?; + fn accounts(&self) -> Result> { + let accounts = self.accounts.accounts() + .map_err(|e| errors::account("Could not fetch accounts.", e))?; Ok(accounts.into_iter().map(Into::into).collect()) } @@ -835,9 +825,9 @@ impl Eth for EthClient< self.send_raw_transaction(raw) } - fn call(&self, meta: Self::Metadata, request: CallRequest, num: Trailing) -> BoxFuture { + fn call(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp())); + let signed = try_bf!(fake_sign::sign_call(request)); let num = num.unwrap_or_default(); @@ -875,9 +865,9 @@ impl Eth for EthClient< )) } - fn estimate_gas(&self, meta: Self::Metadata, request: CallRequest, num: Trailing) -> BoxFuture { + fn estimate_gas(&self, request: CallRequest, num: Trailing) -> BoxFuture { let request = CallRequest::into(request); - let signed = try_bf!(fake_sign::sign_call(request, meta.is_dapp())); + let signed = 
try_bf!(fake_sign::sign_call(request)); let num = num.unwrap_or_default(); let (state, header) = if num == BlockNumber::Pending { diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 042720c5c..de2170f12 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -29,7 +29,7 @@ use light::client::LightChainClient; use light::{cht, TransactionQueue}; use light::on_demand::{request, OnDemand}; -use ethcore::account_provider::{AccountProvider, DappId}; +use ethcore::account_provider::AccountProvider; use ethcore::encoded; use ethcore::filter::Filter as EthcoreFilter; use ethcore::ids::BlockId; @@ -251,7 +251,7 @@ impl Eth for EthClient { } } - fn author(&self, _meta: Self::Metadata) -> Result { + fn author(&self) -> Result { Ok(Default::default()) } @@ -270,12 +270,8 @@ impl Eth for EthClient { .unwrap_or_else(Default::default)) } - fn accounts(&self, meta: Metadata) -> Result> { - let dapp: DappId = meta.dapp_id().into(); - - self.accounts - .note_dapp_used(dapp.clone()) - .and_then(|_| self.accounts.dapp_addresses(dapp)) + fn accounts(&self) -> Result> { + self.accounts.accounts() .map_err(|e| errors::account("Could not fetch accounts.", e)) .map(|accs| accs.into_iter().map(Into::::into).collect()) } @@ -397,7 +393,7 @@ impl Eth for EthClient { self.send_raw_transaction(raw) } - fn call(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing) -> BoxFuture { + fn call(&self, req: CallRequest, num: Trailing) -> BoxFuture { Box::new(self.fetcher().proved_execution(req, num).and_then(|res| { match res { Ok(exec) => Ok(exec.output.into()), @@ -406,7 +402,7 @@ impl Eth for EthClient { })) } - fn estimate_gas(&self, _meta: Self::Metadata, req: CallRequest, num: Trailing) -> BoxFuture { + fn estimate_gas(&self, req: CallRequest, num: Trailing) -> BoxFuture { // TODO: binary chop for more accurate estimates. Box::new(self.fetcher().proved_execution(req, num).and_then(|res| { match res { diff --git a/rpc/src/v1/impls/light/parity.rs b/rpc/src/v1/impls/light/parity.rs index 2045a4a59..f74886e61 100644 --- a/rpc/src/v1/impls/light/parity.rs +++ b/rpc/src/v1/impls/light/parity.rs @@ -43,7 +43,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, DappId, ChainStatus, + OperationsInfo, ChainStatus, AccountInfo, HwAccountInfo, Header, RichHeader, }; use Host; @@ -99,13 +99,10 @@ impl ParityClient { impl Parity for ParityClient { type Metadata = Metadata; - fn accounts_info(&self, dapp: Trailing) -> Result> { - let dapp = dapp.unwrap_or_default(); - + fn accounts_info(&self) -> Result> { let store = &self.accounts; let dapp_accounts = store - .note_dapp_used(dapp.clone().into()) - .and_then(|_| store.dapp_addresses(dapp.into())) + .accounts() .map_err(|e| errors::account("Could not fetch accounts.", e))? .into_iter().collect::>(); @@ -136,10 +133,9 @@ impl Parity for ParityClient { Ok(store.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e))?) 
} - fn default_account(&self, meta: Self::Metadata) -> Result { - let dapp_id = meta.dapp_id(); + fn default_account(&self) -> Result { Ok(self.accounts - .dapp_addresses(dapp_id.into()) + .accounts() .ok() .and_then(|accounts| accounts.get(0).cloned()) .map(|acc| acc.into()) @@ -423,7 +419,7 @@ impl Parity for ParityClient { ipfs::cid(content) } - fn call(&self, _meta: Self::Metadata, _requests: Vec, _block: Trailing) -> Result> { + fn call(&self, _requests: Vec, _block: Trailing) -> Result> { Err(errors::light_unimplemented(None)) } } diff --git a/rpc/src/v1/impls/light/trace.rs b/rpc/src/v1/impls/light/trace.rs index 80cc63a40..483b19365 100644 --- a/rpc/src/v1/impls/light/trace.rs +++ b/rpc/src/v1/impls/light/trace.rs @@ -46,11 +46,11 @@ impl Traces for TracesClient { Err(errors::light_unimplemented(None)) } - fn call(&self, _meta: Self::Metadata, _request: CallRequest, _flags: TraceOptions, _block: Trailing) -> Result { + fn call(&self, _request: CallRequest, _flags: TraceOptions, _block: Trailing) -> Result { Err(errors::light_unimplemented(None)) } - fn call_many(&self, _meta: Self::Metadata, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing) -> Result> { + fn call_many(&self, _request: Vec<(CallRequest, TraceOptions)>, _block: Trailing) -> Result> { Err(errors::light_unimplemented(None)) } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 2bdf09df4..3e20d5821 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -44,7 +44,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, DappId, ChainStatus, + OperationsInfo, ChainStatus, AccountInfo, HwAccountInfo, RichHeader, block_number_to_id }; @@ -103,12 +103,8 @@ impl Parity for ParityClient where { type Metadata = Metadata; - fn accounts_info(&self, dapp: Trailing) -> Result> { - let dapp = dapp.unwrap_or_default(); - - let dapp_accounts = self.accounts - .note_dapp_used(dapp.clone().into()) - .and_then(|_| self.accounts.dapp_addresses(dapp.into())) + fn accounts_info(&self) -> Result> { + let dapp_accounts = self.accounts.accounts() .map_err(|e| errors::account("Could not fetch accounts.", e))? 
.into_iter().collect::>(); @@ -137,11 +133,8 @@ impl Parity for ParityClient where self.accounts.locked_hardware_accounts().map_err(|e| errors::account("Error communicating with hardware wallet.", e)) } - fn default_account(&self, meta: Self::Metadata) -> Result { - let dapp_id = meta.dapp_id(); - - Ok(self.accounts - .dapp_default_address(dapp_id.into()) + fn default_account(&self) -> Result { + Ok(self.accounts.default_account() .map(Into::into) .ok() .unwrap_or_default()) @@ -421,11 +414,11 @@ impl Parity for ParityClient where ipfs::cid(content) } - fn call(&self, meta: Self::Metadata, requests: Vec, num: Trailing) -> Result> { + fn call(&self, requests: Vec, num: Trailing) -> Result> { let requests = requests .into_iter() .map(|request| Ok(( - fake_sign::sign_call(request.into(), meta.is_dapp())?, + fake_sign::sign_call(request.into())?, Default::default() ))) .collect::>>()?; diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index f9be594ad..941a77796 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -25,7 +25,7 @@ use ethcore::account_provider::AccountProvider; use jsonrpc_core::Result; use v1::helpers::errors; use v1::traits::ParityAccounts; -use v1::types::{H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, DappId, Derive, DeriveHierarchical, DeriveHash, ExtAccountInfo}; +use v1::types::{H160 as RpcH160, H256 as RpcH256, H520 as RpcH520, Derive, DeriveHierarchical, DeriveHash, ExtAccountInfo}; use ethkey::Password; /// Account management (personal) rpc implementation. @@ -143,61 +143,6 @@ impl ParityAccounts for ParityAccountsClient { Ok(true) } - fn set_dapp_addresses(&self, dapp: DappId, addresses: Option>) -> Result { - self.accounts.set_dapp_addresses(dapp.into(), addresses.map(into_vec)) - .map_err(|e| errors::account("Couldn't set dapp addresses.", e)) - .map(|_| true) - } - - fn dapp_addresses(&self, dapp: DappId) -> Result> { - self.accounts.dapp_addresses(dapp.into()) - .map_err(|e| errors::account("Couldn't get dapp addresses.", e)) - .map(into_vec) - } - - fn set_dapp_default_address(&self, dapp: DappId, address: RpcH160) -> Result { - self.accounts.set_dapp_default_address(dapp.into(), address.into()) - .map_err(|e| errors::account("Couldn't set dapp default address.", e)) - .map(|_| true) - } - - fn dapp_default_address(&self, dapp: DappId) -> Result { - self.accounts.dapp_default_address(dapp.into()) - .map_err(|e| errors::account("Couldn't get dapp default address.", e)) - .map(Into::into) - } - - fn set_new_dapps_addresses(&self, addresses: Option>) -> Result { - self.accounts - .set_new_dapps_addresses(addresses.map(into_vec)) - .map_err(|e| errors::account("Couldn't set dapps addresses.", e)) - .map(|_| true) - } - - fn new_dapps_addresses(&self) -> Result>> { - self.accounts.new_dapps_addresses() - .map_err(|e| errors::account("Couldn't get dapps addresses.", e)) - .map(|accounts| accounts.map(into_vec)) - } - - fn set_new_dapps_default_address(&self, address: RpcH160) -> Result { - self.accounts.set_new_dapps_default_address(address.into()) - .map_err(|e| errors::account("Couldn't set new dapps default address.", e)) - .map(|_| true) - } - - fn new_dapps_default_address(&self) -> Result { - self.accounts.new_dapps_default_address() - .map_err(|e| errors::account("Couldn't get new dapps default address.", e)) - .map(Into::into) - } - - fn recent_dapps(&self) -> Result> { - self.accounts.recent_dapps() - .map_err(|e| errors::account("Couldn't get recent dapps.", e)) - .map(|map| 
map.into_iter().map(|(k, v)| (k.into(), v)).collect()) - } - fn import_geth_accounts(&self, addresses: Vec) -> Result> { self.accounts .import_geth_accounts(into_vec(addresses), false) diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index 8cb98a66b..9def02447 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -58,14 +58,14 @@ impl PersonalClient { } impl PersonalClient { - fn do_sign_transaction(&self, meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<(PendingTransaction, D)> { + fn do_sign_transaction(&self, _meta: Metadata, request: TransactionRequest, password: String) -> BoxFuture<(PendingTransaction, D)> { let dispatcher = self.dispatcher.clone(); let accounts = self.accounts.clone(); let default = match request.from.as_ref() { Some(account) => Ok(account.clone().into()), None => accounts - .dapp_default_address(meta.dapp_id().into()) + .default_account() .map_err(|e| errors::account("Cannot find default account.", e)), }; diff --git a/rpc/src/v1/impls/private.rs b/rpc/src/v1/impls/private.rs index a1110eed1..247fb1aa1 100644 --- a/rpc/src/v1/impls/private.rs +++ b/rpc/src/v1/impls/private.rs @@ -100,14 +100,14 @@ impl Private for PrivateClient { }) } - fn private_call(&self, meta: Self::Metadata, block_number: BlockNumber, request: CallRequest) -> Result { + fn private_call(&self, block_number: BlockNumber, request: CallRequest) -> Result { let id = match block_number { BlockNumber::Pending => return Err(errors::private_message_block_id_not_supported()), num => block_number_to_id(num) }; let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request, meta.is_dapp())?; + let signed = fake_sign::sign_call(request)?; let client = self.unwrap_manager()?; let executed_result = client.private_call(id, &signed).map_err(|e| errors::private_message(e))?; Ok(executed_result.output.into()) diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index b22bbc80d..d16715353 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -112,7 +112,6 @@ impl SigningQueueClient { let accounts = self.accounts.clone(); let default_account = match default_account { DefaultAccount::Provided(acc) => acc, - DefaultAccount::ForDapp(dapp) => accounts.dapp_default_address(dapp).ok().unwrap_or_default(), }; let dispatcher = self.dispatcher.clone(); @@ -138,8 +137,8 @@ impl SigningQueueClient { impl ParitySigning for SigningQueueClient { type Metadata = Metadata; - fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { - let default_account = self.accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); + fn compose_transaction(&self, _meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { + let default_account = self.accounts.default_account().ok().unwrap_or_default(); Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) } @@ -164,7 +163,7 @@ impl ParitySigning for SigningQueueClient { let remote = self.remote.clone(); let confirmations = self.confirmations.clone(); - Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into(), meta.origin) + Box::new(self.dispatch(RpcConfirmationPayload::SendTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin) .map(|result| match result { DispatchResult::Value(v) => RpcEither::Or(v), DispatchResult::Future(id, future) 
=> { @@ -221,7 +220,7 @@ impl EthSigning for SigningQueueClient { fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { let res = self.dispatch( RpcConfirmationPayload::SendTransaction(request), - meta.dapp_id().into(), + DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin, ); @@ -236,7 +235,7 @@ impl EthSigning for SigningQueueClient { fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { let res = self.dispatch( RpcConfirmationPayload::SignTransaction(request), - meta.dapp_id().into(), + DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default()), meta.origin, ); diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/rpc/src/v1/impls/signing_unsafe.rs index 6016cbbfc..c4f2117a5 100644 --- a/rpc/src/v1/impls/signing_unsafe.rs +++ b/rpc/src/v1/impls/signing_unsafe.rs @@ -55,7 +55,6 @@ impl SigningUnsafeClient { let accounts = self.accounts.clone(); let default = match account { DefaultAccount::Provided(acc) => acc, - DefaultAccount::ForDapp(dapp) => accounts.dapp_default_address(dapp).ok().unwrap_or_default(), }; let dis = self.dispatcher.clone(); @@ -80,8 +79,8 @@ impl EthSigning for SigningUnsafeClient })) } - fn send_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - Box::new(self.handle(RpcConfirmationPayload::SendTransaction(request), meta.dapp_id().into()) + fn send_transaction(&self, _meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { + Box::new(self.handle(RpcConfirmationPayload::SendTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default())) .then(|res| match res { Ok(RpcConfirmationResponse::SendTransaction(hash)) => Ok(hash), Err(e) => Err(e), @@ -89,8 +88,8 @@ impl EthSigning for SigningUnsafeClient })) } - fn sign_transaction(&self, meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { - Box::new(self.handle(RpcConfirmationPayload::SignTransaction(request), meta.dapp_id().into()) + fn sign_transaction(&self, _meta: Metadata, request: RpcTransactionRequest) -> BoxFuture { + Box::new(self.handle(RpcConfirmationPayload::SignTransaction(request), DefaultAccount::Provided(self.accounts.default_account().ok().unwrap_or_default())) .then(|res| match res { Ok(RpcConfirmationResponse::SignTransaction(tx)) => Ok(tx), Err(e) => Err(e), @@ -102,9 +101,9 @@ impl EthSigning for SigningUnsafeClient impl ParitySigning for SigningUnsafeClient { type Metadata = Metadata; - fn compose_transaction(&self, meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { + fn compose_transaction(&self, _meta: Metadata, transaction: RpcTransactionRequest) -> BoxFuture { let accounts = self.accounts.clone(); - let default_account = accounts.dapp_default_address(meta.dapp_id().into()).ok().unwrap_or_default(); + let default_account = accounts.default_account().ok().unwrap_or_default(); Box::new(self.dispatcher.fill_optional_fields(transaction.into(), default_account, true).map(Into::into)) } diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index ee7d7154c..3abddd2f9 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -87,11 +87,11 @@ impl Traces for TracesClient where .map(LocalizedTrace::from)) } - fn call(&self, meta: Self::Metadata, request: CallRequest, flags: TraceOptions, block: Trailing) -> Result { + fn call(&self, request: CallRequest, flags: TraceOptions, block: Trailing) -> Result { let block = 
block.unwrap_or_default(); let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request, meta.is_dapp())?; + let signed = fake_sign::sign_call(request)?; let id = match block { BlockNumber::Num(num) => BlockId::Number(num), @@ -109,13 +109,13 @@ impl Traces for TracesClient where .map_err(errors::call) } - fn call_many(&self, meta: Self::Metadata, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing) -> Result> { + fn call_many(&self, requests: Vec<(CallRequest, TraceOptions)>, block: Trailing) -> Result> { let block = block.unwrap_or_default(); let requests = requests.into_iter() .map(|(request, flags)| { let request = CallRequest::into(request); - let signed = fake_sign::sign_call(request, meta.is_dapp())?; + let signed = fake_sign::sign_call(request)?; Ok((signed, to_call_analytics(flags))) }) .collect::>>()?; diff --git a/rpc/src/v1/metadata.rs b/rpc/src/v1/metadata.rs index 970ec60e4..10486f496 100644 --- a/rpc/src/v1/metadata.rs +++ b/rpc/src/v1/metadata.rs @@ -20,7 +20,7 @@ use std::sync::Arc; use jsonrpc_core; use jsonrpc_pubsub::{Session, PubSubMetadata}; -use v1::types::{DappId, Origin}; +use v1::types::Origin; /// RPC methods metadata. #[derive(Clone, Default, Debug)] @@ -31,28 +31,6 @@ pub struct Metadata { pub session: Option>, } -impl Metadata { - /// Returns dapp id if this request is coming from a Dapp or default `DappId` otherwise. - pub fn dapp_id(&self) -> DappId { - // TODO [ToDr] Extract dapp info from Ws connections. - match self.origin { - Origin::Dapps(ref dapp) => dapp.clone(), - Origin::Ws { ref dapp, .. } => dapp.clone(), - Origin::Signer { ref dapp, .. } => dapp.clone(), - _ => DappId::default(), - } - } - - /// Returns true if the request originates from a Dapp. - pub fn is_dapp(&self) -> bool { - if let Origin::Dapps(_) = self.origin { - true - } else { - false - } - } -} - impl jsonrpc_core::Metadata for Metadata {} impl PubSubMetadata for Metadata { fn session(&self) -> Option> { diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 7213ad63d..602621194 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -39,7 +39,6 @@ use v1::helpers::nonce; use v1::helpers::dispatch::FullDispatcher; use v1::tests::helpers::{TestSyncProvider, Config, TestMinerService, TestSnapshotService}; use v1::metadata::Metadata; -use v1::types::Origin; fn blockchain_client() -> Arc { let client = TestBlockChainClient::new(); @@ -395,7 +394,6 @@ fn rpc_eth_gas_price() { fn rpc_eth_accounts() { let tester = EthTester::default(); let address = tester.accounts_provider.new_account(&"".into()).unwrap(); - tester.accounts_provider.set_new_dapps_addresses(None).unwrap(); tester.accounts_provider.set_address_name(1.into(), "1".into()); tester.accounts_provider.set_address_name(10.into(), "10".into()); @@ -403,20 +401,6 @@ fn rpc_eth_accounts() { let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + &format!("0x{:x}", address) + r#""],"id":1}"#; assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - tester.accounts_provider.set_new_dapps_addresses(Some(vec![1.into()])).unwrap(); - // even with some account it should return empty list (no dapp detected) - let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; - 
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // when we add visible address it should return that. - tester.accounts_provider.set_dapp_addresses("app1".into(), Some(vec![10.into()])).unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["0x000000000000000000000000000000000000000a"],"id":1}"#; - let mut meta = Metadata::default(); - meta.origin = Origin::Dapps("app1".into()); - assert_eq!((*tester.io).handle_request_sync(request, meta), Some(response.to_owned())); } #[test] diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index 300badd74..68251d30b 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -115,13 +115,6 @@ fn rpc_parity_accounts_info() { let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#; let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"Test\"}}}},\"id\":1}}", address); assert_eq!(io.handle_request_sync(request), Some(response)); - - // Change the whitelist - let address = Address::from(1); - deps.accounts.set_new_dapps_addresses(Some(vec![address.clone()])).unwrap(); - let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#; - let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"XX\"}}}},\"id\":1}}", address); - assert_eq!(io.handle_request_sync(request), Some(response)); } #[test] diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index 699ba01f8..4a1f72173 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -121,106 +121,6 @@ fn should_be_able_to_set_meta() { assert_eq!(res, Some(response)); } -#[test] -fn rpc_parity_set_and_get_dapps_accounts() { - // given - let tester = setup(); - tester.accounts.set_address_name(10.into(), "10".into()); - assert_eq!(tester.accounts.dapp_addresses("app1".into()).unwrap(), vec![]); - - // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_setDappAddresses","params":["app1",["0x000000000000000000000000000000000000000a","0x0000000000000000000000000000000000000001"]], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // then - assert_eq!(tester.accounts.dapp_addresses("app1".into()).unwrap(), vec![10.into()]); - let request = r#"{"jsonrpc": "2.0", "method": "parity_getDappAddresses","params":["app1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["0x000000000000000000000000000000000000000a"],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); -} - -#[test] -fn rpc_parity_set_and_get_dapp_default_address() { - // given - let tester = setup(); - tester.accounts.set_address_name(10.into(), "10".into()); - assert_eq!(tester.accounts.dapp_addresses("app1".into()).unwrap(), vec![]); - - // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_setDappDefaultAddress","params":["app1", "0x000000000000000000000000000000000000000a"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // then - assert_eq!(tester.accounts.dapp_addresses("app1".into()).unwrap(), vec![10.into()]); - let request = r#"{"jsonrpc": "2.0", "method": 
"parity_getDappDefaultAddress","params":["app1"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x000000000000000000000000000000000000000a","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); -} - -#[test] -fn rpc_parity_set_and_get_new_dapps_whitelist() { - // given - let tester = setup(); - - // when set to whitelist - let request = r#"{"jsonrpc": "2.0", "method": "parity_setNewDappsAddresses","params":[["0x000000000000000000000000000000000000000a"]], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // then - assert_eq!(tester.accounts.new_dapps_addresses().unwrap(), Some(vec![10.into()])); - let request = r#"{"jsonrpc": "2.0", "method": "parity_getNewDappsAddresses","params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":["0x000000000000000000000000000000000000000a"],"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // when set to empty - let request = r#"{"jsonrpc": "2.0", "method": "parity_setNewDappsAddresses","params":[null], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // then - assert_eq!(tester.accounts.new_dapps_addresses().unwrap(), None); - let request = r#"{"jsonrpc": "2.0", "method": "parity_getNewDappsAddresses","params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); -} - -#[test] -fn rpc_parity_set_and_get_new_dapps_default_address() { - // given - let tester = setup(); - tester.accounts.set_address_name(10.into(), "10".into()); - assert_eq!(tester.accounts.new_dapps_default_address().unwrap(), 0.into()); - - // when - let request = r#"{"jsonrpc": "2.0", "method": "parity_setNewDappsDefaultAddress","params":["0x000000000000000000000000000000000000000a"], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); - - // then - assert_eq!(tester.accounts.new_dapps_default_address().unwrap(), 10.into()); - let request = r#"{"jsonrpc": "2.0", "method": "parity_getNewDappsDefaultAddress","params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":"0x000000000000000000000000000000000000000a","id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); -} - -#[test] -fn rpc_parity_recent_dapps() { - // given - let tester = setup(); - - // when - // trigger dapp usage - tester.accounts.note_dapp_used("dapp1".into()).unwrap(); - - // then - let request = r#"{"jsonrpc": "2.0", "method": "parity_listRecentDapps","params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"dapp1":1},"id":1}"#; - assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); -} - #[test] fn should_be_able_to_kill_account() { let tester = setup(); diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index 52d5f7d8d..430fb4fc2 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -89,14 +89,14 @@ fn should_return_list_of_items_to_confirm() { data: vec![], nonce: None, condition: None, - }), Origin::Dapps("http://parity.io".into())).unwrap(); + }), Origin::Unknown).unwrap(); let _sign_future = 
tester.signer.add_request(ConfirmationPayload::EthSignMessage(1.into(), vec![5].into()), Origin::Unknown).unwrap(); // when let request = r#"{"jsonrpc":"2.0","method":"signer_requestsToConfirm","params":[],"id":1}"#; let response = concat!( r#"{"jsonrpc":"2.0","result":["#, - r#"{"id":"0x1","origin":{"dapp":"http://parity.io"},"payload":{"sendTransaction":{"condition":null,"data":"0x","from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x1"}}},"#, + r#"{"id":"0x1","origin":"unknown","payload":{"sendTransaction":{"condition":null,"data":"0x","from":"0x0000000000000000000000000000000000000001","gas":"0x989680","gasPrice":"0x2710","nonce":null,"to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","value":"0x1"}}},"#, r#"{"id":"0x2","origin":"unknown","payload":{"sign":{"address":"0x0000000000000000000000000000000000000001","data":"0x05"}}}"#, r#"],"id":1}"# ); diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 48e315ce7..a11b86806 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -40,8 +40,8 @@ build_rpc_trait! { fn hashrate(&self) -> Result; /// Returns block author. - #[rpc(meta, name = "eth_coinbase")] - fn author(&self, Self::Metadata) -> Result; + #[rpc(name = "eth_coinbase")] + fn author(&self) -> Result; /// Returns true if client is actively mining new blocks. #[rpc(name = "eth_mining")] @@ -52,8 +52,8 @@ build_rpc_trait! { fn gas_price(&self) -> Result; /// Returns accounts list. - #[rpc(meta, name = "eth_accounts")] - fn accounts(&self, Self::Metadata) -> Result>; + #[rpc(name = "eth_accounts")] + fn accounts(&self) -> Result>; /// Returns highest block number. #[rpc(name = "eth_blockNumber")] @@ -108,12 +108,12 @@ build_rpc_trait! { fn submit_transaction(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[rpc(meta, name = "eth_call")] - fn call(&self, Self::Metadata, CallRequest, Trailing) -> BoxFuture; + #[rpc(name = "eth_call")] + fn call(&self, CallRequest, Trailing) -> BoxFuture; /// Estimate gas needed for execution of given contract. - #[rpc(meta, name = "eth_estimateGas")] - fn estimate_gas(&self, Self::Metadata, CallRequest, Trailing) -> BoxFuture; + #[rpc(name = "eth_estimateGas")] + fn estimate_gas(&self, CallRequest, Trailing) -> BoxFuture; /// Get transaction by its hash. #[rpc(name = "eth_getTransactionByHash")] diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index 1cc6aca96..39e1892cc 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -26,7 +26,7 @@ use v1::types::{ Peers, Transaction, RpcSettings, Histogram, TransactionStats, LocalTransactionStatus, BlockNumber, ConsensusCapability, VersionInfo, - OperationsInfo, DappId, ChainStatus, + OperationsInfo, ChainStatus, AccountInfo, HwAccountInfo, RichHeader, }; @@ -37,7 +37,7 @@ build_rpc_trait! { /// Returns accounts information. #[rpc(name = "parity_accountsInfo")] - fn accounts_info(&self, Trailing) -> Result>; + fn accounts_info(&self) -> Result>; /// Returns hardware accounts information. #[rpc(name = "parity_hardwareAccountsInfo")] @@ -48,8 +48,8 @@ build_rpc_trait! { fn locked_hardware_accounts_info(&self) -> Result>; /// Returns default account for dapp. - #[rpc(meta, name = "parity_defaultAccount")] - fn default_account(&self, Self::Metadata) -> Result; + #[rpc(name = "parity_defaultAccount")] + fn default_account(&self) -> Result; /// Returns current transactions limit. 
#[rpc(name = "parity_transactionsLimit")] @@ -216,7 +216,7 @@ build_rpc_trait! { fn ipfs_cid(&self, Bytes) -> Result; /// Call contract, returning the output data. - #[rpc(meta, name = "parity_call")] - fn call(&self, Self::Metadata, Vec, Trailing) -> Result>; + #[rpc(name = "parity_call")] + fn call(&self, Vec, Trailing) -> Result>; } } diff --git a/rpc/src/v1/traits/parity_accounts.rs b/rpc/src/v1/traits/parity_accounts.rs index c7bc21771..029f06bf2 100644 --- a/rpc/src/v1/traits/parity_accounts.rs +++ b/rpc/src/v1/traits/parity_accounts.rs @@ -20,7 +20,7 @@ use std::collections::BTreeMap; use jsonrpc_core::Result; use ethkey::Password; use ethstore::KeyFile; -use v1::types::{H160, H256, H520, DappId, DeriveHash, DeriveHierarchical, ExtAccountInfo}; +use v1::types::{H160, H256, H520, DeriveHash, DeriveHierarchical, ExtAccountInfo}; build_rpc_trait! { /// Personal Parity rpc interface. @@ -72,57 +72,6 @@ build_rpc_trait! { #[rpc(name = "parity_setAccountMeta")] fn set_account_meta(&self, H160, String) -> Result; - /// Sets addresses exposed for particular dapp. - /// Setting a non-empty list will also override default account. - /// Setting `None` will resets visible account to what's visible for new dapps - /// (does not affect default account though) - #[rpc(name = "parity_setDappAddresses")] - fn set_dapp_addresses(&self, DappId, Option>) -> Result; - - /// Gets accounts exposed for particular dapp. - #[rpc(name = "parity_getDappAddresses")] - fn dapp_addresses(&self, DappId) -> Result>; - - /// Changes dapp default address. - /// Does not affect other accounts exposed for this dapp, but - /// default account will always be retured as the first one. - #[rpc(name = "parity_setDappDefaultAddress")] - fn set_dapp_default_address(&self, DappId, H160) -> Result; - - /// Returns current dapp default address. - /// If not set explicite for the dapp will return global default. - #[rpc(name = "parity_getDappDefaultAddress")] - fn dapp_default_address(&self, DappId) -> Result; - - /// Sets accounts exposed for new dapps. - /// Setting a non-empty list will also override default account. - /// Setting `None` exposes all internal-managed accounts. - /// (does not affect default account though) - #[rpc(name = "parity_setNewDappsAddresses")] - fn set_new_dapps_addresses(&self, Option>) -> Result; - - /// Gets accounts exposed for new dapps. - /// `None` means that all accounts are exposes. - #[rpc(name = "parity_getNewDappsAddresses")] - fn new_dapps_addresses(&self) -> Result>>; - - /// Changes default address for new dapps (global default address) - /// Does not affect other accounts exposed for new dapps, but - /// default account will always be retured as the first one. - #[rpc(name = "parity_setNewDappsDefaultAddress")] - fn set_new_dapps_default_address(&self, H160) -> Result; - - /// Returns current default address for new dapps (global default address) - /// In case it's not set explicite will return first available account. - /// If no accounts are available will return `0x0` - #[rpc(name = "parity_getNewDappsDefaultAddress")] - fn new_dapps_default_address(&self) -> Result; - - /// Returns identified dapps that recently used RPC - /// Includes last usage timestamp. - #[rpc(name = "parity_listRecentDapps")] - fn recent_dapps(&self) -> Result>; - /// Imports a number of Geth accounts, with the list provided as the argument. 
#[rpc(name = "parity_importGethAccounts")] fn import_geth_accounts(&self, Vec) -> Result>; diff --git a/rpc/src/v1/traits/private.rs b/rpc/src/v1/traits/private.rs index b7b1aa20a..fdc28a817 100644 --- a/rpc/src/v1/traits/private.rs +++ b/rpc/src/v1/traits/private.rs @@ -35,8 +35,8 @@ build_rpc_trait! { fn compose_deployment_transaction(&self, BlockNumber, Bytes, Vec, U256) -> Result; /// Make a call to the private contract - #[rpc(meta, name = "private_call")] - fn private_call(&self, Self::Metadata, BlockNumber, CallRequest) -> Result; + #[rpc(name = "private_call")] + fn private_call(&self, BlockNumber, CallRequest) -> Result; /// Retrieve the id of the key associated with the contract #[rpc(name = "private_contractKey")] diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index 91d460864..00572ce38 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -42,12 +42,12 @@ build_rpc_trait! { fn block_traces(&self, BlockNumber) -> Result>>; /// Executes the given call and returns a number of possible traces for it. - #[rpc(meta, name = "trace_call")] - fn call(&self, Self::Metadata, CallRequest, TraceOptions, Trailing) -> Result; + #[rpc(name = "trace_call")] + fn call(&self, CallRequest, TraceOptions, Trailing) -> Result; /// Executes all given calls and returns a number of possible traces for each of it. - #[rpc(meta, name = "trace_callMany")] - fn call_many(&self, Self::Metadata, Vec<(CallRequest, TraceOptions)>, Trailing) -> Result>; + #[rpc(name = "trace_callMany")] + fn call_many(&self, Vec<(CallRequest, TraceOptions)>, Trailing) -> Result>; /// Executes the given raw transaction and returns a number of possible traces for it. #[rpc(name = "trace_rawTransaction")] diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index 477546aa4..e5da13298 100644 --- a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -285,14 +285,13 @@ mod tests { condition: None, }), origin: Origin::Signer { - dapp: "http://parity.io".into(), session: 5.into(), } }; // when let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":{"dapp":"http://parity.io","session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#; + let expected = r#"{"id":"0xf","payload":{"sendTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"signer":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}}"#; // then assert_eq!(res.unwrap(), expected.to_owned()); @@ -314,12 +313,12 @@ mod tests { nonce: Some(1.into()), condition: None, }), - origin: Origin::Dapps("http://parity.io".into()), + origin: Origin::Unknown, }; // when let res = serde_json::to_string(&ConfirmationRequest::from(request)); - let expected = r#"{"id":"0xf","payload":{"signTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":{"dapp":"http://parity.io"}}"#; + let expected = 
r#"{"id":"0xf","payload":{"signTransaction":{"from":"0x0000000000000000000000000000000000000000","to":null,"gasPrice":"0x2710","gas":"0x3a98","value":"0x186a0","data":"0x010203","nonce":"0x1","condition":null}},"origin":"unknown"}"#; // then assert_eq!(res.unwrap(), expected.to_owned()); diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index 0f21e2f7b..fe35dd50a 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -63,7 +63,7 @@ pub use self::histogram::Histogram; pub use self::index::Index; pub use self::log::Log; pub use self::node_kind::{NodeKind, Availability, Capability}; -pub use self::provenance::{Origin, DappId}; +pub use self::provenance::Origin; pub use self::receipt::Receipt; pub use self::rpc_settings::RpcSettings; pub use self::secretstore::EncryptedDocumentKey; diff --git a/rpc/src/v1/types/provenance.rs b/rpc/src/v1/types/provenance.rs index 328f2ded3..55e25f0da 100644 --- a/rpc/src/v1/types/provenance.rs +++ b/rpc/src/v1/types/provenance.rs @@ -17,7 +17,6 @@ //! Request Provenance use std::fmt; -use ethcore::account_provider::DappId as EthDappId; use v1::types::H256; /// RPC request origin @@ -27,25 +26,18 @@ pub enum Origin { /// RPC server (includes request origin) #[serde(rename="rpc")] Rpc(String), - /// Dapps server (includes DappId) - #[serde(rename="dapp")] - Dapps(DappId), /// IPC server (includes session hash) #[serde(rename="ipc")] Ipc(H256), /// WS server #[serde(rename="ws")] Ws { - /// Dapp id - dapp: DappId, /// Session id session: H256, }, /// Signer (authorized WS server) #[serde(rename="signer")] Signer { - /// Dapp id - dapp: DappId, /// Session id session: H256 }, @@ -67,80 +59,35 @@ impl fmt::Display for Origin { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Origin::Rpc(ref origin) => write!(f, "{} via RPC", origin), - Origin::Dapps(ref origin) => write!(f, "Dapp {}", origin), Origin::Ipc(ref session) => write!(f, "IPC (session: {})", session), - Origin::Ws { ref session, ref dapp } => write!(f, "{} via WebSocket (session: {})", dapp, session), - Origin::Signer { ref session, ref dapp } => write!(f, "{} via UI (session: {})", dapp, session), + Origin::Ws { ref session } => write!(f, "WebSocket (session: {})", session), + Origin::Signer { ref session } => write!(f, "Secure Session (session: {})", session), Origin::CApi => write!(f, "C API"), Origin::Unknown => write!(f, "unknown origin"), } } } -/// Dapplication Internal Id -#[derive(Debug, Default, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Serialize, Deserialize)] -pub struct DappId(pub String); - -impl fmt::Display for DappId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", self.0) - } -} - -impl Into for DappId { - fn into(self) -> String { - self.0 - } -} - -impl From for DappId { - fn from(s: String) -> Self { - DappId(s) - } -} - -impl<'a> From<&'a str> for DappId { - fn from(s: &'a str) -> Self { - DappId(s.to_owned()) - } -} - -impl From for DappId { - fn from(id: EthDappId) -> Self { - DappId(id.into()) - } -} - -impl Into for DappId { - fn into(self) -> EthDappId { - Into::::into(self).into() - } -} - #[cfg(test)] mod tests { use serde_json; - use super::{DappId, Origin}; + use super::Origin; #[test] fn should_serialize_origin() { // given let o1 = Origin::Rpc("test service".into()); - let o2 = Origin::Dapps("http://parity.io".into()); let o3 = Origin::Ipc(5.into()); let o4 = Origin::Signer { - dapp: "http://parity.io".into(), session: 10.into(), }; let o5 = Origin::Unknown; let o6 = Origin::Ws { - dapp: 
"http://parity.io".into(), session: 5.into(), }; // when let res1 = serde_json::to_string(&o1).unwrap(); - let res2 = serde_json::to_string(&o2).unwrap(); let res3 = serde_json::to_string(&o3).unwrap(); let res4 = serde_json::to_string(&o4).unwrap(); let res5 = serde_json::to_string(&o5).unwrap(); @@ -148,34 +95,9 @@ mod tests { // then assert_eq!(res1, r#"{"rpc":"test service"}"#); - assert_eq!(res2, r#"{"dapp":"http://parity.io"}"#); assert_eq!(res3, r#"{"ipc":"0x0000000000000000000000000000000000000000000000000000000000000005"}"#); - assert_eq!(res4, r#"{"signer":{"dapp":"http://parity.io","session":"0x000000000000000000000000000000000000000000000000000000000000000a"}}"#); + assert_eq!(res4, r#"{"signer":{"session":"0x000000000000000000000000000000000000000000000000000000000000000a"}}"#); assert_eq!(res5, r#""unknown""#); - assert_eq!(res6, r#"{"ws":{"dapp":"http://parity.io","session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"#); - } - - #[test] - fn should_serialize_dapp_id() { - // given - let id = DappId("testapp".into()); - - // when - let res = serde_json::to_string(&id).unwrap(); - - // then - assert_eq!(res, r#""testapp""#); - } - - #[test] - fn should_deserialize_dapp_id() { - // given - let id = r#""testapp""#; - - // when - let res: DappId = serde_json::from_str(id).unwrap(); - - // then - assert_eq!(res, DappId("testapp".into())); + assert_eq!(res6, r#"{"ws":{"session":"0x0000000000000000000000000000000000000000000000000000000000000005"}}"#); } } From 712101b63d04c1e54b638e7eb64542fcc0e21465 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Tue, 7 Aug 2018 15:18:19 +0100 Subject: [PATCH 08/48] ethcore: add transition flag for transaction permission contract (#9275) * ethcore: add transition flag for transaction permission contract * ethcore: fix transaction permission contract tests --- .../contract_ver_2_genesis.json | 3 +- .../deprecated_contract_genesis.json | 3 +- ethcore/src/machine.rs | 2 +- ethcore/src/spec/spec.rs | 4 + ethcore/src/tx_filter.rs | 83 +++++++++++-------- json/src/spec/params.rs | 3 + 6 files changed, 61 insertions(+), 37 deletions(-) diff --git a/ethcore/res/tx_permission_tests/contract_ver_2_genesis.json b/ethcore/res/tx_permission_tests/contract_ver_2_genesis.json index b165625a1..89077c0af 100644 --- a/ethcore/res/tx_permission_tests/contract_ver_2_genesis.json +++ b/ethcore/res/tx_permission_tests/contract_ver_2_genesis.json @@ -17,7 +17,8 @@ "minGasLimit": "0x1388", "networkID" : "0x69", "gasLimitBoundDivisor": "0x0400", - "transactionPermissionContract": "0x0000000000000000000000000000000000000005" + "transactionPermissionContract": "0x0000000000000000000000000000000000000005", + "transactionPermissionContractTransition": "1" }, "genesis": { "seal": { diff --git a/ethcore/res/tx_permission_tests/deprecated_contract_genesis.json b/ethcore/res/tx_permission_tests/deprecated_contract_genesis.json index 92fde9080..dd858bee6 100644 --- a/ethcore/res/tx_permission_tests/deprecated_contract_genesis.json +++ b/ethcore/res/tx_permission_tests/deprecated_contract_genesis.json @@ -17,7 +17,8 @@ "minGasLimit": "0x1388", "networkID" : "0x69", "gasLimitBoundDivisor": "0x0400", - "transactionPermissionContract": "0x0000000000000000000000000000000000000005" + "transactionPermissionContract": "0x0000000000000000000000000000000000000005", + "transactionPermissionContractTransition": "1" }, "genesis": { "seal": { diff --git a/ethcore/src/machine.rs b/ethcore/src/machine.rs index 5b2170609..d487fff9b 100644 --- 
a/ethcore/src/machine.rs
+++ b/ethcore/src/machine.rs
@@ -343,7 +343,7 @@ impl EthereumMachine {
 		-> Result<(), transaction::Error>
 	{
 		if let Some(ref filter) = self.tx_filter.as_ref() {
-			if !filter.transaction_allowed(header.parent_hash(), t, client) {
+			if !filter.transaction_allowed(header.parent_hash(), header.number(), t, client) {
 				return Err(transaction::Error::NotAllowed.into())
 			}
 		}
diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs
index 45f9a6c95..e842835f7 100644
--- a/ethcore/src/spec/spec.rs
+++ b/ethcore/src/spec/spec.rs
@@ -139,6 +139,8 @@ pub struct CommonParams {
 	pub max_code_size_transition: BlockNumber,
 	/// Transaction permission managing contract address.
 	pub transaction_permission_contract: Option<Address>,
+	/// Block at which the transaction permission contract should start being used.
+	pub transaction_permission_contract_transition: BlockNumber,
 	/// Maximum size of transaction's RLP payload
 	pub max_transaction_size: usize,
 }
@@ -296,6 +298,8 @@ impl From for CommonParams {
 			max_transaction_size: p.max_transaction_size.map_or(MAX_TRANSACTION_SIZE, Into::into),
 			max_code_size_transition: p.max_code_size_transition.map_or(0, Into::into),
 			transaction_permission_contract: p.transaction_permission_contract.map(Into::into),
+			transaction_permission_contract_transition:
+				p.transaction_permission_contract_transition.map_or(0, Into::into),
 			wasm_activation_transition: p.wasm_activation_transition.map_or_else(
 				BlockNumber::max_value,
 				Into::into
diff --git a/ethcore/src/tx_filter.rs b/ethcore/src/tx_filter.rs
index 585dc7e3d..78b495b26 100644
--- a/ethcore/src/tx_filter.rs
+++ b/ethcore/src/tx_filter.rs
@@ -23,6 +23,7 @@ use client::{BlockInfo, CallContract, BlockId};
 use parking_lot::Mutex;
 use spec::CommonParams;
 use transaction::{Action, SignedTransaction};
+use types::BlockNumber;
 use hash::KECCAK_EMPTY;
 
 use_contract!(transact_acl_deprecated, "TransactAclDeprecated", "res/contracts/tx_acl_deprecated.json");
@@ -44,6 +45,7 @@ pub struct TransactionFilter {
 	contract_deprecated: transact_acl_deprecated::TransactAclDeprecated,
 	contract: transact_acl::TransactAcl,
 	contract_address: Address,
+	transition_block: BlockNumber,
 	permission_cache: Mutex>,
 	contract_version_cache: Mutex>>
 }
@@ -56,6 +58,7 @@ impl TransactionFilter {
 			contract_deprecated: transact_acl_deprecated::TransactAclDeprecated::default(),
 			contract: transact_acl::TransactAcl::default(),
 			contract_address: address,
+			transition_block: params.transaction_permission_contract_transition,
 			permission_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)),
 			contract_version_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)),
 		}
@@ -63,7 +66,9 @@ impl TransactionFilter {
 	}
 
 	/// Check if transaction is allowed at given block.
- pub fn transaction_allowed(&self, parent_hash: &H256, transaction: &SignedTransaction, client: &C) -> bool { + pub fn transaction_allowed(&self, parent_hash: &H256, block_number: BlockNumber, transaction: &SignedTransaction, client: &C) -> bool { + if block_number < self.transition_block { return true; } + let mut permission_cache = self.permission_cache.lock(); let mut contract_version_cache = self.contract_version_cache.lock(); @@ -196,33 +201,38 @@ mod test { basic_tx_with_ether_and_to_key6.value = U256::from(123123); let genesis = client.block_hash(BlockId::Latest).unwrap(); + let block_number = 1; - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); + // same tx but request is allowed because the contract only enables at block #1 + assert!(filter.transaction_allowed(&genesis, 0, &create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key3.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key2.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key4.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key3.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key3.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key3.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None), &*client)); + 
assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key4.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key4.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &basic_tx_with_ether_and_to_key7.clone().sign(key5.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &call_tx_with_ether.clone().sign(key5.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key6.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx_with_ether_and_to_key7.clone().sign(key6.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx_to_key6.clone().sign(key7.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &basic_tx_with_ether_and_to_key6.clone().sign(key7.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); + + assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key7.clone().sign(key5.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx_with_ether.clone().sign(key5.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key6.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key7.clone().sign(key6.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx_to_key6.clone().sign(key7.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx_with_ether_and_to_key6.clone().sign(key7.secret(), None), &*client)); } /// Contract code: https://gist.github.com/arkpar/38a87cb50165b7e683585eec71acb05a @@ -254,21 +264,26 @@ mod test { call_tx.action = Action::Call(Address::from("0000000000000000000000000000000000000005")); let genesis = client.block_hash(BlockId::Latest).unwrap(); + let block_number = 1; - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &create_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key1.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); + // same tx but request is allowed because the contract only enables at block #1 + assert!(filter.transaction_allowed(&genesis, 0, &create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key2.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &call_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key1.secret(), None), 
&*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key1.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key1.secret(), None), &*client)); - assert!(filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key3.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key3.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key2.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key2.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key2.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &basic_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &create_tx.clone().sign(key4.secret(), None), &*client)); - assert!(!filter.transaction_allowed(&genesis, &call_tx.clone().sign(key4.secret(), None), &*client)); + assert!(filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key3.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key3.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key3.secret(), None), &*client)); + + assert!(!filter.transaction_allowed(&genesis, block_number, &basic_tx.clone().sign(key4.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &create_tx.clone().sign(key4.secret(), None), &*client)); + assert!(!filter.transaction_allowed(&genesis, block_number, &call_tx.clone().sign(key4.secret(), None), &*client)); } } diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index 0fab68198..cf57e9af4 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -143,6 +143,9 @@ pub struct Params { /// Transaction permission contract address. #[serde(rename="transactionPermissionContract")] pub transaction_permission_contract: Option
, + /// Block at which the transaction permission contract should start being used. + #[serde(rename="transactionPermissionContractTransition")] + pub transaction_permission_contract_transition: Option, /// Wasm activation block height, if not activated from start #[serde(rename="wasmActivationTransition")] pub wasm_activation_transition: Option, From 78a38e98259404d19dbe38e5fe407b92e5144098 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Wed, 8 Aug 2018 10:56:54 +0200 Subject: [PATCH 09/48] ethcore sync decodes rlp less often (#9264) * deserialize block only once during verification * ethcore-sync uses Unverified * ethcore-sync uses Unverified * fixed build error * removed Block::is_good * applied review suggestions * ethcore-sync deserializes headers and blocks only once --- ethcore/src/verification/queue/kind.rs | 1 + ethcore/sync/src/block_sync.rs | 54 ++---- ethcore/sync/src/blocks.rs | 259 +++++++++++++++++-------- 3 files changed, 196 insertions(+), 118 deletions(-) diff --git a/ethcore/src/verification/queue/kind.rs b/ethcore/src/verification/queue/kind.rs index fbc6346c9..973518726 100644 --- a/ethcore/src/verification/queue/kind.rs +++ b/ethcore/src/verification/queue/kind.rs @@ -113,6 +113,7 @@ pub mod blocks { } /// An unverified block. + #[derive(PartialEq, Debug)] pub struct Unverified { /// Unverified block header. pub header: Header, diff --git a/ethcore/sync/src/block_sync.rs b/ethcore/sync/src/block_sync.rs index 588bfc0c7..4c229cd87 100644 --- a/ethcore/sync/src/block_sync.rs +++ b/ethcore/sync/src/block_sync.rs @@ -23,12 +23,11 @@ use std::cmp; use heapsize::HeapSizeOf; use ethereum_types::H256; use rlp::{self, Rlp}; -use ethcore::header::{BlockNumber, Header as BlockHeader}; +use ethcore::header::BlockNumber; use ethcore::client::{BlockStatus, BlockId, BlockImportError, BlockImportErrorKind}; use ethcore::error::{ImportErrorKind, BlockError}; -use ethcore::verification::queue::kind::blocks::Unverified; use sync_io::SyncIo; -use blocks::BlockCollection; +use blocks::{BlockCollection, SyncBody, SyncHeader}; const MAX_HEADERS_TO_REQUEST: usize = 128; const MAX_BODIES_TO_REQUEST: usize = 32; @@ -236,45 +235,39 @@ impl BlockDownloader { let mut valid_response = item_count == 0; //empty response is valid let mut any_known = false; for i in 0..item_count { - let info: BlockHeader = r.val_at(i).map_err(|e| { - trace!(target: "sync", "Error decoding block header RLP: {:?}", e); - BlockDownloaderImportError::Invalid - })?; - let number = BlockNumber::from(info.number()); + let info = SyncHeader::from_rlp(r.at(i)?.as_raw().to_vec())?; + let number = BlockNumber::from(info.header.number()); + let hash = info.header.hash(); // Check if any of the headers matches the hash we requested if !valid_response { if let Some(expected) = expected_hash { - valid_response = expected == info.hash() + valid_response = expected == hash; } } - any_known = any_known || self.blocks.contains_head(&info.hash()); - if self.blocks.contains(&info.hash()) { - trace!(target: "sync", "Skipping existing block header {} ({:?})", number, info.hash()); + any_known = any_known || self.blocks.contains_head(&hash); + if self.blocks.contains(&hash) { + trace!(target: "sync", "Skipping existing block header {} ({:?})", number, hash); continue; } if self.highest_block.as_ref().map_or(true, |n| number > *n) { self.highest_block = Some(number); } - let hash = info.hash(); - let hdr = r.at(i).map_err(|e| { - trace!(target: "sync", "Error decoding block header RLP: {:?}", e); - BlockDownloaderImportError::Invalid - 
})?; + match io.chain().block_status(BlockId::Hash(hash.clone())) { BlockStatus::InChain | BlockStatus::Queued => { match self.state { State::Blocks => trace!(target: "sync", "Header already in chain {} ({})", number, hash), _ => trace!(target: "sync", "Header already in chain {} ({}), state = {:?}", number, hash, self.state), } - headers.push(hdr.as_raw().to_vec()); + headers.push(info); hashes.push(hash); }, BlockStatus::Bad => { return Err(BlockDownloaderImportError::Invalid); }, BlockStatus::Unknown | BlockStatus::Pending => { - headers.push(hdr.as_raw().to_vec()); + headers.push(info); hashes.push(hash); } } @@ -325,19 +318,15 @@ impl BlockDownloader { let item_count = r.item_count().unwrap_or(0); if item_count == 0 { return Err(BlockDownloaderImportError::Useless); - } - else if self.state != State::Blocks { + } else if self.state != State::Blocks { trace!(target: "sync", "Ignored unexpected block bodies"); - } - else { + } else { let mut bodies = Vec::with_capacity(item_count); for i in 0..item_count { - let body = r.at(i).map_err(|e| { - trace!(target: "sync", "Error decoding block boides RLP: {:?}", e); - BlockDownloaderImportError::Invalid - })?; - bodies.push(body.as_raw().to_vec()); + let body = SyncBody::from_rlp(r.at(i)?.as_raw())?; + bodies.push(body); } + if self.blocks.insert_bodies(bodies) != item_count { trace!(target: "sync", "Deactivating peer for giving invalid block bodies"); return Err(BlockDownloaderImportError::Invalid); @@ -483,15 +472,6 @@ impl BlockDownloader { let block = block_and_receipts.block; let receipts = block_and_receipts.receipts; - let block = match Unverified::from_rlp(block) { - Ok(block) => block, - Err(_) => { - debug!(target: "sync", "Bad block rlp"); - bad = true; - break; - } - }; - let h = block.header.hash(); let number = block.header.number(); let parent = *block.header.parent_hash(); diff --git a/ethcore/sync/src/blocks.rs b/ethcore/sync/src/blocks.rs index 248180b28..a502cee9c 100644 --- a/ethcore/sync/src/blocks.rs +++ b/ethcore/sync/src/blocks.rs @@ -23,28 +23,85 @@ use triehash_ethereum::ordered_trie_root; use bytes::Bytes; use rlp::{Rlp, RlpStream, DecoderError}; use network; -use ethcore::encoded::Block; -use ethcore::views::{HeaderView, BodyView}; use ethcore::header::Header as BlockHeader; +use ethcore::verification::queue::kind::blocks::Unverified; +use transaction::UnverifiedTransaction; known_heap_size!(0, HeaderId); type SmallHashVec = SmallVec<[H256; 1]>; -/// Block data with optional body. -struct SyncBlock { - header: Bytes, - body: Option, - receipts: Option, - receipts_root: H256, +pub struct SyncHeader { + pub bytes: Bytes, + pub header: BlockHeader, } -/// Block with optional receipt -pub struct BlockAndReceipts { - /// Block data. - pub block: Bytes, - /// Block receipts RLP list. 
- pub receipts: Option, +impl HeapSizeOf for SyncHeader { + fn heap_size_of_children(&self) -> usize { + self.bytes.heap_size_of_children() + + self.header.heap_size_of_children() + } +} + +impl SyncHeader { + pub fn from_rlp(bytes: Bytes) -> Result { + let result = SyncHeader { + header: ::rlp::decode(&bytes)?, + bytes, + }; + + Ok(result) + } +} + +pub struct SyncBody { + pub transactions_bytes: Bytes, + pub transactions: Vec, + pub uncles_bytes: Bytes, + pub uncles: Vec, +} + +impl SyncBody { + pub fn from_rlp(bytes: &[u8]) -> Result { + let rlp = Rlp::new(bytes); + let transactions_rlp = rlp.at(0)?; + let uncles_rlp = rlp.at(1)?; + + let result = SyncBody { + transactions_bytes: transactions_rlp.as_raw().to_vec(), + transactions: transactions_rlp.as_list()?, + uncles_bytes: uncles_rlp.as_raw().to_vec(), + uncles: uncles_rlp.as_list()?, + }; + + Ok(result) + } + + fn empty_body() -> Self { + SyncBody { + transactions_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), + transactions: Vec::with_capacity(0), + uncles_bytes: ::rlp::EMPTY_LIST_RLP.to_vec(), + uncles: Vec::with_capacity(0), + } + } +} + +impl HeapSizeOf for SyncBody { + fn heap_size_of_children(&self) -> usize { + self.transactions_bytes.heap_size_of_children() + + self.transactions.heap_size_of_children() + + self.uncles_bytes.heap_size_of_children() + + self.uncles.heap_size_of_children() + } +} + +/// Block data with optional body. +struct SyncBlock { + header: SyncHeader, + body: Option, + receipts: Option, + receipts_root: H256, } impl HeapSizeOf for SyncBlock { @@ -53,6 +110,29 @@ impl HeapSizeOf for SyncBlock { } } +fn unverified_from_sync(header: SyncHeader, body: Option) -> Unverified { + let mut stream = RlpStream::new_list(3); + stream.append_raw(&header.bytes, 1); + let body = body.unwrap_or_else(SyncBody::empty_body); + stream.append_raw(&body.transactions_bytes, 1); + stream.append_raw(&body.uncles_bytes, 1); + + Unverified { + header: header.header, + transactions: body.transactions, + uncles: body.uncles, + bytes: stream.out().to_vec(), + } +} + +/// Block with optional receipt +pub struct BlockAndReceipts { + /// Block data. + pub block: Unverified, + /// Block receipts RLP list. + pub receipts: Option, +} + /// Used to identify header by transactions and uncles hashes #[derive(Eq, PartialEq, Hash)] struct HeaderId { @@ -124,7 +204,7 @@ impl BlockCollection { } /// Insert a set of headers into collection and advance subchain head pointers. - pub fn insert_headers(&mut self, headers: Vec) { + pub fn insert_headers(&mut self, headers: Vec) { for h in headers { if let Err(e) = self.insert_header(h) { trace!(target: "sync", "Ignored invalid header: {:?}", e); @@ -134,7 +214,7 @@ impl BlockCollection { } /// Insert a collection of block bodies for previously downloaded headers. 
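An aside on the decode-once pattern behind `SyncHeader` and `SyncBody` above: each wrapper is built by parsing its RLP payload exactly once and then carries both the raw bytes and the decoded value, so later stages can forward the bytes or read the fields without another decode. A minimal, self-contained sketch of that shape with toy types (plain UTF-8 parsing stands in for RLP decoding; none of the ethcore types are used):

    use std::str;

    /// Toy stand-in for the decode-once wrappers: raw bytes plus the value
    /// decoded from them, parsed exactly once in the constructor.
    struct DecodedOnce {
        bytes: Vec<u8>,
        value: u64,
    }

    impl DecodedOnce {
        /// Parse the payload a single time and keep both representations.
        fn from_bytes(bytes: Vec<u8>) -> Result<DecodedOnce, &'static str> {
            let text = str::from_utf8(&bytes).map_err(|_| "not utf-8")?;
            let value = text.trim().parse().map_err(|_| "not a number")?;
            Ok(DecodedOnce { bytes, value })
        }
    }

    fn main() {
        let item = DecodedOnce::from_bytes(b"42".to_vec()).unwrap();
        // Later stages can read the decoded value without parsing again ...
        assert_eq!(item.value, 42);
        // ... and still forward the original bytes verbatim.
        assert_eq!(item.bytes, b"42".to_vec());
    }

In the patch above this is what lets `BlockCollection` store `SyncHeader`/`SyncBody` directly and later assemble an `Unverified` block by re-appending the saved byte slices instead of re-encoding the already decoded parts.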
- pub fn insert_bodies(&mut self, bodies: Vec) -> usize { + pub fn insert_bodies(&mut self, bodies: Vec) -> usize { let mut inserted = 0; for b in bodies { if let Err(e) = self.insert_body(b) { @@ -278,30 +358,33 @@ impl BlockCollection { while let Some(h) = head { head = self.parents.get(&h).cloned(); if let Some(head) = head { - match self.blocks.get(&head) { - Some(block) if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) => { - blocks.push(block); - hashes.push(head); - self.head = Some(head); - } - _ => break, + match self.blocks.remove(&head) { + Some(block) => { + if block.body.is_some() && (!self.need_receipts || block.receipts.is_some()) { + blocks.push(block); + hashes.push(head); + self.head = Some(head); + } else { + self.blocks.insert(head, block); + break; + } + }, + _ => { + break; + }, } } } - for block in blocks { - let body = view!(BodyView, block.body.as_ref().expect("blocks contains only full blocks; qed")); - let header = view!(HeaderView, &block.header); - let block_view = Block::new_from_header_and_body(&header, &body); + for block in blocks.into_iter() { + let unverified = unverified_from_sync(block.header, block.body); drained.push(BlockAndReceipts { - block: block_view.into_inner(), + block: unverified, receipts: block.receipts.clone(), }); } } - for h in hashes { - self.blocks.remove(&h); - } + trace!(target: "sync", "Drained {} blocks, new head :{:?}", drained.len(), self.head); drained } @@ -337,26 +420,23 @@ impl BlockCollection { self.downloading_headers.contains(hash) || self.downloading_bodies.contains(hash) } - fn insert_body(&mut self, b: Bytes) -> Result<(), network::Error> { + fn insert_body(&mut self, body: SyncBody) -> Result<(), network::Error> { let header_id = { - let body = Rlp::new(&b); - let tx = body.at(0)?; - let tx_root = ordered_trie_root(tx.iter().map(|r| r.as_raw())); - let uncles = keccak(body.at(1)?.as_raw()); + let tx_root = ordered_trie_root(Rlp::new(&body.transactions_bytes).iter().map(|r| r.as_raw())); + let uncles = keccak(&body.uncles_bytes); HeaderId { transactions_root: tx_root, uncles: uncles } }; - match self.header_ids.get(&header_id).cloned() { + match self.header_ids.remove(&header_id) { Some(h) => { - self.header_ids.remove(&header_id); self.downloading_bodies.remove(&h); match self.blocks.get_mut(&h) { Some(ref mut block) => { trace!(target: "sync", "Got body {}", h); - block.body = Some(b); + block.body = Some(body); Ok(()) }, None => { @@ -401,54 +481,63 @@ impl BlockCollection { } } - fn insert_header(&mut self, header: Bytes) -> Result { - let info: BlockHeader = Rlp::new(&header).as_val()?; - let hash = info.hash(); + fn insert_header(&mut self, info: SyncHeader) -> Result { + let hash = info.header.hash(); if self.blocks.contains_key(&hash) { return Ok(hash); } + match self.head { None if hash == self.heads[0] => { trace!(target: "sync", "New head {}", hash); - self.head = Some(info.parent_hash().clone()); + self.head = Some(info.header.parent_hash().clone()); }, _ => () } - let mut block = SyncBlock { - header: header, - body: None, - receipts: None, - receipts_root: H256::new(), - }; let header_id = HeaderId { - transactions_root: info.transactions_root().clone(), - uncles: info.uncles_hash().clone(), + transactions_root: *info.header.transactions_root(), + uncles: *info.header.uncles_hash(), }; - if header_id.transactions_root == KECCAK_NULL_RLP && header_id.uncles == KECCAK_EMPTY_LIST_RLP { + + let body = if header_id.transactions_root == KECCAK_NULL_RLP && header_id.uncles == 
KECCAK_EMPTY_LIST_RLP { // empty body, just mark as downloaded - let mut body_stream = RlpStream::new_list(2); - body_stream.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - body_stream.append_raw(&::rlp::EMPTY_LIST_RLP, 1); - block.body = Some(body_stream.out()); - } - else { - trace!("Queueing body tx_root = {:?}, uncles = {:?}, block = {:?}, number = {}", header_id.transactions_root, header_id.uncles, hash, info.number()); - self.header_ids.insert(header_id, hash.clone()); - } - if self.need_receipts { - let receipt_root = info.receipts_root().clone(); + Some(SyncBody::empty_body()) + } else { + trace!( + "Queueing body tx_root = {:?}, uncles = {:?}, block = {:?}, number = {}", + header_id.transactions_root, + header_id.uncles, + hash, + info.header.number() + ); + self.header_ids.insert(header_id, hash); + None + }; + + let (receipts, receipts_root) = if self.need_receipts { + let receipt_root = *info.header.receipts_root(); if receipt_root == KECCAK_NULL_RLP { let receipts_stream = RlpStream::new_list(0); - block.receipts = Some(receipts_stream.out()); + (Some(receipts_stream.out()), receipt_root) } else { - self.receipt_ids.entry(receipt_root).or_insert_with(|| SmallHashVec::new()).push(hash.clone()); + self.receipt_ids.entry(receipt_root).or_insert_with(|| SmallHashVec::new()).push(hash); + (None, receipt_root) } - block.receipts_root = receipt_root; - } + } else { + (None, H256::new()) + }; - self.parents.insert(info.parent_hash().clone(), hash.clone()); - self.blocks.insert(hash.clone(), block); + self.parents.insert(*info.header.parent_hash(), hash); + + let block = SyncBlock { + header: info, + body, + receipts, + receipts_root, + }; + + self.blocks.insert(hash, block); trace!(target: "sync", "New header: {:x}", hash); Ok(hash) } @@ -485,10 +574,11 @@ impl BlockCollection { #[cfg(test)] mod test { - use super::BlockCollection; + use super::{BlockCollection, SyncHeader}; use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockId, BlockChainClient}; - use ethcore::views::HeaderView; use ethcore::header::BlockNumber; + use ethcore::verification::queue::kind::blocks::Unverified; + use ethcore::views::HeaderView; use rlp::*; fn is_empty(bc: &BlockCollection) -> bool { @@ -541,7 +631,7 @@ mod test { assert_eq!(bc.downloading_headers.len(), 1); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..6].to_vec()); + bc.insert_headers(headers[0..6].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert_eq!(hashes[5], bc.heads[0]); for h in &hashes[0..6] { bc.clear_header_download(h) @@ -550,7 +640,10 @@ mod test { assert!(!bc.is_downloading(&hashes[0])); assert!(bc.contains(&hashes[0])); - assert_eq!(&bc.drain().into_iter().map(|b| b.block).collect::>()[..], &blocks[0..6]); + assert_eq!( + bc.drain().into_iter().map(|b| b.block).collect::>(), + blocks[0..6].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::>() + ); assert!(!bc.contains(&hashes[0])); assert_eq!(hashes[5], bc.head.unwrap()); @@ -558,13 +651,17 @@ mod test { assert_eq!(hashes[5], h); let (h, _) = bc.needed_headers(6, false).unwrap(); assert_eq!(hashes[20], h); - bc.insert_headers(headers[10..16].to_vec()); + bc.insert_headers(headers[10..16].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[5..10].to_vec()); - assert_eq!(&bc.drain().into_iter().map(|b| b.block).collect::>()[..], &blocks[6..16]); + bc.insert_headers(headers[5..10].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + 
assert_eq!( + bc.drain().into_iter().map(|b| b.block).collect::>(), + blocks[6..16].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::>() + ); + assert_eq!(hashes[15], bc.heads[0]); - bc.insert_headers(headers[15..].to_vec()); + bc.insert_headers(headers[15..].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); bc.drain(); assert!(bc.is_empty()); } @@ -584,11 +681,11 @@ mod test { let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); bc.reset_to(heads); - bc.insert_headers(headers[2..22].to_vec()); + bc.insert_headers(headers[2..22].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert_eq!(hashes[0], bc.heads[0]); assert_eq!(hashes[21], bc.heads[1]); assert!(bc.head.is_none()); - bc.insert_headers(headers[0..2].to_vec()); + bc.insert_headers(headers[0..2].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert!(bc.head.is_some()); assert_eq!(hashes[21], bc.heads[0]); } @@ -608,9 +705,9 @@ mod test { let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); bc.reset_to(heads); - bc.insert_headers(headers[1..2].to_vec()); + bc.insert_headers(headers[1..2].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..1].to_vec()); + bc.insert_headers(headers[0..1].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); assert_eq!(bc.drain().len(), 2); } } From e2095d4a5d5749ee26828f65c80e509e024b9535 Mon Sep 17 00:00:00 2001 From: David Date: Thu, 9 Aug 2018 09:51:48 +0200 Subject: [PATCH 10/48] Move ethereum-specific H256FastMap type to own crate (#9307) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a `fastmap` crate that provides the H256FastMap specialized HashMap * Use `fastmap` instead of `plain_hasher` * Update submodules for Reasons™ * Submodule update --- Cargo.lock | 14 +++++++-- Cargo.toml | 1 + ethcore/light/Cargo.toml | 2 +- ethcore/light/src/client/header_chain.rs | 2 +- ethcore/light/src/lib.rs | 2 +- ethcore/light/src/transaction_queue.rs | 2 +- ethcore/sync/Cargo.toml | 2 +- ethcore/sync/src/chain/mod.rs | 2 +- ethcore/sync/src/lib.rs | 2 +- ethcore/sync/src/transactions_stats.rs | 2 +- util/fastmap/Cargo.toml | 10 ++++++ util/fastmap/src/lib.rs | 39 ++++++++++++++++++++++++ util/journaldb/Cargo.toml | 2 +- util/journaldb/src/lib.rs | 2 +- util/journaldb/src/overlayrecentdb.rs | 2 +- 15 files changed, 72 insertions(+), 14 deletions(-) create mode 100644 util/fastmap/Cargo.toml create mode 100644 util/fastmap/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 54a05ba05..1760b3b92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -597,6 +597,7 @@ dependencies = [ "ethcore-network 1.12.0", "ethcore-transaction 0.1.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "fastmap 0.1.0", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "hashdb 0.2.0 (git+https://github.com/paritytech/parity-common)", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -612,7 +613,6 @@ dependencies = [ "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "patricia-trie 0.2.1 (git+https://github.com/paritytech/parity-common)", "patricia-trie-ethereum 0.1.0", - "plain_hasher 0.1.0 (git+https://github.com/paritytech/parity-common)", "rand 0.4.2 
(registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rlp_derive 0.1.0", @@ -845,6 +845,7 @@ dependencies = [ "ethcore-transaction 0.1.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", + "fastmap 0.1.0", "hashdb 0.2.0 (git+https://github.com/paritytech/parity-common)", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", @@ -856,7 +857,6 @@ dependencies = [ "macros 0.1.0", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "plain_hasher 0.1.0 (git+https://github.com/paritytech/parity-common)", "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1044,6 +1044,14 @@ dependencies = [ "ethkey 0.3.0", ] +[[package]] +name = "fastmap" +version = "0.1.0" +dependencies = [ + "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "plain_hasher 0.1.0 (git+https://github.com/paritytech/parity-common)", +] + [[package]] name = "fdlimit" version = "0.1.1" @@ -1323,6 +1331,7 @@ version = "0.2.0" dependencies = [ "ethcore-logger 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "fastmap 0.1.0", "hashdb 0.2.0 (git+https://github.com/paritytech/parity-common)", "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", @@ -1333,7 +1342,6 @@ dependencies = [ "memorydb 0.2.1 (git+https://github.com/paritytech/parity-common)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", - "plain_hasher 0.1.0 (git+https://github.com/paritytech/parity-common)", "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", ] diff --git a/Cargo.toml b/Cargo.toml index 697afeab3..26b387e80 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -136,6 +136,7 @@ members = [ "util/triehash-ethereum", "util/keccak-hasher", "util/patricia-trie-ethereum", + "util/fastmap", ] [patch.crates-io] diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 6c3a454e2..16f70de62 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -20,7 +20,7 @@ ethcore-io = { path = "../../util/io" } hashdb = { git = "https://github.com/paritytech/parity-common" } heapsize = "0.4" vm = { path = "../vm" } -plain_hasher = { git = "https://github.com/paritytech/parity-common" } +fastmap = { path = "../../util/fastmap" } rlp = { git = "https://github.com/paritytech/parity-common" } rlp_derive = { path = "../../util/rlp_derive" } smallvec = "0.4" diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 957a1ea43..4611adbfe 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -41,7 +41,7 @@ use ethereum_types::{H256, H264, U256}; use heapsize::HeapSizeOf; use kvdb::{DBTransaction, KeyValueDB}; use parking_lot::{Mutex, RwLock}; -use plain_hasher::H256FastMap; +use fastmap::H256FastMap; use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp}; use smallvec::SmallVec; diff --git a/ethcore/light/src/lib.rs 
b/ethcore/light/src/lib.rs index 24c95cfde..e151267a9 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -68,7 +68,7 @@ extern crate keccak_hasher; extern crate memorydb; extern crate patricia_trie as trie; extern crate patricia_trie_ethereum as ethtrie; -extern crate plain_hasher; +extern crate fastmap; extern crate rand; extern crate rlp; extern crate parking_lot; diff --git a/ethcore/light/src/transaction_queue.rs b/ethcore/light/src/transaction_queue.rs index e8880037a..cb017bcb1 100644 --- a/ethcore/light/src/transaction_queue.rs +++ b/ethcore/light/src/transaction_queue.rs @@ -29,7 +29,7 @@ use std::collections::hash_map::Entry; use transaction::{self, Condition, PendingTransaction, SignedTransaction}; use ethereum_types::{H256, U256, Address}; -use plain_hasher::H256FastMap; +use fastmap::H256FastMap; // Knowledge of an account's current nonce. #[derive(Debug, Clone, PartialEq, Eq)] diff --git a/ethcore/sync/Cargo.toml b/ethcore/sync/Cargo.toml index dfd457772..9cdd0d84b 100644 --- a/ethcore/sync/Cargo.toml +++ b/ethcore/sync/Cargo.toml @@ -17,7 +17,7 @@ ethcore-transaction = { path = "../transaction" } ethcore = { path = ".." } ethereum-types = "0.3" hashdb = { git = "https://github.com/paritytech/parity-common" } -plain_hasher = { git = "https://github.com/paritytech/parity-common" } +fastmap = { path = "../../util/fastmap" } rlp = { git = "https://github.com/paritytech/parity-common" } rustc-hex = "1.0" keccak-hash = { git = "https://github.com/paritytech/parity-common" } diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs index 520226a9c..625ccb30d 100644 --- a/ethcore/sync/src/chain/mod.rs +++ b/ethcore/sync/src/chain/mod.rs @@ -99,7 +99,7 @@ use std::time::{Duration, Instant}; use hash::keccak; use heapsize::HeapSizeOf; use ethereum_types::{H256, U256}; -use plain_hasher::H256FastMap; +use fastmap::H256FastMap; use parking_lot::RwLock; use bytes::Bytes; use rlp::{Rlp, RlpStream, DecoderError}; diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index f9f8a3e3e..eb38d09d9 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -31,7 +31,7 @@ extern crate ethcore; extern crate ethereum_types; extern crate env_logger; extern crate hashdb; -extern crate plain_hasher; +extern crate fastmap; extern crate rand; extern crate semver; extern crate parking_lot; diff --git a/ethcore/sync/src/transactions_stats.rs b/ethcore/sync/src/transactions_stats.rs index c45b1ad8b..4d11dcf68 100644 --- a/ethcore/sync/src/transactions_stats.rs +++ b/ethcore/sync/src/transactions_stats.rs @@ -17,7 +17,7 @@ use api::TransactionStats; use std::collections::{HashSet, HashMap}; use ethereum_types::{H256, H512}; -use plain_hasher::H256FastMap; +use fastmap::H256FastMap; type NodeId = H512; type BlockNumber = u64; diff --git a/util/fastmap/Cargo.toml b/util/fastmap/Cargo.toml new file mode 100644 index 000000000..3889c6700 --- /dev/null +++ b/util/fastmap/Cargo.toml @@ -0,0 +1,10 @@ +[package] +name = "fastmap" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Specialized version of `HashMap` with H256 keys and fast hashing function." +license = "GPL-3.0" + +[dependencies] +ethereum-types = "0.3" +plain_hasher = { git = "https://github.com/paritytech/parity-common" } diff --git a/util/fastmap/src/lib.rs b/util/fastmap/src/lib.rs new file mode 100644 index 000000000..135ce54ba --- /dev/null +++ b/util/fastmap/src/lib.rs @@ -0,0 +1,39 @@ +// Copyright 2015-2018 Parity Technologies (UK) Ltd. 
+// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Provides a `H256FastMap` type with H256 keys and fast hashing function. + +extern crate ethereum_types; +extern crate plain_hasher; + +use ethereum_types::H256; +use std::hash; +use std::collections::HashMap; +use plain_hasher::PlainHasher; + +/// Specialized version of `HashMap` with H256 keys and fast hashing function. +pub type H256FastMap = HashMap>; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_works() { + let mut h = H256FastMap::default(); + h.insert(H256::from(123), "abc"); + } +} \ No newline at end of file diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml index 27b0ae195..7e559e229 100644 --- a/util/journaldb/Cargo.toml +++ b/util/journaldb/Cargo.toml @@ -15,7 +15,7 @@ kvdb = { git = "https://github.com/paritytech/parity-common" } log = "0.3" memorydb = { git = "https://github.com/paritytech/parity-common" } parking_lot = "0.6" -plain_hasher = { git = "https://github.com/paritytech/parity-common" } +fastmap = { path = "../../util/fastmap" } rlp = { git = "https://github.com/paritytech/parity-common" } [dev-dependencies] diff --git a/util/journaldb/src/lib.rs b/util/journaldb/src/lib.rs index b14ef88e9..e88b437d8 100644 --- a/util/journaldb/src/lib.rs +++ b/util/journaldb/src/lib.rs @@ -27,7 +27,7 @@ extern crate keccak_hasher; extern crate kvdb; extern crate memorydb; extern crate parking_lot; -extern crate plain_hasher; +extern crate fastmap; extern crate rlp; #[cfg(test)] diff --git a/util/journaldb/src/overlayrecentdb.rs b/util/journaldb/src/overlayrecentdb.rs index b63168e54..1549d3baa 100644 --- a/util/journaldb/src/overlayrecentdb.rs +++ b/util/journaldb/src/overlayrecentdb.rs @@ -29,7 +29,7 @@ use keccak_hasher::KeccakHasher; use kvdb::{KeyValueDB, DBTransaction}; use memorydb::*; use parking_lot::RwLock; -use plain_hasher::H256FastMap; +use fastmap::H256FastMap; use rlp::{Rlp, RlpStream, encode, decode, DecoderError, Decodable, Encodable}; use super::{DB_PREFIX_LEN, LATEST_ERA_KEY, JournalDB, error_negatively_reference_hash}; use util::DatabaseKey; From 88141951224afdfb0b592816e5587b70dca6df5f Mon Sep 17 00:00:00 2001 From: Max Riveiro Date: Thu, 9 Aug 2018 18:54:08 +0300 Subject: [PATCH 11/48] Fix codecov.io badge in README (#9327) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9355cde7d..6c7378c57 100644 --- a/README.md +++ b/README.md @@ -3,7 +3,7 @@ ### [» Download the latest release «](https://github.com/paritytech/parity-ethereum/releases/latest) [![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) -[![codecov](https://codecov.io/gh/paritytech/parity/branch/master/graph/badge.svg)](https://codecov.io/gh/paritytech/parity) 
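Returning to the `fastmap` crate introduced in #9307 above: `H256FastMap` is, in essence, a `HashMap` keyed by `H256` whose hasher is deliberately cheap, because the keys are already uniformly distributed hashes; judging from the `use` lines in the new `lib.rs`, the alias boils down to `HashMap<H256, T, BuildHasherDefault<PlainHasher>>`. A small sketch of the same construction using only `std`, with a toy 32-byte key and a toy hasher standing in for `ethereum_types::H256` and `plain_hasher::PlainHasher`:

    use std::collections::HashMap;
    use std::hash::{BuildHasherDefault, Hasher};

    /// Toy stand-in for an H256 key: already a 32-byte hash.
    type H256 = [u8; 32];

    /// Cheap hasher: the key is already a hash, so just fold a few of its bytes.
    #[derive(Default)]
    struct PlainHasher(u64);

    impl Hasher for PlainHasher {
        fn finish(&self) -> u64 {
            self.0
        }
        fn write(&mut self, bytes: &[u8]) {
            // Take the first 8 bytes of the (already well-distributed) key.
            for (i, b) in bytes.iter().take(8).enumerate() {
                self.0 ^= (*b as u64) << (8 * i);
            }
        }
    }

    /// Specialized map with H256 keys and the cheap hashing function.
    type H256FastMap<T> = HashMap<H256, T, BuildHasherDefault<PlainHasher>>;

    fn main() {
        let mut map: H256FastMap<&str> = HashMap::default();
        map.insert([1u8; 32], "block one");
        assert_eq!(map.get(&[1u8; 32]), Some(&"block one"));
    }

The real crate reuses `plain_hasher::PlainHasher` for the hashing step; the sketch only mirrors the idea of swapping SipHash for a trivial fold.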
+[![codecov](https://codecov.io/gh/paritytech/parity-ethereum/branch/master/graph/badge.svg)](https://codecov.io/gh/paritytech/parity-ethereum) [![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity) [![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html) From 62fdfb937a4cc7e8b805c77f2fc53f39b2b33cda Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Thu, 9 Aug 2018 23:13:28 +0200 Subject: [PATCH 12/48] Allow tx pool to be Send (#9315) --- transaction-pool/Cargo.toml | 2 +- transaction-pool/src/lib.rs | 2 +- transaction-pool/src/scoring.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 8af887d3c..f56a70236 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Generic transaction pool." name = "transaction-pool" -version = "1.12.1" +version = "1.12.2" license = "GPL-3.0" authors = ["Parity Technologies "] diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs index ea77debfa..c23c6662c 100644 --- a/transaction-pool/src/lib.rs +++ b/transaction-pool/src/lib.rs @@ -111,7 +111,7 @@ pub trait VerifiedTransaction: fmt::Debug { type Hash: fmt::Debug + fmt::LowerHex + Eq + Clone + Hash; /// Transaction sender type. - type Sender: fmt::Debug + Eq + Clone + Hash; + type Sender: fmt::Debug + Eq + Clone + Hash + Send; /// Transaction hash fn hash(&self) -> &Self::Hash; diff --git a/transaction-pool/src/scoring.rs b/transaction-pool/src/scoring.rs index 25189604c..8bc44a732 100644 --- a/transaction-pool/src/scoring.rs +++ b/transaction-pool/src/scoring.rs @@ -83,7 +83,7 @@ pub enum Change { /// pub trait Scoring: fmt::Debug { /// A score of a transaction. - type Score: cmp::Ord + Clone + Default + fmt::Debug; + type Score: cmp::Ord + Clone + Default + fmt::Debug + Send; /// Custom scoring update event type. 
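On the `+ Send` bounds added to `VerifiedTransaction::Sender` and `Scoring::Score` just above: requiring the associated types to be `Send` is what lets a pool that is generic over those traits be handed to another thread. A small self-contained illustration of the mechanism with a toy trait and pool (not the transaction-pool API):

    use std::thread;

    /// Toy version of a trait with an associated type. The `Send` bound on
    /// `Score` mirrors the change above: it guarantees the property for
    /// every implementation, not just this one.
    trait Scoring {
        type Score: Ord + Clone + Default + Send;
        fn score(&self, gas_price: u64) -> Self::Score;
    }

    struct GasPrice;

    impl Scoring for GasPrice {
        type Score = u64;
        fn score(&self, gas_price: u64) -> u64 {
            gas_price
        }
    }

    /// Toy pool holding scores produced by the scoring implementation.
    struct Pool<S: Scoring> {
        scoring: S,
        scores: Vec<S::Score>,
    }

    fn main() {
        let mut pool = Pool { scoring: GasPrice, scores: Vec::new() };
        let s = pool.scoring.score(21_000);
        pool.scores.push(s);

        // Moving the pool into a worker thread needs `Pool<GasPrice>: Send`,
        // which in turn needs the score type to be `Send`.
        let handle = thread::spawn(move || pool.scores.len());
        assert_eq!(handle.join().unwrap(), 1);
    }

Putting the bound on the trait itself means every implementation pays the constraint up front, so generic code over `Scoring` does not have to repeat `S::Score: Send` at each use site.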
type Event: fmt::Debug; From b28e742683737ec776c154b79cde7f0d67033a0b Mon Sep 17 00:00:00 2001 From: Maciej Hirsz <1096222+maciejhirsz@users.noreply.github.com> Date: Thu, 9 Aug 2018 23:14:45 +0200 Subject: [PATCH 13/48] Update tobalaba.json (#9313) --- ethcore/res/ethereum/tobalaba.json | 31 ++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/ethcore/res/ethereum/tobalaba.json b/ethcore/res/ethereum/tobalaba.json index e9345c696..0f5778601 100644 --- a/ethcore/res/ethereum/tobalaba.json +++ b/ethcore/res/ethereum/tobalaba.json @@ -16,7 +16,16 @@ "gasLimitBoundDivisor": "0x400", "minGasLimit": "0x1388", "networkID": "0x62121", - "wasmActivationTransition": 4000000 + "wasmActivationTransition": 6666666, + "eip140Transition": 6666666, + "eip211Transition": 6666666, + "eip214Transition": 6666666, + "eip658Transition": 6666666, + + "maxCodeSize": 24576, + "maxCodeSizeTransition": 6666666, + + "registrar": "0xb8624dc8cb3ca3147c178ac4c21734eb49e04071" }, "genesis": { "seal": { @@ -43,12 +52,22 @@ }, "0x4ba15b56452521c0826a35a6f2022e1210fc519b": { "balance": "0x7E37BE2022B2B09472D89C0000" - } + }, + + "0x0000000000000000000000000000000000000001": { "builtin": { "name": "ecrecover", "activate_at": 6666666, "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0x0000000000000000000000000000000000000002": { "builtin": { "name": "sha256", "activate_at": 6666666, "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0x0000000000000000000000000000000000000003": { "builtin": { "name": "ripemd160", "activate_at": 6666666, "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0x0000000000000000000000000000000000000004": { "builtin": { "name": "identity", "activate_at": 6666666, "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0x0000000000000000000000000000000000000005": { "builtin": { "name": "modexp", "activate_at": 6666666, "pricing": { "modexp": { "divisor": 20 } } } }, + "0x0000000000000000000000000000000000000006": { "builtin": { "name": "alt_bn128_add", "activate_at": 6666666, "pricing": { "linear": { "base": 500, "word": 0 } } } }, + "0x0000000000000000000000000000000000000007": { "builtin": { "name": "alt_bn128_mul", "activate_at": 6666666, "pricing": { "linear": { "base": 40000, "word": 0 } } } }, + "0x0000000000000000000000000000000000000008": { "builtin": { "name": "alt_bn128_pairing", "activate_at": 6666666, "pricing": { "alt_bn128_pairing": { "base": 100000, "pair": 80000 } } } } }, + "nodes": [ - "enode://147573f46fe9f5cc38fbe070089a31390baec5dd2827c8f2ef168833e4d0254fbee3969a02c5b9910ea5d5b23d86a6ed5eabcda17cc12007b7d9178b6c697aa5@37.120.168.56:30303", - "enode://a370d5fd55959f20af6d1565b151a760c1372f5a2aaf674d4892cd4fd2de0d1f672781cd40e0d4e4b51c5823527ddec73b31cc14ac685449d9f0866996a16b9f@13.76.165.180:30303", - "enode://da019fa5fb1fda105100d68a986938ec15ac5c6ff69d6e4ad3e350e377057f3e67e33aea5feb22d5cdcfc22041d141c8453c77baa64a216fff98f191ca76b3ec@18.220.108.238:30303", - "enode://49498fb8cdcd79c813ccdaa9496a3a4be0a187a3183e99adbc04d9c90b9a62ad59f0b6832f6e43b48e63fbebf74ec5438eb0d6d9098330edf36413d276fedf81@13.80.148.117:30303" + "enode://eda34244538d72f42605a6fc8b8a34b15714c683989e8b29dc9e7a2b2088da490a5b32f2c149bec5a5c482bf03ec2c4f38b833ae31e36fcb26fb05fd094b2a88@18.197.33.9:30303", + "enode://12e903e900137b02b22e01f7918bd6e7310773c313e4e577281f35597e394a3e0b54c7314a8970a9776c5a3e5dc4daee289215dea3897bcb6d5cf0bb1dd2d356@18.197.31.231:30303", + 
"enode://423fdb91b37ec0714af0c19f625ec4af3ada2844367a36e45a05703577a84f7f0e9483585d4950a35c9e3738dba8c6abd7e1ce278d9a1f3f28065bc009f409cd@52.221.203.209:30303", + "enode://a9327d37d07799817d4a3e13d49fb4f5cc1486d4adf3ec8a6b98be62c4d7a5453914a5139dbe124809a388514cb0be37f9fa799539abe2250672f6d3d778b821@18.191.209.251:30303" ] } From e590874a8174a1cf47e1d82410deaa50cc4c64f3 Mon Sep 17 00:00:00 2001 From: Nick Sanders Date: Thu, 9 Aug 2018 16:04:10 -0700 Subject: [PATCH 14/48] Update `log` -> 0.4, `env_logger` -> 0.5. (#9294) * Rename a few types & methods. * Change `(Log)Builder::format` (closure) arg. --- Cargo.lock | 144 ++++++++++++++++++++--------- Cargo.toml | 4 +- ethash/Cargo.toml | 2 +- ethcore/Cargo.toml | 2 +- ethcore/evm/Cargo.toml | 2 +- ethcore/light/Cargo.toml | 2 +- ethcore/node_filter/Cargo.toml | 2 +- ethcore/private-tx/Cargo.toml | 2 +- ethcore/service/Cargo.toml | 2 +- ethcore/src/block.rs | 4 +- ethcore/stratum/Cargo.toml | 4 +- ethcore/sync/Cargo.toml | 4 +- ethcore/sync/src/tests/chain.rs | 18 ++-- ethcore/sync/src/tests/snapshot.rs | 2 +- ethcore/vm/Cargo.toml | 2 +- ethcore/wasm/Cargo.toml | 2 +- ethkey/Cargo.toml | 2 +- ethkey/cli/Cargo.toml | 2 +- ethkey/cli/src/main.rs | 2 +- ethstore/Cargo.toml | 2 +- hash-fetch/Cargo.toml | 2 +- hw/Cargo.toml | 2 +- local-store/Cargo.toml | 2 +- logger/Cargo.toml | 4 +- logger/src/lib.rs | 22 ++--- logger/src/rotating.rs | 8 +- miner/Cargo.toml | 4 +- price-info/Cargo.toml | 2 +- rpc/Cargo.toml | 2 +- rpc_client/Cargo.toml | 2 +- secret_store/Cargo.toml | 2 +- transaction-pool/Cargo.toml | 2 +- updater/Cargo.toml | 2 +- util/io/Cargo.toml | 2 +- util/journaldb/Cargo.toml | 2 +- util/migration-rocksdb/Cargo.toml | 2 +- util/network-devp2p/Cargo.toml | 2 +- util/stats/Cargo.toml | 2 +- util/trace-time/Cargo.toml | 2 +- whisper/Cargo.toml | 2 +- whisper/cli/Cargo.toml | 2 +- 41 files changed, 168 insertions(+), 110 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1760b3b92..f529f2bc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -401,11 +401,14 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.4.3" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", - "regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -471,7 +474,7 @@ dependencies = [ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -528,7 +531,7 @@ dependencies = [ "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-rocksdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "lazy_static 
1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "memory-cache 0.1.0", @@ -577,7 +580,7 @@ version = "1.12.0" dependencies = [ "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -606,7 +609,7 @@ dependencies = [ "keccak-hasher 0.1.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memory-cache 0.1.0", "memorydb 0.2.1 (git+https://github.com/paritytech/parity-common)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", @@ -632,9 +635,9 @@ dependencies = [ "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "atty 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", @@ -645,7 +648,7 @@ name = "ethcore-miner" version = "1.12.0" dependencies = [ "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethash 1.12.0", "ethcore-transaction 0.1.0", @@ -658,7 +661,7 @@ dependencies = [ "hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "linked-hash-map 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "price-info 1.12.0", @@ -702,7 +705,7 @@ dependencies = [ "ipnetwork 0.12.7 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 
(registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.14 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", @@ -740,7 +743,7 @@ dependencies = [ "fetch 0.1.0", "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -778,7 +781,7 @@ dependencies = [ "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-rocksdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -808,7 +811,7 @@ dependencies = [ "ethcore-sync 1.12.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-rocksdb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "stop-guard 0.1.0", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", @@ -818,14 +821,14 @@ dependencies = [ name = "ethcore-stratum" version = "1.12.0" dependencies = [ - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore-logger 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 8.0.1 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-tcp-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-core 0.1.17 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-io 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -835,7 +838,7 @@ dependencies = [ name = "ethcore-sync" version = "1.12.0" dependencies = [ - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-io 1.12.0", "ethcore-light 1.12.0", @@ -853,7 +856,7 @@ dependencies = [ "keccak-hasher 0.1.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -923,7 +926,7 @@ dependencies = [ "eth-secp256k1 0.5.7 (git+https://github.com/paritytech/rust-secp256k1)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "mem 0.1.0", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-wordlist 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -940,7 +943,7 @@ name = "ethkey-cli" version = "0.1.0" dependencies = [ "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", "panic_hook 0.1.0", "parity-wordlist 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -959,7 +962,7 @@ dependencies = [ "ethkey 0.3.0", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-wordlist 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1000,7 +1003,7 @@ dependencies = [ "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memory-cache 0.1.0", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1169,7 +1172,7 @@ dependencies = [ "ethkey 0.3.0", "hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)", "libusb 0.3.0 (git+https://github.com/paritytech/libusb-rs)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "protobuf 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1221,6 +1224,14 @@ name = "httparse" version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "humantime" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "hyper" version = "0.11.24" @@ -1338,7 +1349,7 @@ dependencies = [ "keccak-hasher 0.1.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-memorydb 0.1.0 
(git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memorydb 0.2.1 (git+https://github.com/paritytech/parity-common)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1676,7 +1687,7 @@ version = "0.1.0" dependencies = [ "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-rocksdb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1805,7 +1816,7 @@ dependencies = [ "ethcore-network-devp2p 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1963,7 +1974,7 @@ dependencies = [ "daemonize 0.2.3 (git+https://github.com/paritytech/daemonize)", "dir 0.1.1", "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-io 1.12.0", "ethcore-light 1.12.0", @@ -1987,7 +1998,7 @@ dependencies = [ "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-rocksdb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "mem 0.1.0", "migration-rocksdb 0.1.0", "node-filter 1.12.0", @@ -2039,7 +2050,7 @@ dependencies = [ "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "mime 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", "mime_guess 2.0.0-alpha.2 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", @@ -2075,7 +2086,7 @@ dependencies = [ "ethkey 0.3.0", "kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2133,7 +2144,7 @@ dependencies = [ "jsonrpc-ws-server 8.0.0 
(git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "kvdb-memorydb 0.1.0 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "macros 0.1.0", "multihash 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2169,7 +2180,7 @@ dependencies = [ "jsonrpc-core 8.0.1 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-ws-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-rpc 1.12.0", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2207,7 +2218,7 @@ dependencies = [ "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "matches 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parity-hash-fetch 1.12.0", @@ -2253,7 +2264,7 @@ dependencies = [ "jsonrpc-core 8.0.1 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-macros 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "mem 0.1.0", "ordered-float 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-crypto 0.1.0 (git+https://github.com/paritytech/parity-common)", @@ -2409,7 +2420,7 @@ dependencies = [ "futures 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)", "futures-cpupool 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2598,11 +2609,31 @@ dependencies = [ "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "regex" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "aho-corasick 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)", + "memchr 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)", + "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "thread_local 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", + "utf8-ranges 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] 
name = "regex-syntax" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "regex-syntax" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "registrar" version = "0.0.1" @@ -2939,7 +2970,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" name = "stats" version = "0.1.0" dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3001,6 +3032,14 @@ dependencies = [ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "termcolor" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "termion" version = "1.5.1" @@ -3282,7 +3321,7 @@ dependencies = [ name = "trace-time" version = "0.1.0" dependencies = [ - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -3291,7 +3330,7 @@ version = "1.12.1" dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", ] @@ -3343,6 +3382,11 @@ dependencies = [ "triehash 0.2.0 (git+https://github.com/paritytech/parity-common)", ] +[[package]] +name = "ucd-util" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "uint" version = "0.2.1" @@ -3470,7 +3514,7 @@ dependencies = [ "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethjson 0.1.0", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "patricia-trie 0.2.1 (git+https://github.com/paritytech/parity-common)", "patricia-trie-ethereum 0.1.0", @@ -3490,7 +3534,7 @@ dependencies = [ "ethcore-logger 1.12.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "parity-wasm 0.31.0 (registry+https://github.com/rust-lang/crates.io-index)", "pwasm-utils 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", @@ -3537,7 +3581,7 @@ dependencies = [ "jsonrpc-core 8.0.1 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-http-server 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", "jsonrpc-pubsub 8.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.11)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "panic_hook 0.1.0", 
"parity-whisper 0.1.0", "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3573,6 +3617,14 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "wincolor" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "ws" version = "0.7.5" @@ -3665,7 +3717,7 @@ dependencies = [ "checksum edit-distance 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a34f5204fbc13582de418611cf3a7dcdd07c6d312a5b631597ba72c06b9d9c9" "checksum either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "740178ddf48b1a9e878e6d6509a1442a2d42fd2928aae8e7a6f8a36fb01981b3" "checksum elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "88d4851b005ef16de812ea9acdb7bece2f0a40dd86c07b85631d7dafa54537bb" -"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b" +"checksum env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)" = "f4d7e69c283751083d53d01eac767407343b8b69c4bd70058e08adc2637cb257" "checksum error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff511d5dc435d703f4971bc399647c9bc38e20cb41452e3b9feb4765419ed3f3" "checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02" "checksum eth-secp256k1 0.5.7 (git+https://github.com/paritytech/rust-secp256k1)" = "" @@ -3694,6 +3746,7 @@ dependencies = [ "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "" "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" +"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" "checksum hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)" = "df4dd5dae401458087396b6db7fabc4d6760aa456a5fa8e92bda549f39cae661" "checksum hyper-rustls 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d6cdc1751771a14b8175764394f025e309a28c825ed9eaf97fa62bb831dc8c5" "checksum idna 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "014b298351066f1512874135335d62a789ffe78a9974f94b43ed5621951eaf7d" @@ -3800,7 +3853,9 @@ dependencies = [ "checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1" "checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76" "checksum regex 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "744554e01ccbd98fff8c457c3b092cd67af62a555a43bfe97ae8a0451f7799fa" +"checksum regex 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5bbbea44c5490a1e84357ff28b7d518b4619a159fed5d25f6c1de2d19cc42814" "checksum regex-syntax 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ad890a5eef7953f55427c50575c680c42841653abd2b028b68cd223d157f62db" +"checksum regex-syntax 0.6.2 
(registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d" "checksum relay 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1576e382688d7e9deecea24417e350d3062d97e32e45d70b1cde65994ff1489a" "checksum ring 0.12.1 (git+https://github.com/paritytech/ring)" = "" "checksum rlp 0.2.1 (git+https://github.com/paritytech/parity-common)" = "" @@ -3846,6 +3901,7 @@ dependencies = [ "checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" "checksum tempfile 2.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "11ce2fe9db64b842314052e2421ac61a73ce41b898dc8e3750398b219c5fc1e0" "checksum term_size 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9e5b9a66db815dcfd2da92db471106457082577c3c278d4138ab3e3b4e189327" +"checksum termcolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "722426c4a0539da2c4ffd9b419d90ad540b4cff4a053be9069c908d4d07e2836" "checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096" "checksum textwrap 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c0b59b6b4b44d867f1370ef1bd91bfb262bf07bf0ae65c202ea2fbc16153b693" "checksum thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c7fbf4c9d56b320106cd64fd024dadfa0be7cb4706725fc44a7d7ce952d820c1" @@ -3875,6 +3931,7 @@ dependencies = [ "checksum trezor-sys 1.0.0 (git+https://github.com/paritytech/trezor-sys)" = "" "checksum trie-standardmap 0.1.0 (git+https://github.com/paritytech/parity-common)" = "" "checksum triehash 0.2.0 (git+https://github.com/paritytech/parity-common)" = "" +"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d" "checksum uint 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "38051a96565903d81c9a9210ce11076b2218f3b352926baa1f5f6abbdfce8273" "checksum unicase 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" "checksum unicase 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "284b6d3db520d67fbe88fd778c21510d1b0ba4a551e5d0fbb023d33405f6de8a" @@ -3900,6 +3957,7 @@ dependencies = [ "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" "checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +"checksum wincolor 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b9dc3aa9dcda98b5a16150c54619c1ead22e3d3a5d458778ae914be760aa981a" "checksum ws 0.7.5 (git+https://github.com/tomusdrw/ws-rs)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" "checksum xdg 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a66b7c2281ebde13cf4391d70d4c7e5946c3c25e72a7b859ca8f677dcd0b0c61" diff --git a/Cargo.toml b/Cargo.toml index 26b387e80..b4355a1fd 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -8,8 +8,8 @@ authors = ["Parity Technologies "] [dependencies] blooms-db = { path = "util/blooms-db" } -log = "0.3" -env_logger = "0.4" +log = "0.4" +env_logger = "0.5" rustc-hex = "1.0" docopt = "0.8" clap = "2" diff --git a/ethash/Cargo.toml b/ethash/Cargo.toml index df0f17e0f..f4a99cd5e 100644 --- a/ethash/Cargo.toml +++ b/ethash/Cargo.toml @@ -6,7 +6,7 @@ authors = ["Parity Technologies "] [lib] [dependencies] -log = "0.3" +log = "0.4" keccak-hash = { git = "https://github.com/paritytech/parity-common" } primal = "0.2.3" parking_lot = "0.6" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 82dd2230d..2c57e8b60 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -39,7 +39,7 @@ evm = { path = "evm" } heapsize = "0.4" itertools = "0.5" lazy_static = "1.0" -log = "0.3" +log = "0.4" lru-cache = "0.1" num = { version = "0.1", default-features = false, features = ["bigint"] } num_cpus = "1.2" diff --git a/ethcore/evm/Cargo.toml b/ethcore/evm/Cargo.toml index 18c9a3907..ce9f644cc 100644 --- a/ethcore/evm/Cargo.toml +++ b/ethcore/evm/Cargo.toml @@ -8,7 +8,7 @@ bit-set = "0.4" ethereum-types = "0.3" heapsize = "0.4" lazy_static = "1.0" -log = "0.3" +log = "0.4" vm = { path = "../vm" } keccak-hash = { git = "https://github.com/paritytech/parity-common" } parking_lot = "0.6" diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 16f70de62..9224f07e9 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -7,7 +7,7 @@ version = "1.12.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" ethcore = { path = ".."} parity-bytes = { git = "https://github.com/paritytech/parity-common" } ethcore-transaction = { path = "../transaction" } diff --git a/ethcore/node_filter/Cargo.toml b/ethcore/node_filter/Cargo.toml index b6e3cfe55..adba8ee09 100644 --- a/ethcore/node_filter/Cargo.toml +++ b/ethcore/node_filter/Cargo.toml @@ -11,7 +11,7 @@ ethcore = { path = ".."} ethcore-network = { path = "../../util/network" } ethcore-network-devp2p = { path = "../../util/network-devp2p" } ethereum-types = "0.3" -log = "0.3" +log = "0.4" parking_lot = "0.6" ethabi = "5.1" ethabi-derive = "5.0" diff --git a/ethcore/private-tx/Cargo.toml b/ethcore/private-tx/Cargo.toml index e547c9808..2383443e7 100644 --- a/ethcore/private-tx/Cargo.toml +++ b/ethcore/private-tx/Cargo.toml @@ -23,7 +23,7 @@ ethkey = { path = "../../ethkey" } fetch = { path = "../../util/fetch" } futures = "0.1" keccak-hash = { git = "https://github.com/paritytech/parity-common" } -log = "0.3" +log = "0.4" parking_lot = "0.6" patricia-trie = { git = "https://github.com/paritytech/parity-common" } patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" } diff --git a/ethcore/service/Cargo.toml b/ethcore/service/Cargo.toml index 245bce787..e1ba1c81f 100644 --- a/ethcore/service/Cargo.toml +++ b/ethcore/service/Cargo.toml @@ -11,7 +11,7 @@ ethcore-io = { path = "../../util/io" } ethcore-private-tx = { path = "../private-tx" } ethcore-sync = { path = "../sync" } kvdb = { git = "https://github.com/paritytech/parity-common" } -log = "0.3" +log = "0.4" stop-guard = { path = "../../util/stop-guard" } trace-time = { path = "../../util/trace-time" } diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index 9fd3957fb..fc2873ba3 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -564,7 +564,7 @@ fn enact( ancestry: &mut Iterator, ) -> Result { { - if ::log::max_log_level() >= ::log::LogLevel::Trace { + if ::log::max_level() >= ::log::Level::Trace { 
let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(parent.number() + 1), factories.clone())?; trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", header.number(), s.root(), header.author(), s.balance(&header.author())?); @@ -659,7 +659,7 @@ mod tests { let transactions = transactions?; { - if ::log::max_log_level() >= ::log::LogLevel::Trace { + if ::log::max_level() >= ::log::Level::Trace { let s = State::from_existing(db.boxed_clone(), parent.state_root().clone(), engine.account_start_nonce(parent.number() + 1), factories.clone())?; trace!(target: "enact", "num={}, root={}, author={}, author_balance={}\n", header.number(), s.root(), header.author(), s.balance(&header.author())?); diff --git a/ethcore/stratum/Cargo.toml b/ethcore/stratum/Cargo.toml index 1da27c01a..0d64aa926 100644 --- a/ethcore/stratum/Cargo.toml +++ b/ethcore/stratum/Cargo.toml @@ -11,11 +11,11 @@ keccak-hash = { git = "https://github.com/paritytech/parity-common" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-macros = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-tcp-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } -log = "0.3" +log = "0.4" parking_lot = "0.6" [dev-dependencies] -env_logger = "0.4" +env_logger = "0.5" tokio-core = "0.1" tokio-io = "0.1" ethcore-logger = { path = "../../logger" } diff --git a/ethcore/sync/Cargo.toml b/ethcore/sync/Cargo.toml index 9cdd0d84b..6cc87df31 100644 --- a/ethcore/sync/Cargo.toml +++ b/ethcore/sync/Cargo.toml @@ -25,8 +25,8 @@ keccak-hasher = { path = "../../util/keccak-hasher" } triehash-ethereum = {version = "0.2", path = "../../util/triehash-ethereum" } kvdb = { git = "https://github.com/paritytech/parity-common" } macros = { path = "../../util/macros" } -log = "0.3" -env_logger = "0.4" +log = "0.4" +env_logger = "0.5" rand = "0.4" heapsize = "0.4" semver = "0.9" diff --git a/ethcore/sync/src/tests/chain.rs b/ethcore/sync/src/tests/chain.rs index 0d9c83f2f..0b6c8f7c2 100644 --- a/ethcore/sync/src/tests/chain.rs +++ b/ethcore/sync/src/tests/chain.rs @@ -22,7 +22,7 @@ use {SyncConfig, WarpSync}; #[test] fn two_peers() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); @@ -33,7 +33,7 @@ fn two_peers() { #[test] fn long_chain() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(2); net.peer(1).chain.add_blocks(50000, EachBlockWith::Nothing); net.sync(); @@ -43,7 +43,7 @@ fn long_chain() { #[test] fn status_after_sync() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); @@ -63,7 +63,7 @@ fn takes_few_steps() { #[test] fn empty_blocks() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); for n in 0..200 { let with = if n % 2 == 0 { EachBlockWith::Nothing } else { EachBlockWith::Uncle }; @@ -77,7 +77,7 @@ fn empty_blocks() { #[test] fn forked() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); net.peer(0).chain.add_blocks(30, EachBlockWith::Uncle); net.peer(1).chain.add_blocks(30, EachBlockWith::Uncle); @@ -98,7 +98,7 @@ fn forked() { 
#[test] fn forked_with_misbehaving_peer() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); let mut alt_spec = ::ethcore::spec::Spec::new_test(); @@ -122,7 +122,7 @@ fn forked_with_misbehaving_peer() { #[test] fn net_hard_fork() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let ref_client = TestBlockChainClient::new(); ref_client.add_blocks(50, EachBlockWith::Uncle); { @@ -141,7 +141,7 @@ fn net_hard_fork() { #[test] fn restart() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(3); net.peer(1).chain.add_blocks(1000, EachBlockWith::Uncle); net.peer(2).chain.add_blocks(1000, EachBlockWith::Uncle); @@ -255,7 +255,7 @@ fn high_td_attach() { #[test] fn disconnect_on_unrelated_chain() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut net = TestNet::new(2); net.peer(0).chain.set_history(Some(20)); net.peer(1).chain.set_history(Some(20)); diff --git a/ethcore/sync/src/tests/snapshot.rs b/ethcore/sync/src/tests/snapshot.rs index e6636c02f..2316745c1 100644 --- a/ethcore/sync/src/tests/snapshot.rs +++ b/ethcore/sync/src/tests/snapshot.rs @@ -141,7 +141,7 @@ impl SnapshotService for TestSnapshotService { #[test] fn snapshot_sync() { - ::env_logger::init().ok(); + ::env_logger::try_init().ok(); let mut config = SyncConfig::default(); config.warp_sync = WarpSync::Enabled; let mut net = TestNet::new_with_config(5, config); diff --git a/ethcore/vm/Cargo.toml b/ethcore/vm/Cargo.toml index 194f4600d..b455fb42d 100644 --- a/ethcore/vm/Cargo.toml +++ b/ethcore/vm/Cargo.toml @@ -9,7 +9,7 @@ parity-bytes = { git = "https://github.com/paritytech/parity-common" } ethereum-types = "0.3" patricia-trie = { git = "https://github.com/paritytech/parity-common" } patricia-trie-ethereum = { path = "../../util/patricia-trie-ethereum" } -log = "0.3" +log = "0.4" common-types = { path = "../types" } ethjson = { path = "../../json" } rlp = { git = "https://github.com/paritytech/parity-common" } diff --git a/ethcore/wasm/Cargo.toml b/ethcore/wasm/Cargo.toml index 5ca2f3122..f6268af66 100644 --- a/ethcore/wasm/Cargo.toml +++ b/ethcore/wasm/Cargo.toml @@ -6,7 +6,7 @@ authors = ["Parity Technologies "] [dependencies] byteorder = "1.0" ethereum-types = "0.3" -log = "0.3" +log = "0.4" parity-wasm = "0.31" libc = "0.2" pwasm-utils = "0.2.2" diff --git a/ethkey/Cargo.toml b/ethkey/Cargo.toml index 8449a54c3..37f7ca866 100644 --- a/ethkey/Cargo.toml +++ b/ethkey/Cargo.toml @@ -10,7 +10,7 @@ parity-crypto = { git = "https://github.com/paritytech/parity-common" } eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" } ethereum-types = "0.3" lazy_static = "1.0" -log = "0.3" +log = "0.4" mem = { path = "../util/mem" } parity-wordlist = "1.2" quick-error = "1.2.2" diff --git a/ethkey/cli/Cargo.toml b/ethkey/cli/Cargo.toml index 779ca2872..522d3f17a 100644 --- a/ethkey/cli/Cargo.toml +++ b/ethkey/cli/Cargo.toml @@ -5,7 +5,7 @@ authors = ["Parity Technologies "] [dependencies] docopt = "0.8" -env_logger = "0.4" +env_logger = "0.5" ethkey = { path = "../" } panic_hook = { path = "../../util/panic_hook" } parity-wordlist="1.2" diff --git a/ethkey/cli/src/main.rs b/ethkey/cli/src/main.rs index 555bc2d20..732732504 100644 --- a/ethkey/cli/src/main.rs +++ b/ethkey/cli/src/main.rs @@ -162,7 +162,7 @@ impl DisplayMode { fn main() { panic_hook::set_abort(); - env_logger::init().expect("Logger initialized only once."); + env_logger::try_init().expect("Logger initialized only once."); match 
execute(env::args()) { Ok(ok) => println!("{}", ok), diff --git a/ethstore/Cargo.toml b/ethstore/Cargo.toml index deeb5a946..f7bca47f5 100644 --- a/ethstore/Cargo.toml +++ b/ethstore/Cargo.toml @@ -4,7 +4,7 @@ version = "0.2.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" libc = "0.2" rand = "0.4" ethkey = { path = "../ethkey" } diff --git a/hash-fetch/Cargo.toml b/hash-fetch/Cargo.toml index c4eb7acd3..d539fd672 100644 --- a/hash-fetch/Cargo.toml +++ b/hash-fetch/Cargo.toml @@ -9,7 +9,7 @@ authors = ["Parity Technologies "] [dependencies] futures = "0.1" futures-cpupool = "0.1" -log = "0.3" +log = "0.4" mime = "0.3" mime_guess = "2.0.0-alpha.2" rand = "0.4" diff --git a/hw/Cargo.toml b/hw/Cargo.toml index b7d90648e..47dc06c41 100644 --- a/hw/Cargo.toml +++ b/hw/Cargo.toml @@ -7,7 +7,7 @@ version = "1.12.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" parking_lot = "0.6" protobuf = "1.4" hidapi = { git = "https://github.com/paritytech/hidapi-rs" } diff --git a/local-store/Cargo.toml b/local-store/Cargo.toml index 75717bed0..9197aa4e7 100644 --- a/local-store/Cargo.toml +++ b/local-store/Cargo.toml @@ -9,7 +9,7 @@ ethcore = { path = "../ethcore" } ethcore-io = { path = "../util/io" } ethcore-transaction = { path = "../ethcore/transaction" } kvdb = { git = "https://github.com/paritytech/parity-common" } -log = "0.3" +log = "0.4" rlp = { git = "https://github.com/paritytech/parity-common" } serde = "1.0" serde_derive = "1.0" diff --git a/logger/Cargo.toml b/logger/Cargo.toml index 3db404bf6..fd59fad7a 100644 --- a/logger/Cargo.toml +++ b/logger/Cargo.toml @@ -6,8 +6,8 @@ license = "GPL-3.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" -env_logger = "0.4" +log = "0.4" +env_logger = "0.5" atty = "0.2" lazy_static = "1.0" regex = "0.2" diff --git a/logger/src/lib.rs b/logger/src/lib.rs index 691839788..152d691f9 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -33,7 +33,7 @@ mod rotating; use std::{env, thread, fs}; use std::sync::{Weak, Arc}; use std::io::Write; -use env_logger::LogBuilder; +use env_logger::{Builder as LogBuilder, Formatter}; use regex::Regex; use ansi_term::Colour; use parking_lot::Mutex; @@ -68,12 +68,12 @@ pub fn setup_log(config: &Config) -> Result, String> { let mut levels = String::new(); let mut builder = LogBuilder::new(); // Disable info logging by default for some modules: - builder.filter(Some("ws"), LogLevelFilter::Warn); - builder.filter(Some("reqwest"), LogLevelFilter::Warn); - builder.filter(Some("hyper"), LogLevelFilter::Warn); - builder.filter(Some("rustls"), LogLevelFilter::Error); + builder.filter(Some("ws"), LevelFilter::Warn); + builder.filter(Some("reqwest"), LevelFilter::Warn); + builder.filter(Some("hyper"), LevelFilter::Warn); + builder.filter(Some("rustls"), LevelFilter::Error); // Enable info for others. 
- builder.filter(None, LogLevelFilter::Info); + builder.filter(None, LevelFilter::Info); if let Ok(lvl) = env::var("RUST_LOG") { levels.push_str(&lvl); @@ -99,10 +99,10 @@ pub fn setup_log(config: &Config) -> Result, String> { None => None, }; - let format = move |record: &LogRecord| { + let format = move |buf: &mut Formatter, record: &Record| { let timestamp = time::strftime("%Y-%m-%d %H:%M:%S %Z", &time::now()).unwrap(); - let with_color = if max_log_level() <= LogLevelFilter::Info { + let with_color = if max_level() <= LevelFilter::Info { format!("{} {}", Colour::Black.bold().paint(timestamp), record.args()) } else { let name = thread::current().name().map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); @@ -122,16 +122,16 @@ pub fn setup_log(config: &Config) -> Result, String> { let _ = file.write_all(b"\n"); } logger.append(removed_color); - if !isatty && record.level() <= LogLevel::Info && atty::is(atty::Stream::Stdout) { + if !isatty && record.level() <= Level::Info && atty::is(atty::Stream::Stdout) { // duplicate INFO/WARN output to console println!("{}", ret); } - ret + write!(buf, "{}", ret) }; builder.format(format); - builder.init() + builder.try_init() .and_then(|_| { *ROTATING_LOGGER.lock() = Arc::downgrade(&logs); Ok(logs) diff --git a/logger/src/rotating.rs b/logger/src/rotating.rs index ddc24792a..e8fde50d4 100644 --- a/logger/src/rotating.rs +++ b/logger/src/rotating.rs @@ -17,8 +17,8 @@ //! Common log helper functions use std::env; -use rlog::LogLevelFilter; -use env_logger::LogBuilder; +use rlog::LevelFilter; +use env_logger::Builder as LogBuilder; use arrayvec::ArrayVec; use parking_lot::{RwLock, RwLockReadGuard}; @@ -26,13 +26,13 @@ use parking_lot::{RwLock, RwLockReadGuard}; lazy_static! { static ref LOG_DUMMY: () = { let mut builder = LogBuilder::new(); - builder.filter(None, LogLevelFilter::Info); + builder.filter(None, LevelFilter::Info); if let Ok(log) = env::var("RUST_LOG") { builder.parse(&log); } - if !builder.init().is_ok() { + if !builder.try_init().is_ok() { println!("logger initialization failed!"); } }; diff --git a/miner/Cargo.toml b/miner/Cargo.toml index 0f4c2c2db..90d2c52f7 100644 --- a/miner/Cargo.toml +++ b/miner/Cargo.toml @@ -24,7 +24,7 @@ futures-cpupool = "0.1" heapsize = "0.4" keccak-hash = { git = "https://github.com/paritytech/parity-common" } linked-hash-map = "0.5" -log = "0.3" +log = "0.4" parking_lot = "0.6" price-info = { path = "../price-info", optional = true } rlp = { git = "https://github.com/paritytech/parity-common" } @@ -32,7 +32,7 @@ trace-time = { path = "../util/trace-time" } transaction-pool = { path = "../transaction-pool" } [dev-dependencies] -env_logger = "0.4" +env_logger = "0.5" ethkey = { path = "../ethkey" } rustc-hex = "1.0" diff --git a/price-info/Cargo.toml b/price-info/Cargo.toml index 7dc648516..b7ab6508f 100644 --- a/price-info/Cargo.toml +++ b/price-info/Cargo.toml @@ -10,7 +10,7 @@ authors = ["Parity Technologies "] fetch = { path = "../util/fetch" } futures = "0.1" futures-cpupool = "0.1" -log = "0.3" +log = "0.4" serde_json = "1.0" [dev-dependencies] diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 7e25871a3..b896a44ae 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -12,7 +12,7 @@ ansi_term = "0.10" cid = "0.2" futures = "0.1.6" futures-cpupool = "0.1" -log = "0.3" +log = "0.4" multihash ="0.7" order-stat = "0.1" parking_lot = "0.6" diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml index 6b0f4c2cc..ae3eca54d 100644 --- a/rpc_client/Cargo.toml +++ 
b/rpc_client/Cargo.toml @@ -8,7 +8,7 @@ version = "1.4.0" [dependencies] futures = "0.1" -log = "0.3.6" +log = "0.4" serde = "1.0" serde_json = "1.0" url = "1.2.0" diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index 85eda93e3..0c8c277c5 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Parity Technologies "] [dependencies] byteorder = "1.0" -log = "0.3" +log = "0.4" parking_lot = "0.6" hyper = { version = "0.11", default-features = false } serde = "1.0" diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index f56a70236..4540f62a7 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -7,7 +7,7 @@ authors = ["Parity Technologies "] [dependencies] error-chain = "0.12" -log = "0.3" +log = "0.4" smallvec = "0.4" trace-time = { path = "../util/trace-time", version = "0.1" } diff --git a/updater/Cargo.toml b/updater/Cargo.toml index b7c1aded9..b8d5c02a4 100644 --- a/updater/Cargo.toml +++ b/updater/Cargo.toml @@ -8,7 +8,7 @@ authors = ["Parity Technologies "] [dependencies] keccak-hash = { git = "https://github.com/paritytech/parity-common" } lazy_static = "1.0" -log = "0.3" +log = "0.4" ethabi = "5.1" ethabi-derive = "5.0" ethabi-contract = "5.0" diff --git a/util/io/Cargo.toml b/util/io/Cargo.toml index 0e1dfbbc1..ad50881dd 100644 --- a/util/io/Cargo.toml +++ b/util/io/Cargo.toml @@ -11,7 +11,7 @@ fnv = "1.0" mio = { version = "0.6.8", optional = true } crossbeam = "0.3" parking_lot = "0.6" -log = "0.3" +log = "0.4" slab = "0.4" num_cpus = "1.8" timer = "0.2" diff --git a/util/journaldb/Cargo.toml b/util/journaldb/Cargo.toml index 7e559e229..ce495e6dc 100644 --- a/util/journaldb/Cargo.toml +++ b/util/journaldb/Cargo.toml @@ -12,7 +12,7 @@ hashdb = { git = "https://github.com/paritytech/parity-common" } heapsize = "0.4" keccak-hasher = { path = "../keccak-hasher" } kvdb = { git = "https://github.com/paritytech/parity-common" } -log = "0.3" +log = "0.4" memorydb = { git = "https://github.com/paritytech/parity-common" } parking_lot = "0.6" fastmap = { path = "../../util/fastmap" } diff --git a/util/migration-rocksdb/Cargo.toml b/util/migration-rocksdb/Cargo.toml index 39ff50cfb..5d4c450a6 100644 --- a/util/migration-rocksdb/Cargo.toml +++ b/util/migration-rocksdb/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" macros = { path = "../macros" } kvdb = { git = "https://github.com/paritytech/parity-common" } kvdb-rocksdb = { git = "https://github.com/paritytech/parity-common" } diff --git a/util/network-devp2p/Cargo.toml b/util/network-devp2p/Cargo.toml index 99fdc1645..e7e4a3ae1 100644 --- a/util/network-devp2p/Cargo.toml +++ b/util/network-devp2p/Cargo.toml @@ -7,7 +7,7 @@ version = "1.12.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" mio = "0.6.8" bytes = "0.4" rand = "0.4" diff --git a/util/stats/Cargo.toml b/util/stats/Cargo.toml index 99e81c9e7..9997c7846 100644 --- a/util/stats/Cargo.toml +++ b/util/stats/Cargo.toml @@ -4,4 +4,4 @@ version = "0.1.0" authors = ["Parity Technologies "] [dependencies] -log = "0.3" +log = "0.4" diff --git a/util/trace-time/Cargo.toml b/util/trace-time/Cargo.toml index 288a2c4e4..7a2a93faa 100644 --- a/util/trace-time/Cargo.toml +++ b/util/trace-time/Cargo.toml @@ -6,4 +6,4 @@ authors = ["Parity Technologies "] license = "GPL-3.0" [dependencies] -log = "0.3" +log = "0.4" diff --git a/whisper/Cargo.toml b/whisper/Cargo.toml index 44882b4f5..4720bd6f8 100644 --- 
a/whisper/Cargo.toml +++ b/whisper/Cargo.toml @@ -12,7 +12,7 @@ ethcore-network = { path = "../util/network" } parity-crypto = { git = "https://github.com/paritytech/parity-common" } ethkey = { path = "../ethkey" } hex = "0.2" -log = "0.3" +log = "0.4" mem = { path = "../util/mem" } ordered-float = "0.5" parking_lot = "0.6" diff --git a/whisper/cli/Cargo.toml b/whisper/cli/Cargo.toml index 9aea1a877..ac44c53bc 100644 --- a/whisper/cli/Cargo.toml +++ b/whisper/cli/Cargo.toml @@ -17,7 +17,7 @@ panic_hook = { path = "../../util/panic_hook" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-pubsub = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" } -log = "0.3" +log = "0.4" [[bin]] name = "whisper" From 65a1d889072554dd8d41202366ad4177c1892731 Mon Sep 17 00:00:00 2001 From: Kirill Fomichev Date: Fri, 10 Aug 2018 11:45:04 +0300 Subject: [PATCH 15/48] Docker alpine: use multi-stage concept (#9269) * Docker alpine: use multi-stage concept * Docker alpine: create config directory --- docker/alpine/Dockerfile | 56 +++++++++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 21 deletions(-) diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile index ad375e5a9..47b37372e 100644 --- a/docker/alpine/Dockerfile +++ b/docker/alpine/Dockerfile @@ -1,29 +1,43 @@ -FROM alpine:edge - -WORKDIR /build - -# install tools and dependencies -RUN apk add --no-cache gcc musl-dev pkgconfig g++ make curl \ - eudev-dev rust cargo git file binutils \ - libusb-dev linux-headers perl cmake +FROM alpine:edge AS builder # show backtraces ENV RUST_BACKTRACE 1 -# show tools -RUN rustc -vV && \ -cargo -V && \ -gcc -v &&\ -g++ -v +RUN apk add --no-cache \ + build-base \ + cargo \ + cmake \ + eudev-dev \ + linux-headers \ + perl \ + rust -# build parity -ADD . /build/parity -RUN cd parity && \ - cargo build --release --verbose && \ - ls /build/parity/target/release/parity && \ - strip /build/parity/target/release/parity +WORKDIR /parity +COPY . /parity +RUN cargo build --release --target x86_64-alpine-linux-musl --verbose +RUN strip target/x86_64-alpine-linux-musl/release/parity -RUN file /build/parity/target/release/parity + +FROM alpine:edge + +# show backtraces +ENV RUST_BACKTRACE 1 + +RUN apk add --no-cache \ + libstdc++ \ + eudev-libs \ + libgcc + +RUN addgroup -g 1000 parity \ + && adduser -u 1000 -G parity -s /bin/sh -D parity + +USER parity EXPOSE 8080 8545 8180 -ENTRYPOINT ["/build/parity/target/release/parity"] + +WORKDIR /home/parity + +RUN mkdir -p /home/parity/.local/share/io.parity.ethereum/ +COPY --chown=parity:parity --from=builder /parity/target/x86_64-alpine-linux-musl/release/parity ./ + +ENTRYPOINT ["./parity"] From 30e40079ca67447b45c9de2b564d8c3604d888c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 10 Aug 2018 11:00:55 +0200 Subject: [PATCH 16/48] Prevent blockchain & miner racing when accessing pending block. (#9310) * Prevent blockchain & miner racing when accessing pending block. * Fix unavailability of pending block during reseal. 
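The pending-block availability fix is reflected in util/using_queue below: `push`/`pop_if` become `set_pending`/`get_pending_if`, and the pending block is now cloned rather than consumed, so it stays readable while a reseal is in progress. A minimal sketch of the new behaviour (illustrative integer payload, mirroring the updated unit tests):

    let mut q = UsingQueue::new(3);
    q.set_pending(1);                                   // previously `q.push(1)`
    assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); // returns a clone of the pending item
    assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); // still available; the old `pop_if` returned None here
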
--- ethcore/src/miner/miner.rs | 13 +++--- util/using_queue/src/lib.rs | 85 ++++++++++++++++++------------------- 2 files changed, 49 insertions(+), 49 deletions(-) diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 38acf9e1e..23c868ca9 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -319,14 +319,15 @@ impl Miner { /// Retrieves an existing pending block iff it's not older than given block number. /// /// NOTE: This will not prepare a new pending block if it's not existing. - /// See `map_pending_block` for alternative behaviour. fn map_existing_pending_block(&self, f: F, latest_block_number: BlockNumber) -> Option where F: FnOnce(&ClosedBlock) -> T, { self.sealing.lock().queue .peek_last_ref() .and_then(|b| { - if b.block().header().number() > latest_block_number { + // to prevent a data race between block import and updating pending block + // we allow the number to be equal. + if b.block().header().number() >= latest_block_number { Some(f(b)) } else { None @@ -365,7 +366,7 @@ impl Miner { // if at least one was pushed successfully, close and enqueue new ClosedBlock; // otherwise, leave everything alone. // otherwise, author a fresh block. - let mut open_block = match sealing.queue.pop_if(|b| b.block().header().parent_hash() == &best_hash) { + let mut open_block = match sealing.queue.get_pending_if(|b| b.block().header().parent_hash() == &best_hash) { Some(old_block) => { trace!(target: "miner", "prepare_block: Already have previous work; updating and returning"); // add transactions to old_block @@ -628,7 +629,7 @@ impl Miner { { let mut sealing = self.sealing.lock(); sealing.next_mandatory_reseal = Instant::now() + self.options.reseal_max_period; - sealing.queue.push(block.clone()); + sealing.queue.set_pending(block.clone()); sealing.queue.use_last_ref(); } @@ -690,7 +691,7 @@ impl Miner { ); let is_new = original_work_hash.map_or(true, |h| h != block_hash); - sealing.queue.push(block); + sealing.queue.set_pending(block); #[cfg(feature = "work-notify")] { @@ -1108,7 +1109,7 @@ impl miner::MinerService for Miner { Some(false) => { trace!(target: "miner", "update_sealing: engine is not keen to seal internally right now"); // anyway, save the block for later use - self.sealing.lock().queue.push(block); + self.sealing.lock().queue.set_pending(block); }, None => { trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); diff --git a/util/using_queue/src/lib.rs b/util/using_queue/src/lib.rs index b2c94b3f4..42fe1a33a 100644 --- a/util/using_queue/src/lib.rs +++ b/util/using_queue/src/lib.rs @@ -54,7 +54,7 @@ impl UsingQueue { /// Return a reference to the item at the top of the queue (or `None` if the queue is empty); /// this constitutes using the item and will remain in the queue for at least another - /// `max_size` invocations of `push()`. + /// `max_size` invocations of `set_pending() + use_last_ref()`. pub fn use_last_ref(&mut self) -> Option<&T> { if let Some(x) = self.pending.take() { self.in_use.push(x); @@ -65,9 +65,9 @@ impl UsingQueue { self.in_use.last() } - /// Place an item on the end of the queue. The previously `push()`ed item will be removed - /// if `use_last_ref()` since it was `push()`ed. - pub fn push(&mut self, b: T) { + /// Place an item on the end of the queue. The previously pending item will be removed + /// if `use_last_ref()` since it was set. 
+ pub fn set_pending(&mut self, b: T) { self.pending = Some(b); } @@ -100,17 +100,16 @@ impl UsingQueue { } } - /// Returns the most recently pushed block if `f` returns `true` with a reference to it as + /// Returns a clone of the pending block if `f` returns `true` with a reference to it as /// a parameter, otherwise `None`. - /// Will not destroy a block if a reference to it has previously been returned by `use_last_ref`, - /// but rather clone it. - pub fn pop_if

(&mut self, predicate: P) -> Option where P: Fn(&T) -> bool, T: Clone { + /// + /// If pending block is not available will clone the first of the used blocks that match the predicate. + pub fn get_pending_if

(&mut self, predicate: P) -> Option where P: Fn(&T) -> bool, T: Clone { // a bit clumsy - TODO: think about a nicer way of expressing this. - if let Some(x) = self.pending.take() { - if predicate(&x) { - Some(x) + if let Some(ref x) = self.pending { + if predicate(x) { + Some(x.clone()) } else { - self.pending = Some(x); None } } else { @@ -122,21 +121,21 @@ impl UsingQueue { #[test] fn should_not_find_when_pushed() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); assert!(q.take_used_if(|i| i == &1).is_none()); } #[test] fn should_not_find_when_pushed_with_clone() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); assert!(q.clone_used_if(|i| i == &1).is_none()); } #[test] fn should_find_when_pushed_and_used() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).unwrap() == 1); } @@ -144,7 +143,7 @@ fn should_find_when_pushed_and_used() { #[test] fn should_have_same_semantics_for_get_take_clone() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); assert!(q.get_used_if(GetAction::Clone, |i| i == &1).is_none()); assert!(q.get_used_if(GetAction::Take, |i| i == &1).is_none()); q.use_last_ref(); @@ -158,7 +157,7 @@ fn should_have_same_semantics_for_get_take_clone() { #[test] fn should_find_when_pushed_and_used_with_clone() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); q.use_last_ref(); assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); } @@ -166,7 +165,7 @@ fn should_find_when_pushed_and_used_with_clone() { #[test] fn should_not_find_again_when_pushed_and_taken() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).unwrap() == 1); assert!(q.clone_used_if(|i| i == &1).is_none()); @@ -175,7 +174,7 @@ fn should_not_find_again_when_pushed_and_taken() { #[test] fn should_find_again_when_pushed_and_cloned() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); q.use_last_ref(); assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); assert!(q.clone_used_if(|i| i == &1).unwrap() == 1); @@ -185,9 +184,9 @@ fn should_find_again_when_pushed_and_cloned() { #[test] fn should_find_when_others_used() { let mut q = UsingQueue::new(2); - q.push(1); + q.set_pending(1); q.use_last_ref(); - q.push(2); + q.set_pending(2); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).is_some()); } @@ -195,9 +194,9 @@ fn should_find_when_others_used() { #[test] fn should_not_find_when_too_many_used() { let mut q = UsingQueue::new(1); - q.push(1); + q.set_pending(1); q.use_last_ref(); - q.push(2); + q.set_pending(2); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).is_none()); } @@ -205,8 +204,8 @@ fn should_not_find_when_too_many_used() { #[test] fn should_not_find_when_not_used_and_then_pushed() { let mut q = UsingQueue::new(3); - q.push(1); - q.push(2); + q.set_pending(1); + q.set_pending(2); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).is_none()); } @@ -214,19 +213,19 @@ fn should_not_find_when_not_used_and_then_pushed() { #[test] fn should_peek_correctly_after_push() { let mut q = UsingQueue::new(3); - q.push(1); + q.set_pending(1); assert_eq!(q.peek_last_ref(), Some(&1)); - q.push(2); + q.set_pending(2); assert_eq!(q.peek_last_ref(), Some(&2)); } #[test] fn should_inspect_correctly() { let mut q = UsingQueue::new(3); - q.push(1); + q.set_pending(1); assert_eq!(q.use_last_ref(), Some(&1)); assert_eq!(q.peek_last_ref(), Some(&1)); - q.push(2); + q.set_pending(2); assert_eq!(q.use_last_ref(), 
Some(&2)); assert_eq!(q.peek_last_ref(), Some(&2)); } @@ -234,9 +233,9 @@ fn should_inspect_correctly() { #[test] fn should_not_find_when_not_used_peeked_and_then_pushed() { let mut q = UsingQueue::new(3); - q.push(1); + q.set_pending(1); q.peek_last_ref(); - q.push(2); + q.set_pending(2); q.use_last_ref(); assert!(q.take_used_if(|i| i == &1).is_none()); } @@ -244,34 +243,34 @@ fn should_not_find_when_not_used_peeked_and_then_pushed() { #[test] fn should_pop_used() { let mut q = UsingQueue::new(3); - q.push(1); + q.set_pending(1); q.use_last_ref(); - let popped = q.pop_if(|i| i == &1); + let popped = q.get_pending_if(|i| i == &1); assert_eq!(popped, Some(1)); } #[test] -fn should_pop_unused() { +fn should_not_pop_last_pending() { let mut q = UsingQueue::new(3); - q.push(1); - assert_eq!(q.pop_if(|i| i == &1), Some(1)); - assert_eq!(q.pop_if(|i| i == &1), None); + q.set_pending(1); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); } #[test] fn should_not_pop_unused_before_used() { let mut q = UsingQueue::new(3); - q.push(1); - q.push(2); - let popped = q.pop_if(|i| i == &1); + q.set_pending(1); + q.set_pending(2); + let popped = q.get_pending_if(|i| i == &1); assert_eq!(popped, None); } #[test] fn should_not_remove_used_popped() { let mut q = UsingQueue::new(3); - q.push(1); + q.set_pending(1); q.use_last_ref(); - assert_eq!(q.pop_if(|i| i == &1), Some(1)); - assert_eq!(q.pop_if(|i| i == &1), Some(1)); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); + assert_eq!(q.get_pending_if(|i| i == &1), Some(1)); } From 1564fae01152946f58e67658c0b5d884fedaecdc Mon Sep 17 00:00:00 2001 From: Pierre Krieger Date: Fri, 10 Aug 2018 11:06:30 +0200 Subject: [PATCH 17/48] Allow setting the panic hook with parity-clib (#9292) * Allow setting the panic hook with parity-clib * Make all FFI functions unsafe * Fix comment * Fix concern --- Cargo.lock | 1 + parity-clib/Cargo.toml | 1 + parity-clib/parity.h | 17 +++ parity-clib/src/lib.rs | 245 +++++++++++++++++++------------------ parity/lib.rs | 1 - parity/main.rs | 3 +- util/panic_hook/src/lib.rs | 43 ++++--- 7 files changed, 168 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f529f2bc6..a933343a7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1947,6 +1947,7 @@ source = "git+https://github.com/paritytech/parity-common#0045887fecd2fec39e56c9 name = "parity-clib" version = "1.12.0" dependencies = [ + "panic_hook 0.1.0", "parity-ethereum 2.1.0", ] diff --git a/parity-clib/Cargo.toml b/parity-clib/Cargo.toml index 3a1e95b5f..32ddf0ecd 100644 --- a/parity-clib/Cargo.toml +++ b/parity-clib/Cargo.toml @@ -10,6 +10,7 @@ name = "parity" crate-type = ["cdylib", "staticlib"] [dependencies] +panic_hook = { path = "../util/panic_hook" } parity-ethereum = { path = "../", default-features = false } [features] diff --git a/parity-clib/parity.h b/parity-clib/parity.h index f647395ce..9be077b4d 100644 --- a/parity-clib/parity.h +++ b/parity-clib/parity.h @@ -102,6 +102,23 @@ void parity_destroy(void* parity); /// int parity_rpc(void* parity, const char* rpc, size_t len, char* out_str, size_t* out_len); +/// Sets a callback to call when a panic happens in the Rust code. +/// +/// The callback takes as parameter the custom param (the one passed to this function), plus the +/// panic message. You are expected to log the panic message somehow, in order to communicate it to +/// the user. A panic always indicates a bug in Parity. 
+/// +/// Note that this method sets the panic hook for the whole program, and not just for Parity. In +/// other words, if you use multiple Rust libraries at once (and not just Parity), then a panic +/// in any Rust code will call this callback as well. +/// +/// ## Thread safety +/// +/// The callback can be called from any thread and multiple times simultaneously. Make sure that +/// your code is thread safe. +/// +int parity_set_panic_hook(void (*cb)(void* param, const char* msg, size_t msg_len), void* param); + #ifdef __cplusplus } #endif diff --git a/parity-clib/src/lib.rs b/parity-clib/src/lib.rs index 563eafd73..ad0c8a032 100644 --- a/parity-clib/src/lib.rs +++ b/parity-clib/src/lib.rs @@ -18,6 +18,7 @@ //! duplicating documentation. extern crate parity_ethereum; +extern crate panic_hook; use std::os::raw::{c_char, c_void, c_int}; use std::panic; @@ -33,132 +34,132 @@ pub struct ParityParams { } #[no_mangle] -pub extern fn parity_config_from_cli(args: *const *const c_char, args_lens: *const usize, len: usize, output: *mut *mut c_void) -> c_int { - unsafe { - panic::catch_unwind(|| { - *output = ptr::null_mut(); +pub unsafe extern fn parity_config_from_cli(args: *const *const c_char, args_lens: *const usize, len: usize, output: *mut *mut c_void) -> c_int { + panic::catch_unwind(|| { + *output = ptr::null_mut(); - let args = { - let arg_ptrs = slice::from_raw_parts(args, len); - let arg_lens = slice::from_raw_parts(args_lens, len); + let args = { + let arg_ptrs = slice::from_raw_parts(args, len); + let arg_lens = slice::from_raw_parts(args_lens, len); - let mut args = Vec::with_capacity(len + 1); - args.push("parity".to_owned()); + let mut args = Vec::with_capacity(len + 1); + args.push("parity".to_owned()); - for (&arg, &len) in arg_ptrs.iter().zip(arg_lens.iter()) { - let string = slice::from_raw_parts(arg as *const u8, len); - match String::from_utf8(string.to_owned()) { - Ok(a) => args.push(a), - Err(_) => return 1, - }; - } - - args - }; - - match parity_ethereum::Configuration::parse_cli(&args) { - Ok(mut cfg) => { - // Always disable the auto-updater when used as a library. 
- cfg.args.arg_auto_update = "none".to_owned(); - - let cfg = Box::into_raw(Box::new(cfg)); - *output = cfg as *mut _; - 0 - }, - Err(_) => { - 1 - }, - } - }).unwrap_or(1) - } -} - -#[no_mangle] -pub extern fn parity_config_destroy(cfg: *mut c_void) { - unsafe { - let _ = panic::catch_unwind(|| { - let _cfg = Box::from_raw(cfg as *mut parity_ethereum::Configuration); - }); - } -} - -#[no_mangle] -pub extern fn parity_start(cfg: *const ParityParams, output: *mut *mut c_void) -> c_int { - unsafe { - panic::catch_unwind(|| { - *output = ptr::null_mut(); - let cfg: &ParityParams = &*cfg; - - let config = Box::from_raw(cfg.configuration as *mut parity_ethereum::Configuration); - - let on_client_restart_cb = { - struct Cb(Option, *mut c_void); - unsafe impl Send for Cb {} - unsafe impl Sync for Cb {} - impl Cb { - fn call(&self, new_chain: String) { - if let Some(ref cb) = self.0 { - cb(self.1, new_chain.as_bytes().as_ptr() as *const _, new_chain.len()) - } - } - } - let cb = Cb(cfg.on_client_restart_cb, cfg.on_client_restart_cb_custom); - move |new_chain: String| { cb.call(new_chain); } - }; - - let action = match parity_ethereum::start(*config, on_client_restart_cb, || {}) { - Ok(action) => action, - Err(_) => return 1, - }; - - match action { - parity_ethereum::ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, - parity_ethereum::ExecutionAction::Instant(None) => 0, - parity_ethereum::ExecutionAction::Running(client) => { - *output = Box::into_raw(Box::::new(client)) as *mut c_void; - 0 - } - } - }).unwrap_or(1) - } -} - -#[no_mangle] -pub extern fn parity_destroy(client: *mut c_void) { - unsafe { - let _ = panic::catch_unwind(|| { - let client = Box::from_raw(client as *mut parity_ethereum::RunningClient); - client.shutdown(); - }); - } -} - -#[no_mangle] -pub extern fn parity_rpc(client: *mut c_void, query: *const char, len: usize, out_str: *mut c_char, out_len: *mut usize) -> c_int { - unsafe { - panic::catch_unwind(|| { - let client: &mut parity_ethereum::RunningClient = &mut *(client as *mut parity_ethereum::RunningClient); - - let query_str = { - let string = slice::from_raw_parts(query as *const u8, len); - match str::from_utf8(string) { - Ok(a) => a, + for (&arg, &len) in arg_ptrs.iter().zip(arg_lens.iter()) { + let string = slice::from_raw_parts(arg as *const u8, len); + match String::from_utf8(string.to_owned()) { + Ok(a) => args.push(a), Err(_) => return 1, - } - }; - - if let Some(output) = client.rpc_query_sync(query_str) { - let q_out_len = output.as_bytes().len(); - if *out_len < q_out_len { - return 1; - } - - ptr::copy_nonoverlapping(output.as_bytes().as_ptr(), out_str as *mut u8, q_out_len); - *out_len = q_out_len; - 0 - } else { - 1 + }; } - }).unwrap_or(1) + + args + }; + + match parity_ethereum::Configuration::parse_cli(&args) { + Ok(mut cfg) => { + // Always disable the auto-updater when used as a library. 
+ cfg.args.arg_auto_update = "none".to_owned(); + + let cfg = Box::into_raw(Box::new(cfg)); + *output = cfg as *mut _; + 0 + }, + Err(_) => { + 1 + }, + } + }).unwrap_or(1) +} + +#[no_mangle] +pub unsafe extern fn parity_config_destroy(cfg: *mut c_void) { + let _ = panic::catch_unwind(|| { + let _cfg = Box::from_raw(cfg as *mut parity_ethereum::Configuration); + }); +} + +#[no_mangle] +pub unsafe extern fn parity_start(cfg: *const ParityParams, output: *mut *mut c_void) -> c_int { + panic::catch_unwind(|| { + *output = ptr::null_mut(); + let cfg: &ParityParams = &*cfg; + + let config = Box::from_raw(cfg.configuration as *mut parity_ethereum::Configuration); + + let on_client_restart_cb = { + let cb = CallbackStr(cfg.on_client_restart_cb, cfg.on_client_restart_cb_custom); + move |new_chain: String| { cb.call(&new_chain); } + }; + + let action = match parity_ethereum::start(*config, on_client_restart_cb, || {}) { + Ok(action) => action, + Err(_) => return 1, + }; + + match action { + parity_ethereum::ExecutionAction::Instant(Some(s)) => { println!("{}", s); 0 }, + parity_ethereum::ExecutionAction::Instant(None) => 0, + parity_ethereum::ExecutionAction::Running(client) => { + *output = Box::into_raw(Box::::new(client)) as *mut c_void; + 0 + } + } + }).unwrap_or(1) +} + +#[no_mangle] +pub unsafe extern fn parity_destroy(client: *mut c_void) { + let _ = panic::catch_unwind(|| { + let client = Box::from_raw(client as *mut parity_ethereum::RunningClient); + client.shutdown(); + }); +} + +#[no_mangle] +pub unsafe extern fn parity_rpc(client: *mut c_void, query: *const char, len: usize, out_str: *mut c_char, out_len: *mut usize) -> c_int { + panic::catch_unwind(|| { + let client: &mut parity_ethereum::RunningClient = &mut *(client as *mut parity_ethereum::RunningClient); + + let query_str = { + let string = slice::from_raw_parts(query as *const u8, len); + match str::from_utf8(string) { + Ok(a) => a, + Err(_) => return 1, + } + }; + + if let Some(output) = client.rpc_query_sync(query_str) { + let q_out_len = output.as_bytes().len(); + if *out_len < q_out_len { + return 1; + } + + ptr::copy_nonoverlapping(output.as_bytes().as_ptr(), out_str as *mut u8, q_out_len); + *out_len = q_out_len; + 0 + } else { + 1 + } + }).unwrap_or(1) +} + +#[no_mangle] +pub unsafe extern fn parity_set_panic_hook(callback: extern "C" fn(*mut c_void, *const c_char, usize), param: *mut c_void) { + let cb = CallbackStr(Some(callback), param); + panic_hook::set_with(move |panic_msg| { + cb.call(panic_msg); + }); +} + +// Internal structure for handling callbacks that get passed a string. 
+struct CallbackStr(Option, *mut c_void); +unsafe impl Send for CallbackStr {} +unsafe impl Sync for CallbackStr {} +impl CallbackStr { + fn call(&self, new_chain: &str) { + if let Some(ref cb) = self.0 { + cb(self.1, new_chain.as_bytes().as_ptr() as *const _, new_chain.len()) + } } } diff --git a/parity/lib.rs b/parity/lib.rs index 84cacf07e..a2ea11ffe 100644 --- a/parity/lib.rs +++ b/parity/lib.rs @@ -57,7 +57,6 @@ extern crate ethcore_transaction as transaction; extern crate ethereum_types; extern crate ethkey; extern crate kvdb; -extern crate panic_hook; extern crate parity_hash_fetch as hash_fetch; extern crate parity_ipfs_api; extern crate parity_local_store as local_store; diff --git a/parity/main.rs b/parity/main.rs index 9256373ca..1254c3472 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -253,7 +253,8 @@ fn main_direct(force_can_restart: bool) -> i32 { panic_hook::set_with({ let e = exit.clone(); let exiting = exiting.clone(); - move || { + move |panic_msg| { + let _ = stdio::stderr().write_all(panic_msg.as_bytes()); if !exiting.swap(true, Ordering::SeqCst) { *e.0.lock() = ExitStatus { panicking: true, diff --git a/util/panic_hook/src/lib.rs b/util/panic_hook/src/lib.rs index cc7ed7ded..0c8552509 100644 --- a/util/panic_hook/src/lib.rs +++ b/util/panic_hook/src/lib.rs @@ -24,16 +24,26 @@ use std::thread; use std::process; use backtrace::Backtrace; -/// Set the panic hook +/// Set the panic hook to write to stderr and abort the process when a panic happens. pub fn set_abort() { - set_with(|| process::abort()); + set_with(|msg| { + let _ = io::stderr().write_all(msg.as_bytes()); + process::abort() + }); } -/// Set the panic hook with a closure to be called afterwards. -pub fn set_with(f: F) { +/// Set the panic hook with a closure to be called. The closure receives the panic message. +/// +/// Depending on how Parity was compiled, after the closure has been executed, either the process +/// aborts or unwinding starts. +/// +/// If you panic within the closure, a double panic happens and the process will stop. +pub fn set_with(f: F) +where F: Fn(&str) + Send + Sync + 'static +{ panic::set_hook(Box::new(move |info| { - panic_hook(info); - f(); + let msg = gen_panic_msg(info); + f(&msg); })); } @@ -43,7 +53,7 @@ This is a bug. 
Please report it at: https://github.com/paritytech/parity-ethereum/issues/new "; -fn panic_hook(info: &PanicInfo) { +fn gen_panic_msg(info: &PanicInfo) -> String { let location = info.location(); let file = location.as_ref().map(|l| l.file()).unwrap_or(""); let line = location.as_ref().map(|l| l.line()).unwrap_or(0); @@ -61,18 +71,13 @@ fn panic_hook(info: &PanicInfo) { let backtrace = Backtrace::new(); - let mut stderr = io::stderr(); + format!(r#" - let _ = writeln!(stderr, ""); - let _ = writeln!(stderr, "===================="); - let _ = writeln!(stderr, ""); - let _ = writeln!(stderr, "{:?}", backtrace); - let _ = writeln!(stderr, ""); - let _ = writeln!( - stderr, - "Thread '{}' panicked at '{}', {}:{}", - name, msg, file, line - ); +==================== - let _ = writeln!(stderr, "{}", ABOUT_PANIC); +{backtrace:?} + +Thread '{name}' panicked at '{msg}', {file}:{line} +{about} +"#, backtrace = backtrace, name = name, msg = msg, file = file, line = line, about = ABOUT_PANIC) } From 6e2821b4db199f488a8bba28ac83ab29a8bd83ff Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 10 Aug 2018 14:31:48 +0200 Subject: [PATCH 18/48] ethcore/sync `Make view macro only visible to test` (#9316) * remove needless macro import * enable ethcore/macros in tests --- Cargo.lock | 6 +++--- ethcore/sync/src/lib.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a933343a7..aa96f9fe2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -668,7 +668,7 @@ dependencies = [ "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", - "transaction-pool 1.12.1", + "transaction-pool 1.12.2", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2168,7 +2168,7 @@ dependencies = [ "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "transaction-pool 1.12.1", + "transaction-pool 1.12.2", "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", ] @@ -3327,7 +3327,7 @@ dependencies = [ [[package]] name = "transaction-pool" -version = "1.12.1" +version = "1.12.2" dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index eb38d09d9..236432ca8 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -26,7 +26,7 @@ extern crate ethcore_network_devp2p as devp2p; extern crate parity_bytes as bytes; extern crate ethcore_io as io; extern crate ethcore_transaction as transaction; -#[macro_use] +#[cfg_attr(test, macro_use)] extern crate ethcore; extern crate ethereum_types; extern crate env_logger; From 4eab8672b840b86007bb71878f3f77b096f354ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Fri, 10 Aug 2018 14:36:19 +0100 Subject: [PATCH 19/48] ethcore: fix pow difficulty validation (#9328) * ethcore: fix pow difficulty validation * ethcore: validate difficulty is not zero * ethcore: add issue link to regression test * ethcore: fix tests * ethcore: move difficulty_to_boundary to ethash crate * ethcore: reuse difficulty_to_boundary and boundary_to_difficulty * ethcore: fix grumbles in difficulty_to_boundary_aux --- Cargo.lock | 1 + 
ethash/Cargo.toml | 11 +++--- ethash/src/lib.rs | 66 ++++++++++++++++++++++++++++++++-- ethcore/src/ethereum/ethash.rs | 35 ++---------------- ethcore/src/miner/stratum.rs | 5 ++- json/src/spec/ethash.rs | 1 + miner/src/work_notify.rs | 11 +----- rpc/src/v1/impls/eth.rs | 5 ++- 8 files changed, 80 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa96f9fe2..9e95c9ea8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -473,6 +473,7 @@ version = "1.12.0" dependencies = [ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", "either 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)", "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethash/Cargo.toml b/ethash/Cargo.toml index f4a99cd5e..772f12d4b 100644 --- a/ethash/Cargo.toml +++ b/ethash/Cargo.toml @@ -6,13 +6,14 @@ authors = ["Parity Technologies "] [lib] [dependencies] -log = "0.4" -keccak-hash = { git = "https://github.com/paritytech/parity-common" } -primal = "0.2.3" -parking_lot = "0.6" crunchy = "0.1.0" -memmap = "0.6" either = "1.0.0" +ethereum-types = "0.3" +keccak-hash = { git = "https://github.com/paritytech/parity-common" } +log = "0.4" +memmap = "0.6" +parking_lot = "0.6" +primal = "0.2.3" [dev-dependencies] tempdir = "0.3" diff --git a/ethash/src/lib.rs b/ethash/src/lib.rs index 69b5a1d11..29361ad5c 100644 --- a/ethash/src/lib.rs +++ b/ethash/src/lib.rs @@ -16,10 +16,11 @@ #![cfg_attr(feature = "benches", feature(test))] -extern crate primal; -extern crate parking_lot; extern crate either; +extern crate ethereum_types; extern crate memmap; +extern crate parking_lot; +extern crate primal; #[macro_use] extern crate crunchy; @@ -38,6 +39,7 @@ mod shared; pub use cache::{NodeCacheBuilder, OptimizeFor}; pub use compute::{ProofOfWork, quick_get_difficulty, slow_hash_block_number}; use compute::Light; +use ethereum_types::{U256, U512}; use keccak::H256; use parking_lot::Mutex; pub use seed_compute::SeedHashCompute; @@ -136,6 +138,29 @@ impl EthashManager { } } +/// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`. +pub fn boundary_to_difficulty(boundary: ðereum_types::H256) -> U256 { + difficulty_to_boundary_aux(&**boundary) +} + +/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`. 
+pub fn difficulty_to_boundary(difficulty: &U256) -> ethereum_types::H256 {
+	difficulty_to_boundary_aux(difficulty).into()
+}
+
+fn difficulty_to_boundary_aux<T: Into<U512>>(difficulty: T) -> ethereum_types::U256 {
+	let difficulty = difficulty.into();
+
+	assert!(!difficulty.is_zero());
+
+	if difficulty == U512::one() {
+		U256::max_value()
+	} else {
+		// difficulty > 1, so result should never overflow 256 bits
+		U256::from((U512::one() << 256) / difficulty)
+	}
+}
+
 #[test]
 fn test_lru() {
 	use tempdir::TempDir;
@@ -155,6 +180,43 @@ fn test_lru() {
 	assert_eq!(ethash.cache.lock().prev_epoch.unwrap(), 0);
 }
 
+#[test]
+fn test_difficulty_to_boundary() {
+	use ethereum_types::H256;
+	use std::str::FromStr;
+
+	assert_eq!(difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value()));
+	assert_eq!(difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap());
+	assert_eq!(difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap());
+	assert_eq!(difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap());
+}
+
+#[test]
+fn test_difficulty_to_boundary_regression() {
+	use ethereum_types::H256;
+
+	// the last bit was originally being truncated when performing the conversion
+	// https://github.com/paritytech/parity-ethereum/issues/8397
+	for difficulty in 1..9 {
+		assert_eq!(U256::from(difficulty), boundary_to_difficulty(&difficulty_to_boundary(&difficulty.into())));
+		assert_eq!(H256::from(difficulty), difficulty_to_boundary(&boundary_to_difficulty(&difficulty.into())));
+		assert_eq!(U256::from(difficulty), boundary_to_difficulty(&boundary_to_difficulty(&difficulty.into()).into()));
+		assert_eq!(H256::from(difficulty), difficulty_to_boundary(&difficulty_to_boundary(&difficulty.into()).into()));
+	}
+}
+
+#[test]
+#[should_panic]
+fn test_difficulty_to_boundary_panics_on_zero() {
+	difficulty_to_boundary(&U256::from(0));
+}
+
+#[test]
+#[should_panic]
+fn test_boundary_to_difficulty_panics_on_zero() {
+	boundary_to_difficulty(&ethereum_types::H256::from(0));
+}
+
 #[cfg(feature = "benches")]
 mod benchmarks {
 	extern crate test;
diff --git a/ethcore/src/ethereum/ethash.rs b/ethcore/src/ethereum/ethash.rs
index ca19983d3..16069c327 100644
--- a/ethcore/src/ethereum/ethash.rs
+++ b/ethcore/src/ethereum/ethash.rs
@@ -20,7 +20,7 @@ use std::collections::BTreeMap;
 use std::sync::Arc;
 use hash::{KECCAK_EMPTY_LIST_RLP};
 use engines::block_reward::{self, RewardKind};
-use ethash::{quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
+use ethash::{self, quick_get_difficulty, slow_hash_block_number, EthashManager, OptimizeFor};
 use ethereum_types::{H256, H64, U256, Address};
 use unexpected::{OutOfBounds, Mismatch};
 use block::*;
@@ -302,7 +302,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
 			return Err(From::from(BlockError::DifficultyOutOfBounds(OutOfBounds { min: Some(min_difficulty), max: None, found: header.difficulty().clone() })))
 		}
 
-		let difficulty = Ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
+		let difficulty = ethash::boundary_to_difficulty(&H256(quick_get_difficulty(
 			&header.bare_hash().0,
 			seal.nonce.low_u64(),
 			&seal.mix_hash.0
@@ -324,7 +324,7 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
 		let result = self.pow.compute_light(header.number() as u64, &header.bare_hash().0, seal.nonce.low_u64());
 		let mix = H256(result.mix_hash);
-		let difficulty = Ethash::boundary_to_difficulty(&H256(result.value));
+
let difficulty = ethash::boundary_to_difficulty(&H256(result.value)); trace!(target: "miner", "num: {num}, seed: {seed}, h: {h}, non: {non}, mix: {mix}, res: {res}", num = header.number() as u64, seed = H256(slow_hash_block_number(header.number() as u64)), @@ -447,25 +447,6 @@ impl Ethash { } target } - - /// Convert an Ethash boundary to its original difficulty. Basically just `f(x) = 2^256 / x`. - pub fn boundary_to_difficulty(boundary: &H256) -> U256 { - let d = U256::from(*boundary); - if d <= U256::one() { - U256::max_value() - } else { - ((U256::one() << 255) / d) << 1 - } - } - - /// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`. - pub fn difficulty_to_boundary(difficulty: &U256) -> H256 { - if *difficulty <= U256::one() { - U256::max_value().into() - } else { - (((U256::one() << 255) / *difficulty) << 1).into() - } - } } fn ecip1017_eras_block_reward(era_rounds: u64, mut reward: U256, block_number:u64) -> (u64, U256) { @@ -766,16 +747,6 @@ mod tests { } } - #[test] - fn test_difficulty_to_boundary() { - // result of f(0) is undefined, so do not assert the result - let _ = Ethash::difficulty_to_boundary(&U256::from(0)); - assert_eq!(Ethash::difficulty_to_boundary(&U256::from(1)), H256::from(U256::max_value())); - assert_eq!(Ethash::difficulty_to_boundary(&U256::from(2)), H256::from_str("8000000000000000000000000000000000000000000000000000000000000000").unwrap()); - assert_eq!(Ethash::difficulty_to_boundary(&U256::from(4)), H256::from_str("4000000000000000000000000000000000000000000000000000000000000000").unwrap()); - assert_eq!(Ethash::difficulty_to_boundary(&U256::from(32)), H256::from_str("0800000000000000000000000000000000000000000000000000000000000000").unwrap()); - } - #[test] fn difficulty_frontier() { let machine = new_homestead_test_machine(); diff --git a/ethcore/src/miner/stratum.rs b/ethcore/src/miner/stratum.rs index ca7443279..38c881bb1 100644 --- a/ethcore/src/miner/stratum.rs +++ b/ethcore/src/miner/stratum.rs @@ -22,8 +22,7 @@ use std::fmt; use client::{Client, ImportSealedBlock}; use ethereum_types::{H64, H256, clean_0x, U256}; -use ethereum::ethash::Ethash; -use ethash::SeedHashCompute; +use ethash::{self, SeedHashCompute}; #[cfg(feature = "work-notify")] use ethcore_miner::work_notify::NotifyWork; #[cfg(feature = "work-notify")] @@ -167,7 +166,7 @@ impl StratumJobDispatcher { /// Serializes payload for stratum service fn payload(&self, pow_hash: H256, difficulty: U256, number: u64) -> String { // TODO: move this to engine - let target = Ethash::difficulty_to_boundary(&difficulty); + let target = ethash::difficulty_to_boundary(&difficulty); let seed_hash = &self.seed_compute.lock().hash_block_number(number); let seed_hash = H256::from_slice(&seed_hash[..]); format!( diff --git a/json/src/spec/ethash.rs b/json/src/spec/ethash.rs index 19fd09662..fd6b9fca5 100644 --- a/json/src/spec/ethash.rs +++ b/json/src/spec/ethash.rs @@ -24,6 +24,7 @@ use hash::Address; pub struct EthashParams { /// See main EthashParams docs. #[serde(rename="minimumDifficulty")] + #[serde(deserialize_with="uint::validate_non_zero")] pub minimum_difficulty: Uint, /// See main EthashParams docs. #[serde(rename="difficultyBoundDivisor")] diff --git a/miner/src/work_notify.rs b/miner/src/work_notify.rs index efae26ff1..522901982 100644 --- a/miner/src/work_notify.rs +++ b/miner/src/work_notify.rs @@ -67,19 +67,10 @@ impl WorkPoster { } } -/// Convert an Ethash difficulty to the target boundary. Basically just `f(x) = 2^256 / x`. 
-fn difficulty_to_boundary(difficulty: &U256) -> H256 { - if *difficulty <= U256::one() { - U256::max_value().into() - } else { - (((U256::one() << 255) / *difficulty) << 1).into() - } -} - impl NotifyWork for WorkPoster { fn notify(&self, pow_hash: H256, difficulty: U256, number: u64) { // TODO: move this to engine - let target = difficulty_to_boundary(&difficulty); + let target = ethash::difficulty_to_boundary(&difficulty); let seed_hash = &self.seed_compute.lock().hash_block_number(number); let seed_hash = H256::from_slice(&seed_hash[..]); let body = format!( diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 1e4ad87a5..67dc640d8 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -24,10 +24,9 @@ use rlp::{self, Rlp}; use ethereum_types::{U256, H64, H256, Address}; use parking_lot::Mutex; -use ethash::SeedHashCompute; +use ethash::{self, SeedHashCompute}; use ethcore::account_provider::AccountProvider; use ethcore::client::{BlockChainClient, BlockId, TransactionId, UncleId, StateOrBlock, StateClient, StateInfo, Call, EngineInfo}; -use ethcore::ethereum::Ethash; use ethcore::filter::Filter as EthcoreFilter; use ethcore::header::{BlockNumber as EthBlockNumber}; use ethcore::log_entry::LogEntry; @@ -758,7 +757,7 @@ impl Eth for EthClient< })?; let (pow_hash, number, timestamp, difficulty) = work; - let target = Ethash::difficulty_to_boundary(&difficulty); + let target = ethash::difficulty_to_boundary(&difficulty); let seed_hash = self.seed_compute.lock().hash_block_number(number); let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(); From a6df452841cb9be92ebc51eae82f3720ce11cf50 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 13 Aug 2018 15:47:10 +0800 Subject: [PATCH 20/48] Implement EIP234 block_hash for eth_getLogs (#9256) * Implement EIP234 * Make filter conversion returns error if both blockHash and from/toBlock is found This also changes PollFilter to store the EthFilter type, instead of the jsonrpc one, saving repeated conversion. * Return error if block filtering target is not found in eth_getLogs Use the old behavior (unwrap_or_default) for anywhere else. 
* fix test: secret_store * Fix weird indentation * Make client log filter return error in case a block cannot be found * Return blockId error in rpc * test_client: allow return error on logs * Add a mocked test for eth_getLogs error * fix: should return error if from_block/to_block greater than best block number * Add notes on pending * Add comment for UNSUPPORTED_REQUEST * Address grumbles * Return err if from > to --- ethcore/src/client/client.rs | 158 ++++++++++-------- ethcore/src/client/test_client.rs | 19 ++- ethcore/src/client/traits.rs | 4 +- ethcore/src/tests/client.rs | 4 +- rpc/src/v1/helpers/errors.rs | 14 ++ rpc/src/v1/helpers/poll_filter.rs | 11 +- rpc/src/v1/impls/eth.rs | 16 +- rpc/src/v1/impls/eth_filter.rs | 36 ++-- rpc/src/v1/impls/eth_pubsub.rs | 13 +- rpc/src/v1/impls/light/eth.rs | 7 +- rpc/src/v1/tests/mocked/eth.rs | 9 + rpc/src/v1/types/filter.rs | 34 +++- rpc/src/v1/types/pubsub.rs | 3 + secret_store/src/listener/service_contract.rs | 2 +- updater/src/updater.rs | 3 +- 15 files changed, 222 insertions(+), 111 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index efc8b3f2e..84bcddfd6 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -1813,76 +1813,100 @@ impl BlockChainClient for Client { self.engine.additional_params().into_iter().collect() } - fn logs(&self, filter: Filter) -> Vec { - // Wrap the logic inside a closure so that we can take advantage of question mark syntax. - let fetch_logs = || { - let chain = self.chain.read(); + fn logs(&self, filter: Filter) -> Result, BlockId> { + let chain = self.chain.read(); - // First, check whether `filter.from_block` and `filter.to_block` is on the canon chain. If so, we can use the - // optimized version. - let is_canon = |id| { - match id { - // If it is referred by number, then it is always on the canon chain. - &BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true, - // If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same - // result. - &BlockId::Hash(ref hash) => chain.is_canon(hash), - } - }; - - let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) { - // If we are on the canon chain, use bloom filter to fetch required hashes. - let from = self.block_number_ref(&filter.from_block)?; - let to = self.block_number_ref(&filter.to_block)?; - - chain.blocks_with_bloom(&filter.bloom_possibilities(), from, to) - .into_iter() - .filter_map(|n| chain.block_hash(n)) - .collect::>() - } else { - // Otherwise, we use a slower version that finds a link between from_block and to_block. - let from_hash = Self::block_hash(&chain, filter.from_block)?; - let from_number = chain.block_number(&from_hash)?; - let to_hash = Self::block_hash(&chain, filter.to_block)?; - - let blooms = filter.bloom_possibilities(); - let bloom_match = |header: &encoded::Header| { - blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom)) - }; - - let (blocks, last_hash) = { - let mut blocks = Vec::new(); - let mut current_hash = to_hash; - - loop { - let header = chain.block_header_data(¤t_hash)?; - if bloom_match(&header) { - blocks.push(current_hash); - } - - // Stop if `from` block is reached. - if header.number() <= from_number { - break; - } - current_hash = header.parent_hash(); - } - - blocks.reverse(); - (blocks, current_hash) - }; - - // Check if we've actually reached the expected `from` block. 
- if last_hash != from_hash || blocks.is_empty() { - return None; - } - - blocks - }; - - Some(self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)) + // First, check whether `filter.from_block` and `filter.to_block` is on the canon chain. If so, we can use the + // optimized version. + let is_canon = |id| { + match id { + // If it is referred by number, then it is always on the canon chain. + &BlockId::Earliest | &BlockId::Latest | &BlockId::Number(_) => true, + // If it is referred by hash, we see whether a hash -> number -> hash conversion gives us the same + // result. + &BlockId::Hash(ref hash) => chain.is_canon(hash), + } }; - fetch_logs().unwrap_or_default() + let blocks = if is_canon(&filter.from_block) && is_canon(&filter.to_block) { + // If we are on the canon chain, use bloom filter to fetch required hashes. + // + // If we are sure the block does not exist (where val > best_block_number), then return error. Note that we + // don't need to care about pending blocks here because RPC query sets pending back to latest (or handled + // pending logs themselves). + let from = match self.block_number_ref(&filter.from_block) { + Some(val) if val <= chain.best_block_number() => val, + _ => return Err(filter.from_block.clone()), + }; + let to = match self.block_number_ref(&filter.to_block) { + Some(val) if val <= chain.best_block_number() => val, + _ => return Err(filter.to_block.clone()), + }; + + // If from is greater than to, then the current bloom filter behavior is to just return empty + // result. There's no point to continue here. + if from > to { + return Err(filter.to_block.clone()); + } + + chain.blocks_with_bloom(&filter.bloom_possibilities(), from, to) + .into_iter() + .filter_map(|n| chain.block_hash(n)) + .collect::>() + } else { + // Otherwise, we use a slower version that finds a link between from_block and to_block. + let from_hash = match Self::block_hash(&chain, filter.from_block) { + Some(val) => val, + None => return Err(filter.from_block.clone()), + }; + let from_number = match chain.block_number(&from_hash) { + Some(val) => val, + None => return Err(BlockId::Hash(from_hash)), + }; + let to_hash = match Self::block_hash(&chain, filter.to_block) { + Some(val) => val, + None => return Err(filter.to_block.clone()), + }; + + let blooms = filter.bloom_possibilities(); + let bloom_match = |header: &encoded::Header| { + blooms.iter().any(|bloom| header.log_bloom().contains_bloom(bloom)) + }; + + let (blocks, last_hash) = { + let mut blocks = Vec::new(); + let mut current_hash = to_hash; + + loop { + let header = match chain.block_header_data(¤t_hash) { + Some(val) => val, + None => return Err(BlockId::Hash(current_hash)), + }; + if bloom_match(&header) { + blocks.push(current_hash); + } + + // Stop if `from` block is reached. + if header.number() <= from_number { + break; + } + current_hash = header.parent_hash(); + } + + blocks.reverse(); + (blocks, current_hash) + }; + + // Check if we've actually reached the expected `from` block. + if last_hash != from_hash || blocks.is_empty() { + // In this case, from_hash is the cause (for not matching last_hash). 
+ return Err(BlockId::Hash(from_hash)); + } + + blocks + }; + + Ok(self.chain.read().logs(blocks, |entry| filter.matches(entry), filter.limit)) } fn filter_traces(&self, filter: TraceFilter) -> Option> { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index f729b15b7..9981f8d2c 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -94,6 +94,8 @@ pub struct TestBlockChainClient { pub receipts: RwLock>, /// Logs pub logs: RwLock>, + /// Should return errors on logs. + pub error_on_logs: RwLock>, /// Block queue size. pub queue_size: AtomicUsize, /// Miner @@ -178,6 +180,7 @@ impl TestBlockChainClient { traces: RwLock::new(None), history: RwLock::new(None), disabled: AtomicBool::new(false), + error_on_logs: RwLock::new(None), }; // insert genesis hash. @@ -233,6 +236,11 @@ impl TestBlockChainClient { *self.logs.write() = logs; } + /// Set return errors on logs. + pub fn set_error_on_logs(&self, val: Option) { + *self.error_on_logs.write() = val; + } + /// Add blocks to test client. pub fn add_blocks(&self, count: usize, with: EachBlockWith) { let len = self.numbers.read().len(); @@ -665,13 +673,18 @@ impl BlockChainClient for TestBlockChainClient { self.receipts.read().get(&id).cloned() } - fn logs(&self, filter: Filter) -> Vec { + fn logs(&self, filter: Filter) -> Result, BlockId> { + match self.error_on_logs.read().as_ref() { + Some(id) => return Err(id.clone()), + None => (), + } + let mut logs = self.logs.read().clone(); let len = logs.len(); - match filter.limit { + Ok(match filter.limit { Some(limit) if limit <= len => logs.split_off(len - limit), _ => logs, - } + }) } fn last_hashes(&self) -> LastHashes { diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index 6ccba5e0f..189ca67f4 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -297,8 +297,8 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra /// Get the registrar address, if it exists. fn additional_params(&self) -> BTreeMap; - /// Returns logs matching given filter. - fn logs(&self, filter: Filter) -> Vec; + /// Returns logs matching given filter. If one of the filtering block cannot be found, returns the block id that caused the error. + fn logs(&self, filter: Filter) -> Result, BlockId>; /// Replays a given transaction for inspection. 
fn replay(&self, t: TransactionId, analytics: CallAnalytics) -> Result; diff --git a/ethcore/src/tests/client.rs b/ethcore/src/tests/client.rs index 24801cb57..8f598a6c2 100644 --- a/ethcore/src/tests/client.rs +++ b/ethcore/src/tests/client.rs @@ -150,7 +150,7 @@ fn returns_logs() { address: None, topics: vec![], limit: None, - }); + }).unwrap(); assert_eq!(logs.len(), 0); } @@ -164,7 +164,7 @@ fn returns_logs_with_limit() { address: None, topics: vec![], limit: None, - }); + }).unwrap(); assert_eq!(logs.len(), 0); } diff --git a/rpc/src/v1/helpers/errors.rs b/rpc/src/v1/helpers/errors.rs index 710f7d749..4afd40ff8 100644 --- a/rpc/src/v1/helpers/errors.rs +++ b/rpc/src/v1/helpers/errors.rs @@ -20,6 +20,7 @@ use std::fmt; use ethcore::account_provider::{SignError as AccountError}; use ethcore::error::{Error as EthcoreError, ErrorKind, CallError}; +use ethcore::client::BlockId; use jsonrpc_core::{futures, Error, ErrorCode, Value}; use rlp::DecoderError; use transaction::Error as TransactionError; @@ -422,6 +423,19 @@ pub fn filter_not_found() -> Error { } } +pub fn filter_block_not_found(id: BlockId) -> Error { + Error { + code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), // Specified in EIP-234. + message: "One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found".into(), + data: Some(Value::String(match id { + BlockId::Hash(hash) => format!("0x{:x}", hash), + BlockId::Number(number) => format!("0x{:x}", number), + BlockId::Earliest => "earliest".to_string(), + BlockId::Latest => "latest".to_string(), + })), + } +} + // on-demand sender cancelled. pub fn on_demand_cancel(_cancel: futures::sync::oneshot::Canceled) -> Error { internal("on-demand sender cancelled", "") diff --git a/rpc/src/v1/helpers/poll_filter.rs b/rpc/src/v1/helpers/poll_filter.rs index 19979c814..48df7ca2a 100644 --- a/rpc/src/v1/helpers/poll_filter.rs +++ b/rpc/src/v1/helpers/poll_filter.rs @@ -22,7 +22,8 @@ use std::{ }; use ethereum_types::H256; use parking_lot::Mutex; -use v1::types::{Filter, Log}; +use ethcore::filter::Filter; +use v1::types::Log; pub type BlockNumber = u64; @@ -52,7 +53,13 @@ pub enum PollFilter { /// Hashes of all pending transactions the client knows about. PendingTransaction(BTreeSet), /// Number of From block number, last seen block hash, pending logs and log filter itself. 
- Logs(BlockNumber, Option, HashSet, Filter) + Logs { + block_number: BlockNumber, + last_block_hash: Option, + previous_logs: HashSet, + filter: Filter, + include_pending: bool, + } } /// Returns only last `n` logs diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 67dc640d8..5c45c440f 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -708,11 +708,17 @@ impl Eth for EthClient< fn logs(&self, filter: Filter) -> BoxFuture> { let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.into(); - let mut logs = self.client.logs(filter.clone()) - .into_iter() - .map(From::from) - .collect::>(); + let filter: EthcoreFilter = match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }; + let mut logs = match self.client.logs(filter.clone()) { + Ok(logs) => logs + .into_iter() + .map(From::from) + .collect::>(), + Err(id) => return Box::new(future::err(errors::filter_block_not_found(id))), + }; if include_pending { let best_block = self.client.chain_info().best_block_number; diff --git a/rpc/src/v1/impls/eth_filter.rs b/rpc/src/v1/impls/eth_filter.rs index 926439cfc..7bccc46d1 100644 --- a/rpc/src/v1/impls/eth_filter.rs +++ b/rpc/src/v1/impls/eth_filter.rs @@ -92,7 +92,7 @@ impl Filterable for EthFilterClient where } fn logs(&self, filter: EthcoreFilter) -> BoxFuture> { - Box::new(future::ok(self.client.logs(filter).into_iter().map(Into::into).collect())) + Box::new(future::ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect())) } fn pending_logs(&self, block_number: u64, filter: &EthcoreFilter) -> Vec { @@ -125,7 +125,7 @@ impl Filterable for EthFilterClient where filter.from_block = BlockId::Hash(block_hash); filter.to_block = filter.from_block; - self.client.logs(filter).into_iter().map(|log| { + self.client.logs(filter).unwrap_or_default().into_iter().map(|log| { let mut log: Log = log.into(); log.log_type = "removed".into(); log.removed = true; @@ -140,7 +140,13 @@ impl EthFilter for T { fn new_filter(&self, filter: Filter) -> Result { let mut polls = self.polls().lock(); let block_number = self.best_block_number(); - let id = polls.create_poll(SyncPollFilter::new(PollFilter::Logs(block_number, None, Default::default(), filter))); + let include_pending = filter.to_block == Some(BlockNumber::Pending); + let filter = filter.try_into()?; + let id = polls.create_poll(SyncPollFilter::new(PollFilter::Logs { + block_number, filter, include_pending, + last_block_hash: None, + previous_logs: Default::default() + })); Ok(id.into()) } @@ -195,15 +201,17 @@ impl EthFilter for T { // return new hashes Either::A(future::ok(FilterChanges::Hashes(new_hashes))) }, - PollFilter::Logs(ref mut block_number, ref mut last_block_hash, ref mut previous_logs, ref filter) => { + PollFilter::Logs { + ref mut block_number, + ref mut last_block_hash, + ref mut previous_logs, + ref filter, + include_pending, + } => { // retrive the current block number let current_number = self.best_block_number(); - // check if we need to check pending hashes - let include_pending = filter.to_block == Some(BlockNumber::Pending); - - // build appropriate filter - let mut filter: EthcoreFilter = filter.clone().into(); + let mut filter = filter.clone(); // retrieve reorg logs let (mut reorg, reorg_len) = last_block_hash.map_or_else(|| (Vec::new(), 0), |h| self.removed_logs(h, &filter)); @@ -250,21 +258,19 @@ impl EthFilter for T { } fn filter_logs(&self, index: Index) -> BoxFuture> { - 
let filter = { + let (filter, include_pending) = { let mut polls = self.polls().lock(); match polls.poll(&index.value()).and_then(|f| f.modify(|filter| match *filter { - PollFilter::Logs(.., ref filter) => Some(filter.clone()), + PollFilter::Logs { ref filter, include_pending, .. } => + Some((filter.clone(), include_pending)), _ => None, })) { - Some(filter) => filter, + Some((filter, include_pending)) => (filter, include_pending), None => return Box::new(future::err(errors::filter_not_found())), } }; - let include_pending = filter.to_block == Some(BlockNumber::Pending); - let filter: EthcoreFilter = filter.into(); - // fetch pending logs. let pending = if include_pending { let best_block = self.best_block_number(); diff --git a/rpc/src/v1/impls/eth_pubsub.rs b/rpc/src/v1/impls/eth_pubsub.rs index 9f592b1fa..7961b1d18 100644 --- a/rpc/src/v1/impls/eth_pubsub.rs +++ b/rpc/src/v1/impls/eth_pubsub.rs @@ -252,9 +252,9 @@ impl ChainNotify for ChainNotificationHandler { self.notify_logs(route.route(), |filter, ex| { match ex { &ChainRouteType::Enacted => - Ok(self.client.logs(filter).into_iter().map(Into::into).collect()), + Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).collect()), &ChainRouteType::Retracted => - Ok(self.client.logs(filter).into_iter().map(Into::into).map(|mut log: Log| { + Ok(self.client.logs(filter).unwrap_or_default().into_iter().map(Into::into).map(|mut log: Log| { log.log_type = "removed".into(); log.removed = true; log @@ -283,8 +283,13 @@ impl EthPubSub for EthPubSubClient { errors::invalid_params("newHeads", "Expected no parameters.") }, (pubsub::Kind::Logs, Some(pubsub::Params::Logs(filter))) => { - self.logs_subscribers.write().push(subscriber, filter.into()); - return; + match filter.try_into() { + Ok(filter) => { + self.logs_subscribers.write().push(subscriber, filter); + return; + }, + Err(err) => err, + } }, (pubsub::Kind::Logs, _) => { errors::invalid_params("logs", "Expected a filter object.") diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index de2170f12..a22fffe27 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -502,8 +502,11 @@ impl Eth for EthClient { fn logs(&self, filter: Filter) -> BoxFuture> { let limit = filter.limit; - Box::new(Filterable::logs(self, filter.into()) - .map(move|logs| limit_logs(logs, limit))) + Box::new( + Filterable::logs(self, match filter.try_into() { + Ok(value) => value, + Err(err) => return Box::new(future::err(err)), + }).map(move |logs| limit_logs(logs, limit))) } fn work(&self, _timeout: Trailing) -> Result { diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index 602621194..33f820f1c 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -233,6 +233,15 @@ fn rpc_eth_logs() { assert_eq!(tester.io.handle_request_sync(request3), Some(response3.to_owned())); } +#[test] +fn rpc_eth_logs_error() { + let tester = EthTester::default(); + tester.client.set_error_on_logs(Some(BlockId::Hash(H256::from([5u8].as_ref())))); + let request = r#"{"jsonrpc": "2.0", "method": "eth_getLogs", "params": [{"limit":1,"blockHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"One of the blocks specified in filter (fromBlock, toBlock or blockHash) cannot be found","data":"0x0500000000000000000000000000000000000000000000000000000000000000"},"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), 
Some(response.to_owned())); +} + #[test] fn rpc_logs_filter() { let tester = EthTester::default(); diff --git a/rpc/src/v1/types/filter.rs b/rpc/src/v1/types/filter.rs index dd8b823e8..6d3e94c70 100644 --- a/rpc/src/v1/types/filter.rs +++ b/rpc/src/v1/types/filter.rs @@ -17,9 +17,11 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::de::{Error, DeserializeOwned}; use serde_json::{Value, from_value}; +use jsonrpc_core::{Error as RpcError}; use ethcore::filter::Filter as EthFilter; use ethcore::client::BlockId; use v1::types::{BlockNumber, H160, H256, Log}; +use v1::helpers::errors::invalid_params; /// Variadic value #[derive(Debug, PartialEq, Eq, Clone, Hash)] @@ -62,6 +64,9 @@ pub struct Filter { /// To Block #[serde(rename="toBlock")] pub to_block: Option, + /// Block hash + #[serde(rename="blockHash")] + pub block_hash: Option, /// Address pub address: Option, /// Topics @@ -70,17 +75,30 @@ pub struct Filter { pub limit: Option, } -impl Into for Filter { - fn into(self) -> EthFilter { +impl Filter { + pub fn try_into(self) -> Result { + if self.block_hash.is_some() && (self.from_block.is_some() || self.to_block.is_some()) { + return Err(invalid_params("blockHash", "blockHash is mutually exclusive with fromBlock/toBlock")); + } + let num_to_id = |num| match num { BlockNumber::Num(n) => BlockId::Number(n), BlockNumber::Earliest => BlockId::Earliest, BlockNumber::Latest | BlockNumber::Pending => BlockId::Latest, }; - EthFilter { - from_block: self.from_block.map_or_else(|| BlockId::Latest, &num_to_id), - to_block: self.to_block.map_or_else(|| BlockId::Latest, &num_to_id), + let (from_block, to_block) = match self.block_hash { + Some(hash) => { + let hash = hash.into(); + (BlockId::Hash(hash), BlockId::Hash(hash)) + }, + None => + (self.from_block.map_or_else(|| BlockId::Latest, &num_to_id), + self.to_block.map_or_else(|| BlockId::Latest, &num_to_id)), + }; + + Ok(EthFilter { + from_block, to_block, address: self.address.and_then(|address| match address { VariadicValue::Null => None, VariadicValue::Single(a) => Some(vec![a.into()]), @@ -101,7 +119,7 @@ impl Into for Filter { ] }, limit: self.limit, - } + }) } } @@ -157,6 +175,7 @@ mod tests { assert_eq!(deserialized, Filter { from_block: Some(BlockNumber::Earliest), to_block: Some(BlockNumber::Latest), + block_hash: None, address: None, topics: None, limit: None, @@ -168,6 +187,7 @@ mod tests { let filter = Filter { from_block: Some(BlockNumber::Earliest), to_block: Some(BlockNumber::Latest), + block_hash: None, address: Some(VariadicValue::Multiple(vec![])), topics: Some(vec![ VariadicValue::Null, @@ -177,7 +197,7 @@ mod tests { limit: None, }; - let eth_filter: EthFilter = filter.into(); + let eth_filter: EthFilter = filter.try_into().unwrap(); assert_eq!(eth_filter, EthFilter { from_block: BlockId::Earliest, to_block: BlockId::Latest, diff --git a/rpc/src/v1/types/pubsub.rs b/rpc/src/v1/types/pubsub.rs index ea01d6427..db4af4e87 100644 --- a/rpc/src/v1/types/pubsub.rs +++ b/rpc/src/v1/types/pubsub.rs @@ -119,6 +119,7 @@ mod tests { assert_eq!(logs1, Params::Logs(Filter { from_block: None, to_block: None, + block_hash: None, address: None, topics: None, limit: None, @@ -126,6 +127,7 @@ mod tests { assert_eq!(logs2, Params::Logs(Filter { from_block: None, to_block: None, + block_hash: None, address: None, topics: None, limit: Some(10), @@ -133,6 +135,7 @@ mod tests { assert_eq!(logs3, Params::Logs(Filter { from_block: None, to_block: None, + block_hash: None, address: None, topics: Some(vec![ 
VariadicValue::Single("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b".parse().unwrap() diff --git a/secret_store/src/listener/service_contract.rs b/secret_store/src/listener/service_contract.rs index daf70cd64..e4d54e0dc 100644 --- a/secret_store/src/listener/service_contract.rs +++ b/secret_store/src/listener/service_contract.rs @@ -283,7 +283,7 @@ impl ServiceContract for OnChainServiceContract { address: Some(vec![address]), topics: vec![Some(mask_topics(&self.mask))], limit: None, - }); + }).unwrap_or_default(); Box::new(request_logs.into_iter() .filter_map(|log| { diff --git a/updater/src/updater.rs b/updater/src/updater.rs index a3ed413c4..9dad11e9a 100644 --- a/updater/src/updater.rs +++ b/updater/src/updater.rs @@ -314,6 +314,7 @@ impl OperationsClient for OperationsContractClient { }; client.logs(filter) + .unwrap_or_default() .iter() .filter_map(|log| { let event = event.parse_log((log.topics.clone(), log.data.clone()).into()).ok()?; @@ -618,7 +619,7 @@ impl Updater Date: Mon, 13 Aug 2018 18:53:19 +0300 Subject: [PATCH 21/48] Fix load share (#9321) * fix(light_sync): calculate `load_share` properly * refactor(api.rs): extract `light_params` fn, add test * style(api.rs): add trailing commas --- Cargo.lock | 1 + ethcore/sync/Cargo.toml | 1 + ethcore/sync/src/api.rs | 71 +++++++++++++++++++++++++++++++---------- ethcore/sync/src/lib.rs | 1 + 4 files changed, 57 insertions(+), 17 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e95c9ea8..d3e229c40 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -839,6 +839,7 @@ dependencies = [ name = "ethcore-sync" version = "1.12.0" dependencies = [ + "common-types 0.1.0", "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-io 1.12.0", diff --git a/ethcore/sync/Cargo.toml b/ethcore/sync/Cargo.toml index 6cc87df31..7935eeca4 100644 --- a/ethcore/sync/Cargo.toml +++ b/ethcore/sync/Cargo.toml @@ -8,6 +8,7 @@ authors = ["Parity Technologies "] [lib] [dependencies] +common-types = { path = "../types" } parity-bytes = { git = "https://github.com/paritytech/parity-common" } ethcore-network = { path = "../../util/network" } ethcore-network-devp2p = { path = "../../util/network-devp2p" } diff --git a/ethcore/sync/src/api.rs b/ethcore/sync/src/api.rs index ef54a4802..606aa39b3 100644 --- a/ethcore/sync/src/api.rs +++ b/ethcore/sync/src/api.rs @@ -24,6 +24,8 @@ use devp2p::NetworkService; use network::{NetworkProtocolHandler, NetworkContext, PeerId, ProtocolId, NetworkConfiguration as BasicNetworkConfiguration, NonReservedPeerMode, Error, ErrorKind, ConnectionFilter}; + +use types::pruning_info::PruningInfo; use ethereum_types::{H256, H512, U256}; use io::{TimerToken}; use ethcore::ethstore::ethkey::Secret; @@ -39,7 +41,10 @@ use chain::{ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_62, PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, PAR_PROTOCOL_VERSION_3}; use light::client::AsLightClient; use light::Provider; -use light::net::{self as light_net, LightProtocol, Params as LightParams, Capabilities, Handler as LightHandler, EventContext}; +use light::net::{ + self as light_net, LightProtocol, Params as LightParams, + Capabilities, Handler as LightHandler, EventContext, SampleStore, +}; use network::IpFilter; use private_tx::PrivateTxHandler; use transaction::UnverifiedTransaction; @@ -256,11 +261,35 @@ pub struct EthSync { light_subprotocol_name: [u8; 3], } +fn light_params( + network_id: u64, + max_peers: u32, + pruning_info: PruningInfo, + sample_store: Option>, +) -> LightParams { + 
const MAX_LIGHTSERV_LOAD: f64 = 0.5; + + let mut light_params = LightParams { + network_id: network_id, + config: Default::default(), + capabilities: Capabilities { + serve_headers: true, + serve_chain_since: Some(pruning_info.earliest_chain), + serve_state_since: Some(pruning_info.earliest_state), + tx_relay: true, + }, + sample_store: sample_store, + }; + + let max_peers = ::std::cmp::max(max_peers, 1); + light_params.config.load_share = MAX_LIGHTSERV_LOAD / max_peers as f64; + + light_params +} + impl EthSync { /// Creates and register protocol with the network service pub fn new(params: Params, connection_filter: Option>) -> Result, Error> { - const MAX_LIGHTSERV_LOAD: f64 = 0.5; - let pruning_info = params.chain.pruning_info(); let light_proto = match params.config.serve_light { false => None, @@ -271,20 +300,12 @@ impl EthSync { .map(|mut p| { p.push("request_timings"); light_net::FileStore(p) }) .map(|store| Box::new(store) as Box<_>); - let mut light_params = LightParams { - network_id: params.config.network_id, - config: Default::default(), - capabilities: Capabilities { - serve_headers: true, - serve_chain_since: Some(pruning_info.earliest_chain), - serve_state_since: Some(pruning_info.earliest_state), - tx_relay: true, - }, - sample_store: sample_store, - }; - - let max_peers = ::std::cmp::min(params.network_config.max_peers, 1); - light_params.config.load_share = MAX_LIGHTSERV_LOAD / max_peers as f64; + let light_params = light_params( + params.config.network_id, + params.network_config.max_peers, + pruning_info, + sample_store, + ); let mut light_proto = LightProtocol::new(params.provider, light_params); light_proto.add_handler(Arc::new(TxRelay(params.chain.clone()))); @@ -916,3 +937,19 @@ impl LightSyncProvider for LightSync { Default::default() // TODO } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn light_params_load_share_depends_on_max_peers() { + let pruning_info = PruningInfo { + earliest_chain: 0, + earliest_state: 0, + }; + let params1 = light_params(0, 10, pruning_info.clone(), None); + let params2 = light_params(0, 20, pruning_info, None); + assert!(params1.config.load_share > params2.config.load_share) + } +} diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index 236432ca8..18a185e51 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -21,6 +21,7 @@ //! https://github.com/ethereum/wiki/wiki/Ethereum-Wire-Protocol //! +extern crate common_types as types; extern crate ethcore_network as network; extern crate ethcore_network_devp2p as devp2p; extern crate parity_bytes as bytes; From 9c595aff9547073391260ee048aca16516e63b2c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 14 Aug 2018 04:06:15 +0800 Subject: [PATCH 22/48] Allow single opcode stepping for EVM (#9051) * Feed in ActionParams on VM creation * Fix ethcore after Vm interface change * Move informant inside Interpreter struct * Move do_trace to Interpreter struct * Move all remaining exec variables to Interpreter struct * Refactor VM to allow single opcode step * Fix all EVM tests * Fix all wasm tests * Fix wasm runner tests * Fix a check case where code length is zero * Fix jsontests compile * Fix cargo lock * Use match instead of expect * Use cheaper check reader.len() == 0 for the initial special case * Get rid of try_and_done! 
macro by using Result<(), ReturnType> * Use Never instead of () * Fix parity-bytes path * Bypass gasometer lifetime problem by borrow only for a instance * typo: missing { * Fix ethcore test compile * Fix evm tests --- Cargo.lock | 1 + ethcore/evm/Cargo.toml | 1 + ethcore/evm/src/evm.rs | 6 + ethcore/evm/src/factory.rs | 20 +- ethcore/evm/src/interpreter/mod.rs | 690 ++++++++++++++++------------ ethcore/evm/src/lib.rs | 1 + ethcore/evm/src/tests.rs | 146 +++--- ethcore/src/executive.rs | 19 +- ethcore/src/factory.rs | 10 +- ethcore/src/json_tests/executive.rs | 4 +- ethcore/vm/src/lib.rs | 2 +- ethcore/wasm/run/src/runner.rs | 10 +- ethcore/wasm/src/lib.rs | 26 +- ethcore/wasm/src/tests.rs | 84 ++-- 14 files changed, 574 insertions(+), 446 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d3e229c40..d69838383 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1007,6 +1007,7 @@ dependencies = [ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", "memory-cache 0.1.0", + "parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)", "parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", diff --git a/ethcore/evm/Cargo.toml b/ethcore/evm/Cargo.toml index ce9f644cc..3ccabffb3 100644 --- a/ethcore/evm/Cargo.toml +++ b/ethcore/evm/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] [dependencies] bit-set = "0.4" +parity-bytes = { git = "https://github.com/paritytech/parity-common" } ethereum-types = "0.3" heapsize = "0.4" lazy_static = "1.0" diff --git a/ethcore/evm/src/evm.rs b/ethcore/evm/src/evm.rs index 4c85b3702..9d2ff0cb1 100644 --- a/ethcore/evm/src/evm.rs +++ b/ethcore/evm/src/evm.rs @@ -55,6 +55,12 @@ impl Finalize for Result { } } +impl Finalize for Error { + fn finalize(self, _ext: E) -> Result { + Err(self) + } +} + /// Cost calculation type. For low-gas usage we calculate costs using usize instead of U256 pub trait CostType: Sized + From + Copy + ops::Mul + ops::Div + ops::Add +ops::Sub diff --git a/ethcore/evm/src/factory.rs b/ethcore/evm/src/factory.rs index 65a683cd4..84e01460d 100644 --- a/ethcore/evm/src/factory.rs +++ b/ethcore/evm/src/factory.rs @@ -17,8 +17,9 @@ //! Evm factory. //! use std::sync::Arc; -use vm::Vm; +use vm::{Vm, Schedule}; use ethereum_types::U256; +use super::vm::ActionParams; use super::interpreter::SharedCache; use super::vmtype::VMType; @@ -32,12 +33,12 @@ pub struct Factory { impl Factory { /// Create fresh instance of VM /// Might choose implementation depending on supplied gas. 
- pub fn create(&self, gas: &U256) -> Box { + pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { match self.evm { - VMType::Interpreter => if Self::can_fit_in_usize(gas) { - Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) + VMType::Interpreter => if Self::can_fit_in_usize(¶ms.gas) { + Box::new(super::interpreter::Interpreter::::new(params, self.evm_cache.clone(), schedule, depth)) } else { - Box::new(super::interpreter::Interpreter::::new(self.evm_cache.clone())) + Box::new(super::interpreter::Interpreter::::new(params, self.evm_cache.clone(), schedule, depth)) } } } @@ -68,7 +69,14 @@ impl Default for Factory { #[test] fn test_create_vm() { - let _vm = Factory::default().create(&U256::zero()); + use vm::Ext; + use vm::tests::FakeExt; + use bytes::Bytes; + + let mut params = ActionParams::default(); + params.code = Some(Arc::new(Bytes::default())); + let ext = FakeExt::new(); + let _vm = Factory::default().create(params, ext.schedule(), ext.depth()); } /// Create tests by injecting different VM factories diff --git a/ethcore/evm/src/interpreter/mod.rs b/ethcore/evm/src/interpreter/mod.rs index 3af1e34dd..fefa3a60b 100644 --- a/ethcore/evm/src/interpreter/mod.rs +++ b/ethcore/evm/src/interpreter/mod.rs @@ -27,11 +27,12 @@ use std::marker::PhantomData; use std::{cmp, mem}; use std::sync::Arc; use hash::keccak; +use bytes::Bytes; use ethereum_types::{U256, U512, H256, Address}; use vm::{ - self, ActionParams, ActionValue, CallType, MessageCallResult, - ContractCreateResult, CreateContractAddress, ReturnData, GasLeft + self, ActionParams, ParamsType, ActionValue, CallType, MessageCallResult, + ContractCreateResult, CreateContractAddress, ReturnData, GasLeft, Schedule }; use evm::CostType; @@ -44,6 +45,8 @@ pub use self::shared_cache::SharedCache; use bit_set::BitSet; +const GASOMETER_PROOF: &str = "If gasometer is None, Err is immediately returned in step; this function is only called by step; qed"; + type ProgramCounter = usize; const ONE: U256 = U256([1, 0, 0, 0]); @@ -58,17 +61,17 @@ const TWO_POW_224: U256 = U256([0, 0, 0, 0x100000000]); //0x1 00000000 00000000 const TWO_POW_248: U256 = U256([0, 0, 0, 0x100000000000000]); //0x1 00000000 00000000 00000000 00000000 00000000 00000000 00000000 000000 /// Abstraction over raw vector of Bytes. Easier state management of PC. -struct CodeReader<'a> { +struct CodeReader { position: ProgramCounter, - code: &'a [u8] + code: Arc, } -impl<'a> CodeReader<'a> { +impl CodeReader { /// Create new code reader - starting at position 0. - fn new(code: &'a [u8]) -> Self { + fn new(code: Arc) -> Self { CodeReader { + code, position: 0, - code: code, } } @@ -102,127 +105,236 @@ enum InstructionResult { StopExecution, } +enum Never {} + +/// ActionParams without code, so that it can be feed into CodeReader. +#[derive(Debug)] +struct InterpreterParams { + /// Address of currently executed code. + pub code_address: Address, + /// Hash of currently executed code. + pub code_hash: Option, + /// Receive address. Usually equal to code_address, + /// except when called using CALLCODE. + pub address: Address, + /// Sender of current part of the transaction. + pub sender: Address, + /// Transaction initiator. + pub origin: Address, + /// Gas paid up front for transaction execution + pub gas: U256, + /// Gas price. + pub gas_price: U256, + /// Transaction value. + pub value: ActionValue, + /// Input data. 
+ pub data: Option, + /// Type of call + pub call_type: CallType, + /// Param types encoding + pub params_type: ParamsType, +} + +impl From for InterpreterParams { + fn from(params: ActionParams) -> Self { + InterpreterParams { + code_address: params.code_address, + code_hash: params.code_hash, + address: params.address, + sender: params.sender, + origin: params.origin, + gas: params.gas, + gas_price: params.gas_price, + value: params.value, + data: params.data, + call_type: params.call_type, + params_type: params.params_type, + } + } +} + +/// Stepping result returned by interpreter. +pub enum InterpreterResult { + /// The VM has already stopped. + Stopped, + /// The VM has just finished execution in the current step. + Done(vm::Result), + /// The VM can continue to run. + Continue, +} + +impl From for InterpreterResult { + fn from(error: vm::Error) -> InterpreterResult { + InterpreterResult::Done(Err(error)) + } +} + /// Intepreter EVM implementation pub struct Interpreter { mem: Vec, cache: Arc, + params: InterpreterParams, + reader: CodeReader, return_data: ReturnData, + informant: informant::EvmInformant, + do_trace: bool, + done: bool, + valid_jump_destinations: Option>, + gasometer: Option>, + stack: VecStack, _type: PhantomData, } impl vm::Vm for Interpreter { - fn exec(&mut self, params: ActionParams, ext: &mut vm::Ext) -> vm::Result { - self.mem.clear(); - - let mut informant = informant::EvmInformant::new(ext.depth()); - let mut do_trace = true; - - let code = ¶ms.code.as_ref().expect("exec always called with code; qed"); - let mut valid_jump_destinations = None; - - let mut gasometer = Gasometer::::new(Cost::from_u256(params.gas)?); - let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero()); - let mut reader = CodeReader::new(code); - - while reader.position < code.len() { - let opcode = code[reader.position]; - let instruction = Instruction::from_u8(opcode); - reader.position += 1; - - // TODO: make compile-time removable if too much of a performance hit. 
- do_trace = do_trace && ext.trace_next_instruction( - reader.position - 1, opcode, gasometer.current_gas.as_u256(), - ); - - if instruction.is_none() { - return Err(vm::Error::BadInstruction { - instruction: opcode - }); - } - let instruction = instruction.expect("None case is checked above; qed"); - - let info = instruction.info(); - self.verify_instruction(ext, instruction, info, &stack)?; - - // Calculate gas cost - let requirements = gasometer.requirements(ext, instruction, info, &stack, self.mem.size())?; - if do_trace { - ext.trace_prepare_execute(reader.position - 1, opcode, requirements.gas_cost.as_u256()); - } - - gasometer.verify_gas(&requirements.gas_cost)?; - self.mem.expand(requirements.memory_required_size); - gasometer.current_mem_gas = requirements.memory_total_gas; - gasometer.current_gas = gasometer.current_gas - requirements.gas_cost; - - evm_debug!({ informant.before_instruction(reader.position, instruction, info, &gasometer.current_gas, &stack) }); - - let (mem_written, store_written) = match do_trace { - true => (Self::mem_written(instruction, &stack), Self::store_written(instruction, &stack)), - false => (None, None), - }; - - // Execute instruction - let result = self.exec_instruction( - gasometer.current_gas, ¶ms, ext, instruction, &mut reader, &mut stack, requirements.provide_gas - )?; - - evm_debug!({ informant.after_instruction(instruction) }); - - if let InstructionResult::UnusedGas(ref gas) = result { - gasometer.current_gas = gasometer.current_gas + *gas; - } - - if do_trace { - ext.trace_executed( - gasometer.current_gas.as_u256(), - stack.peek_top(info.ret), - mem_written.map(|(o, s)| (o, &(self.mem[o..o+s]))), - store_written, - ); - } - - // Advance + fn exec(&mut self, ext: &mut vm::Ext) -> vm::Result { + loop { + let result = self.step(ext); match result { - InstructionResult::JumpToPosition(position) => { - if valid_jump_destinations.is_none() { - let code_hash = params.code_hash.clone().unwrap_or_else(|| keccak(code.as_ref())); - valid_jump_destinations = Some(self.cache.jump_destinations(&code_hash, code)); - } - let jump_destinations = valid_jump_destinations.as_ref().expect("jump_destinations are initialized on first jump; qed"); - let pos = self.verify_jump(position, jump_destinations)?; - reader.position = pos; - }, - InstructionResult::StopExecutionNeedsReturn {gas, init_off, init_size, apply} => { - informant.done(); - let mem = mem::replace(&mut self.mem, Vec::new()); - return Ok(GasLeft::NeedsReturn { - gas_left: gas.as_u256(), - data: mem.into_return_data(init_off, init_size), - apply_state: apply - }); - }, - InstructionResult::StopExecution => break, - _ => {}, + InterpreterResult::Continue => {}, + InterpreterResult::Done(value) => return value, + InterpreterResult::Stopped => panic!("Attempted to execute an already stopped VM.") } } - informant.done(); - Ok(GasLeft::Known(gasometer.current_gas.as_u256())) } } impl Interpreter { /// Create a new `Interpreter` instance with shared cache. 
- pub fn new(cache: Arc) -> Interpreter { + pub fn new(mut params: ActionParams, cache: Arc, schedule: &Schedule, depth: usize) -> Interpreter { + let reader = CodeReader::new(params.code.take().expect("VM always called with code; qed")); + let params = InterpreterParams::from(params); + let informant = informant::EvmInformant::new(depth); + let valid_jump_destinations = None; + let gasometer = Cost::from_u256(params.gas).ok().map(|gas| Gasometer::::new(gas)); + let stack = VecStack::with_capacity(schedule.stack_limit, U256::zero()); + Interpreter { + cache, params, reader, informant, + valid_jump_destinations, gasometer, stack, + done: false, + do_trace: true, mem: Vec::new(), - cache: cache, return_data: ReturnData::empty(), - _type: PhantomData::default(), + _type: PhantomData, } } - fn verify_instruction(&self, ext: &vm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack) -> vm::Result<()> { + /// Execute a single step on the VM. + #[inline(always)] + pub fn step(&mut self, ext: &mut vm::Ext) -> InterpreterResult { + if self.done { + return InterpreterResult::Stopped; + } + + let result = if self.gasometer.is_none() { + InterpreterResult::Done(Err(vm::Error::OutOfGas)) + } else if self.reader.len() == 0 { + InterpreterResult::Done(Ok(GasLeft::Known(self.gasometer.as_ref().expect("Gasometer None case is checked above; qed").current_gas.as_u256()))) + } else { + self.step_inner(ext).err().expect("step_inner never returns Ok(()); qed") + }; + + if let &InterpreterResult::Done(_) = &result { + self.done = true; + self.informant.done(); + } + return result; + } + + /// Inner helper function for step. + #[inline(always)] + fn step_inner(&mut self, ext: &mut vm::Ext) -> Result { + let opcode = self.reader.code[self.reader.position]; + let instruction = Instruction::from_u8(opcode); + self.reader.position += 1; + + // TODO: make compile-time removable if too much of a performance hit. 
+ self.do_trace = self.do_trace && ext.trace_next_instruction( + self.reader.position - 1, opcode, self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256(), + ); + + let instruction = match instruction { + Some(i) => i, + None => return Err(InterpreterResult::Done(Err(vm::Error::BadInstruction { + instruction: opcode + }))), + }; + + let info = instruction.info(); + self.verify_instruction(ext, instruction, info)?; + + // Calculate gas cost + let requirements = self.gasometer.as_mut().expect(GASOMETER_PROOF).requirements(ext, instruction, info, &self.stack, self.mem.size())?; + if self.do_trace { + ext.trace_prepare_execute(self.reader.position - 1, opcode, requirements.gas_cost.as_u256()); + } + + self.gasometer.as_mut().expect(GASOMETER_PROOF).verify_gas(&requirements.gas_cost)?; + self.mem.expand(requirements.memory_required_size); + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_mem_gas = requirements.memory_total_gas; + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas - requirements.gas_cost; + + evm_debug!({ informant.before_instruction(reader.position, instruction, info, &self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas, &stack) }); + + let (mem_written, store_written) = match self.do_trace { + true => (Self::mem_written(instruction, &self.stack), Self::store_written(instruction, &self.stack)), + false => (None, None), + }; + + // Execute instruction + let current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas; + let result = self.exec_instruction( + current_gas, ext, instruction, requirements.provide_gas + )?; + + evm_debug!({ informant.after_instruction(instruction) }); + + if let InstructionResult::UnusedGas(ref gas) = result { + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas = self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas + *gas; + } + + if self.do_trace { + ext.trace_executed( + self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256(), + self.stack.peek_top(info.ret), + mem_written.map(|(o, s)| (o, &(self.mem[o..o+s]))), + store_written, + ); + } + + // Advance + match result { + InstructionResult::JumpToPosition(position) => { + if self.valid_jump_destinations.is_none() { + let code_hash = self.params.code_hash.clone().unwrap_or_else(|| keccak(self.reader.code.as_ref())); + self.valid_jump_destinations = Some(self.cache.jump_destinations(&code_hash, &self.reader.code)); + } + let jump_destinations = self.valid_jump_destinations.as_ref().expect("jump_destinations are initialized on first jump; qed"); + let pos = self.verify_jump(position, jump_destinations)?; + self.reader.position = pos; + }, + InstructionResult::StopExecutionNeedsReturn {gas, init_off, init_size, apply} => { + let mem = mem::replace(&mut self.mem, Vec::new()); + return Err(InterpreterResult::Done(Ok(GasLeft::NeedsReturn { + gas_left: gas.as_u256(), + data: mem.into_return_data(init_off, init_size), + apply_state: apply + }))); + }, + InstructionResult::StopExecution => { + return Err(InterpreterResult::Done(Ok(GasLeft::Known(self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256())))); + }, + _ => {}, + } + + if self.reader.position >= self.reader.len() { + return Err(InterpreterResult::Done(Ok(GasLeft::Known(self.gasometer.as_mut().expect(GASOMETER_PROOF).current_gas.as_u256())))); + } + + Err(InterpreterResult::Continue) + } + + fn verify_instruction(&self, ext: &vm::Ext, instruction: Instruction, info: &InstructionInfo) -> 
vm::Result<()> { let schedule = ext.schedule(); if (instruction == instructions::DELEGATECALL && !schedule.have_delegate_call) || @@ -238,13 +350,13 @@ impl Interpreter { }); } - if !stack.has(info.args) { + if !self.stack.has(info.args) { Err(vm::Error::StackUnderflow { instruction: info.name, wanted: info.args, - on_stack: stack.size() + on_stack: self.stack.size() }) - } else if stack.size() - info.args + info.ret > schedule.stack_limit { + } else if self.stack.size() - info.args + info.ret > schedule.stack_limit { Err(vm::Error::OutOfStack { instruction: info.name, wanted: info.ret - info.args, @@ -289,24 +401,21 @@ impl Interpreter { fn exec_instruction( &mut self, gas: Cost, - params: &ActionParams, ext: &mut vm::Ext, instruction: Instruction, - code: &mut CodeReader, - stack: &mut Stack, provided: Option ) -> vm::Result> { match instruction { instructions::JUMP => { - let jump = stack.pop_back(); + let jump = self.stack.pop_back(); return Ok(InstructionResult::JumpToPosition( jump )); }, instructions::JUMPI => { - let jump = stack.pop_back(); - let condition = stack.pop_back(); - if !self.is_zero(&condition) { + let jump = self.stack.pop_back(); + let condition = self.stack.pop_back(); + if !condition.is_zero() { return Ok(InstructionResult::JumpToPosition( jump )); @@ -316,14 +425,14 @@ impl Interpreter { // ignore }, instructions::CREATE | instructions::CREATE2 => { - let endowment = stack.pop_back(); + let endowment = self.stack.pop_back(); let address_scheme = match instruction { instructions::CREATE => CreateContractAddress::FromSenderAndNonce, - instructions::CREATE2 => CreateContractAddress::FromSenderSaltAndCodeHash(stack.pop_back().into()), + instructions::CREATE2 => CreateContractAddress::FromSenderSaltAndCodeHash(self.stack.pop_back().into()), _ => unreachable!("instruction can only be CREATE/CREATE2 checked above; qed"), }; - let init_off = stack.pop_back(); - let init_size = stack.pop_back(); + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); let create_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is `CREATE`; qed"); @@ -334,9 +443,9 @@ impl Interpreter { // clear return data buffer before creating new call frame. self.return_data = ReturnData::empty(); - let can_create = ext.balance(&params.address)? >= endowment && ext.depth() < ext.schedule().max_depth; + let can_create = ext.balance(&self.params.address)?
>= endowment && ext.depth() < ext.schedule().max_depth; if !can_create { - stack.push(U256::zero()); + self.stack.push(U256::zero()); return Ok(InstructionResult::UnusedGas(create_gas)); } @@ -345,16 +454,16 @@ impl Interpreter { let create_result = ext.create(&create_gas.as_u256(), &endowment, contract_code, address_scheme); return match create_result { ContractCreateResult::Created(address, gas_left) => { - stack.push(address_to_u256(address)); + self.stack.push(address_to_u256(address)); Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))) }, ContractCreateResult::Reverted(gas_left, return_data) => { - stack.push(U256::zero()); + self.stack.push(U256::zero()); self.return_data = return_data; Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater."))) }, ContractCreateResult::Failed => { - stack.push(U256::zero()); + self.stack.push(U256::zero()); Ok(InstructionResult::Ok) }, }; @@ -362,9 +471,9 @@ impl Interpreter { instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL | instructions::STATICCALL => { assert!(ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, "overflow possible"); - stack.pop_back(); + self.stack.pop_back(); let call_gas = provided.expect("`provided` comes through Self::exec from `Gasometer::get_gas_cost_mem`; `gas_gas_mem_cost` guarantees `Some` when instruction is `CALL`/`CALLCODE`/`DELEGATECALL`/`CREATE`; this is one of `CALL`/`CALLCODE`/`DELEGATECALL`; qed"); - let code_address = stack.pop_back(); + let code_address = self.stack.pop_back(); let code_address = u256_to_address(&code_address); let value = if instruction == instructions::DELEGATECALL { @@ -372,13 +481,13 @@ impl Interpreter { } else if instruction == instructions::STATICCALL { Some(U256::zero()) } else { - Some(stack.pop_back()) + Some(self.stack.pop_back()) }; - let in_off = stack.pop_back(); - let in_size = stack.pop_back(); - let out_off = stack.pop_back(); - let out_size = stack.pop_back(); + let in_off = self.stack.pop_back(); + let in_size = self.stack.pop_back(); + let out_off = self.stack.pop_back(); + let out_size = self.stack.pop_back(); // Add stipend (only CALL|CALLCODE when value > 0) let call_gas = call_gas + value.map_or_else(|| Cost::from(0), |val| match val.is_zero() { @@ -392,15 +501,15 @@ impl Interpreter { if ext.is_static() && value.map_or(false, |v| !v.is_zero()) { return Err(vm::Error::MutableCallInStaticContext); } - let has_balance = ext.balance(&params.address)? >= value.expect("value set for all but delegate call; qed"); - (&params.address, &code_address, has_balance, CallType::Call) + let has_balance = ext.balance(&self.params.address)? >= value.expect("value set for all but delegate call; qed"); + (&self.params.address, &code_address, has_balance, CallType::Call) }, instructions::CALLCODE => { - let has_balance = ext.balance(&params.address)? >= value.expect("value set for all but delegate call; qed"); - (&params.address, &params.address, has_balance, CallType::CallCode) + let has_balance = ext.balance(&self.params.address)?
>= value.expect("value set for all but delegate call; qed"); + (&self.params.address, &self.params.address, has_balance, CallType::CallCode) }, - instructions::DELEGATECALL => (&params.sender, &params.address, true, CallType::DelegateCall), - instructions::STATICCALL => (&params.address, &code_address, true, CallType::StaticCall), + instructions::DELEGATECALL => (&self.params.sender, &self.params.address, true, CallType::DelegateCall), + instructions::STATICCALL => (&self.params.address, &code_address, true, CallType::StaticCall), _ => panic!(format!("Unexpected instruction {:?} in CALL branch.", instruction)) }; @@ -409,7 +518,7 @@ impl Interpreter { let can_call = has_balance && ext.depth() < ext.schedule().max_depth; if !can_call { - stack.push(U256::zero()); + self.stack.push(U256::zero()); return Ok(InstructionResult::UnusedGas(call_gas)); } @@ -423,30 +532,30 @@ impl Interpreter { return match call_result { MessageCallResult::Success(gas_left, data) => { - stack.push(U256::one()); + self.stack.push(U256::one()); self.return_data = data; Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) }, MessageCallResult::Reverted(gas_left, data) => { - stack.push(U256::zero()); + self.stack.push(U256::zero()); self.return_data = data; Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) }, MessageCallResult::Failed => { - stack.push(U256::zero()); + self.stack.push(U256::zero()); Ok(InstructionResult::Ok) }, }; }, instructions::RETURN => { - let init_off = stack.pop_back(); - let init_size = stack.pop_back(); + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: true}) }, instructions::REVERT => { - let init_off = stack.pop_back(); - let init_size = stack.pop_back(); + let init_off = self.stack.pop_back(); + let init_size = self.stack.pop_back(); return Ok(InstructionResult::StopExecutionNeedsReturn {gas: gas, init_off: init_off, init_size: init_size, apply: false}) }, @@ -454,16 +563,16 @@ impl Interpreter { return Ok(InstructionResult::StopExecution); }, instructions::SUICIDE => { - let address = stack.pop_back(); + let address = self.stack.pop_back(); ext.suicide(&u256_to_address(&address))?; return Ok(InstructionResult::StopExecution); }, instructions::LOG0 | instructions::LOG1 | instructions::LOG2 | instructions::LOG3 | instructions::LOG4 => { let no_of_topics = instruction.log_topics().expect("log_topics always return some for LOG* instructions; qed"); - let offset = stack.pop_back(); - let size = stack.pop_back(); - let topics = stack.pop_n(no_of_topics) + let offset = self.stack.pop_back(); + let size = self.stack.pop_back(); + let topics = self.stack.pop_n(no_of_topics) .iter() .map(H256::from) .collect(); @@ -478,157 +587,157 @@ impl Interpreter { instructions::PUSH25 | instructions::PUSH26 | instructions::PUSH27 | instructions::PUSH28 | instructions::PUSH29 | instructions::PUSH30 | instructions::PUSH31 | instructions::PUSH32 => { let bytes = instruction.push_bytes().expect("push_bytes always return some for PUSH* instructions"); - let val = code.read(bytes); - stack.push(val); + let val = self.reader.read(bytes); + self.stack.push(val); }, instructions::MLOAD => { - let word = self.mem.read(stack.pop_back()); - stack.push(U256::from(word)); + let word = self.mem.read(self.stack.pop_back()); + self.stack.push(U256::from(word)); },
instructions::MSTORE => { - let offset = stack.pop_back(); - let word = stack.pop_back(); + let offset = self.stack.pop_back(); + let word = self.stack.pop_back(); Memory::write(&mut self.mem, offset, word); }, instructions::MSTORE8 => { - let offset = stack.pop_back(); - let byte = stack.pop_back(); + let offset = self.stack.pop_back(); + let byte = self.stack.pop_back(); self.mem.write_byte(offset, byte); }, instructions::MSIZE => { - stack.push(U256::from(self.mem.size())); + self.stack.push(U256::from(self.mem.size())); }, instructions::SHA3 => { - let offset = stack.pop_back(); - let size = stack.pop_back(); + let offset = self.stack.pop_back(); + let size = self.stack.pop_back(); let k = keccak(self.mem.read_slice(offset, size)); - stack.push(U256::from(&*k)); + self.stack.push(U256::from(&*k)); }, instructions::SLOAD => { - let key = H256::from(&stack.pop_back()); + let key = H256::from(&self.stack.pop_back()); let word = U256::from(&*ext.storage_at(&key)?); - stack.push(word); + self.stack.push(word); }, instructions::SSTORE => { - let address = H256::from(&stack.pop_back()); - let val = stack.pop_back(); + let address = H256::from(&self.stack.pop_back()); + let val = self.stack.pop_back(); let current_val = U256::from(&*ext.storage_at(&address)?); // Increase refund for clear - if !self.is_zero(&current_val) && self.is_zero(&val) { + if !current_val.is_zero() && val.is_zero() { ext.inc_sstore_clears(); } ext.set_storage(address, H256::from(&val))?; }, instructions::PC => { - stack.push(U256::from(code.position - 1)); + self.stack.push(U256::from(self.reader.position - 1)); }, instructions::GAS => { - stack.push(gas.as_u256()); + self.stack.push(gas.as_u256()); }, instructions::ADDRESS => { - stack.push(address_to_u256(params.address.clone())); + self.stack.push(address_to_u256(self.params.address.clone())); }, instructions::ORIGIN => { - stack.push(address_to_u256(params.origin.clone())); + self.stack.push(address_to_u256(self.params.origin.clone())); }, instructions::BALANCE => { - let address = u256_to_address(&stack.pop_back()); + let address = u256_to_address(&self.stack.pop_back()); let balance = ext.balance(&address)?; - stack.push(balance); + self.stack.push(balance); }, instructions::CALLER => { - stack.push(address_to_u256(params.sender.clone())); + self.stack.push(address_to_u256(self.params.sender.clone())); }, instructions::CALLVALUE => { - stack.push(match params.value { + self.stack.push(match self.params.value { ActionValue::Transfer(val) | ActionValue::Apparent(val) => val }); }, instructions::CALLDATALOAD => { - let big_id = stack.pop_back(); + let big_id = self.stack.pop_back(); let id = big_id.low_u64() as usize; let max = id.wrapping_add(32); - if let Some(data) = params.data.as_ref() { + if let Some(data) = self.params.data.as_ref() { let bound = cmp::min(data.len(), max); if id < bound && big_id < U256::from(data.len()) { let mut v = [0u8; 32]; v[0..bound-id].clone_from_slice(&data[id..bound]); - stack.push(U256::from(&v[..])) + self.stack.push(U256::from(&v[..])) } else { - stack.push(U256::zero()) + self.stack.push(U256::zero()) } } else { - stack.push(U256::zero()) + self.stack.push(U256::zero()) } }, instructions::CALLDATASIZE => { - stack.push(U256::from(params.data.clone().map_or(0, |l| l.len()))); + self.stack.push(U256::from(self.params.data.as_ref().map_or(0, |l| l.len()))); }, instructions::CODESIZE => { - stack.push(U256::from(code.len())); + self.stack.push(U256::from(self.reader.len())); }, instructions::RETURNDATASIZE => { -
stack.push(U256::from(self.return_data.len())) + self.stack.push(U256::from(self.return_data.len())) }, instructions::EXTCODESIZE => { - let address = u256_to_address(&stack.pop_back()); + let address = u256_to_address(&self.stack.pop_back()); let len = ext.extcodesize(&address)?.unwrap_or(0); - stack.push(U256::from(len)); + self.stack.push(U256::from(len)); }, instructions::EXTCODEHASH => { - let address = u256_to_address(&stack.pop_back()); + let address = u256_to_address(&self.stack.pop_back()); let hash = ext.extcodehash(&address)?.unwrap_or_else(H256::zero); - stack.push(U256::from(hash)); + self.stack.push(U256::from(hash)); }, instructions::CALLDATACOPY => { - Self::copy_data_to_memory(&mut self.mem, stack, params.data.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); + Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &self.params.data.as_ref().map_or_else(|| &[] as &[u8], |d| &*d as &[u8])); }, instructions::RETURNDATACOPY => { { - let source_offset = stack.peek(1); - let size = stack.peek(2); + let source_offset = self.stack.peek(1); + let size = self.stack.peek(2); let return_data_len = U256::from(self.return_data.len()); if source_offset.saturating_add(*size) > return_data_len { return Err(vm::Error::OutOfBounds); } } - Self::copy_data_to_memory(&mut self.mem, stack, &*self.return_data); + Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &*self.return_data); }, instructions::CODECOPY => { - Self::copy_data_to_memory(&mut self.mem, stack, params.code.as_ref().map_or_else(|| &[] as &[u8], |c| &**c as &[u8])); + Self::copy_data_to_memory(&mut self.mem, &mut self.stack, &self.reader.code); }, instructions::EXTCODECOPY => { - let address = u256_to_address(&stack.pop_back()); + let address = u256_to_address(&self.stack.pop_back()); let code = ext.extcode(&address)?; Self::copy_data_to_memory( &mut self.mem, - stack, + &mut self.stack, code.as_ref().map(|c| &(*c)[..]).unwrap_or(&[]) ); }, instructions::GASPRICE => { - stack.push(params.gas_price.clone()); + self.stack.push(self.params.gas_price.clone()); }, instructions::BLOCKHASH => { - let block_number = stack.pop_back(); + let block_number = self.stack.pop_back(); let block_hash = ext.blockhash(&block_number); - stack.push(U256::from(&*block_hash)); + self.stack.push(U256::from(&*block_hash)); }, instructions::COINBASE => { - stack.push(address_to_u256(ext.env_info().author.clone())); + self.stack.push(address_to_u256(ext.env_info().author.clone())); }, instructions::TIMESTAMP => { - stack.push(U256::from(ext.env_info().timestamp)); + self.stack.push(U256::from(ext.env_info().timestamp)); }, instructions::NUMBER => { - stack.push(U256::from(ext.env_info().number)); + self.stack.push(U256::from(ext.env_info().number)); }, instructions::DIFFICULTY => { - stack.push(ext.env_info().difficulty.clone()); + self.stack.push(ext.env_info().difficulty.clone()); }, instructions::GASLIMIT => { - stack.push(ext.env_info().gas_limit.clone()); + self.stack.push(ext.env_info().gas_limit.clone()); }, // Stack instructions @@ -638,38 +747,38 @@ impl Interpreter { instructions::DUP9 | instructions::DUP10 | instructions::DUP11 | instructions::DUP12 | instructions::DUP13 | instructions::DUP14 | instructions::DUP15 | instructions::DUP16 => { let position = instruction.dup_position().expect("dup_position always return some for DUP* instructions"); - let val = stack.peek(position).clone(); - stack.push(val); + let val = self.stack.peek(position).clone(); + self.stack.push(val); }, instructions::SWAP1 | instructions::SWAP2 | 
instructions::SWAP3 | instructions::SWAP4 | instructions::SWAP5 | instructions::SWAP6 | instructions::SWAP7 | instructions::SWAP8 | instructions::SWAP9 | instructions::SWAP10 | instructions::SWAP11 | instructions::SWAP12 | instructions::SWAP13 | instructions::SWAP14 | instructions::SWAP15 | instructions::SWAP16 => { let position = instruction.swap_position().expect("swap_position always return some for SWAP* instructions"); - stack.swap_with_top(position) + self.stack.swap_with_top(position) }, instructions::POP => { - stack.pop_back(); + self.stack.pop_back(); }, instructions::ADD => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a.overflowing_add(b).0); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_add(b).0); }, instructions::MUL => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a.overflowing_mul(b).0); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_mul(b).0); }, instructions::SUB => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a.overflowing_sub(b).0); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a.overflowing_sub(b).0); }, instructions::DIV => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(if !self.is_zero(&b) { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(if !b.is_zero() { match b { ONE => a, TWO => a >> 1, @@ -688,21 +797,21 @@ impl Interpreter { }); }, instructions::MOD => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(if !self.is_zero(&b) { + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(if !b.is_zero() { a.overflowing_rem(b).0 } else { U256::zero() }); }, instructions::SDIV => { - let (a, sign_a) = get_and_reset_sign(stack.pop_back()); - let (b, sign_b) = get_and_reset_sign(stack.pop_back()); + let (a, sign_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, sign_b) = get_and_reset_sign(self.stack.pop_back()); // -2^255 let min = (U256::one() << 255) - U256::one(); - stack.push(if self.is_zero(&b) { + self.stack.push(if b.is_zero() { U256::zero() } else if a == min && b == !U256::zero() { min @@ -712,12 +821,12 @@ impl Interpreter { }); }, instructions::SMOD => { - let ua = stack.pop_back(); - let ub = stack.pop_back(); + let ua = self.stack.pop_back(); + let ub = self.stack.pop_back(); let (a, sign_a) = get_and_reset_sign(ua); let b = get_and_reset_sign(ub).0; - stack.push(if !self.is_zero(&b) { + self.stack.push(if !b.is_zero() { let c = a.overflowing_rem(b).0; set_sign(c, sign_a) } else { @@ -725,84 +834,84 @@ impl Interpreter { }); }, instructions::EXP => { - let base = stack.pop_back(); - let expon = stack.pop_back(); + let base = self.stack.pop_back(); + let expon = self.stack.pop_back(); let res = base.overflowing_pow(expon).0; - stack.push(res); + self.stack.push(res); }, instructions::NOT => { - let a = stack.pop_back(); - stack.push(!a); + let a = self.stack.pop_back(); + self.stack.push(!a); }, instructions::LT => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(self.bool_to_u256(a < b)); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a < b)); }, instructions::SLT => { - let (a, neg_a) = get_and_reset_sign(stack.pop_back()); - let (b, neg_b) = get_and_reset_sign(stack.pop_back()); + let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, 
neg_b) = get_and_reset_sign(self.stack.pop_back()); let is_positive_lt = a < b && !(neg_a | neg_b); let is_negative_lt = a > b && (neg_a & neg_b); let has_different_signs = neg_a && !neg_b; - stack.push(self.bool_to_u256(is_positive_lt | is_negative_lt | has_different_signs)); + self.stack.push(Self::bool_to_u256(is_positive_lt | is_negative_lt | has_different_signs)); }, instructions::GT => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(self.bool_to_u256(a > b)); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a > b)); }, instructions::SGT => { - let (a, neg_a) = get_and_reset_sign(stack.pop_back()); - let (b, neg_b) = get_and_reset_sign(stack.pop_back()); + let (a, neg_a) = get_and_reset_sign(self.stack.pop_back()); + let (b, neg_b) = get_and_reset_sign(self.stack.pop_back()); let is_positive_gt = a > b && !(neg_a | neg_b); let is_negative_gt = a < b && (neg_a & neg_b); let has_different_signs = !neg_a && neg_b; - stack.push(self.bool_to_u256(is_positive_gt | is_negative_gt | has_different_signs)); + self.stack.push(Self::bool_to_u256(is_positive_gt | is_negative_gt | has_different_signs)); }, instructions::EQ => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(self.bool_to_u256(a == b)); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a == b)); }, instructions::ISZERO => { - let a = stack.pop_back(); - stack.push(self.bool_to_u256(self.is_zero(&a))); + let a = self.stack.pop_back(); + self.stack.push(Self::bool_to_u256(a.is_zero())); }, instructions::AND => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a & b); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a & b); }, instructions::OR => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a | b); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a | b); }, instructions::XOR => { - let a = stack.pop_back(); - let b = stack.pop_back(); - stack.push(a ^ b); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + self.stack.push(a ^ b); }, instructions::BYTE => { - let word = stack.pop_back(); - let val = stack.pop_back(); + let word = self.stack.pop_back(); + let val = self.stack.pop_back(); let byte = match word < U256::from(32) { true => (val >> (8 * (31 - word.low_u64() as usize))) & U256::from(0xff), false => U256::zero() }; - stack.push(byte); + self.stack.push(byte); }, instructions::ADDMOD => { - let a = stack.pop_back(); - let b = stack.pop_back(); - let c = stack.pop_back(); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + let c = self.stack.pop_back(); - stack.push(if !self.is_zero(&c) { + self.stack.push(if !c.is_zero() { // upcast to 512 let a5 = U512::from(a); let res = a5.overflowing_add(U512::from(b)).0; @@ -813,11 +922,11 @@ impl Interpreter { }); }, instructions::MULMOD => { - let a = stack.pop_back(); - let b = stack.pop_back(); - let c = stack.pop_back(); + let a = self.stack.pop_back(); + let b = self.stack.pop_back(); + let c = self.stack.pop_back(); - stack.push(if !self.is_zero(&c) { + self.stack.push(if !c.is_zero() { let a5 = U512::from(a); let res = a5.overflowing_mul(U512::from(b)).0; let x = res.overflowing_rem(U512::from(c)).0; @@ -827,14 +936,14 @@ impl Interpreter { }); }, instructions::SIGNEXTEND => { - let bit = stack.pop_back(); + let bit = self.stack.pop_back(); if bit < U256::from(32) { - let number = 
stack.pop_back(); + let number = self.stack.pop_back(); let bit_position = (bit.low_u64() * 8 + 7) as usize; let bit = number.bit(bit_position); let mask = (U256::one() << bit_position) - U256::one(); - stack.push(if bit { + self.stack.push(if bit { number | !mask } else { number & mask @@ -844,28 +953,28 @@ impl Interpreter { instructions::SHL => { const CONST_256: U256 = U256([256, 0, 0, 0]); - let shift = stack.pop_back(); - let value = stack.pop_back(); + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); let result = if shift >= CONST_256 { U256::zero() } else { value << (shift.as_u32() as usize) }; - stack.push(result); + self.stack.push(result); }, instructions::SHR => { const CONST_256: U256 = U256([256, 0, 0, 0]); - let shift = stack.pop_back(); - let value = stack.pop_back(); + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); let result = if shift >= CONST_256 { U256::zero() } else { value >> (shift.as_u32() as usize) }; - stack.push(result); + self.stack.push(result); }, instructions::SAR => { // We cannot use get_and_reset_sign/set_sign here, because the rounding looks different. @@ -873,8 +982,8 @@ impl Interpreter { const CONST_256: U256 = U256([256, 0, 0, 0]); const CONST_HIBIT: U256 = U256([0, 0, 0, 0x8000000000000000]); - let shift = stack.pop_back(); - let value = stack.pop_back(); + let shift = self.stack.pop_back(); + let value = self.stack.pop_back(); let sign = value & CONST_HIBIT != U256::zero(); let result = if shift >= CONST_256 { @@ -891,7 +1000,7 @@ impl Interpreter { } shifted }; - stack.push(result); + self.stack.push(result); }, }; Ok(InstructionResult::Ok) @@ -936,11 +1045,7 @@ impl Interpreter { } } - fn is_zero(&self, val: &U256) -> bool { - val.is_zero() - } - - fn bool_to_u256(&self, val: bool) -> U256 { + fn bool_to_u256(val: bool) -> U256 { if val { U256::one() } else { @@ -979,12 +1084,11 @@ mod tests { use rustc_hex::FromHex; use vmtype::VMType; use factory::Factory; - use vm::{Vm, ActionParams, ActionValue}; + use vm::{self, Vm, ActionParams, ActionValue}; use vm::tests::{FakeExt, test_finalize}; - use ethereum_types::U256; - fn interpreter(gas: &U256) -> Box { - Factory::new(VMType::Interpreter, 1).create(gas) + fn interpreter(params: ActionParams, ext: &vm::Ext) -> Box { + Factory::new(VMType::Interpreter, 1).create(params, ext.schedule(), ext.depth()) } #[test] @@ -1002,8 +1106,8 @@ mod tests { ext.tracing = true; let gas_left = { - let mut vm = interpreter(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = interpreter(params, &ext); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(ext.calls.len(), 1); @@ -1024,8 +1128,8 @@ mod tests { ext.tracing = true; let err = { - let mut vm = interpreter(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).err().unwrap() + let mut vm = interpreter(params, &ext); + test_finalize(vm.exec(&mut ext)).err().unwrap() }; assert_eq!(err, ::vm::Error::OutOfBounds); diff --git a/ethcore/evm/src/lib.rs b/ethcore/evm/src/lib.rs index cd326a317..0a067e88f 100644 --- a/ethcore/evm/src/lib.rs +++ b/ethcore/evm/src/lib.rs @@ -23,6 +23,7 @@ extern crate heapsize; extern crate vm; extern crate keccak_hash as hash; extern crate memory_cache; +extern crate parity_bytes as bytes; #[macro_use] extern crate lazy_static; diff --git a/ethcore/evm/src/tests.rs b/ethcore/evm/src/tests.rs index b8f0df363..b1f6268bf 100644 --- a/ethcore/evm/src/tests.rs +++ b/ethcore/evm/src/tests.rs @@ -21,7 +21,7 @@ use std::sync::Arc; use std::collections::{HashMap, 
HashSet}; use rustc_hex::FromHex; use ethereum_types::{U256, H256, Address}; -use vm::{self, ActionParams, ActionValue}; +use vm::{self, ActionParams, ActionValue, Ext}; use vm::tests::{FakeExt, FakeCall, FakeCallType, test_finalize}; use factory::Factory; use vmtype::VMType; @@ -38,8 +38,8 @@ fn test_add(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_988)); @@ -58,8 +58,8 @@ fn test_sha3(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_961)); @@ -78,8 +78,8 @@ fn test_address(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -100,8 +100,8 @@ fn test_origin(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -122,8 +122,8 @@ fn test_sender(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -157,8 +157,8 @@ fn test_extcodecopy(factory: super::Factory) { ext.codes.insert(sender, Arc::new(sender_code)); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_935)); @@ -177,8 +177,8 @@ fn test_log_empty(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(99_619)); @@ -209,8 +209,8 @@ fn test_log_sender(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(98_974)); @@ -234,8 +234,8 @@ fn test_blockhash(factory: super::Factory) { ext.blockhashes.insert(U256::zero(), blockhash.clone()); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_974)); @@ 
-256,8 +256,8 @@ fn test_calldataload(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_991)); @@ -277,8 +277,8 @@ fn test_author(factory: super::Factory) { ext.info.author = author; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -297,8 +297,8 @@ fn test_timestamp(factory: super::Factory) { ext.info.timestamp = timestamp; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -317,8 +317,8 @@ fn test_number(factory: super::Factory) { ext.info.number = number; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -337,8 +337,8 @@ fn test_difficulty(factory: super::Factory) { ext.info.difficulty = difficulty; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -357,8 +357,8 @@ fn test_gas_limit(factory: super::Factory) { ext.info.gas_limit = gas_limit; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(79_995)); @@ -375,8 +375,8 @@ fn test_mul(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "000000000000000000000000000000000000000000000000734349397b853383"); @@ -393,8 +393,8 @@ fn test_sub(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000012364ad0302"); @@ -411,8 +411,8 @@ fn test_div(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "000000000000000000000000000000000000000000000000000000000002e0ac"); @@ -429,8 +429,8 @@ fn test_div_zero(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - 
test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); @@ -447,8 +447,8 @@ fn test_mod(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000076b4b"); @@ -466,8 +466,8 @@ fn test_smod(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000076b4b"); @@ -485,8 +485,8 @@ fn test_sdiv(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "000000000000000000000000000000000000000000000000000000000002e0ac"); @@ -504,8 +504,8 @@ fn test_exp(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "90fd23767b60204c3d6fc8aec9e70a42a3f127140879c133a20129a597ed0c59"); @@ -524,8 +524,8 @@ fn test_comparison(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); @@ -545,8 +545,8 @@ fn test_signed_comparison(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); @@ -566,8 +566,8 @@ fn test_bitops(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "00000000000000000000000000000000000000000000000000000000000000f0"); @@ -589,8 +589,8 @@ fn test_addmod_mulmod(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000001"); @@ -610,8 +610,8 @@ fn 
test_byte(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000000"); @@ -629,8 +629,8 @@ fn test_signextend(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000fff"); @@ -649,8 +649,8 @@ fn test_badinstruction_int() { let mut ext = FakeExt::new(); let err = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap_err() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap_err() }; match err { @@ -669,8 +669,8 @@ fn test_pop(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "00000000000000000000000000000000000000000000000000000000000000f0"); @@ -689,8 +689,8 @@ fn test_extops(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, "0000000000000000000000000000000000000000000000000000000000000004"); // PC / CALLDATASIZE @@ -712,8 +712,8 @@ fn test_jumps(factory: super::Factory) { let mut ext = FakeExt::new(); let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_eq!(ext.sstore_clears, 1); @@ -740,8 +740,8 @@ fn test_calls(factory: super::Factory) { }; let gas_left = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_set_contains(&ext.calls, &FakeCall { @@ -781,8 +781,8 @@ fn test_create_in_staticcall(factory: super::Factory) { ext.is_static = true; let err = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap_err() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap_err() }; assert_eq!(err, vm::Error::MutableCallInStaticContext); @@ -1049,8 +1049,8 @@ fn push_two_pop_one_constantinople_test(factory: &super::Factory, opcode: u8, pu let mut ext = FakeExt::new_constantinople(); let _ = { - let mut vm = factory.create(¶ms.gas); - test_finalize(vm.exec(params, &mut ext)).unwrap() + let mut vm = factory.create(params, ext.schedule(), ext.depth()); + test_finalize(vm.exec(&mut ext)).unwrap() }; assert_store(&ext, 0, result); diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index b77cb050e..1d1a733ad 100644 --- a/ethcore/src/executive.rs +++ 
b/ethcore/src/executive.rs @@ -354,23 +354,22 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // Ordinary execution - keep VM in same thread if self.depth != depth_threshold { let vm_factory = self.state.vm_factory(); - let wasm = self.schedule.wasm.is_some(); + let origin_info = OriginInfo::from(&params); trace!(target: "executive", "ext.schedule.have_delegate_call: {}", self.schedule.have_delegate_call); - let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); - let mut vm = vm_factory.create(&params, wasm); - return vm.exec(params, &mut ext).finalize(ext); + let mut vm = vm_factory.create(params, self.schedule, self.depth); + let mut ext = self.as_externalities(origin_info, unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); + return vm.exec(&mut ext).finalize(ext); } // Start in new thread with stack size needed up to max depth crossbeam::scope(|scope| { let vm_factory = self.state.vm_factory(); - let max_depth = self.schedule.max_depth; - let wasm = self.schedule.wasm.is_some(); - let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); + let origin_info = OriginInfo::from(&params); - scope.builder().stack_size(::std::cmp::max(max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size)).spawn(move || { - let mut vm = vm_factory.create(&params, wasm); - vm.exec(params, &mut ext).finalize(ext) + scope.builder().stack_size(::std::cmp::max(self.schedule.max_depth.saturating_sub(depth_threshold) * STACK_SIZE_PER_DEPTH, local_stack_size)).spawn(move || { + let mut vm = vm_factory.create(params, self.schedule, self.depth); + let mut ext = self.as_externalities(origin_info, unconfirmed_substate, output_policy, tracer, vm_tracer, static_call); + vm.exec(&mut ext).finalize(ext) }).expect("Sub-thread creation cannot fail; the host might run out of resources; qed") }).join() } diff --git a/ethcore/src/factory.rs b/ethcore/src/factory.rs index c6b9b0f6d..dbfdcffc7 100644 --- a/ethcore/src/factory.rs +++ b/ethcore/src/factory.rs @@ -18,7 +18,7 @@ use trie::TrieFactory; use ethtrie::RlpCodec; use account_db::Factory as AccountFactory; use evm::{Factory as EvmFactory, VMType}; -use vm::{Vm, ActionParams}; +use vm::{Vm, ActionParams, Schedule}; use wasm::WasmInterpreter; use keccak_hasher::KeccakHasher; @@ -31,11 +31,11 @@ pub struct VmFactory { } impl VmFactory { - pub fn create(&self, params: &ActionParams, wasm: bool) -> Box { - if wasm && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) { - Box::new(WasmInterpreter) + pub fn create(&self, params: ActionParams, schedule: &Schedule, depth: usize) -> Box { + if schedule.wasm.is_some() && params.code.as_ref().map_or(false, |code| code.len() > 4 && &code[0..4] == WASM_MAGIC_NUMBER) { + Box::new(WasmInterpreter::new(params)) } else { - self.evm.create(&params.gas) + self.evm.create(params, schedule, depth) } } diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index a8fd4b453..3a04bbe5e 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -281,8 +281,8 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8] &mut tracer, &mut vm_tracer, )); - let mut evm = vm_factory.create(&params, schedule.wasm.is_some()); - let res = evm.exec(params, &mut ex); + let mut evm = vm_factory.create(params, &schedule, 0); + let res = evm.exec(&mut ex); // a return in finalize will not
alter callcreates let callcreates = ex.callcreates.clone(); (res.finalize(ex), callcreates) diff --git a/ethcore/vm/src/lib.rs b/ethcore/vm/src/lib.rs index 2c98cfcd2..314db030e 100644 --- a/ethcore/vm/src/lib.rs +++ b/ethcore/vm/src/lib.rs @@ -48,5 +48,5 @@ pub trait Vm { /// This function should be used to execute transaction. /// It returns either an error, a known amount of gas left, or parameters to be used /// to compute the final gas left. - fn exec(&mut self, params: ActionParams, ext: &mut Ext) -> Result; + fn exec(&mut self, ext: &mut Ext) -> Result; } diff --git a/ethcore/wasm/run/src/runner.rs b/ethcore/wasm/run/src/runner.rs index 3e24ced5d..a6b7b83a8 100644 --- a/ethcore/wasm/run/src/runner.rs +++ b/ethcore/wasm/run/src/runner.rs @@ -31,8 +31,8 @@ fn load_code>(p: P) -> io::Result> { Ok(result) } -fn wasm_interpreter() -> WasmInterpreter { - WasmInterpreter +fn wasm_interpreter(params: ActionParams) -> WasmInterpreter { + WasmInterpreter::new(params) } #[derive(Debug)] @@ -131,7 +131,7 @@ pub fn construct( params.params_type = ParamsType::Separate; Ok( - match wasm_interpreter().exec(params, ext)? { + match wasm_interpreter(params).exec(ext)? { GasLeft::Known(_) => Vec::new(), GasLeft::NeedsReturn { data, .. } => data.to_vec(), } @@ -192,9 +192,9 @@ pub fn run_fixture(fixture: &Fixture) -> Vec { } } - let mut interpreter = wasm_interpreter(); + let mut interpreter = wasm_interpreter(params); - let interpreter_return = match interpreter.exec(params, &mut ext) { + let interpreter_return = match interpreter.exec(&mut ext) { Ok(ret) => ret, Err(e) => { return Fail::runtime(e); } }; diff --git a/ethcore/wasm/src/lib.rs b/ethcore/wasm/src/lib.rs index 1fcfe9371..97758c192 100644 --- a/ethcore/wasm/src/lib.rs +++ b/ethcore/wasm/src/lib.rs @@ -69,7 +69,15 @@ impl From for vm::Error { } /// Wasm interpreter instance -pub struct WasmInterpreter; +pub struct WasmInterpreter { + params: ActionParams, +} + +impl WasmInterpreter { + pub fn new(params: ActionParams) -> Self { + WasmInterpreter { params } + } +} impl From for vm::Error { fn from(e: runtime::Error) -> Self { @@ -85,8 +93,8 @@ enum ExecutionOutcome { impl vm::Vm for WasmInterpreter { - fn exec(&mut self, params: ActionParams, ext: &mut vm::Ext) -> vm::Result { - let (module, data) = parser::payload(¶ms, ext.schedule().wasm())?; + fn exec(&mut self, ext: &mut vm::Ext) -> vm::Result { + let (module, data) = parser::payload(&self.params, ext.schedule().wasm())?; let loaded_module = wasmi::Module::from_parity_wasm_module(module).map_err(Error::Interpreter)?; @@ -97,7 +105,7 @@ impl vm::Vm for WasmInterpreter { &wasmi::ImportsBuilder::new().with_resolver("env", &instantiation_resolver) ).map_err(Error::Interpreter)?; - let adjusted_gas = params.gas * U256::from(ext.schedule().wasm().opcodes_div) / + let adjusted_gas = self.params.gas * U256::from(ext.schedule().wasm().opcodes_div) / U256::from(ext.schedule().wasm().opcodes_mul); if adjusted_gas > ::std::u64::MAX.into() @@ -116,11 +124,11 @@ impl vm::Vm for WasmInterpreter { adjusted_gas.low_u64(), data.to_vec(), RuntimeContext { - address: params.address, - sender: params.sender, - origin: params.origin, - code_address: params.code_address, - value: params.value.value(), + address: self.params.address, + sender: self.params.sender, + origin: self.params.origin, + code_address: self.params.code_address, + value: self.params.value.value(), }, ); diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index e72cc15c8..fdbb54590 100644 --- a/ethcore/wasm/src/tests.rs +++ 
b/ethcore/wasm/src/tests.rs @@ -47,8 +47,8 @@ macro_rules! reqrep_test { fake_ext.info = $info; fake_ext.blockhashes = $block_hashes; - let mut interpreter = wasm_interpreter(); - interpreter.exec(params, &mut fake_ext) + let mut interpreter = wasm_interpreter(params); + interpreter.exec(&mut fake_ext) .map(|result| match result { GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -65,8 +65,8 @@ fn test_finalize(res: Result) -> Result { } } -fn wasm_interpreter() -> WasmInterpreter { - WasmInterpreter +fn wasm_interpreter(params: ActionParams) -> WasmInterpreter { + WasmInterpreter::new(params) } /// Empty contract does almost nothing except producing 1 (one) local node debug log message @@ -82,8 +82,8 @@ fn empty() { let mut ext = FakeExt::new().with_wasm(); let gas_left = { - let mut interpreter = wasm_interpreter(); - test_finalize(interpreter.exec(params, &mut ext)).unwrap() + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext)).unwrap() }; assert_eq!(gas_left, U256::from(96_926)); @@ -111,8 +111,8 @@ fn logger() { let mut ext = FakeExt::new().with_wasm(); let gas_left = { - let mut interpreter = wasm_interpreter(); - test_finalize(interpreter.exec(params, &mut ext)).unwrap() + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext)).unwrap() }; let address_val: H256 = address.into(); @@ -160,8 +160,8 @@ fn identity() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Identity contract should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -195,8 +195,8 @@ fn dispersion() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Dispersion routine should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -223,8 +223,8 @@ fn suicide_not() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Suicidal contract should return payload when had not actualy killed himself"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -256,8 +256,8 @@ fn suicide() { let mut ext = FakeExt::new().with_wasm(); let gas_left = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute 
without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(gas) => gas, GasLeft::NeedsReturn { .. } => { @@ -284,8 +284,8 @@ fn create() { ext.schedule.wasm.as_mut().unwrap().have_create2 = true; let gas_left = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Create contract always return 40 bytes of the creation address, or in the case where it fails, return 40 bytes of zero."); @@ -346,8 +346,8 @@ fn call_msg() { ext.balances.insert(receiver.clone(), U256::from(10000000000u64)); let gas_left = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(gas_left) => gas_left, GasLeft::NeedsReturn { .. } => { panic!("Call test should not return payload"); }, @@ -389,8 +389,8 @@ fn call_code() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Call test should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -437,8 +437,8 @@ fn call_static() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Static call test should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -478,8 +478,8 @@ fn realloc() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("Realloc should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -500,8 +500,8 @@ fn alloc() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { 
panic!("alloc test should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -527,8 +527,8 @@ fn storage_read() { ext.store.insert("0100000000000000000000000000000000000000000000000000000000000000".into(), address.into()); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("storage_read should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -553,8 +553,8 @@ fn keccak() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("keccak should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -700,8 +700,8 @@ fn storage_metering() { ]); let gas_left = { - let mut interpreter = wasm_interpreter(); - test_finalize(interpreter.exec(params, &mut ext)).unwrap() + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext)).unwrap() }; // 0 -> not 0 @@ -719,8 +719,8 @@ fn storage_metering() { ]); let gas_left = { - let mut interpreter = wasm_interpreter(); - test_finalize(interpreter.exec(params, &mut ext)).unwrap() + let mut interpreter = wasm_interpreter(params); + test_finalize(interpreter.exec(&mut ext)).unwrap() }; // not 0 -> not 0 @@ -829,8 +829,8 @@ fn embedded_keccak() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("keccak should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -857,8 +857,8 @@ fn events() { let mut ext = FakeExt::new().with_wasm(); let (gas_left, result) = { - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors"); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); match result { GasLeft::Known(_) => { panic!("events should return payload"); }, GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()), @@ -897,8 +897,8 @@ fn recursive() { let mut ext = FakeExt::new().with_wasm(); - let mut interpreter = wasm_interpreter(); - let result = interpreter.exec(params, &mut ext); + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext); // We expect that stack overflow will occur and it should be generated by // deterministic stack metering. 
Exceeding deterministic stack height limit From ff716e77990fecd5a64ba80ddda98e8b412c899b Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 14 Aug 2018 05:27:13 +0800 Subject: [PATCH 23/48] Remove pass-by-reference return data value from executive (#9211) * Remove pass-by-reference return data value from executive * Fix tests * Fix a missing test output * typo: wasm_activation_test * Tracing change in output * json_tests: fix compile * typo: 0..32 -> ..32 to keep it consistent with other occurance * Fix tests --- ethcore/evm/src/evm.rs | 2 +- ethcore/evm/src/interpreter/mod.rs | 15 ++-- ethcore/src/client/evm_test_client.rs | 4 +- ethcore/src/executive.rs | 102 ++++++++++++++------------ ethcore/src/externalities.rs | 79 +++++++++----------- ethcore/src/json_tests/executive.rs | 13 ++-- ethcore/src/machine.rs | 13 ++-- ethcore/src/spec/spec.rs | 2 +- ethcore/src/tests/evm.rs | 15 ++-- ethcore/vm/src/ext.rs | 1 - ethcore/vm/src/tests.rs | 1 - ethcore/wasm/src/runtime.rs | 12 ++- 12 files changed, 137 insertions(+), 122 deletions(-) diff --git a/ethcore/evm/src/evm.rs b/ethcore/evm/src/evm.rs index 9d2ff0cb1..08b4b0981 100644 --- a/ethcore/evm/src/evm.rs +++ b/ethcore/evm/src/evm.rs @@ -45,7 +45,7 @@ impl Finalize for Result { fn finalize(self, ext: E) -> Result { match self { Ok(GasLeft::Known(gas_left)) => Ok(FinalizationResult { gas_left: gas_left, apply_state: true, return_data: ReturnData::empty() }), - Ok(GasLeft::NeedsReturn {gas_left, data, apply_state}) => ext.ret(&gas_left, &data, apply_state).map(|gas_left| FinalizationResult { + Ok(GasLeft::NeedsReturn { gas_left, data, apply_state }) => ext.ret(&gas_left, &data, apply_state).map(|gas_left| FinalizationResult { gas_left: gas_left, apply_state: apply_state, return_data: data, diff --git a/ethcore/evm/src/interpreter/mod.rs b/ethcore/evm/src/interpreter/mod.rs index fefa3a60b..b535b0833 100644 --- a/ethcore/evm/src/interpreter/mod.rs +++ b/ethcore/evm/src/interpreter/mod.rs @@ -523,20 +523,25 @@ impl Interpreter { } let call_result = { - // we need to write and read from memory in the same time - // and we don't want to copy - let input = unsafe { ::std::mem::transmute(self.mem.read_slice(in_off, in_size)) }; - let output = self.mem.writeable_slice(out_off, out_size); - ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output, call_type) + let input = self.mem.read_slice(in_off, in_size); + ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, call_type) }; + let output = self.mem.writeable_slice(out_off, out_size); + return match call_result { MessageCallResult::Success(gas_left, data) => { + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); + self.stack.push(U256::one()); self.return_data = data; Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) }, MessageCallResult::Reverted(gas_left, data) => { + let len = cmp::min(output.len(), data.len()); + (&mut output[..len]).copy_from_slice(&data[..len]); + self.stack.push(U256::zero()); self.return_data = data; Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater than current one"))) diff --git a/ethcore/src/client/evm_test_client.rs b/ethcore/src/client/evm_test_client.rs index ace761723..c54dc2a98 100644 --- a/ethcore/src/client/evm_test_client.rs +++ b/ethcore/src/client/evm_test_client.rs @@ -19,7 +19,7 @@ use std::fmt; use std::sync::Arc; use 
ethereum_types::{H256, U256, H160}; -use {factory, journaldb, trie, kvdb_memorydb, bytes}; +use {factory, journaldb, trie, kvdb_memorydb}; use kvdb::{self, KeyValueDB}; use {state, state_db, client, executive, trace, transaction, db, spec, pod_state, log_entry, receipt}; use factory::Factories; @@ -183,14 +183,12 @@ impl<'a> EvmTestClient<'a> { gas_limit: *genesis.gas_limit(), }; let mut substate = state::Substate::new(); - let mut output = vec![]; let machine = self.spec.engine.machine(); let schedule = machine.schedule(info.number); let mut executive = executive::Executive::new(&mut self.state, &info, &machine, &schedule); executive.call( params, &mut substate, - bytes::BytesRef::Flexible(&mut output), tracer, vm_tracer, ).map_err(EvmTestError::Evm) diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 1d1a733ad..17d870fcb 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -204,7 +204,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { &'any mut self, origin_info: OriginInfo, substate: &'any mut Substate, - output: OutputPolicy<'any, 'any>, + output: OutputPolicy, tracer: &'any mut T, vm_tracer: &'any mut V, static_call: bool, @@ -312,8 +312,12 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { call_type: CallType::None, params_type: vm::ParamsType::Embedded, }; - let mut out = if output_from_create { Some(vec![]) } else { None }; - (self.create(params, &mut substate, &mut out, &mut tracer, &mut vm_tracer), out.unwrap_or_else(Vec::new)) + let res = self.create(params, &mut substate, &mut tracer, &mut vm_tracer); + let out = match &res { + Ok(res) if output_from_create => res.return_data.to_vec(), + _ => Vec::new(), + }; + (res, out) }, Action::Call(ref address) => { let params = ActionParams { @@ -330,8 +334,12 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { call_type: CallType::Call, params_type: vm::ParamsType::Separate, }; - let mut out = vec![]; - (self.call(params, &mut substate, BytesRef::Flexible(&mut out), &mut tracer, &mut vm_tracer), out) + let res = self.call(params, &mut substate, &mut tracer, &mut vm_tracer); + let out = match &res { + Ok(res) => res.return_data.to_vec(), + _ => Vec::new(), + }; + (res, out) } }; @@ -382,7 +390,6 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { &mut self, params: ActionParams, substate: &mut Substate, - mut output: BytesRef, tracer: &mut T, vm_tracer: &mut V ) -> vm::Result where T: Tracer, V: VMTracer { @@ -432,7 +439,6 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { Err(evm_err) } else { self.state.discard_checkpoint(); - output.write(0, &builtin_out_buffer); // Trace only top level calls and calls with balance transfer to builtins. 
The reason why we don't // trace all internal calls to builtin contracts is that memcpy (IDENTITY) is a heavily used @@ -444,7 +450,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { if self.depth == 0 || is_transferred { let mut trace_output = tracer.prepare_trace_output(); if let Some(out) = trace_output.as_mut() { - *out = output.to_owned(); + *out = builtin_out_buffer.clone(); } tracer.trace_call( @@ -485,7 +491,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let mut subvmtracer = vm_tracer.prepare_subtrace(params.code.as_ref().expect("scope is conditional on params.code.is_some(); qed")); let res = { - self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return(output, trace_output.as_mut()), &mut subtracer, &mut subvmtracer) + self.exec_vm(params, &mut unconfirmed_substate, OutputPolicy::Return, &mut subtracer, &mut subvmtracer) }; vm_tracer.done_subtrace(subvmtracer); @@ -494,12 +500,15 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let traces = subtracer.drain(); match res { - Ok(ref res) if res.apply_state => tracer.trace_call( - trace_info, - gas - res.gas_left, - trace_output, - traces - ), + Ok(ref res) if res.apply_state => { + trace_output.as_mut().map(|d| *d = res.return_data.to_vec()); + tracer.trace_call( + trace_info, + gas - res.gas_left, + trace_output, + traces + ); + }, Ok(_) => tracer.trace_failed_call(trace_info, traces, vm::Error::Reverted.into()), Err(ref e) => tracer.trace_failed_call(trace_info, traces, e.into()), }; @@ -530,7 +539,6 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { &mut self, params: ActionParams, substate: &mut Substate, - output: &mut Option, tracer: &mut T, vm_tracer: &mut V, ) -> vm::Result where T: Tracer, V: VMTracer { @@ -579,7 +587,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let res = self.exec_vm( params, &mut unconfirmed_substate, - OutputPolicy::InitContract(output.as_mut().or(trace_output.as_mut())), + OutputPolicy::InitContract, &mut subtracer, &mut subvmtracer ); @@ -587,13 +595,16 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { vm_tracer.done_subtrace(subvmtracer); match res { - Ok(ref res) if res.apply_state => tracer.trace_create( - trace_info, - gas - res.gas_left, - trace_output.map(|data| output.as_ref().map(|out| out.to_vec()).unwrap_or(data)), - created, - subtracer.drain() - ), + Ok(ref res) if res.apply_state => { + trace_output.as_mut().map(|trace| *trace = res.return_data.to_vec()); + tracer.trace_create( + trace_info, + gas - res.gas_left, + trace_output, + created, + subtracer.drain() + ); + } Ok(_) => tracer.trace_failed_create(trace_info, subtracer.drain(), vm::Error::Reverted.into()), Err(ref e) => tracer.trace_failed_create(trace_info, subtracer.drain(), e.into()) }; @@ -715,7 +726,6 @@ mod tests { use ethkey::{Generator, Random}; use super::*; use ethereum_types::{H256, U256, U512, Address}; - use bytes::BytesRef; use vm::{ActionParams, ActionValue, CallType, EnvInfo, CreateContractAddress}; use evm::{Factory, VMType}; use error::ExecutionError; @@ -766,7 +776,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(79_975)); @@ -824,7 +834,7 @@ mod tests { let FinalizationResult { gas_left, .. 
} = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(62_976)); @@ -868,8 +878,7 @@ mod tests { let mut vm_tracer = ExecutiveVMTracer::toplevel(); let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let output = BytesRef::Fixed(&mut[0u8;0]); - ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap(); + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap(); assert_eq!(tracer.drain(), vec![FlatTrace { action: trace::Action::Call(trace::Call { @@ -896,7 +905,7 @@ mod tests { call_type: CallType::Call }), result: trace::Res::Call(trace::CallResult { gas_used: 600.into(), - output: vec![] + output: vec![0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 156, 17, 133, 165, 197, 233, 252, 84, 97, 40, 8, 151, 126, 232, 245, 72, 178, 37, 141, 49] }), subtraces: 0, trace_address: vec![0].into_iter().collect(), @@ -954,8 +963,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let output = BytesRef::Fixed(&mut[0u8;0]); - ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap() + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap() }; assert_eq!(gas_left, U256::from(44_752)); @@ -1071,8 +1079,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - let output = BytesRef::Fixed(&mut[0u8;0]); - ex.call(params, &mut substate, output, &mut tracer, &mut vm_tracer).unwrap() + ex.call(params, &mut substate, &mut tracer, &mut vm_tracer).unwrap() }; assert_eq!(gas_left, U256::from(62967)); @@ -1144,7 +1151,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params.clone(), &mut substate, &mut None, &mut tracer, &mut vm_tracer).unwrap() + ex.create(params.clone(), &mut substate, &mut tracer, &mut vm_tracer).unwrap() }; assert_eq!(gas_left, U256::from(96_776)); @@ -1230,7 +1237,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(62_976)); @@ -1282,7 +1289,7 @@ mod tests { { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer).unwrap(); + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap(); } assert_eq!(substate.contracts_created.len(), 1); @@ -1343,7 +1350,7 @@ mod tests { let FinalizationResult { gas_left, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(73_237)); @@ -1388,7 +1395,7 @@ mod tests { let FinalizationResult { gas_left, .. 
} = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, BytesRef::Fixed(&mut []), &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; assert_eq!(gas_left, U256::from(59_870)); @@ -1562,7 +1569,7 @@ mod tests { let result = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) + ex.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) }; match result { @@ -1595,10 +1602,11 @@ mod tests { let mut substate = Substate::new(); let mut output = [0u8; 14]; - let FinalizationResult { gas_left: result, .. } = { + let FinalizationResult { gas_left: result, return_data, .. } = { let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer).unwrap() }; + (&mut output).copy_from_slice(&return_data[..(cmp::min(14, return_data.len()))]); assert_eq!(result, U256::from(1)); assert_eq!(output[..], returns[..]); @@ -1638,11 +1646,12 @@ mod tests { let machine = ::ethereum::new_kovan_wasm_test_machine(); let mut output = [0u8; 20]; - let FinalizationResult { gas_left: result, .. } = { + let FinalizationResult { gas_left: result, return_data, .. } = { let schedule = machine.schedule(info.number); let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params.clone(), &mut Substate::new(), BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.call(params.clone(), &mut Substate::new(), &mut NoopTracer, &mut NoopVMTracer).unwrap() }; + (&mut output).copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); assert_eq!(result, U256::from(18433)); // Transaction successfully returned sender @@ -1652,11 +1661,12 @@ mod tests { info.number = 1; let mut output = [0u8; 20]; - let FinalizationResult { gas_left: result, .. } = { + let FinalizationResult { gas_left: result, return_data, .. } = { let schedule = machine.schedule(info.number); let mut ex = Executive::new(&mut state, &info, &machine, &schedule); - ex.call(params, &mut Substate::new(), BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer).unwrap() + ex.call(params, &mut Substate::new(), &mut NoopTracer, &mut NoopVMTracer).unwrap() }; + (&mut output[..((cmp::min(20, return_data.len())))]).copy_from_slice(&return_data[..(cmp::min(20, return_data.len()))]); assert_eq!(result, U256::from(20025)); // Since transaction errored due to wasm was not activated, result is just empty diff --git a/ethcore/src/externalities.rs b/ethcore/src/externalities.rs index 68d2c0817..74fedd0fd 100644 --- a/ethcore/src/externalities.rs +++ b/ethcore/src/externalities.rs @@ -18,7 +18,7 @@ use std::cmp; use std::sync::Arc; use ethereum_types::{H256, U256, Address}; -use bytes::{Bytes, BytesRef}; +use bytes::Bytes; use state::{Backend as StateBackend, State, Substate, CleanupMode}; use machine::EthereumMachine as Machine; use executive::*; @@ -32,12 +32,12 @@ use transaction::UNSIGNED_SENDER; use trace::{Tracer, VMTracer}; /// Policy for handling output data on `RETURN` opcode. -pub enum OutputPolicy<'a, 'b> { +pub enum OutputPolicy { /// Return reference to fixed sized output. /// Used for message calls. 
- Return(BytesRef<'a>, Option<&'b mut Bytes>), + Return, /// Init new contract as soon as `RETURN` is called. - InitContract(Option<&'b mut Bytes>), + InitContract, } /// Transaction properties that externalities need to know about. @@ -71,7 +71,7 @@ pub struct Externalities<'a, T: 'a, V: 'a, B: 'a> { substate: &'a mut Substate, machine: &'a Machine, schedule: &'a Schedule, - output: OutputPolicy<'a, 'a>, + output: OutputPolicy, tracer: &'a mut T, vm_tracer: &'a mut V, static_flag: bool, @@ -89,7 +89,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Externalities<'a, T, V, B> depth: usize, origin_info: OriginInfo, substate: &'a mut Substate, - output: OutputPolicy<'a, 'a>, + output: OutputPolicy, tracer: &'a mut T, vm_tracer: &'a mut V, static_flag: bool, @@ -171,9 +171,12 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> params_type: vm::ParamsType::Separate, }; - let mut output = H256::new(); let mut ex = Executive::new(self.state, self.env_info, self.machine, self.schedule); - let r = ex.call(params, self.substate, BytesRef::Fixed(&mut output), self.tracer, self.vm_tracer); + let r = ex.call(params, self.substate, self.tracer, self.vm_tracer); + let output = match &r { + Ok(ref r) => H256::from(&r.return_data[..32]), + _ => H256::new(), + }; trace!("ext: blockhash contract({}) -> {:?}({}) self.env_info.number={}\n", number, r, output, self.env_info.number); output } else { @@ -194,7 +197,13 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> } } - fn create(&mut self, gas: &U256, value: &U256, code: &[u8], address_scheme: CreateContractAddress) -> ContractCreateResult { + fn create( + &mut self, + gas: &U256, + value: &U256, + code: &[u8], + address_scheme: CreateContractAddress + ) -> ContractCreateResult { // create new contract address let (address, code_hash) = match self.state.nonce(&self.origin_info.address) { Ok(nonce) => contract_address(address_scheme, &self.origin_info.address, &nonce, &code), @@ -231,7 +240,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.schedule, self.depth, self.static_flag); // TODO: handle internal error separately - match ex.create(params, self.substate, &mut None, self.tracer, self.vm_tracer) { + match ex.create(params, self.substate, self.tracer, self.vm_tracer) { Ok(FinalizationResult{ gas_left, apply_state: true, .. 
}) => { self.substate.contracts_created.push(address.clone()); ContractCreateResult::Created(address, gas_left) @@ -243,14 +252,14 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> } } - fn call(&mut self, + fn call( + &mut self, gas: &U256, sender_address: &Address, receive_address: &Address, value: Option, data: &[u8], code_address: &Address, - output: &mut [u8], call_type: CallType ) -> MessageCallResult { trace!(target: "externalities", "call"); @@ -284,7 +293,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> let mut ex = Executive::from_parent(self.state, self.env_info, self.machine, self.schedule, self.depth, self.static_flag); - match ex.call(params, self.substate, BytesRef::Fixed(output), self.tracer, self.vm_tracer) { + match ex.call(params, self.substate, self.tracer, self.vm_tracer) { Ok(FinalizationResult{ gas_left, return_data, apply_state: true }) => MessageCallResult::Success(gas_left, return_data), Ok(FinalizationResult{ gas_left, return_data, apply_state: false }) => MessageCallResult::Reverted(gas_left, return_data), _ => MessageCallResult::Failed @@ -303,27 +312,13 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> Ok(self.state.code_size(address)?) } - fn ret(mut self, gas: &U256, data: &ReturnData, apply_state: bool) -> vm::Result + fn ret(self, gas: &U256, data: &ReturnData, apply_state: bool) -> vm::Result where Self: Sized { - let handle_copy = |to: &mut Option<&mut Bytes>| { - to.as_mut().map(|b| **b = data.to_vec()); - }; match self.output { - OutputPolicy::Return(BytesRef::Fixed(ref mut slice), ref mut copy) => { - handle_copy(copy); - - let len = cmp::min(slice.len(), data.len()); - (&mut slice[..len]).copy_from_slice(&data[..len]); + OutputPolicy::Return => { Ok(*gas) }, - OutputPolicy::Return(BytesRef::Flexible(ref mut vec), ref mut copy) => { - handle_copy(copy); - - vec.clear(); - vec.extend_from_slice(&*data); - Ok(*gas) - }, - OutputPolicy::InitContract(ref mut copy) if apply_state => { + OutputPolicy::InitContract if apply_state => { let return_cost = U256::from(data.len()) * U256::from(self.schedule.create_data_gas); if return_cost > *gas || data.len() > self.schedule.create_data_limit { return match self.schedule.exceptional_failed_code_deposit { @@ -331,11 +326,10 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for Externalities<'a, T, V, B> false => Ok(*gas) } } - handle_copy(copy); self.state.init_code(&self.origin_info.address, data.to_vec())?; Ok(*gas - return_cost) }, - OutputPolicy::InitContract(_) => { + OutputPolicy::InitContract => { Ok(*gas) }, } @@ -479,7 +473,7 @@ mod tests { let mut tracer = NoopTracer; let mut vm_tracer = NoopVMTracer; - let ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); assert_eq!(ext.env_info().number, 100); } @@ -491,7 +485,7 @@ mod tests { let mut tracer = NoopTracer; let mut vm_tracer = NoopVMTracer; - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut 
setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::().unwrap()); @@ -515,7 +509,7 @@ mod tests { let mut tracer = NoopTracer; let mut vm_tracer = NoopVMTracer; - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); let hash = ext.blockhash(&"0000000000000000000000000000000000000000000000000000000000120000".parse::().unwrap()); @@ -530,9 +524,7 @@ mod tests { let mut tracer = NoopTracer; let mut vm_tracer = NoopVMTracer; - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); - - let mut output = vec![]; + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); // this should panic because we have no balance on any account ext.call( @@ -542,7 +534,6 @@ mod tests { Some("0000000000000000000000000000000000000000000000000000000000150000".parse::().unwrap()), &[], &Address::new(), - &mut output, CallType::Call ); } @@ -558,7 +549,7 @@ mod tests { let mut vm_tracer = NoopVMTracer; { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); ext.log(log_topics, &log_data).unwrap(); } @@ -575,7 +566,7 @@ mod tests { let mut vm_tracer = NoopVMTracer; { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); ext.suicide(refund_account).unwrap(); } @@ -592,7 +583,7 @@ mod tests { let mut vm_tracer = NoopVMTracer; let address = { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); match ext.create(&U256::max_value(), &U256::zero(), &[], CreateContractAddress::FromSenderAndNonce) { ContractCreateResult::Created(address, _) => address, _ => panic!("Test create failed; expected Created, got Failed/Reverted."), @@ -612,8 +603,8 @@ mod tests { let mut vm_tracer = NoopVMTracer; let address = { - let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), 
&mut setup.sub_state, OutputPolicy::InitContract(None), &mut tracer, &mut vm_tracer, false); - + let mut ext = Externalities::new(state, &setup.env_info, &setup.machine, &setup.schedule, 0, get_test_origin(), &mut setup.sub_state, OutputPolicy::InitContract, &mut tracer, &mut vm_tracer, false); + match ext.create(&U256::max_value(), &U256::zero(), &[], CreateContractAddress::FromSenderSaltAndCodeHash(H256::default())) { ContractCreateResult::Created(address, _) => address, _ => panic!("Test create failed; expected Created, got Failed/Reverted."), diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 3a04bbe5e..20e7ebb20 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -30,7 +30,7 @@ use test_helpers::get_temp_state; use ethjson; use trace::{Tracer, NoopTracer}; use trace::{VMTracer, NoopVMTracer}; -use bytes::{Bytes, BytesRef}; +use bytes::Bytes; use ethtrie; use rlp::RlpStream; use hash::keccak; @@ -90,7 +90,7 @@ impl<'a, T: 'a, V: 'a, B: 'a> TestExt<'a, T, V, B> depth: usize, origin_info: OriginInfo, substate: &'a mut Substate, - output: OutputPolicy<'a, 'a>, + output: OutputPolicy, address: Address, tracer: &'a mut T, vm_tracer: &'a mut V, @@ -154,7 +154,6 @@ impl<'a, T: 'a, V: 'a, B: 'a> Ext for TestExt<'a, T, V, B> value: Option, data: &[u8], _code_address: &Address, - _output: &mut [u8], _call_type: CallType ) -> MessageCallResult { self.callcreates.push(CallCreate { @@ -262,7 +261,6 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8] let mut substate = Substate::new(); let mut tracer = NoopTracer; let mut vm_tracer = NoopVMTracer; - let mut output = vec![]; let vm_factory = state.vm_factory(); // execute @@ -276,7 +274,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8] 0, OriginInfo::from(¶ms), &mut substate, - OutputPolicy::Return(BytesRef::Flexible(&mut output), None), + OutputPolicy::Return, params.address.clone(), &mut tracer, &mut vm_tracer, @@ -288,6 +286,11 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8] (res.finalize(ex), callcreates) }; + let output = match &res { + Ok(res) => res.return_data.to_vec(), + Err(_) => Vec::new(), + }; + let log_hash = { let mut rlp = RlpStream::new_list(substate.logs.len()); for l in &substate.logs { diff --git a/ethcore/src/machine.rs b/ethcore/src/machine.rs index d487fff9b..fdeed4c8e 100644 --- a/ethcore/src/machine.rs +++ b/ethcore/src/machine.rs @@ -33,7 +33,6 @@ use transaction::{self, SYSTEM_ADDRESS, UnverifiedTransaction, SignedTransaction use tx_filter::TransactionFilter; use ethereum_types::{U256, Address}; -use bytes::BytesRef; use rlp::Rlp; use vm::{CallType, ActionParams, ActionValue, ParamsType}; use vm::{EnvInfo, Schedule, CreateContractAddress}; @@ -148,10 +147,14 @@ impl EthereumMachine { let schedule = self.schedule(env_info.number); let mut ex = Executive::new(&mut state, &env_info, self, &schedule); let mut substate = Substate::new(); - let mut output = Vec::new(); - if let Err(e) = ex.call(params, &mut substate, BytesRef::Flexible(&mut output), &mut NoopTracer, &mut NoopVMTracer) { - warn!("Encountered error on making system call: {}", e); - } + let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer); + let output = match res { + Ok(res) => res.return_data.to_vec(), + Err(e) => { + warn!("Encountered error on making system call: {}", e); + Vec::new() + } + }; Ok(output) } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index e842835f7..bcfa64ed1 100644 --- 
a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -655,7 +655,7 @@ impl Spec { let machine = self.engine.machine(); let schedule = machine.schedule(env_info.number); let mut exec = Executive::new(&mut state, &env_info, &machine, &schedule); - if let Err(e) = exec.create(params, &mut substate, &mut None, &mut NoopTracer, &mut NoopVMTracer) { + if let Err(e) = exec.create(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { warn!(target: "spec", "Genesis constructor execution at {} failed: {}.", address, e); } } diff --git a/ethcore/src/tests/evm.rs b/ethcore/src/tests/evm.rs index 239905fac..7befa47f6 100644 --- a/ethcore/src/tests/evm.rs +++ b/ethcore/src/tests/evm.rs @@ -29,7 +29,6 @@ use transaction::SYSTEM_ADDRESS; use rustc_hex::FromHex; use ethereum_types::{H256, Address}; -use bytes::BytesRef; evm_test!{test_blockhash_eip210: test_blockhash_eip210_int} fn test_blockhash_eip210(factory: Factory) { @@ -65,8 +64,7 @@ fn test_blockhash_eip210(factory: Factory) { let schedule = machine.schedule(env_info.number); let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); let mut substate = Substate::new(); - let mut output = []; - if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) { + if let Err(e) = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer) { panic!("Encountered error on updating last hashes: {}", e); } } @@ -89,9 +87,12 @@ fn test_blockhash_eip210(factory: Factory) { let schedule = machine.schedule(env_info.number); let mut ex = Executive::new(&mut state, &env_info, &machine, &schedule); let mut substate = Substate::new(); - let mut output = H256::new(); - if let Err(e) = ex.call(params, &mut substate, BytesRef::Fixed(&mut output), &mut NoopTracer, &mut NoopVMTracer) { - panic!("Encountered error on getting last hash: {}", e); - } + let res = ex.call(params, &mut substate, &mut NoopTracer, &mut NoopVMTracer); + let output = match res { + Ok(res) => H256::from(&res.return_data[..32]), + Err(e) => { + panic!("Encountered error on getting last hash: {}", e); + }, + }; assert_eq!(output, 255.into()); } diff --git a/ethcore/vm/src/ext.rs b/ethcore/vm/src/ext.rs index c1ce1b79f..1eb696f97 100644 --- a/ethcore/vm/src/ext.rs +++ b/ethcore/vm/src/ext.rs @@ -101,7 +101,6 @@ pub trait Ext { value: Option, data: &[u8], code_address: &Address, - output: &mut [u8], call_type: CallType ) -> MessageCallResult; diff --git a/ethcore/vm/src/tests.rs b/ethcore/vm/src/tests.rs index 4930e4219..3dac74484 100644 --- a/ethcore/vm/src/tests.rs +++ b/ethcore/vm/src/tests.rs @@ -155,7 +155,6 @@ impl Ext for FakeExt { value: Option, data: &[u8], code_address: &Address, - _output: &mut [u8], _call_type: CallType ) -> MessageCallResult { diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index 347023dd9..1c814ab7c 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -14,6 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::cmp; use ethereum_types::{U256, H256, Address}; use vm::{self, CallType}; use wasmi::{self, MemoryRef, RuntimeArgs, RuntimeValue, Error as InterpreterError, Trap, TrapKind}; @@ -447,12 +448,14 @@ impl<'a> Runtime<'a> { val, &payload, &address, - &mut result[..], call_type, ); match call_result { - vm::MessageCallResult::Success(gas_left, _) => { + vm::MessageCallResult::Success(gas_left, data) => { + let len = cmp::min(result.len(), data.len()); + (&mut result[..len]).copy_from_slice(&data[..len]); + // cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas self.gas_counter = self.gas_counter - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 @@ -461,7 +464,10 @@ self.memory.set(result_ptr, &result)?; Ok(0i32.into()) }, - vm::MessageCallResult::Reverted(gas_left, _) => { + vm::MessageCallResult::Reverted(gas_left, data) => { + let len = cmp::min(result.len(), data.len()); + (&mut result[..len]).copy_from_slice(&data[..len]); + // cannot overflow, before making call gas_counter was incremented with gas, and gas_left < gas self.gas_counter = self.gas_counter - gas_left.low_u64() * self.ext.schedule().wasm().opcodes_div as u64 From fcb6cc1e767efa813f8d2f8c77682ee62a3edeec Mon Sep 17 00:00:00 2001 From: cheme Date: Tue, 14 Aug 2018 09:58:29 +0200 Subject: [PATCH 24/48] Light client logs should include 'from_block' when querying logs (#9331) * Fix PubSub for logs when using the light client: before this fix, the pubsub process sent a query for every new block header (and for every subscription: there is room to optimize here), setting both the from and to bounds of the filter to that block number. However, the code that fetches logs was non-inclusive on its start bound, so with start bound = end bound we never queried any block (or its attached logs). * Use an Option iterator instead of `once`. Use the existing bloom function to check whether one bloom contains another. * Make the from-block header check explicit --- rpc/src/v1/helpers/light_fetch.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 1da2fdf1a..c63591caf 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -321,9 +321,15 @@ impl LightFetch { BlockId::Number(x) => Some(x), }; - match (block_number(filter.to_block), block_number(filter.from_block)) { - (Some(to), Some(from)) if to < from => return Either::A(future::ok(Vec::new())), - (Some(_), Some(_)) => {}, + let (from_block_number, from_block_header) = match self.client.block_header(filter.from_block) { + Some(from) => (from.number(), from), + None => return Either::A(future::err(errors::unknown_block())), + }; + + match block_number(filter.to_block) { + Some(to) if to < from_block_number || from_block_number > best_number + => return Either::A(future::ok(Vec::new())), + Some(_) => (), _ => return Either::A(future::err(errors::unknown_block())), } @@ -332,11 +338,11 @@ impl LightFetch { // match them with their numbers for easy sorting later. 
let bit_combos = filter.bloom_possibilities(); let receipts_futures: Vec<_> = self.client.ancestry_iter(filter.to_block) - .take_while(|ref hdr| BlockId::Number(hdr.number()) != filter.from_block) - .take_while(|ref hdr| BlockId::Hash(hdr.hash()) != filter.from_block) + .take_while(|ref hdr| hdr.number() != from_block_number) + .chain(Some(from_block_header)) .filter(|ref hdr| { let hdr_bloom = hdr.log_bloom(); - bit_combos.iter().find(|&bloom| hdr_bloom & *bloom == *bloom).is_some() + bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom)) }) .map(|hdr| (hdr.number(), request::BlockReceipts(hdr.into()))) .map(|(num, req)| self.on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, x))) From 726260112313e07416d87d2df83c322eaeaf6ceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 14 Aug 2018 12:11:56 +0200 Subject: [PATCH 25/48] Expose UnorderedIterator. (#9347) --- Cargo.lock | 6 +++--- transaction-pool/Cargo.toml | 2 +- transaction-pool/src/lib.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d69838383..54979b319 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -669,7 +669,7 @@ dependencies = [ "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", - "transaction-pool 1.12.2", + "transaction-pool 1.12.3", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2171,7 +2171,7 @@ dependencies = [ "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "transaction-pool 1.12.2", + "transaction-pool 1.12.3", "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", ] @@ -3330,7 +3330,7 @@ dependencies = [ [[package]] name = "transaction-pool" -version = "1.12.2" +version = "1.12.3" dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 4540f62a7..0f692e3c0 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Generic transaction pool." name = "transaction-pool" -version = "1.12.2" +version = "1.12.3" license = "GPL-3.0" authors = ["Parity Technologies "] diff --git a/transaction-pool/src/lib.rs b/transaction-pool/src/lib.rs index c23c6662c..4bddd003b 100644 --- a/transaction-pool/src/lib.rs +++ b/transaction-pool/src/lib.rs @@ -96,7 +96,7 @@ pub mod scoring; pub use self::error::{Error, ErrorKind}; pub use self::listener::{Listener, NoopListener}; pub use self::options::Options; -pub use self::pool::{Pool, PendingIterator, Transaction}; +pub use self::pool::{Pool, PendingIterator, UnorderedIterator, Transaction}; pub use self::ready::{Ready, Readiness}; pub use self::scoring::Scoring; pub use self::status::{LightStatus, Status}; From fe5301cebf00279ca71dd9ce193214a8cddccb9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 14 Aug 2018 16:13:11 +0200 Subject: [PATCH 26/48] More details in logs returned by light client (#9324) * Log details for light logs. * Create Log directly. 
--- rpc/src/v1/helpers/light_fetch.rs | 31 ++++++++++++++++++++++++------- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index c63591caf..14f08fe6c 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -20,8 +20,8 @@ use std::sync::Arc; use ethcore::basic_account::BasicAccount; use ethcore::encoded; -use ethcore::ids::BlockId; use ethcore::filter::Filter as EthcoreFilter; +use ethcore::ids::BlockId; use ethcore::receipt::Receipt; use jsonrpc_core::{Result, Error}; @@ -344,17 +344,34 @@ impl LightFetch { let hdr_bloom = hdr.log_bloom(); bit_combos.iter().any(|bloom| hdr_bloom.contains_bloom(bloom)) }) - .map(|hdr| (hdr.number(), request::BlockReceipts(hdr.into()))) - .map(|(num, req)| self.on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, x))) + .map(|hdr| (hdr.number(), hdr.hash(), request::BlockReceipts(hdr.into()))) + .map(|(num, hash, req)| self.on_demand.request(ctx, req).expect(NO_INVALID_BACK_REFS).map(move |x| (num, hash, x))) .collect(); // as the receipts come in, find logs within them which match the filter. // insert them into a BTreeMap to maintain order by number and block index. stream::futures_unordered(receipts_futures) - .fold(BTreeMap::new(), move |mut matches, (num, receipts)| { - for (block_index, log) in receipts.into_iter().flat_map(|r| r.logs).enumerate() { - if filter.matches(&log) { - matches.insert((num, block_index), log.into()); + .fold(BTreeMap::new(), move |mut matches, (num, hash, receipts)| { + let mut block_index = 0; + for (transaction_index, receipt) in receipts.into_iter().enumerate() { + for (transaction_log_index, log) in receipt.logs.into_iter().enumerate() { + if filter.matches(&log) { + matches.insert((num, block_index), Log { + address: log.address.into(), + topics: log.topics.into_iter().map(Into::into).collect(), + data: log.data.into(), + block_hash: Some(hash.into()), + block_number: Some(num.into()), + // No way to easily retrieve transaction hash, so let's just skip it. + transaction_hash: None, + transaction_index: Some(transaction_index.into()), + log_index: Some(block_index.into()), + transaction_log_index: Some(transaction_log_index.into()), + log_type: "mined".into(), + removed: false, + }); + } + block_index += 1; } } future::ok(matches) From 1ac4676f4bbf3024d53e7a80900c14d3a1129c91 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 14 Aug 2018 22:34:46 +0800 Subject: [PATCH 27/48] Remove prepare_trace_output and make sure prepare_trace_call and trace*call are balanced (#9353) This refactors `prepare_trace_output` to instead directly take the reference of return values, so that it's simpler and we save a stack item. This should also fixes [the issue](https://github.com/paritytech/parity-ethereum/pull/9236#issuecomment-408444995) @udoprog is facing. 
Replaces #9236 --- ethcore/src/executive.rs | 34 +++++++++++++-------------- ethcore/src/trace/executive_tracer.rs | 13 ++++------ ethcore/src/trace/mod.rs | 8 ++----- ethcore/src/trace/noop_tracer.rs | 11 ++------- 4 files changed, 25 insertions(+), 41 deletions(-) diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index 17d870fcb..5be54f724 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -423,8 +423,6 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let default = []; let data = if let Some(ref d) = params.data { d as &[u8] } else { &default as &[u8] }; - let trace_info = tracer.prepare_trace_call(¶ms); - let cost = builtin.cost(data); if cost <= params.gas { let mut builtin_out_buffer = Vec::new(); @@ -435,7 +433,12 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { if let Err(e) = result { self.state.revert_to_checkpoint(); let evm_err: vm::Error = e.into(); - tracer.trace_failed_call(trace_info, vec![], evm_err.clone().into()); + let trace_info = tracer.prepare_trace_call(¶ms); + tracer.trace_failed_call( + trace_info, + vec![], + evm_err.clone().into() + ); Err(evm_err) } else { self.state.discard_checkpoint(); @@ -448,15 +451,11 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { ActionValue::Apparent(_) => false, }; if self.depth == 0 || is_transferred { - let mut trace_output = tracer.prepare_trace_output(); - if let Some(out) = trace_output.as_mut() { - *out = builtin_out_buffer.clone(); - } - + let trace_info = tracer.prepare_trace_call(¶ms); tracer.trace_call( trace_info, cost, - trace_output, + &builtin_out_buffer, vec![] ); } @@ -472,13 +471,17 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // just drain the whole gas self.state.revert_to_checkpoint(); - tracer.trace_failed_call(trace_info, vec![], vm::Error::OutOfGas.into()); + let trace_info = tracer.prepare_trace_call(¶ms); + tracer.trace_failed_call( + trace_info, + vec![], + vm::Error::OutOfGas.into() + ); Err(vm::Error::OutOfGas) } } else { let trace_info = tracer.prepare_trace_call(¶ms); - let mut trace_output = tracer.prepare_trace_output(); let mut subtracer = tracer.subtracer(); let gas = params.gas; @@ -501,11 +504,10 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { let traces = subtracer.drain(); match res { Ok(ref res) if res.apply_state => { - trace_output.as_mut().map(|d| *d = res.return_data.to_vec()); tracer.trace_call( trace_info, gas - res.gas_left, - trace_output, + &res.return_data, traces ); }, @@ -522,7 +524,7 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { // otherwise it's just a basic transaction, only do tracing, if necessary. 
self.state.discard_checkpoint(); - tracer.trace_call(trace_info, U256::zero(), trace_output, vec![]); + tracer.trace_call(trace_info, U256::zero(), &[], vec![]); Ok(FinalizationResult { gas_left: params.gas, return_data: ReturnData::empty(), @@ -577,7 +579,6 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { } let trace_info = tracer.prepare_trace_create(¶ms); - let mut trace_output = tracer.prepare_trace_output(); let mut subtracer = tracer.subtracer(); let gas = params.gas; let created = params.address.clone(); @@ -596,11 +597,10 @@ impl<'a, B: 'a + StateBackend> Executive<'a, B> { match res { Ok(ref res) if res.apply_state => { - trace_output.as_mut().map(|trace| *trace = res.return_data.to_vec()); tracer.trace_create( trace_info, gas - res.gas_left, - trace_output, + &res.return_data, created, subtracer.drain() ); diff --git a/ethcore/src/trace/executive_tracer.rs b/ethcore/src/trace/executive_tracer.rs index 1bae15d59..227b3a39f 100644 --- a/ethcore/src/trace/executive_tracer.rs +++ b/ethcore/src/trace/executive_tracer.rs @@ -17,7 +17,6 @@ //! Simple executive tracer. use ethereum_types::{U256, Address}; -use bytes::Bytes; use vm::ActionParams; use trace::trace::{Call, Create, Action, Res, CreateResult, CallResult, VMTrace, VMOperation, VMExecutedOperation, MemoryDiff, StorageDiff, Suicide, Reward, RewardType}; use trace::{Tracer, VMTracer, FlatTrace, TraceError}; @@ -92,18 +91,14 @@ impl Tracer for ExecutiveTracer { Some(Create::from(params.clone())) } - fn prepare_trace_output(&self) -> Option { - Some(vec![]) - } - - fn trace_call(&mut self, call: Option, gas_used: U256, output: Option, subs: Vec) { + fn trace_call(&mut self, call: Option, gas_used: U256, output: &[u8], subs: Vec) { let trace = FlatTrace { trace_address: Default::default(), subtraces: top_level_subtraces(&subs), action: Action::Call(call.expect("self.prepare_trace_call().is_some(): so we must be tracing: qed")), result: Res::Call(CallResult { gas_used: gas_used, - output: output.expect("self.prepare_trace_output().is_some(): so we must be tracing: qed") + output: output.into() }), }; debug!(target: "trace", "Traced call {:?}", trace); @@ -111,13 +106,13 @@ impl Tracer for ExecutiveTracer { self.traces.extend(prefix_subtrace_addresses(subs)); } - fn trace_create(&mut self, create: Option, gas_used: U256, code: Option, address: Address, subs: Vec) { + fn trace_create(&mut self, create: Option, gas_used: U256, code: &[u8], address: Address, subs: Vec) { let trace = FlatTrace { subtraces: top_level_subtraces(&subs), action: Action::Create(create.expect("self.prepare_trace_create().is_some(): so we must be tracing: qed")), result: Res::Create(CreateResult { gas_used: gas_used, - code: code.expect("self.prepare_trace_output.is_some(): so we must be tracing: qed"), + code: code.into(), address: address }), trace_address: Default::default(), diff --git a/ethcore/src/trace/mod.rs b/ethcore/src/trace/mod.rs index 90ea64b5d..670cc755f 100644 --- a/ethcore/src/trace/mod.rs +++ b/ethcore/src/trace/mod.rs @@ -38,7 +38,6 @@ pub use self::types::filter::{Filter, AddressesFilter}; use ethereum_types::{H256, U256, Address}; use kvdb::DBTransaction; -use bytes::Bytes; use self::trace::{Call, Create}; use vm::ActionParams; use header::BlockNumber; @@ -58,9 +57,6 @@ pub trait Tracer: Send { /// This is called before a create has been executed. fn prepare_trace_create(&self, params: &ActionParams) -> Option; - /// Prepare trace output. Noop tracer should return None. 
- fn prepare_trace_output(&self) -> Option; - /// Stores trace call info. /// /// This is called after a call has completed successfully. @@ -68,7 +64,7 @@ pub trait Tracer: Send { &mut self, call: Option, gas_used: U256, - output: Option, + output: &[u8], subs: Vec, ); @@ -79,7 +75,7 @@ pub trait Tracer: Send { &mut self, create: Option, gas_used: U256, - code: Option, + code: &[u8], address: Address, subs: Vec ); diff --git a/ethcore/src/trace/noop_tracer.rs b/ethcore/src/trace/noop_tracer.rs index 8312de58f..fdde9a6e3 100644 --- a/ethcore/src/trace/noop_tracer.rs +++ b/ethcore/src/trace/noop_tracer.rs @@ -17,7 +17,6 @@ //! Nonoperative tracer. use ethereum_types::{U256, Address}; -use bytes::Bytes; use vm::ActionParams; use trace::{Tracer, VMTracer, FlatTrace, TraceError}; use trace::trace::{Call, Create, VMTrace, RewardType}; @@ -36,18 +35,12 @@ impl Tracer for NoopTracer { None } - fn prepare_trace_output(&self) -> Option { - None - } - - fn trace_call(&mut self, call: Option, _: U256, output: Option, _: Vec) { + fn trace_call(&mut self, call: Option, _: U256, _: &[u8], _: Vec) { assert!(call.is_none(), "self.prepare_trace_call().is_none(): so we can't be tracing: qed"); - assert!(output.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); } - fn trace_create(&mut self, create: Option, _: U256, code: Option, _: Address, _: Vec) { + fn trace_create(&mut self, create: Option, _: U256, _: &[u8], _: Address, _: Vec) { assert!(create.is_none(), "self.prepare_trace_create().is_none(): so we can't be tracing: qed"); - assert!(code.is_none(), "self.prepare_trace_output().is_none(): so we can't be tracing: qed"); } fn trace_failed_call(&mut self, call: Option, _: Vec, _: TraceError) { From 29125e830b4d85f0db740dc8d4606d3ac3b1eaf9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Tue, 14 Aug 2018 17:20:29 +0200 Subject: [PATCH 28/48] Lower the max size of transaction packet to prevent going oversize. (#9308) * Lower the max size of transaction packet to prevent going oversize. * Log RLP size. --- ethcore/sync/src/chain/mod.rs | 3 ++- ethcore/sync/src/chain/propagator.rs | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/ethcore/sync/src/chain/mod.rs b/ethcore/sync/src/chain/mod.rs index 625ccb30d..b53c38b43 100644 --- a/ethcore/sync/src/chain/mod.rs +++ b/ethcore/sync/src/chain/mod.rs @@ -148,7 +148,8 @@ const MAX_PEER_LAG_PROPAGATION: BlockNumber = 20; const MAX_NEW_HASHES: usize = 64; const MAX_NEW_BLOCK_AGE: BlockNumber = 20; // maximal packet size with transactions (cannot be greater than 16MB - protocol limitation). -const MAX_TRANSACTION_PACKET_SIZE: usize = 8 * 1024 * 1024; +// keep it under 8MB as well, cause it seems that it may result oversized after compression. 
+const MAX_TRANSACTION_PACKET_SIZE: usize = 5 * 1024 * 1024; // Min number of blocks to be behind for a snapshot sync const SNAPSHOT_RESTORE_THRESHOLD: BlockNumber = 30000; const SNAPSHOT_MIN_PEERS: usize = 3; diff --git a/ethcore/sync/src/chain/propagator.rs b/ethcore/sync/src/chain/propagator.rs index 7cb145f36..aabe90c93 100644 --- a/ethcore/sync/src/chain/propagator.rs +++ b/ethcore/sync/src/chain/propagator.rs @@ -237,8 +237,9 @@ impl SyncPropagator { let lucky_peers_len = lucky_peers.len(); for (peer_id, sent, rlp) in lucky_peers { peers.insert(peer_id); + let size = rlp.len(); SyncPropagator::send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp); - trace!(target: "sync", "{:02} <- Transactions ({} entries)", peer_id, sent); + trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size); max_sent = cmp::max(max_sent, sent); } debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len); From c21c19bd6ca173d877a4157390856222e026e127 Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Tue, 14 Aug 2018 18:10:51 +0100 Subject: [PATCH 29/48] Fix no line breaks in logs (#9355) --- logger/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/logger/src/lib.rs b/logger/src/lib.rs index 152d691f9..dee2bb0b0 100644 --- a/logger/src/lib.rs +++ b/logger/src/lib.rs @@ -127,7 +127,7 @@ pub fn setup_log(config: &Config) -> Result, String> { println!("{}", ret); } - write!(buf, "{}", ret) + writeln!(buf, "{}", ret) }; builder.format(format); From 949b9c85ca7a2f55027a3505a21da786bce5aaf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Fri, 17 Aug 2018 12:19:15 +0200 Subject: [PATCH 30/48] Changed http:// to https:// on Yasm link (#9369) Changed http:// to https:// on Yasm link in README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6c7378c57..40c04dff7 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ We recommend installing Rust through [rustup](https://www.rustup.rs/). If you do Once you have rustup installed, then you need to install: * [Perl](https://www.perl.org) -* [Yasm](http://yasm.tortall.net) +* [Yasm](https://yasm.tortall.net) Make sure that these binaries are in your `PATH`. After that you should be able to build Parity-Ethereum from source. From 3ae10915e42b4431bf81ce4589f69763d3cde8d6 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 17 Aug 2018 15:45:43 +0200 Subject: [PATCH 31/48] =?UTF-8?q?Light=20client=20`Provide=20default=20non?= =?UTF-8?q?ce=20in=20transactions=20when=20it=C2=B4s=20missing`=20(#9370)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Provide `default_nonce` in txs when it's missing. When `nonce` is missing in an `EthTransaction` the call would fail; in these cases, provide the `default_nonce` value instead. * Changed http:// to https:// on Yasm link (#9369) Changed http:// to https:// on Yasm link in README.md * Provide `default_nonce` in txs when it's missing. When `nonce` is missing in an `EthTransaction` the call would fail; in these cases, provide the `default_nonce` value instead.
* Address grumbles --- rpc/src/v1/helpers/light_fetch.rs | 37 ++++++++++--------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/rpc/src/v1/helpers/light_fetch.rs b/rpc/src/v1/helpers/light_fetch.rs index 14f08fe6c..51fb0a5f8 100644 --- a/rpc/src/v1/helpers/light_fetch.rs +++ b/rpc/src/v1/helpers/light_fetch.rs @@ -47,7 +47,7 @@ use transaction::{Action, Transaction as EthTransaction, SignedTransaction, Loca use v1::helpers::{CallRequest as CallRequestHelper, errors, dispatch}; use v1::types::{BlockNumber, CallRequest, Log, Transaction}; -const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed"; +const NO_INVALID_BACK_REFS: &str = "Fails only on invalid back-references; back-references here known to be valid; qed"; /// Helper for fetching blockchain data either from the light client or the network /// as necessary. @@ -207,7 +207,7 @@ impl LightFetch { } }; - let from = req.from.unwrap_or(Address::zero()); + let from = req.from.unwrap_or_else(|| Address::zero()); let nonce_fut = match req.nonce { Some(nonce) => Either::A(future::ok(Some(nonce))), None => Either::B(self.account(from, id).map(|acc| acc.map(|a| a.nonce))), @@ -232,29 +232,16 @@ impl LightFetch { // fetch missing transaction fields from the network. Box::new(nonce_fut.join(gas_price_fut).and_then(move |(nonce, gas_price)| { - let action = req.to.map_or(Action::Create, Action::Call); - let value = req.value.unwrap_or_else(U256::zero); - let data = req.data.unwrap_or_default(); - - future::done(match (nonce, req.gas) { - (Some(n), Some(gas)) => Ok((true, EthTransaction { - nonce: n, - action: action, - gas: gas, - gas_price: gas_price, - value: value, - data: data, - })), - (Some(n), None) => Ok((false, EthTransaction { - nonce: n, - action: action, - gas: START_GAS.into(), - gas_price: gas_price, - value: value, - data: data, - })), - (None, _) => Err(errors::unknown_block()), - }) + future::done( + Ok((req.gas.is_some(), EthTransaction { + nonce: nonce.unwrap_or_default(), + action: req.to.map_or(Action::Create, Action::Call), + gas: req.gas.unwrap_or_else(|| START_GAS.into()), + gas_price, + value: req.value.unwrap_or_else(U256::zero), + data: req.data.unwrap_or_default(), + })) + ) }).join(header_fut).and_then(move |((gas_known, tx), hdr)| { // then request proved execution. // TODO: get last-hashes from network. 
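A minimal sketch of the fallback pattern used by the light_fetch change above, with simplified integer types standing in for U256/EthTransaction; `CallRequest`, `START_GAS` and `fill_defaults` here are illustrative names only, not the light-client API. The point shown: missing optional fields fall back to defaults instead of failing with `unknown_block`, and `gas_known` records whether the gas limit was supplied or guessed.

    // Sketch only: a cut-down stand-in for the RPC call request.
    struct CallRequest {
        nonce: Option<u64>,
        gas: Option<u64>,
        value: Option<u64>,
    }

    // Illustrative placeholder for the light client's START_GAS constant.
    const START_GAS: u64 = 50_000;

    // Returns (gas_known, nonce, gas, value); `gas_known` mirrors
    // `req.gas.is_some()`, so callers only retry with a larger gas limit
    // when the gas was not supplied explicitly.
    fn fill_defaults(req: &CallRequest) -> (bool, u64, u64, u64) {
        (
            req.gas.is_some(),
            req.nonce.unwrap_or_default(), // default nonce when it is missing
            req.gas.unwrap_or(START_GAS),
            req.value.unwrap_or_default(),
        )
    }

    fn main() {
        let req = CallRequest { nonce: None, gas: Some(21_000), value: None };
        println!("{:?}", fill_defaults(&req));
    }

From 18a8d2f67f416cdb85a04a30c90831d8817214e0 (continued below)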
From 18a8d2f67f416cdb85a04a30c90831d8817214e0 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 17 Aug 2018 16:04:03 +0200 Subject: [PATCH 32/48] Fix typos in `network-devp2p` (#9371) --- util/network-devp2p/src/connection.rs | 6 +++--- util/network-devp2p/src/handshake.rs | 14 +++++++------- util/network-devp2p/src/node_table.rs | 4 ++-- util/network-devp2p/src/service.rs | 2 +- util/network-devp2p/src/session.rs | 10 +++++----- 5 files changed, 18 insertions(+), 18 deletions(-) diff --git a/util/network-devp2p/src/connection.rs b/util/network-devp2p/src/connection.rs index 1ed395acb..ee59c48f2 100644 --- a/util/network-devp2p/src/connection.rs +++ b/util/network-devp2p/src/connection.rs @@ -353,7 +353,7 @@ impl EncryptedConnection { } header.append_raw(&[(len >> 16) as u8, (len >> 8) as u8, len as u8], 1); header.append_raw(&[0xc2u8, 0x80u8, 0x80u8], 1); - //TODO: ger rid of vectors here + //TODO: get rid of vectors here let mut header = header.out(); let padding = (16 - (payload.len() % 16)) % 16; header.resize(16, 0u8); @@ -442,7 +442,7 @@ impl EncryptedConnection { mac.update(&enc); } - /// Readable IO handler. Tracker receive status and returns decoded packet if avaialable. + /// Readable IO handler. Tracker receive status and returns decoded packet if available. pub fn readable(&mut self, io: &IoContext) -> Result, Error> where Message: Send + Clone + Sync + 'static { io.clear_timer(self.connection.token)?; if let EncryptedConnectionState::Header = self.read_state { @@ -465,7 +465,7 @@ impl EncryptedConnection { } } - /// Writable IO handler. Processes send queeue. + /// Writable IO handler. Processes send queue. pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { self.connection.writable(io)?; Ok(()) diff --git a/util/network-devp2p/src/handshake.rs b/util/network-devp2p/src/handshake.rs index 4f54f0009..e2b1ebaa0 100644 --- a/util/network-devp2p/src/handshake.rs +++ b/util/network-devp2p/src/handshake.rs @@ -21,7 +21,7 @@ use mio::tcp::*; use ethereum_types::{H256, H520}; use parity_bytes::Bytes; use rlp::{Rlp, RlpStream}; -use connection::{Connection}; +use connection::Connection; use node_table::NodeId; use io::{IoContext, StreamToken}; use ethkey::{KeyPair, Public, Secret, recover, sign, Generator, Random}; @@ -45,7 +45,7 @@ enum HandshakeState { StartSession, } -/// `RLPx` protocol handhake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake +/// `RLPx` protocol handshake. See https://github.com/ethereum/devp2p/blob/master/rlpx.md#encrypted-handshake pub struct Handshake { /// Remote node public key pub id: NodeId, @@ -65,11 +65,11 @@ pub struct Handshake { pub remote_nonce: H256, /// Remote `RLPx` protocol version. 
pub remote_version: u64, - /// A copy of received encryped auth packet + /// A copy of received encrypted auth packet pub auth_cipher: Bytes, - /// A copy of received encryped ack packet + /// A copy of received encrypted ack packet pub ack_cipher: Bytes, - /// This Handshake is marked for deleteion flag + /// This Handshake is marked for deletion flag pub expired: bool, } @@ -104,7 +104,7 @@ impl Handshake { self.expired } - /// Start a handhsake + /// Start a handshake pub fn start(&mut self, io: &IoContext, host: &HostInfo, originated: bool) -> Result<(), Error> where Message: Send + Clone+ Sync + 'static { self.originated = originated; io.register_timer(self.connection.token, HANDSHAKE_TIMEOUT).ok(); @@ -152,7 +152,7 @@ impl Handshake { Ok(()) } - /// Writabe IO handler. + /// Writable IO handler. pub fn writable(&mut self, io: &IoContext) -> Result<(), Error> where Message: Send + Clone + Sync + 'static { if !self.expired() { self.connection.writable(io)?; diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs index 2640cec79..b2c417b25 100644 --- a/util/network-devp2p/src/node_table.rs +++ b/util/network-devp2p/src/node_table.rs @@ -38,7 +38,7 @@ pub type NodeId = H512; pub struct NodeEndpoint { /// IP(V4 or V6) address pub address: SocketAddr, - /// Conneciton port. + /// Connection port. pub udp_port: u16 } @@ -373,7 +373,7 @@ impl NodeTable { self.useless_nodes.insert(id.clone()); } - /// Atempt to connect to useless nodes again. + /// Attempt to connect to useless nodes again. pub fn clear_useless(&mut self) { self.useless_nodes.clear(); } diff --git a/util/network-devp2p/src/service.rs b/util/network-devp2p/src/service.rs index d7182f461..709161aeb 100644 --- a/util/network-devp2p/src/service.rs +++ b/util/network-devp2p/src/service.rs @@ -68,7 +68,7 @@ impl NetworkService { }) } - /// Regiter a new protocol handler with the event loop. + /// Register a new protocol handler with the event loop. pub fn register_protocol( &self, handler: Arc, diff --git a/util/network-devp2p/src/session.rs b/util/network-devp2p/src/session.rs index a405ad469..910e54a06 100644 --- a/util/network-devp2p/src/session.rs +++ b/util/network-devp2p/src/session.rs @@ -49,11 +49,11 @@ enum ProtocolState { /// Peer session over encrypted connection. /// When created waits for Hello packet exchange and signals ready state. -/// Sends and receives protocol packets and handles basic packes such as ping/pong and disconnect. +/// Sends and receives protocol packets and handles basic packets such as ping/pong and disconnect. pub struct Session { /// Shared session information pub info: SessionInfo, - /// Session ready flag. Set after successfull Hello packet exchange + /// Session ready flag. Set after successful Hello packet exchange had_hello: bool, /// Session is no longer active flag. expired: bool, @@ -98,8 +98,8 @@ const PACKET_USER: u8 = 0x10; const PACKET_LAST: u8 = 0x7f; impl Session { - /// Create a new session out of comepleted handshake. This clones the handshake connection object - /// and leaves the handhsake in limbo to be deregistered from the event loop. + /// Create a new session out of completed handshake. This clones the handshake connection object + /// and leaves the handshake in limbo to be de-registered from the event loop. 
pub fn new(io: &IoContext, socket: TcpStream, token: StreamToken, id: Option<&NodeId>, nonce: &H256, host: &HostInfo) -> Result where Message: Send + Clone + Sync + 'static { @@ -450,7 +450,7 @@ impl Session { } } - // Sort capabilities alphabeticaly. + // Sort capabilities alphabetically. caps.sort(); i = 0; From 346913b7f649ea18054ed9b93bae4122d3fe62b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Fri, 17 Aug 2018 17:01:32 +0200 Subject: [PATCH 33/48] Better logging when mining own transactions. (#9363) --- Cargo.lock | 6 +-- ethcore/service/src/service.rs | 1 + ethcore/src/miner/miner.rs | 15 ++++++- miner/src/pool/listener.rs | 4 +- miner/src/pool/local_transactions.rs | 62 +++++++++++++++++++++++++--- miner/src/pool/queue.rs | 7 ++++ rpc/src/v1/types/transaction.rs | 11 ++++- transaction-pool/Cargo.toml | 2 +- transaction-pool/src/listener.rs | 10 ++--- transaction-pool/src/pool.rs | 2 +- transaction-pool/src/tests/mod.rs | 6 +-- 11 files changed, 102 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 54979b319..b44d65ac3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -669,7 +669,7 @@ dependencies = [ "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", - "transaction-pool 1.12.3", + "transaction-pool 1.13.1", "url 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2171,7 +2171,7 @@ dependencies = [ "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "tokio-timer 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "transaction-pool 1.12.3", + "transaction-pool 1.13.1", "transient-hashmap 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", "vm 0.1.0", ] @@ -3330,7 +3330,7 @@ dependencies = [ [[package]] name = "transaction-pool" -version = "1.12.3" +version = "1.13.1" dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/service/src/service.rs b/ethcore/service/src/service.rs index 81997be07..1ffb0d621 100644 --- a/ethcore/service/src/service.rs +++ b/ethcore/service/src/service.rs @@ -95,6 +95,7 @@ impl ClientService { let pruning = config.pruning; let client = Client::new(config, &spec, blockchain_db.clone(), miner.clone(), io_service.channel())?; miner.set_io_channel(io_service.channel()); + miner.set_in_chain_checker(&client.clone()); let snapshot_params = SnapServiceParams { engine: spec.engine.clone(), diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index 23c868ca9..dbf6d8615 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -43,7 +43,7 @@ use using_queue::{UsingQueue, GetAction}; use account_provider::{AccountProvider, SignError as AccountError}; use block::{ClosedBlock, IsBlock, Block, SealedBlock}; use client::{ - BlockChain, ChainInfo, CallContract, BlockProducer, SealedBlockImporter, Nonce + BlockChain, ChainInfo, CallContract, BlockProducer, SealedBlockImporter, Nonce, TransactionInfo, TransactionId }; use client::{BlockId, ClientIoMessage}; use executive::contract_address; @@ -296,6 +296,19 @@ impl Miner { *self.io_channel.write() = Some(io_channel); } + /// Sets in-blockchain checker for transactions. 
+ pub fn set_in_chain_checker(&self, chain: &Arc) where + C: TransactionInfo + Send + Sync + 'static, + { + let client = Arc::downgrade(chain); + self.transaction_queue.set_in_chain_checker(move |hash| { + match client.upgrade() { + Some(info) => info.transaction_block(TransactionId::Hash(*hash)).is_some(), + None => false, + } + }); + } + /// Clear all pending block states pub fn clear(&self) { self.sealing.lock().queue.reset(); diff --git a/miner/src/pool/listener.rs b/miner/src/pool/listener.rs index e881a2ba2..5776ba845 100644 --- a/miner/src/pool/listener.rs +++ b/miner/src/pool/listener.rs @@ -107,8 +107,8 @@ impl txpool::Listener for Logger { debug!(target: "txqueue", "[{:?}] Canceled by the user.", tx.hash()); } - fn mined(&mut self, tx: &Arc) { - debug!(target: "txqueue", "[{:?}] Mined.", tx.hash()); + fn culled(&mut self, tx: &Arc) { + debug!(target: "txqueue", "[{:?}] Culled or mined.", tx.hash()); } } diff --git a/miner/src/pool/local_transactions.rs b/miner/src/pool/local_transactions.rs index d69da3347..a1c69ef22 100644 --- a/miner/src/pool/local_transactions.rs +++ b/miner/src/pool/local_transactions.rs @@ -16,7 +16,7 @@ //! Local Transactions List. -use std::sync::Arc; +use std::{fmt, sync::Arc}; use ethereum_types::H256; use linked_hash_map::LinkedHashMap; @@ -32,6 +32,8 @@ pub enum Status { Pending(Arc), /// Transaction is already mined. Mined(Arc), + /// Transaction didn't get into any block, but some other tx with the same nonce got. + Culled(Arc), /// Transaction is dropped because of limit Dropped(Arc), /// Replaced because of higher gas price of another transaction. @@ -60,11 +62,22 @@ impl Status { } /// Keeps track of local transactions that are in the queue or were mined/dropped recently. -#[derive(Debug)] pub struct LocalTransactionsList { max_old: usize, transactions: LinkedHashMap, pending: usize, + in_chain: Option bool + Send + Sync>>, +} + +impl fmt::Debug for LocalTransactionsList { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + fmt.debug_struct("LocalTransactionsList") + .field("max_old", &self.max_old) + .field("transactions", &self.transactions) + .field("pending", &self.pending) + .field("in_chain", &self.in_chain.is_some()) + .finish() + } } impl Default for LocalTransactionsList { @@ -80,9 +93,20 @@ impl LocalTransactionsList { max_old, transactions: Default::default(), pending: 0, + in_chain: None, } } + /// Set blockchain checker. + /// + /// The function should return true if transaction is included in chain. + pub fn set_in_chain_checker(&mut self, checker: T) where + T: Into>, + F: Fn(&H256) -> bool + Send + Sync + 'static + { + self.in_chain = checker.into().map(|f| Box::new(f) as _); + } + /// Returns true if the transaction is already in local transactions. pub fn contains(&self, hash: &H256) -> bool { self.transactions.contains_key(hash) @@ -190,14 +214,20 @@ impl txpool::Listener for LocalTransactionsList { self.clear_old(); } - /// The transaction has been mined. 
- fn mined(&mut self, tx: &Arc) { + fn culled(&mut self, tx: &Arc) { if !tx.priority().is_local() { return; } - info!(target: "own_tx", "Transaction mined (hash {:?})", tx.hash()); - self.insert(*tx.hash(), Status::Mined(tx.clone())); + let is_in_chain = self.in_chain.as_ref().map(|checker| checker(tx.hash())).unwrap_or(false); + if is_in_chain { + info!(target: "own_tx", "Transaction mined (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Mined(tx.clone())); + return; + } + + info!(target: "own_tx", "Transaction culled (hash {:?})", tx.hash()); + self.insert(*tx.hash(), Status::Culled(tx.clone())); } } @@ -229,6 +259,26 @@ mod tests { assert_eq!(statuses, vec![Status::Pending(tx1), Status::Pending(tx2)]); } + #[test] + fn should_use_in_chain_checker_if_present() { + // given + let mut list = LocalTransactionsList::default(); + let tx1 = new_tx(10); + let tx2 = new_tx(20); + list.culled(&tx1); + list.culled(&tx2); + let statuses = list.all_transactions().values().cloned().collect::>(); + assert_eq!(statuses, vec![Status::Culled(tx1.clone()), Status::Culled(tx2.clone())]); + + // when + list.set_in_chain_checker(|_: &_| true); + list.culled(&tx1); + + // then + let statuses = list.all_transactions().values().cloned().collect::>(); + assert_eq!(statuses, vec![Status::Culled(tx2), Status::Mined(tx1)]); + } + #[test] fn should_clear_old_transactions() { // given diff --git a/miner/src/pool/queue.rs b/miner/src/pool/queue.rs index d11052108..e7810158b 100644 --- a/miner/src/pool/queue.rs +++ b/miner/src/pool/queue.rs @@ -229,6 +229,13 @@ impl TransactionQueue { *self.options.write() = options; } + /// Sets the in-chain transaction checker for pool listener. + pub fn set_in_chain_checker(&self, f: F) where + F: Fn(&H256) -> bool + Send + Sync + 'static + { + self.pool.write().listener_mut().0.set_in_chain_checker(f) + } + /// Import a set of transactions to the pool. /// /// Given blockchain and state access (Client) diff --git a/rpc/src/v1/types/transaction.rs b/rpc/src/v1/types/transaction.rs index 4266f60b6..8518e1497 100644 --- a/rpc/src/v1/types/transaction.rs +++ b/rpc/src/v1/types/transaction.rs @@ -82,8 +82,10 @@ pub enum LocalTransactionStatus { Pending, /// Transaction is in future part of the queue Future, - /// Transaction is already mined. + /// Transaction was mined. Mined(Transaction), + /// Transaction was removed from the queue, but not mined. + Culled(Transaction), /// Transaction was dropped because of limit. Dropped(Transaction), /// Transaction was replaced by transaction with higher gas price. @@ -104,7 +106,7 @@ impl Serialize for LocalTransactionStatus { let elems = match *self { Pending | Future => 1, - Mined(..) | Dropped(..) | Invalid(..) | Canceled(..) => 2, + Mined(..) | Culled(..) | Dropped(..) | Invalid(..) | Canceled(..) => 2, Rejected(..) => 3, Replaced(..) 
=> 4, }; @@ -120,6 +122,10 @@ impl Serialize for LocalTransactionStatus { struc.serialize_field(status, "mined")?; struc.serialize_field(transaction, tx)?; }, + Culled(ref tx) => { + struc.serialize_field(status, "culled")?; + struc.serialize_field(transaction, tx)?; + }, Dropped(ref tx) => { struc.serialize_field(status, "dropped")?; struc.serialize_field(transaction, tx)?; @@ -257,6 +263,7 @@ impl LocalTransactionStatus { match s { Pending(_) => LocalTransactionStatus::Pending, Mined(tx) => LocalTransactionStatus::Mined(convert(tx)), + Culled(tx) => LocalTransactionStatus::Culled(convert(tx)), Dropped(tx) => LocalTransactionStatus::Dropped(convert(tx)), Rejected(tx, reason) => LocalTransactionStatus::Rejected(convert(tx), reason), Invalid(tx) => LocalTransactionStatus::Invalid(convert(tx)), diff --git a/transaction-pool/Cargo.toml b/transaction-pool/Cargo.toml index 0f692e3c0..4c48aae33 100644 --- a/transaction-pool/Cargo.toml +++ b/transaction-pool/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Generic transaction pool." name = "transaction-pool" -version = "1.12.3" +version = "1.13.1" license = "GPL-3.0" authors = ["Parity Technologies "] diff --git a/transaction-pool/src/listener.rs b/transaction-pool/src/listener.rs index 3339a7730..63786f6aa 100644 --- a/transaction-pool/src/listener.rs +++ b/transaction-pool/src/listener.rs @@ -40,8 +40,8 @@ pub trait Listener { /// The transaction has been canceled. fn canceled(&mut self, _tx: &Arc) {} - /// The transaction has been mined. - fn mined(&mut self, _tx: &Arc) {} + /// The transaction has been culled from the pool. + fn culled(&mut self, _tx: &Arc) {} } /// A no-op implementation of `Listener`. @@ -78,8 +78,8 @@ impl Listener for (A, B) where self.1.canceled(tx); } - fn mined(&mut self, tx: &Arc) { - self.0.mined(tx); - self.1.mined(tx); + fn culled(&mut self, tx: &Arc) { + self.0.culled(tx); + self.1.culled(tx); } } diff --git a/transaction-pool/src/pool.rs b/transaction-pool/src/pool.rs index c94b736b8..3a8af8be8 100644 --- a/transaction-pool/src/pool.rs +++ b/transaction-pool/src/pool.rs @@ -370,7 +370,7 @@ impl Pool where let len = removed.len(); for tx in removed { self.finalize_remove(tx.hash()); - self.listener.mined(&tx); + self.listener.culled(&tx); } len }, diff --git a/transaction-pool/src/tests/mod.rs b/transaction-pool/src/tests/mod.rs index 85260133e..43ef60472 100644 --- a/transaction-pool/src/tests/mod.rs +++ b/transaction-pool/src/tests/mod.rs @@ -648,8 +648,8 @@ mod listener { self.0.borrow_mut().push("canceled".into()); } - fn mined(&mut self, _tx: &SharedTransaction) { - self.0.borrow_mut().push("mined".into()); + fn culled(&mut self, _tx: &SharedTransaction) { + self.0.borrow_mut().push("culled".into()); } } @@ -743,6 +743,6 @@ mod listener { txq.cull(None, NonceReady::new(3)); // then - assert_eq!(*results.borrow(), &["added", "added", "mined", "mined"]); + assert_eq!(*results.borrow(), &["added", "added", "culled", "culled"]); } } From ee5ed447614cc2417e38b71899fdd1a81b488c36 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Fri, 17 Aug 2018 18:21:49 +0200 Subject: [PATCH 34/48] Light client "Enable more logs for light client `on_demand`" (#9374) * Enable more logs for light client `on_demand` * Remove extra whitespace * fix indentation --- ethcore/light/src/on_demand/request.rs | 86 +++++++++++++++++++++----- 1 file changed, 69 insertions(+), 17 deletions(-) diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index 9feb0a670..c305dea94 100644 --- 
a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -38,7 +38,7 @@ use transaction::SignedTransaction; use trie::Trie; use vm::EnvInfo; -const SUPPLIED_MATCHES: &'static str = "supplied responses always match produced requests; enforced by `check_response`; qed"; +const SUPPLIED_MATCHES: &str = "supplied responses always match produced requests; enforced by `check_response`; qed"; /// Core unit of the API: submit batches of these to be answered with `Response`s. #[derive(Clone)] @@ -265,30 +265,35 @@ impl From for CheckedRequest { max: 1, reverse: false, }; + trace!(target: "on_demand", "HeaderByHash Request, {:?}", net_req); CheckedRequest::HeaderByHash(req, net_req) } Request::HeaderProof(req) => { let net_req = net_request::IncompleteHeaderProofRequest { num: req.num().into(), }; + trace!(target: "on_demand", "HeaderProof Request, {:?}", net_req); CheckedRequest::HeaderProof(req, net_req) } Request::TransactionIndex(req) => { let net_req = net_request::IncompleteTransactionIndexRequest { hash: req.0.clone(), }; + trace!(target: "on_demand", "TransactionIndex Request, {:?}", net_req); CheckedRequest::TransactionIndex(req, net_req) } - Request::Body(req) => { + Request::Body(req) => { let net_req = net_request::IncompleteBodyRequest { hash: req.0.field(), }; + trace!(target: "on_demand", "Body Request, {:?}", net_req); CheckedRequest::Body(req, net_req) } Request::Receipts(req) => { let net_req = net_request::IncompleteReceiptsRequest { hash: req.0.field(), }; + trace!(target: "on_demand", "Receipt Request, {:?}", net_req); CheckedRequest::Receipts(req, net_req) } Request::Account(req) => { @@ -296,6 +301,7 @@ impl From for CheckedRequest { block_hash: req.header.field(), address_hash: ::hash::keccak(&req.address).into(), }; + trace!(target: "on_demand", "Account Request, {:?}", net_req); CheckedRequest::Account(req, net_req) } Request::Code(req) => { @@ -303,6 +309,7 @@ impl From for CheckedRequest { block_hash: req.header.field(), code_hash: req.code_hash.into(), }; + trace!(target: "on_demand", "Code Request, {:?}", net_req); CheckedRequest::Code(req, net_req) } Request::Execution(req) => { @@ -315,12 +322,14 @@ impl From for CheckedRequest { value: req.tx.value, data: req.tx.data.clone(), }; + trace!(target: "on_demand", "Execution request, {:?}", net_req); CheckedRequest::Execution(req, net_req) } Request::Signal(req) => { let net_req = net_request::IncompleteSignalRequest { block_hash: req.hash.into(), }; + trace!(target: "on_demand", "Signal Request, {:?}", net_req); CheckedRequest::Signal(req, net_req) } } @@ -507,15 +516,42 @@ impl IncompleteRequest for CheckedRequest { fn complete(self) -> Result { match self { - CheckedRequest::HeaderProof(_, req) => req.complete().map(CompleteRequest::HeaderProof), - CheckedRequest::HeaderByHash(_, req) => req.complete().map(CompleteRequest::Headers), - CheckedRequest::TransactionIndex(_, req) => req.complete().map(CompleteRequest::TransactionIndex), - CheckedRequest::Receipts(_, req) => req.complete().map(CompleteRequest::Receipts), - CheckedRequest::Body(_, req) => req.complete().map(CompleteRequest::Body), - CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account), - CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code), - CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution), - CheckedRequest::Signal(_, req) => req.complete().map(CompleteRequest::Signal), + CheckedRequest::HeaderProof(_, req) => { + trace!(target: "on_demand", 
"HeaderProof request completed {:?}", req); + req.complete().map(CompleteRequest::HeaderProof) + } + CheckedRequest::HeaderByHash(_, req) => { + trace!(target: "on_demand", "HeaderByHash request completed {:?}", req); + req.complete().map(CompleteRequest::Headers) + } + CheckedRequest::TransactionIndex(_, req) => { + trace!(target: "on_demand", "TransactionIndex request completed {:?}", req); + req.complete().map(CompleteRequest::TransactionIndex) + } + CheckedRequest::Receipts(_, req) => { + trace!(target: "on_demand", "Receipt request completed {:?}", req); + req.complete().map(CompleteRequest::Receipts) + } + CheckedRequest::Body(_, req) => { + trace!(target: "on_demand", "Block request completed {:?}", req); + req.complete().map(CompleteRequest::Body) + } + CheckedRequest::Account(_, req) => { + trace!(target: "on_demand", "Account request completed {:?}", req); + req.complete().map(CompleteRequest::Account) + } + CheckedRequest::Code(_, req) => { + trace!(target: "on_demand", "Code request completed {:?}", req); + req.complete().map(CompleteRequest::Code) + } + CheckedRequest::Execution(_, req) => { + trace!(target: "on_demand", "Execution request completed {:?}", req); + req.complete().map(CompleteRequest::Execution) + } + CheckedRequest::Signal(_, req) => { + trace!(target: "on_demand", "Signal request completed {:?}", req); + req.complete().map(CompleteRequest::Signal) + } } } @@ -772,11 +808,13 @@ impl Body { let header = self.0.as_ref()?; let tx_root = ::triehash::ordered_trie_root(body.transactions_rlp().iter().map(|r| r.as_raw())); if tx_root != header.transactions_root() { + trace!(target: "on_demand", "Body Response: \"WrongTrieRoot\" tx_root: {:?} header_root: {:?}", tx_root, header.transactions_root()); return Err(Error::WrongTrieRoot(header.transactions_root(), tx_root)); } let uncles_hash = keccak(body.uncles_rlp().as_raw()); if uncles_hash != header.uncles_hash() { + trace!(target: "on_demand", "Body Response: \"WrongHash\" tx_root: {:?} header_root: {:?}", uncles_hash, header.uncles_hash()); return Err(Error::WrongHash(header.uncles_hash(), uncles_hash)); } @@ -784,7 +822,6 @@ impl Body { let block = encoded::Block::new_from_header_and_body(&header.view(), &body.view()); cache.lock().insert_block_body(header.hash(), body.clone()); - Ok(block) } } @@ -804,7 +841,10 @@ impl BlockReceipts { cache.lock().insert_block_receipts(receipts_root, receipts.to_vec()); Ok(receipts.to_vec()) } - false => Err(Error::WrongTrieRoot(receipts_root, found_root)), + false => { + trace!(target: "on_demand", "Receipt Reponse: \"WrongTrieRoot\" receipts_root: {:?} found_root: {:?}", receipts_root, found_root); + Err(Error::WrongTrieRoot(receipts_root, found_root)) + } } } } @@ -837,7 +877,10 @@ impl Account { code_hash: rlp.val_at(3)?, })) }, - None => Ok(None), + None => { + trace!(target: "on_demand", "Account {:?} not found", self.address); + Ok(None) + } } } } @@ -899,9 +942,18 @@ impl TransactionProof { ); match proved_execution { - ProvedExecution::BadProof => Err(Error::BadProof), - ProvedExecution::Failed(e) => Ok(Err(e)), - ProvedExecution::Complete(e) => Ok(Ok(e)), + ProvedExecution::BadProof => { + trace!(target: "on_demand", "BadExecution Proof"); + Err(Error::BadProof) + } + ProvedExecution::Failed(e) => { + trace!(target: "on_demand", "Execution Proof failed: {:?}", e); + Ok(Err(e)) + } + ProvedExecution::Complete(e) => { + trace!(target: "on_demand", "Execution successful: {:?}", e); + Ok(Ok(e)) + } } } } From b2cf5d295815f4f92f17394f5d2d1c0e56e65bf4 Mon Sep 17 00:00:00 2001 
From: Niklas Adolfsson Date: Mon, 20 Aug 2018 12:53:47 +0200 Subject: [PATCH 35/48] Make `Capabilities struct` Copy (#9372) --- ethcore/light/src/net/mod.rs | 2 +- ethcore/light/src/net/status.rs | 2 +- ethcore/light/src/net/tests/mod.rs | 26 +++++++++++++------------- ethcore/light/src/on_demand/mod.rs | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 5e73681a5..2d1d3bdd8 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -823,7 +823,7 @@ impl LightProtocol { self.peers.write().insert(*peer, Mutex::new(Peer { local_credits: local_flow.create_credits(), status: status.clone(), - capabilities: capabilities.clone(), + capabilities, remote_flow: remote_flow, sent_head: pending.sent_head, last_update: pending.last_update, diff --git a/ethcore/light/src/net/status.rs b/ethcore/light/src/net/status.rs index 98b29f3e3..08eb42e1c 100644 --- a/ethcore/light/src/net/status.rs +++ b/ethcore/light/src/net/status.rs @@ -169,7 +169,7 @@ impl Status { } /// Peer capabilities. -#[derive(Debug, Clone, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct Capabilities { /// Whether this peer can serve headers pub serve_headers: bool, diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index 6bc6751b1..169203407 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -222,7 +222,7 @@ fn status(chain_info: BlockChainInfo) -> Status { fn handshake_expected() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let status = status(provider.client.chain_info()); @@ -236,7 +236,7 @@ fn handshake_expected() { fn genesis_mismatch() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let mut status = status(provider.client.chain_info()); status.genesis_hash = H256::default(); @@ -250,7 +250,7 @@ fn genesis_mismatch() { fn credit_overflow() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let status = status(provider.client.chain_info()); @@ -283,7 +283,7 @@ fn credit_overflow() { fn get_block_headers() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -335,7 +335,7 @@ fn get_block_headers() { fn get_block_bodies() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -383,7 +383,7 @@ fn get_block_bodies() { fn get_block_receipts() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -438,7 +438,7 @@ fn get_block_receipts() { fn get_state_proofs() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let provider = TestProvider(provider); @@ 
-497,7 +497,7 @@ fn get_state_proofs() { fn get_contract_code() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -540,7 +540,7 @@ fn get_contract_code() { fn epoch_signal() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -583,7 +583,7 @@ fn epoch_signal() { fn proof_of_execution() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); @@ -643,7 +643,7 @@ fn id_guard() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let req_id_1 = ReqId(5143); @@ -666,7 +666,7 @@ fn id_guard() { proto.peers.write().insert(peer_id, ::parking_lot::Mutex::new(Peer { local_credits: flow_params.create_credits(), status: status(provider.client.chain_info()), - capabilities: capabilities.clone(), + capabilities, remote_flow: Some((flow_params.create_credits(), (&*flow_params).clone())), sent_head: provider.client.chain_info().best_block_hash, last_update: Instant::now(), @@ -723,7 +723,7 @@ fn id_guard() { fn get_transaction_index() { let capabilities = capabilities(); - let (provider, proto) = setup(capabilities.clone()); + let (provider, proto) = setup(capabilities); let flow_params = proto.flow_params.read().clone(); let cur_status = status(provider.client.chain_info()); diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index c7cc5ef5e..594d0dee4 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -418,7 +418,7 @@ impl Handler for OnDemand { ) -> PeerStatus { self.peers.write().insert( ctx.peer(), - Peer { status: status.clone(), capabilities: capabilities.clone() } + Peer { status: status.clone(), capabilities: *capabilities } ); self.attempt_dispatch(ctx.as_basic()); PeerStatus::Kept From 108590d924bcc00e62b894de02516ffb2378be11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Drwi=C4=99ga?= Date: Mon, 20 Aug 2018 14:05:01 +0200 Subject: [PATCH 36/48] Allow calling contracts in genesis state. 
(#9375) --- Cargo.lock | 1 + ethcore/src/ethereum/mod.rs | 3 +- ethcore/src/spec/spec.rs | 5 +++ evmbin/Cargo.toml | 1 + evmbin/res/testchain.json | 38 +++++++++++++++++++++ evmbin/src/display/std_json.rs | 6 ++-- evmbin/src/info.rs | 60 ++++++++++++++++++++++++++-------- evmbin/src/main.rs | 2 ++ 8 files changed, 98 insertions(+), 18 deletions(-) create mode 100644 evmbin/res/testchain.json diff --git a/Cargo.lock b/Cargo.lock index b44d65ac3..38f1cbb5d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1018,6 +1018,7 @@ name = "evmbin" version = "0.1.0" dependencies = [ "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", "ethcore-transaction 0.1.0", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", diff --git a/ethcore/src/ethereum/mod.rs b/ethcore/src/ethereum/mod.rs index 4aab4b71f..a31f9ed50 100644 --- a/ethcore/src/ethereum/mod.rs +++ b/ethcore/src/ethereum/mod.rs @@ -30,7 +30,8 @@ pub use self::denominations::*; use machine::EthereumMachine; use super::spec::*; -fn load<'a, T: Into>>>(params: T, b: &[u8]) -> Spec { +/// Load chain spec from `SpecParams` and JSON. +pub fn load<'a, T: Into>>>(params: T, b: &[u8]) -> Spec { match params.into() { Some(params) => Spec::load(params, b), None => Spec::load(&::std::env::temp_dir(), b) diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index bcfa64ed1..a83046a72 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -770,6 +770,11 @@ impl Spec { Ok(()) } + /// Return genesis state as Plain old data. + pub fn genesis_state(&self) -> &PodState { + &self.genesis_state + } + /// Returns `false` if the memoized state root is invalid. `true` otherwise. pub fn is_state_root_valid(&self) -> bool { // TODO: get rid of this function and ensure state root always is valid. 
diff --git a/evmbin/Cargo.toml b/evmbin/Cargo.toml index 43264f042..a4cc451be 100644 --- a/evmbin/Cargo.toml +++ b/evmbin/Cargo.toml @@ -10,6 +10,7 @@ path = "./src/main.rs" [dependencies] docopt = "0.8" +env_logger = "0.5" ethcore = { path = "../ethcore", features = ["test-helpers", "json-tests"] } ethjson = { path = "../json" } parity-bytes = { git = "https://github.com/paritytech/parity-common" } diff --git a/evmbin/res/testchain.json b/evmbin/res/testchain.json new file mode 100644 index 000000000..be3455179 --- /dev/null +++ b/evmbin/res/testchain.json @@ -0,0 +1,38 @@ +{ + "name": "lab", + "engine": { + "Ethash": { + "params": { + "minimumDifficulty": "0x1", + "difficultyBoundDivisor": "0x800" + } + } + }, + "accounts": { + "0000000000000000000000000000000000000020": { + "nonce": "0x0", + "balance": "0x64", + "code": "0x62aaaaaa60aa60aa5060aa60aa60aa60aa60aa60aa" + } + }, + "params":{ + "networkID": "0x42", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1", + "gasLimitBoundDivisor": "0x400" + }, + "genesis": { + "gasLimit": "0x8000000", + "seal": { + "ethereum": { + "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "nonce": "0x0000000000000042" + } + }, + "difficulty": "0x400", + "extraData": "0x0", + "author": "0x3333333333333333333333333333333333333333", + "timestamp": "0x0", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} diff --git a/evmbin/src/display/std_json.rs b/evmbin/src/display/std_json.rs index b3533ea2c..ebf099bcf 100644 --- a/evmbin/src/display/std_json.rs +++ b/evmbin/src/display/std_json.rs @@ -167,13 +167,13 @@ impl trace::VMTracer for Informant { } #[cfg(test)] -mod tests { +pub mod tests { use std::sync::{Arc, Mutex}; use super::*; use info::tests::run_test; #[derive(Debug, Clone, Default)] - struct TestWriter(pub Arc>>); + pub struct TestWriter(pub Arc>>); impl Writer for TestWriter { fn clone(&self) -> Self { Clone::clone(self) } @@ -189,7 +189,7 @@ mod tests { } } - fn informant() -> (Informant, Arc>>) { + pub fn informant() -> (Informant, Arc>>) { let writer = TestWriter::default(); let res = writer.0.clone(); (Informant::new(writer), res) diff --git a/evmbin/src/info.rs b/evmbin/src/info.rs index ce824fe17..080c0c7ff 100644 --- a/evmbin/src/info.rs +++ b/evmbin/src/info.rs @@ -68,11 +68,19 @@ pub type RunResult = Result, Failure>; /// Execute given `ActionParams` and return the result. pub fn run_action( spec: &spec::Spec, - params: ActionParams, + mut params: ActionParams, mut informant: T, ) -> RunResult { informant.set_gas(params.gas); - run(spec, params.gas, None, |mut client| { + + // if the code is not overwritten from CLI, use code from spec file. 
+ if params.code.is_none() { + if let Some(acc) = spec.genesis_state().get().get(¶ms.code_address) { + params.code = acc.code.clone().map(::std::sync::Arc::new); + params.code_hash = None; + } + } + run(spec, params.gas, spec.genesis_state(), |mut client| { let result = client .call(params, &mut trace::NoopTracer, &mut informant) .map(|r| (0.into(), r.gas_left, r.return_data.to_vec())); @@ -130,24 +138,21 @@ pub fn run_transaction( } /// Execute VM with given `ActionParams` -pub fn run<'a, F, T, X>( +pub fn run<'a, F, X>( spec: &'a spec::Spec, initial_gas: U256, - pre_state: T, + pre_state: &'a pod_state::PodState, run: F, ) -> RunResult where F: FnOnce(EvmTestClient) -> (Result<(H256, U256, Vec), EvmTestError>, Option), - T: Into>, { - let test_client = match pre_state.into() { - Some(pre_state) => EvmTestClient::from_pod_state(spec, pre_state.clone()), - None => EvmTestClient::new(spec), - }.map_err(|error| Failure { - gas_used: 0.into(), - error, - time: Duration::from_secs(0), - traces: None, - })?; + let test_client = EvmTestClient::from_pod_state(spec, pre_state.clone()) + .map_err(|error| Failure { + gas_used: 0.into(), + error, + time: Duration::from_secs(0), + traces: None, + })?; let start = Instant::now(); let result = run(test_client); @@ -204,4 +209,31 @@ pub mod tests { }, } } + + #[test] + fn should_call_account_from_spec() { + use display::std_json::tests::informant; + + let (inf, res) = informant(); + let mut params = ActionParams::default(); + params.code_address = 0x20.into(); + params.gas = 0xffff.into(); + + let spec = ::ethcore::ethereum::load(None, include_bytes!("../res/testchain.json")); + let _result = run_action(&spec, params, inf); + + assert_eq!( + &String::from_utf8_lossy(&**res.lock().unwrap()), +r#"{"pc":0,"op":98,"opName":"PUSH3","gas":"0xffff","stack":[],"storage":{},"depth":1} +{"pc":4,"op":96,"opName":"PUSH1","gas":"0xfffc","stack":["0xaaaaaa"],"storage":{},"depth":1} +{"pc":6,"op":96,"opName":"PUSH1","gas":"0xfff9","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":8,"op":80,"opName":"POP","gas":"0xfff6","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":9,"op":96,"opName":"PUSH1","gas":"0xfff4","stack":["0xaaaaaa","0xaa"],"storage":{},"depth":1} +{"pc":11,"op":96,"opName":"PUSH1","gas":"0xfff1","stack":["0xaaaaaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":13,"op":96,"opName":"PUSH1","gas":"0xffee","stack":["0xaaaaaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":15,"op":96,"opName":"PUSH1","gas":"0xffeb","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":17,"op":96,"opName":"PUSH1","gas":"0xffe8","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +{"pc":19,"op":96,"opName":"PUSH1","gas":"0xffe5","stack":["0xaaaaaa","0xaa","0xaa","0xaa","0xaa","0xaa","0xaa"],"storage":{},"depth":1} +"#); + } } diff --git a/evmbin/src/main.rs b/evmbin/src/main.rs index 144c99fb3..78fc47488 100644 --- a/evmbin/src/main.rs +++ b/evmbin/src/main.rs @@ -31,6 +31,7 @@ extern crate ethereum_types; extern crate vm; extern crate evm; extern crate panic_hook; +extern crate env_logger; #[cfg(test)] #[macro_use] @@ -92,6 +93,7 @@ General options: fn main() { panic_hook::set_abort(); + env_logger::init(); let args: Args = Docopt::new(USAGE).and_then(|d| d.deserialize()).unwrap_or_else(|e| e.exit()); From 8703449dfe8174683e5941be49af648261aa5fd0 Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 21 Aug 2018 11:55:31 +0200 Subject: [PATCH 37/48] network-devp2p `Fix some clippy 
errors/warnings` (#9378) * fix some clippy warnings * Remove `shallow-copy` of Node's * Make `NonReservedPeerMode` Copy and pass-by-value --- util/network-devp2p/src/connection.rs | 22 +++---- util/network-devp2p/src/discovery.rs | 28 ++++---- util/network-devp2p/src/handshake.rs | 2 +- util/network-devp2p/src/host.rs | 95 +++++++++++++-------------- util/network-devp2p/src/node_table.rs | 1 - util/network-devp2p/src/service.rs | 14 ++-- util/network/src/lib.rs | 2 +- 7 files changed, 80 insertions(+), 84 deletions(-) diff --git a/util/network-devp2p/src/connection.rs b/util/network-devp2p/src/connection.rs index ee59c48f2..eb663d379 100644 --- a/util/network-devp2p/src/connection.rs +++ b/util/network-devp2p/src/connection.rs @@ -167,8 +167,8 @@ impl Connection { /// Create a new connection with given id and socket. pub fn new(token: StreamToken, socket: TcpStream) -> Connection { Connection { - token: token, - socket: socket, + token, + socket, send_queue: VecDeque::new(), rec_buf: Bytes::new(), rec_size: 0, @@ -318,24 +318,24 @@ impl EncryptedConnection { let mac_encoder = EcbEncryptor::new(AesSafe256Encryptor::new(&key_material[32..64]), NoPadding); let mut egress_mac = Keccak::new_keccak256(); - let mut mac_material = &H256::from_slice(&key_material[32..64]) ^ &handshake.remote_nonce; + let mut mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.remote_nonce; egress_mac.update(&mac_material); egress_mac.update(if handshake.originated { &handshake.auth_cipher } else { &handshake.ack_cipher }); let mut ingress_mac = Keccak::new_keccak256(); - mac_material = &H256::from_slice(&key_material[32..64]) ^ &handshake.nonce; + mac_material = H256::from_slice(&key_material[32..64]) ^ handshake.nonce; ingress_mac.update(&mac_material); ingress_mac.update(if handshake.originated { &handshake.ack_cipher } else { &handshake.auth_cipher }); let old_connection = handshake.connection.try_clone()?; let connection = ::std::mem::replace(&mut handshake.connection, old_connection); let mut enc = EncryptedConnection { - connection: connection, - encoder: encoder, - decoder: decoder, - mac_encoder: mac_encoder, - egress_mac: egress_mac, - ingress_mac: ingress_mac, + connection, + encoder, + decoder, + mac_encoder, + egress_mac, + ingress_mac, read_state: EncryptedConnectionState::Header, protocol_id: 0, payload_len: 0, @@ -534,7 +534,7 @@ mod tests { read_buffer: vec![], write_buffer: vec![], cursor: 0, - buf_size: buf_size, + buf_size, } } } diff --git a/util/network-devp2p/src/discovery.rs b/util/network-devp2p/src/discovery.rs index bc808c398..c7782b247 100644 --- a/util/network-devp2p/src/discovery.rs +++ b/util/network-devp2p/src/discovery.rs @@ -72,7 +72,7 @@ impl BucketEntry { let now = Instant::now(); BucketEntry { id_hash: keccak(address.id), - address: address, + address, last_seen: now, backoff_until: now, fail_count: 0, @@ -137,7 +137,7 @@ pub struct TableUpdates { impl<'a> Discovery<'a> { pub fn new(key: &KeyPair, public: NodeEndpoint, ip_filter: IpFilter) -> Discovery<'static> { Discovery { - id: key.public().clone(), + id: *key.public(), id_hash: keccak(key.public()), secret: key.secret().clone(), public_endpoint: public, @@ -151,7 +151,7 @@ impl<'a> Discovery<'a> { send_queue: VecDeque::new(), check_timestamps: true, adding_nodes: Vec::new(), - ip_filter: ip_filter, + ip_filter, request_backoff: &REQUEST_BACKOFF, } } @@ -248,11 +248,11 @@ impl<'a> Discovery<'a> { { let nearest = self.nearest_node_entries(&self.discovery_id).into_iter(); let nearest = nearest.filter(|x| 
!self.discovery_nodes.contains(&x.id)).take(ALPHA).collect::>(); - let target = self.discovery_id.clone(); + let target = self.discovery_id; for r in nearest { match self.send_find_node(&r, &target) { Ok(()) => { - self.discovery_nodes.insert(r.id.clone()); + self.discovery_nodes.insert(r.id); tried_count += 1; }, Err(e) => { @@ -401,7 +401,7 @@ impl<'a> Discovery<'a> { } fn send_to(&mut self, payload: Bytes, address: SocketAddr) { - self.send_queue.push_back(Datagram { payload: payload, address: address }); + self.send_queue.push_back(Datagram { payload, address }); } @@ -461,7 +461,7 @@ impl<'a> Discovery<'a> { append_expiration(&mut response); self.send_packet(PACKET_PONG, from, &response.drain())?; - let entry = NodeEntry { id: node.clone(), endpoint: source.clone() }; + let entry = NodeEntry { id: *node, endpoint: source.clone() }; if !entry.endpoint.is_valid() { debug!(target: "discovery", "Got bad address: {:?}", entry); } else if !self.is_allowed(&entry) { @@ -479,10 +479,10 @@ impl<'a> Discovery<'a> { let echo_hash: H256 = rlp.val_at(1)?; let timestamp: u64 = rlp.val_at(2)?; self.check_timestamp(timestamp)?; - let mut node = NodeEntry { id: node_id.clone(), endpoint: dest }; + let mut node = NodeEntry { id: *node_id, endpoint: dest }; if !node.endpoint.is_valid() { debug!(target: "discovery", "Bad address: {:?}", node); - node.endpoint.address = from.clone(); + node.endpoint.address = *from; } let is_expected = match self.in_flight_requests.entry(*node_id) { @@ -530,10 +530,10 @@ impl<'a> Discovery<'a> { let packets = chunks.map(|c| { let mut rlp = RlpStream::new_list(2); rlp.begin_list(c.len()); - for n in 0 .. c.len() { + for n in c { rlp.begin_list(4); - c[n].endpoint.to_rlp(&mut rlp); - rlp.append(&c[n].id); + n.endpoint.to_rlp(&mut rlp); + rlp.append(&n.id); } append_expiration(&mut rlp); rlp.out() @@ -581,7 +581,7 @@ impl<'a> Discovery<'a> { if node_id == self.id { continue; } - let entry = NodeEntry { id: node_id.clone(), endpoint: endpoint }; + let entry = NodeEntry { id: node_id, endpoint }; if !self.is_allowed(&entry) { debug!(target: "discovery", "Address not allowed: {:?}", entry); continue; @@ -644,7 +644,7 @@ impl<'a> Discovery<'a> { let removed = self.check_expired(Instant::now()); self.discover(); if !removed.is_empty() { - Some(TableUpdates { added: HashMap::new(), removed: removed }) + Some(TableUpdates { added: HashMap::new(), removed }) } else { None } } diff --git a/util/network-devp2p/src/handshake.rs b/util/network-devp2p/src/handshake.rs index e2b1ebaa0..e664f0402 100644 --- a/util/network-devp2p/src/handshake.rs +++ b/util/network-devp2p/src/handshake.rs @@ -271,7 +271,7 @@ impl Handshake { // E(remote-pubk, S(ecdhe-random, ecdh-shared-secret^nonce) || H(ecdhe-random-pubk) || pubk || nonce || 0x0) let shared = *ecdh::agree(secret, &self.id)?; - sig.copy_from_slice(&*sign(self.ecdhe.secret(), &(&shared ^ &self.nonce))?); + sig.copy_from_slice(&*sign(self.ecdhe.secret(), &(shared ^ self.nonce))?); write_keccak(self.ecdhe.public(), hepubk); pubk.copy_from_slice(public); nonce.copy_from_slice(&self.nonce); diff --git a/util/network-devp2p/src/host.rs b/util/network-devp2p/src/host.rs index 81e304f1a..a24684c3b 100644 --- a/util/network-devp2p/src/host.rs +++ b/util/network-devp2p/src/host.rs @@ -280,7 +280,7 @@ impl Host { listen_address = SocketAddr::new(listen_address.ip(), tcp_listener.local_addr()?.port()); debug!(target: "network", "Listening at {:?}", listen_address); let udp_port = config.udp_port.unwrap_or_else(|| listen_address.port()); - let 
local_endpoint = NodeEndpoint { address: listen_address, udp_port: udp_port }; + let local_endpoint = NodeEndpoint { address: listen_address, udp_port }; let boot_nodes = config.boot_nodes.clone(); let reserved_nodes = config.reserved_nodes.clone(); @@ -288,13 +288,13 @@ impl Host { let mut host = Host { info: RwLock::new(HostInfo { - keys: keys, - config: config, + keys, + config, nonce: H256::random(), protocol_version: PROTOCOL_VERSION, capabilities: Vec::new(), public_endpoint: None, - local_endpoint: local_endpoint, + local_endpoint, }), discovery: Mutex::new(None), udp_socket: Mutex::new(None), @@ -306,7 +306,7 @@ impl Host { timer_counter: RwLock::new(USER_TIMER), reserved_nodes: RwLock::new(HashSet::new()), stopping: AtomicBool::new(false), - filter: filter, + filter, }; for n in boot_nodes { @@ -349,11 +349,11 @@ impl Host { Ok(()) } - pub fn set_non_reserved_mode(&self, mode: &NonReservedPeerMode, io: &IoContext) { + pub fn set_non_reserved_mode(&self, mode: NonReservedPeerMode, io: &IoContext) { let mut info = self.info.write(); - if &info.config.non_reserved_mode != mode { - info.config.non_reserved_mode = mode.clone(); + if info.config.non_reserved_mode != mode { + info.config.non_reserved_mode = mode; drop(info); if let NonReservedPeerMode::Deny = mode { // disconnect all non-reserved peers here. @@ -430,7 +430,7 @@ impl Host { return Ok(()); } let local_endpoint = self.info.read().local_endpoint.clone(); - let public_address = self.info.read().config.public_address.clone(); + let public_address = self.info.read().config.public_address; let allow_ips = self.info.read().config.ip_filter.clone(); let public_endpoint = match public_address { None => { @@ -489,7 +489,7 @@ impl Host { } fn have_session(&self, id: &NodeId) -> bool { - self.sessions.read().iter().any(|e| e.lock().info.id == Some(id.clone())) + self.sessions.read().iter().any(|e| e.lock().info.id == Some(*id)) } // returns (handshakes, egress, ingress) @@ -534,7 +534,7 @@ impl Host { } let config = &info.config; - (config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny, config.max_handshakes as usize, config.ip_filter.clone(), info.id().clone()) + (config.min_peers, config.non_reserved_mode == NonReservedPeerMode::Deny, config.max_handshakes as usize, config.ip_filter.clone(), *info.id()) }; let (handshake_count, egress_count, ingress_count) = self.session_count(); @@ -710,18 +710,18 @@ impl Host { let (min_peers, mut max_peers, reserved_only, self_id) = { let info = self.info.read(); let mut max_peers = info.config.max_peers; - for cap in s.info.capabilities.iter() { + for cap in &s.info.capabilities { if let Some(num) = info.config.reserved_protocols.get(&cap.protocol) { max_peers += *num; break; } } - (info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny, info.id().clone()) + (info.config.min_peers as usize, max_peers as usize, info.config.non_reserved_mode == NonReservedPeerMode::Deny, *info.id()) }; max_peers = max(max_peers, min_peers); - let id = s.id().expect("Ready session always has id").clone(); + let id = *s.id().expect("Ready session always has id"); // Check for the session limit. 
// Outgoing connections are allowed as long as their count is <= min_peers @@ -729,13 +729,11 @@ impl Host { let max_ingress = max(max_peers - min_peers, min_peers / 2); if reserved_only || (s.info.originated && egress_count > min_peers) || - (!s.info.originated && ingress_count > max_ingress) { + (!s.info.originated && ingress_count > max_ingress) && !self.reserved_nodes.read().contains(&id) { // only proceed if the connecting peer is reserved. - if !self.reserved_nodes.read().contains(&id) { - s.disconnect(io, DisconnectReason::TooManyPeers); - kill = true; - break; - } + s.disconnect(io, DisconnectReason::TooManyPeers); + kill = true; + break; } if !self.filter.as_ref().map_or(true, |f| f.connection_allowed(&self_id, &id, ConnectionDirection::Inbound)) { @@ -752,7 +750,7 @@ impl Host { if let Ok(address) = s.remote_addr() { // We can't know remote listening ports, so just assume defaults and hope for the best. let endpoint = NodeEndpoint { address: SocketAddr::new(address.ip(), DEFAULT_PORT), udp_port: DEFAULT_PORT }; - let entry = NodeEntry { id: id, endpoint: endpoint }; + let entry = NodeEntry { id, endpoint }; let mut nodes = self.nodes.write(); if !nodes.contains(&entry.id) { nodes.add_node(Node::new(entry.id, entry.endpoint.clone())); @@ -807,7 +805,7 @@ impl Host { } for p in ready_data { let reserved = self.reserved_nodes.read(); - if let Some(h) = handlers.get(&p).clone() { + if let Some(h) = handlers.get(&p) { h.connected(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token); // accumulate pending packets. let mut session = session.lock(); @@ -818,7 +816,7 @@ impl Host { for (p, packet_id, data) in packet_data { let reserved = self.reserved_nodes.read(); - if let Some(h) = handlers.get(&p).clone() { + if let Some(h) = handlers.get(&p) { h.read(&NetworkContext::new(io, p, Some(session.clone()), self.sessions.clone(), &reserved), &token, packet_id, &data); } } @@ -858,31 +856,28 @@ impl Host { } fn discovery_writable(&self, io: &IoContext) { - match (self.udp_socket.lock().as_ref(), self.discovery.lock().as_mut()) { - (Some(udp_socket), Some(discovery)) => { - while let Some(data) = discovery.dequeue_send() { - match udp_socket.send_to(&data.payload, &data.address) { - Ok(Some(size)) if size == data.payload.len() => { - }, - Ok(Some(_)) => { - warn!(target: "network", "UDP sent incomplete datagram"); - }, - Ok(None) => { - discovery.requeue_send(data); - return; - } - Err(e) => { - debug!(target: "network", "UDP send error: {:?}, address: {:?}", e, &data.address); - return; - } + if let (Some(udp_socket), Some(discovery)) = (self.udp_socket.lock().as_ref(), self.discovery.lock().as_mut()) { + while let Some(data) = discovery.dequeue_send() { + match udp_socket.send_to(&data.payload, &data.address) { + Ok(Some(size)) if size == data.payload.len() => { + }, + Ok(Some(_)) => { + warn!(target: "network", "UDP sent incomplete datagram"); + }, + Ok(None) => { + discovery.requeue_send(data); + return; + } + Err(e) => { + debug!(target: "network", "UDP send error: {:?}, address: {:?}", e, &data.address); + return; } } - io.update_registration(DISCOVERY) - .unwrap_or_else(|e| { - debug!(target: "network", "Error updating discovery registration: {:?}", e) - }); - }, - _ => (), + } + io.update_registration(DISCOVERY) + .unwrap_or_else(|e| { + debug!(target: "network", "Error updating discovery registration: {:?}", e) + }); } } @@ -922,7 +917,7 @@ impl Host { } for p in to_disconnect { let reserved = self.reserved_nodes.read(); - if let Some(h) = 
self.handlers.read().get(&p).clone() { + if let Some(h) = self.handlers.read().get(&p) { h.disconnected(&NetworkContext::new(io, p, expired_session.clone(), self.sessions.clone(), &reserved), &token); } } @@ -1012,11 +1007,13 @@ impl IoHandler for Host { IDLE => self.maintain_network(io), FIRST_SESSION ... LAST_SESSION => self.connection_timeout(token, io), DISCOVERY_REFRESH => { - self.discovery.lock().as_mut().map(|d| d.refresh()); + if let Some(d) = self.discovery.lock().as_mut() { + d.refresh(); + } io.update_registration(DISCOVERY).unwrap_or_else(|e| debug!("Error updating discovery registration: {:?}", e)); }, DISCOVERY_ROUND => { - let node_changes = { self.discovery.lock().as_mut().map_or(None, |d| d.round()) }; + let node_changes = { self.discovery.lock().as_mut().and_then(|d| d.round()) }; if let Some(node_changes) = node_changes { self.update_nodes(io, node_changes); } diff --git a/util/network-devp2p/src/node_table.rs b/util/network-devp2p/src/node_table.rs index b2c417b25..7d1380907 100644 --- a/util/network-devp2p/src/node_table.rs +++ b/util/network-devp2p/src/node_table.rs @@ -393,7 +393,6 @@ impl NodeTable { let nodes = node_ids.into_iter() .map(|id| self.nodes.get(&id).expect("self.nodes() only returns node IDs from self.nodes")) .take(MAX_NODES) - .map(|node| node.clone()) .map(Into::into) .collect(); let table = json::NodeTable { nodes }; diff --git a/util/network-devp2p/src/service.rs b/util/network-devp2p/src/service.rs index 709161aeb..fc8f79b36 100644 --- a/util/network-devp2p/src/service.rs +++ b/util/network-devp2p/src/service.rs @@ -59,12 +59,12 @@ impl NetworkService { let io_service = IoService::::start()?; Ok(NetworkService { - io_service: io_service, + io_service, host_info: config.client_version.clone(), host: RwLock::new(None), - config: config, - host_handler: host_handler, - filter: filter, + config, + host_handler, + filter, }) } @@ -120,10 +120,10 @@ impl NetworkService { /// In case of error, also returns the listening address for better error reporting. pub fn start(&self) -> Result<(), (Error, Option)> { let mut host = self.host.write(); - let listen_addr = self.config.listen_address.clone(); + let listen_addr = self.config.listen_address; if host.is_none() { let h = Arc::new(Host::new(self.config.clone(), self.filter.clone()) - .map_err(|err| (err.into(), listen_addr))?); + .map_err(|err| (err, listen_addr))?); self.io_service.register_handler(h.clone()) .map_err(|err| (err.into(), listen_addr))?; *host = Some(h); @@ -177,7 +177,7 @@ impl NetworkService { let host = self.host.read(); if let Some(ref host) = *host { let io_ctxt = IoContext::new(self.io_service.channel(), 0); - host.set_non_reserved_mode(&mode, &io_ctxt); + host.set_non_reserved_mode(mode, &io_ctxt); } } diff --git a/util/network/src/lib.rs b/util/network/src/lib.rs index c31ace410..88bd4e686 100644 --- a/util/network/src/lib.rs +++ b/util/network/src/lib.rs @@ -350,7 +350,7 @@ pub trait NetworkProtocolHandler: Sync + Send { } /// Non-reserved peer modes. -#[derive(Clone, Debug, PartialEq, Eq)] +#[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum NonReservedPeerMode { /// Accept them. This is the default. 
Accept, From f230c719d8121162f35e52c5aad0b008cc0b3b33 Mon Sep 17 00:00:00 2001 From: Thibaut Sardan <33178835+Tbaut@users.noreply.github.com> Date: Tue, 21 Aug 2018 14:36:04 +0200 Subject: [PATCH 38/48] Delete Dockerfile (#9386) --- docker/ubuntu/Dockerfile | 42 ---------------------------------------- 1 file changed, 42 deletions(-) delete mode 100644 docker/ubuntu/Dockerfile diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile deleted file mode 100644 index 574ff64eb..000000000 --- a/docker/ubuntu/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM ubuntu:14.04 -WORKDIR /build - -# install tools and dependencies -RUN apt-get update && \ - apt-get install -y \ - g++ \ - build-essential \ - cmake \ - curl \ - git \ - file \ - binutils \ - pkg-config \ - libudev-dev - -# install rustup -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y - -# rustup directory -ENV PATH /root/.cargo/bin:$PATH - -# show backtraces -ENV RUST_BACKTRACE 1 - -# show tools -RUN rustc -vV && \ -cargo -V && \ -gcc -v &&\ -g++ -v - -# build parity -ADD . /build/parity -RUN cd parity && \ - cargo build --release --verbose && \ - ls /build/parity/target/release/parity && \ - strip /build/parity/target/release/parity - -RUN file /build/parity/target/release/parity - -EXPOSE 8080 8545 8180 -ENTRYPOINT ["/build/parity/target/release/parity"] From 139a2b7b0d6499c21b0977804725ea1e437c849d Mon Sep 17 00:00:00 2001 From: Nick Sanders Date: Tue, 21 Aug 2018 06:30:24 -0700 Subject: [PATCH 39/48] Replace `std::env::home_dir()` with `home` crate impl. (#9293) * Import the `home` crate in `util/dir`. * Replace uses of `env::home_dir()` with `home::home_dir()`. * `home` uses a 'correct' impl. on windows and the stdlib impl. of `::home_dir` otherwise. * Reexport `home::home_dir` from `util/dir`. * Bump `util/dir` to 0.1.2. 
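
A minimal sketch of the substitution described above, assuming `home = "0.3"` is added as a dependency; the `config_path` helper and the `.parity` directory name are illustrative only and are not taken from the patch below. The `home` crate's `home_dir()` returns the same `Option<PathBuf>` shape as the deprecated `std::env::home_dir()`, but, as the commit message notes, with a correct implementation on Windows.

```rust
// Sketch only: the shape of the replacement, not code from the patch.
extern crate home; // home = "0.3" in Cargo.toml

use std::path::PathBuf;

// Hypothetical helper; the real call sites live in util/dir and parity/upgrade.rs.
fn config_path() -> PathBuf {
    // Before: std::env::home_dir().expect("Failed to get home dir")
    let mut dir = home::home_dir().expect("Failed to get home dir");
    dir.push(".parity");
    dir
}

fn main() {
    println!("{}", config_path().display());
}
```
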
--- Cargo.lock | 41 ++++++---- README.md | 166 ---------------------------------------- parity/upgrade.rs | 5 +- util/dir/Cargo.toml | 3 +- util/dir/src/helpers.rs | 4 +- util/dir/src/lib.rs | 7 +- 6 files changed, 37 insertions(+), 189 deletions(-) delete mode 100644 README.md diff --git a/Cargo.lock b/Cargo.lock index 38f1cbb5d..bbedd2fed 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -357,10 +357,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "dir" -version = "0.1.1" +version = "0.1.2" dependencies = [ "app_dirs 1.2.1 (git+https://github.com/paritytech/app-dirs-rs)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", "journaldb 0.2.0", ] @@ -622,7 +623,7 @@ dependencies = [ "rlp_derive 0.1.0", "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "stats 0.1.0", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "triehash-ethereum 0.2.0", @@ -866,7 +867,7 @@ dependencies = [ "rlp 0.2.1 (git+https://github.com/paritytech/parity-common)", "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", "triehash-ethereum 0.2.0", ] @@ -959,7 +960,7 @@ dependencies = [ name = "ethstore" version = "0.2.0" dependencies = [ - "dir 0.1.1", + "dir 0.1.2", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethkey 0.3.0", "itertools 0.5.10 (registry+https://github.com/rust-lang/crates.io-index)", @@ -974,7 +975,7 @@ dependencies = [ "serde 1.0.70 (registry+https://github.com/rust-lang/crates.io-index)", "serde_derive 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.38 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -984,7 +985,7 @@ dependencies = [ name = "ethstore-cli" version = "0.1.0" dependencies = [ - "dir 0.1.1", + "dir 0.1.2", "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", "ethstore 0.2.0", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1223,6 +1224,15 @@ dependencies = [ "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "home" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "httparse" version = "1.2.3" @@ -1849,7 +1859,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "num-integer 0.1.36 
(registry+https://github.com/rust-lang/crates.io-index)", "num-traits 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1977,7 +1987,7 @@ dependencies = [ "clap 2.29.1 (registry+https://github.com/rust-lang/crates.io-index)", "ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)", "daemonize 0.2.3 (git+https://github.com/paritytech/daemonize)", - "dir 0.1.1", + "dir 0.1.2", "docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.5.12 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.12.0", @@ -2280,7 +2290,7 @@ dependencies = [ "serde_derive 1.0.37 (registry+https://github.com/rust-lang/crates.io-index)", "serde_json 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2313,7 +2323,7 @@ dependencies = [ "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "petgraph 0.4.12 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "thread-id 3.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -2469,7 +2479,7 @@ dependencies = [ "hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "primal-bit 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)", "primal-estimate 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2586,7 +2596,7 @@ dependencies = [ "lazy_static 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rand 0.3.20 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2931,7 +2941,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "smallvec" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3336,7 +3346,7 @@ dependencies = [ "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)", "ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.1 (registry+https://github.com/rust-lang/crates.io-index)", - "smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", "trace-time 0.1.0", ] @@ -3750,6 +3760,7 @@ dependencies = [ "checksum heck 0.3.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "ea04fa3ead4e05e51a7c806fc07271fdbde4e246a6c6d1efd52e72230b771b82" "checksum hex 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d6a22814455d41612f41161581c2883c0c6a1c41852729b17d5ed88f01e153aa" "checksum hidapi 0.3.1 (git+https://github.com/paritytech/hidapi-rs)" = "" +"checksum home 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "80dff82fb58cfbbc617fb9a9184b010be0529201553cda50ad04372bc2333aff" "checksum httparse 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "af2f2dd97457e8fb1ae7c5a420db346af389926e36f43768b96f101546b04a07" "checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e" "checksum hyper 0.11.24 (registry+https://github.com/rust-lang/crates.io-index)" = "df4dd5dae401458087396b6db7fabc4d6760aa456a5fa8e92bda549f39cae661" @@ -3894,7 +3905,7 @@ dependencies = [ "checksum slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "17b4fcaed89ab08ef143da37bc52adbcc04d4a69014f4c1208d6b51f0c47bc23" "checksum slab 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fdeff4cd9ecff59ec7e3744cbca73dfe5ac35c2aedb2cfba8a1c715a18912e9d" "checksum smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4c8cbcd6df1e117c2210e13ab5109635ad68a929fcbb8964dc965b76cb5ee013" -"checksum smallvec 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fcd03faf178110ab0334d74ca9631d77f94c8c11cc77fcb59538abf0025695d" +"checksum smallvec 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "f90c5e5fe535e48807ab94fc611d323935f39d4660c52b26b96446a7b33aef10" "checksum snappy 0.1.0 (git+https://github.com/paritytech/rust-snappy)" = "" "checksum snappy-sys 0.1.0 (git+https://github.com/paritytech/rust-snappy)" = "" "checksum socket2 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "06dc9f86ee48652b7c80f3d254e3b9accb67a928c562c64d10d7b016d3d98dab" diff --git a/README.md b/README.md deleted file mode 100644 index 40c04dff7..000000000 --- a/README.md +++ /dev/null @@ -1,166 +0,0 @@ -## Parity-Ethereum - a fast, light, and robust EVM and WASM blockchain client - -### [» Download the latest release «](https://github.com/paritytech/parity-ethereum/releases/latest) - -[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) -[![codecov](https://codecov.io/gh/paritytech/parity-ethereum/branch/master/graph/badge.svg)](https://codecov.io/gh/paritytech/parity-ethereum) -[![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity) -[![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html) - - -### Join the chat! 
- -Get in touch with us on Gitter: -[![Gitter: Parity](https://img.shields.io/badge/gitter-parity-4AB495.svg)](https://gitter.im/paritytech/parity) -[![Gitter: Parity.js](https://img.shields.io/badge/gitter-parity.js-4AB495.svg)](https://gitter.im/paritytech/parity.js) -[![Gitter: Parity/Miners](https://img.shields.io/badge/gitter-parity/miners-4AB495.svg)](https://gitter.im/paritytech/parity/miners) -[![Gitter: Parity-PoA](https://img.shields.io/badge/gitter-parity--poa-4AB495.svg)](https://gitter.im/paritytech/parity-poa) - -Or join our community on Matrix: -[![Riot: +Parity](https://img.shields.io/badge/riot-%2Bparity%3Amatrix.parity.io-orange.svg)](https://riot.im/app/#/group/+parity:matrix.parity.io) - -Official website: https://parity.io | Be sure to check out [our wiki](https://wiki.parity.io) for more information. - ----- - -## About Parity-Ethereum - -Parity-Ethereum's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity-Ethereum using the sophisticated and cutting-edge Rust programming language. Parity-Ethereum is licensed under the GPLv3, and can be used for all your Ethereum needs. - -By default, Parity-Ethereum will run a JSON-RPC HTTP server on `127.0.0.1:8545` and a Web-Sockets server on `127.0.0.1:8546`. This is fully configurable and supports a number of APIs. - -If you run into problems while using Parity-Ethereum, feel free to file an issue in this repository or hop on our [Gitter](https://gitter.im/paritytech/parity) or [Riot](https://riot.im/app/#/group/+parity:matrix.parity.io) chat room to ask a question. We are glad to help! **For security-critical issues**, please refer to the security policy outlined in [SECURITY.md](SECURITY.md). - -Parity-Ethereum's current beta-release is 2.0. You can download it at [the releases page](https://github.com/paritytech/parity-ethereum/releases) or follow the instructions below to build from source. Please, mind the [CHANGELOG.md](CHANGELOG.md) for a list of all changes between different versions. - ----- - -## Build dependencies - -**Parity-Ethereum requires Rust version 1.27.0 to build** - -We recommend installing Rust through [rustup](https://www.rustup.rs/). If you don't already have rustup, you can install it like this: - -- Linux: - ```bash - $ curl https://sh.rustup.rs -sSf | sh - ``` - - Parity-Ethereum also requires `gcc`, `g++`, `libudev-dev`, `pkg-config`, `file`, `make`, and `cmake` packages to be installed. - -- OSX: - ```bash - $ curl https://sh.rustup.rs -sSf | sh - ``` - - `clang` is required. It comes with Xcode command line tools or can be installed with homebrew. - -- Windows - Make sure you have Visual Studio 2015 with C++ support installed. Next, download and run the rustup installer from - https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe, start "VS2015 x64 Native Tools Command Prompt", and use the following command to install and set up the msvc toolchain: - ```bash - $ rustup default stable-x86_64-pc-windows-msvc - ``` - -Once you have rustup installed, then you need to install: -* [Perl](https://www.perl.org) -* [Yasm](https://yasm.tortall.net) - -Make sure that these binaries are in your `PATH`. After that you should be able to build Parity-Ethereum from source. 
- ----- - -## Install from the snap store - -In any of the [supported Linux distros](https://snapcraft.io/docs/core/install): - -```bash -sudo snap install parity -``` - -Or, if you want to contribute testing the upcoming release: - -```bash -sudo snap install parity --beta -``` - -And to test the latest code landed into the master branch: - -```bash -sudo snap install parity --edge -``` - ----- - -## Build from source - -```bash -# download Parity-Ethereum code -$ git clone https://github.com/paritytech/parity-ethereum -$ cd parity-ethereum - -# build in release mode -$ cargo build --release --features final -``` - -This will produce an executable in the `./target/release` subdirectory. - -Note: if cargo fails to parse manifest try: - -```bash -$ ~/.cargo/bin/cargo build --release -``` - -Note, when compiling a crate and you receive errors, it's in most cases your outdated version of Rust, or some of your crates have to be recompiled. Cleaning the repository will most likely solve the issue if you are on the latest stable version of Rust, try: - -```bash -$ cargo clean -``` - -This will always compile the latest nightly builds. If you want to build stable or beta, do a - -```bash -$ git checkout stable -``` - -or - -```bash -$ git checkout beta -``` - -first. - ----- - -## Simple one-line installer for Mac and Ubuntu - -```bash -bash <(curl https://get.parity.io -L) -``` - -The one-line installer always defaults to the latest beta release. To install a stable release, run: - -```bash -bash <(curl https://get.parity.io -L) -r stable -``` - -## Start Parity-Ethereum - -### Manually - -To start Parity-Ethereum manually, just run - -```bash -$ ./target/release/parity -``` - -and Parity-Ethereum will begin syncing the Ethereum blockchain. - -### Using systemd service file - -To start Parity-Ethereum as a regular user using systemd init: - -1. Copy `./scripts/parity.service` to your -systemd user directory (usually `~/.config/systemd/user`). -2. To configure Parity-Ethereum, write a `/etc/parity/config.toml` config file, see [Configuring Parity-Ethereum](https://paritytech.github.io/wiki/Configuring-Parity) for details. diff --git a/parity/upgrade.rs b/parity/upgrade.rs index e81c1cbee..1db2e77bd 100644 --- a/parity/upgrade.rs +++ b/parity/upgrade.rs @@ -19,11 +19,10 @@ use semver::{Version, SemVerError}; use std::collections::*; use std::fs::{self, File, create_dir_all}; -use std::env; use std::io; use std::io::{Read, Write}; use std::path::{PathBuf, Path}; -use dir::{DatabaseDirectories, default_data_path}; +use dir::{DatabaseDirectories, default_data_path, home_dir}; use dir::helpers::replace_home; use journaldb::Algorithm; @@ -201,7 +200,7 @@ fn upgrade_user_defaults(dirs: &DatabaseDirectories) { } pub fn upgrade_data_paths(base_path: &str, dirs: &DatabaseDirectories, pruning: Algorithm) { - if env::home_dir().is_none() { + if home_dir().is_none() { return; } diff --git a/util/dir/Cargo.toml b/util/dir/Cargo.toml index 092d45408..00eab143b 100644 --- a/util/dir/Cargo.toml +++ b/util/dir/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "dir" -version = "0.1.1" +version = "0.1.2" authors = ["Parity Technologies "] license = "GPL3" @@ -8,3 +8,4 @@ license = "GPL3" ethereum-types = "0.3" journaldb = { path = "../journaldb" } app_dirs = { git = "https://github.com/paritytech/app-dirs-rs" } +home = "0.3" diff --git a/util/dir/src/helpers.rs b/util/dir/src/helpers.rs index 24faaff6e..6c3e05072 100644 --- a/util/dir/src/helpers.rs +++ b/util/dir/src/helpers.rs @@ -15,14 +15,14 @@ // along with Parity. 
If not, see . //! Directory helper functions -use std::env; +use ::home_dir; /// Replaces `$HOME` str with home directory path. pub fn replace_home(base: &str, arg: &str) -> String { // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` // We use an `if` so that we don't need to call `home_dir()` if not necessary. let r = if arg.contains("$HOME") { - arg.replace("$HOME", env::home_dir().expect("$HOME isn't defined").to_str().unwrap()) + arg.replace("$HOME", home_dir().expect("$HOME isn't defined").to_str().unwrap()) } else { arg.to_owned() }; diff --git a/util/dir/src/lib.rs b/util/dir/src/lib.rs index aac672b1f..cddeac78f 100644 --- a/util/dir/src/lib.rs +++ b/util/dir/src/lib.rs @@ -20,9 +20,10 @@ extern crate app_dirs; extern crate ethereum_types; extern crate journaldb; +extern crate home; pub mod helpers; -use std::{env, fs}; +use std::fs; use std::path::{PathBuf, Path}; use ethereum_types::{H64, H256}; use journaldb::Algorithm; @@ -31,6 +32,8 @@ use app_dirs::{AppInfo, get_app_root, AppDataType}; // re-export platform-specific functions use platform::*; +pub use home::home_dir; + /// Platform-specific chains path for standard client - Windows only #[cfg(target_os = "windows")] pub const CHAINS_PATH: &str = "$LOCAL/chains"; /// Platform-specific chains path for light client - Windows only @@ -237,7 +240,7 @@ pub fn default_hypervisor_path() -> PathBuf { /// Get home directory. fn home() -> PathBuf { - env::home_dir().expect("Failed to get home dir") + home_dir().expect("Failed to get home dir") } /// Geth path From 7bf1889af1d775e015dd5400548244ddbc452bdc Mon Sep 17 00:00:00 2001 From: Afri Schoedon <5chdn@users.noreply.github.com> Date: Tue, 21 Aug 2018 16:49:24 +0200 Subject: [PATCH 40/48] docs: restore readme (#9391) --- README.md | 166 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 README.md diff --git a/README.md b/README.md new file mode 100644 index 000000000..40c04dff7 --- /dev/null +++ b/README.md @@ -0,0 +1,166 @@ +## Parity-Ethereum - a fast, light, and robust EVM and WASM blockchain client + +### [» Download the latest release «](https://github.com/paritytech/parity-ethereum/releases/latest) + +[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) +[![codecov](https://codecov.io/gh/paritytech/parity-ethereum/branch/master/graph/badge.svg)](https://codecov.io/gh/paritytech/parity-ethereum) +[![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity) +[![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html) + + +### Join the chat! 
+ +Get in touch with us on Gitter: +[![Gitter: Parity](https://img.shields.io/badge/gitter-parity-4AB495.svg)](https://gitter.im/paritytech/parity) +[![Gitter: Parity.js](https://img.shields.io/badge/gitter-parity.js-4AB495.svg)](https://gitter.im/paritytech/parity.js) +[![Gitter: Parity/Miners](https://img.shields.io/badge/gitter-parity/miners-4AB495.svg)](https://gitter.im/paritytech/parity/miners) +[![Gitter: Parity-PoA](https://img.shields.io/badge/gitter-parity--poa-4AB495.svg)](https://gitter.im/paritytech/parity-poa) + +Or join our community on Matrix: +[![Riot: +Parity](https://img.shields.io/badge/riot-%2Bparity%3Amatrix.parity.io-orange.svg)](https://riot.im/app/#/group/+parity:matrix.parity.io) + +Official website: https://parity.io | Be sure to check out [our wiki](https://wiki.parity.io) for more information. + +---- + +## About Parity-Ethereum + +Parity-Ethereum's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity-Ethereum using the sophisticated and cutting-edge Rust programming language. Parity-Ethereum is licensed under the GPLv3, and can be used for all your Ethereum needs. + +By default, Parity-Ethereum will run a JSON-RPC HTTP server on `127.0.0.1:8545` and a Web-Sockets server on `127.0.0.1:8546`. This is fully configurable and supports a number of APIs. + +If you run into problems while using Parity-Ethereum, feel free to file an issue in this repository or hop on our [Gitter](https://gitter.im/paritytech/parity) or [Riot](https://riot.im/app/#/group/+parity:matrix.parity.io) chat room to ask a question. We are glad to help! **For security-critical issues**, please refer to the security policy outlined in [SECURITY.md](SECURITY.md). + +Parity-Ethereum's current beta-release is 2.0. You can download it at [the releases page](https://github.com/paritytech/parity-ethereum/releases) or follow the instructions below to build from source. Please, mind the [CHANGELOG.md](CHANGELOG.md) for a list of all changes between different versions. + +---- + +## Build dependencies + +**Parity-Ethereum requires Rust version 1.27.0 to build** + +We recommend installing Rust through [rustup](https://www.rustup.rs/). If you don't already have rustup, you can install it like this: + +- Linux: + ```bash + $ curl https://sh.rustup.rs -sSf | sh + ``` + + Parity-Ethereum also requires `gcc`, `g++`, `libudev-dev`, `pkg-config`, `file`, `make`, and `cmake` packages to be installed. + +- OSX: + ```bash + $ curl https://sh.rustup.rs -sSf | sh + ``` + + `clang` is required. It comes with Xcode command line tools or can be installed with homebrew. + +- Windows + Make sure you have Visual Studio 2015 with C++ support installed. Next, download and run the rustup installer from + https://static.rust-lang.org/rustup/dist/x86_64-pc-windows-msvc/rustup-init.exe, start "VS2015 x64 Native Tools Command Prompt", and use the following command to install and set up the msvc toolchain: + ```bash + $ rustup default stable-x86_64-pc-windows-msvc + ``` + +Once you have rustup installed, then you need to install: +* [Perl](https://www.perl.org) +* [Yasm](https://yasm.tortall.net) + +Make sure that these binaries are in your `PATH`. After that you should be able to build Parity-Ethereum from source. 
+ +---- + +## Install from the snap store + +In any of the [supported Linux distros](https://snapcraft.io/docs/core/install): + +```bash +sudo snap install parity +``` + +Or, if you want to contribute testing the upcoming release: + +```bash +sudo snap install parity --beta +``` + +And to test the latest code landed into the master branch: + +```bash +sudo snap install parity --edge +``` + +---- + +## Build from source + +```bash +# download Parity-Ethereum code +$ git clone https://github.com/paritytech/parity-ethereum +$ cd parity-ethereum + +# build in release mode +$ cargo build --release --features final +``` + +This will produce an executable in the `./target/release` subdirectory. + +Note: if cargo fails to parse manifest try: + +```bash +$ ~/.cargo/bin/cargo build --release +``` + +Note, when compiling a crate and you receive errors, it's in most cases your outdated version of Rust, or some of your crates have to be recompiled. Cleaning the repository will most likely solve the issue if you are on the latest stable version of Rust, try: + +```bash +$ cargo clean +``` + +This will always compile the latest nightly builds. If you want to build stable or beta, do a + +```bash +$ git checkout stable +``` + +or + +```bash +$ git checkout beta +``` + +first. + +---- + +## Simple one-line installer for Mac and Ubuntu + +```bash +bash <(curl https://get.parity.io -L) +``` + +The one-line installer always defaults to the latest beta release. To install a stable release, run: + +```bash +bash <(curl https://get.parity.io -L) -r stable +``` + +## Start Parity-Ethereum + +### Manually + +To start Parity-Ethereum manually, just run + +```bash +$ ./target/release/parity +``` + +and Parity-Ethereum will begin syncing the Ethereum blockchain. + +### Using systemd service file + +To start Parity-Ethereum as a regular user using systemd init: + +1. Copy `./scripts/parity.service` to your +systemd user directory (usually `~/.config/systemd/user`). +2. To configure Parity-Ethereum, write a `/etc/parity/config.toml` config file, see [Configuring Parity-Ethereum](https://paritytech.github.io/wiki/Configuring-Parity) for details. 
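
The restored README states that a freshly started node serves JSON-RPC over HTTP on 127.0.0.1:8545 by default. Below is a minimal sketch of exercising that endpoint, assuming a node is already running locally; it hand-rolls the HTTP framing with only the standard library and calls `web3_clientVersion`, a standard JSON-RPC method, so nothing here is specific to the patches in this series.

```rust
// Sketch only: query the default JSON-RPC endpoint mentioned in the README.
// Assumes a node is listening on 127.0.0.1:8545; uses std only.
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    let body = r#"{"jsonrpc":"2.0","method":"web3_clientVersion","params":[],"id":1}"#;
    let request = format!(
        "POST / HTTP/1.1\r\nHost: 127.0.0.1:8545\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
        body.len(),
        body
    );
    let mut stream = TcpStream::connect("127.0.0.1:8545")?;
    stream.write_all(request.as_bytes())?;

    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    println!("{}", response); // expect a JSON body naming the client version
    Ok(())
}
```
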
From e12a26dac5cb6ffd21774781c190f09a0ec23eca Mon Sep 17 00:00:00 2001 From: Niklas Adolfsson Date: Tue, 21 Aug 2018 17:01:09 +0200 Subject: [PATCH 41/48] Replace `Duration::new()` w/ `Duration::from_nanos` (#9387) --- ethcore/light/src/net/load_timer.rs | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/ethcore/light/src/net/load_timer.rs b/ethcore/light/src/net/load_timer.rs index 0ad962702..9612be51e 100644 --- a/ethcore/light/src/net/load_timer.rs +++ b/ethcore/light/src/net/load_timer.rs @@ -120,17 +120,15 @@ impl LoadDistribution { pub fn expected_time(&self, kind: Kind) -> Duration { let samples = self.samples.read(); samples.get(&kind).and_then(|s| { - if s.len() == 0 { return None } + if s.is_empty() { return None } - let alpha: f64 = 1f64 / s.len() as f64; - let start = s.front().expect("length known to be non-zero; qed").clone(); - let ema = s.iter().skip(1).fold(start as f64, |a, &c| { + let alpha: f64 = 1_f64 / s.len() as f64; + let start = *s.front().expect("length known to be non-zero; qed") as f64; + let ema = s.iter().skip(1).fold(start, |a, &c| { (alpha * c as f64) + ((1.0 - alpha) * a) }); - // TODO: use `Duration::from_nanos` once stable (https://github.com/rust-lang/rust/issues/46507) - let ema = ema as u64; - Some(Duration::new(ema / 1_000_000_000, (ema % 1_000_000_000) as u32)) + Some(Duration::from_nanos(ema as u64)) }).unwrap_or_else(move || hardcoded_serve_time(kind)) } From 491ce61a7674746b8a00570d0e260375f8fec00b Mon Sep 17 00:00:00 2001 From: Andronik Ordian Date: Wed, 22 Aug 2018 17:01:07 +0300 Subject: [PATCH 42/48] Revert "Use std::sync::Condvar (#1732)" (#9392) * Revert "Use std::sync::Condvar (#1732)" This reverts commit c65ee9354250ff9ee961cb1104659ecf3d3355c4. * verification_queue: remove redundant mutexes --- ethcore/src/verification/queue/mod.rs | 34 ++++++++++++--------------- util/io/src/service_mio.rs | 9 ++++--- util/io/src/worker.rs | 20 ++++++++-------- 3 files changed, 29 insertions(+), 34 deletions(-) diff --git a/ethcore/src/verification/queue/mod.rs b/ethcore/src/verification/queue/mod.rs index 5ae4f7c8f..c64ccd9e2 100644 --- a/ethcore/src/verification/queue/mod.rs +++ b/ethcore/src/verification/queue/mod.rs @@ -19,7 +19,7 @@ use std::thread::{self, JoinHandle}; use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering as AtomicOrdering}; -use std::sync::{Condvar as SCondvar, Mutex as SMutex, Arc}; +use std::sync::Arc; use std::cmp; use std::collections::{VecDeque, HashSet, HashMap}; use heapsize::HeapSizeOf; @@ -141,11 +141,11 @@ struct Sizes { /// Keeps them in the same order as inserted, minus invalid items. 
pub struct VerificationQueue { engine: Arc, - more_to_verify: Arc, + more_to_verify: Arc, verification: Arc>, deleting: Arc, ready_signal: Arc, - empty: Arc, + empty: Arc, processing: RwLock>, // hash to difficulty ticks_since_adjustment: AtomicUsize, max_queue_size: usize, @@ -202,8 +202,6 @@ struct Verification { verifying: Mutex>>, verified: Mutex>, bad: Mutex>, - more_to_verify: SMutex<()>, - empty: SMutex<()>, sizes: Sizes, check_seal: bool, } @@ -216,8 +214,6 @@ impl VerificationQueue { verifying: Mutex::new(VecDeque::new()), verified: Mutex::new(VecDeque::new()), bad: Mutex::new(HashSet::new()), - more_to_verify: SMutex::new(()), - empty: SMutex::new(()), sizes: Sizes { unverified: AtomicUsize::new(0), verifying: AtomicUsize::new(0), @@ -225,14 +221,14 @@ impl VerificationQueue { }, check_seal: check_seal, }); - let more_to_verify = Arc::new(SCondvar::new()); + let more_to_verify = Arc::new(Condvar::new()); let deleting = Arc::new(AtomicBool::new(false)); let ready_signal = Arc::new(QueueSignal { deleting: deleting.clone(), signalled: AtomicBool::new(false), message_channel: Mutex::new(message_channel), }); - let empty = Arc::new(SCondvar::new()); + let empty = Arc::new(Condvar::new()); let scale_verifiers = config.verifier_settings.scale_verifiers; let num_cpus = ::num_cpus::get(); @@ -292,9 +288,9 @@ impl VerificationQueue { fn verify( verification: Arc>, engine: Arc, - wait: Arc, + wait: Arc, ready: Arc, - empty: Arc, + empty: Arc, state: Arc<(Mutex, Condvar)>, id: usize, ) { @@ -319,19 +315,19 @@ impl VerificationQueue { // wait for work if empty. { - let mut more_to_verify = verification.more_to_verify.lock().unwrap(); + let mut unverified = verification.unverified.lock(); - if verification.unverified.lock().is_empty() && verification.verifying.lock().is_empty() { + if unverified.is_empty() && verification.verifying.lock().is_empty() { empty.notify_all(); } - while verification.unverified.lock().is_empty() { + while unverified.is_empty() { if let State::Exit = *state.0.lock() { debug!(target: "verification", "verifier {} exiting", id); return; } - more_to_verify = wait.wait(more_to_verify).unwrap(); + wait.wait(&mut unverified); } if let State::Exit = *state.0.lock() { @@ -450,9 +446,9 @@ impl VerificationQueue { /// Wait for unverified queue to be empty pub fn flush(&self) { - let mut lock = self.verification.empty.lock().unwrap(); - while !self.verification.unverified.lock().is_empty() || !self.verification.verifying.lock().is_empty() { - lock = self.empty.wait(lock).unwrap(); + let mut unverified = self.verification.unverified.lock(); + while !unverified.is_empty() || !self.verification.verifying.lock().is_empty() { + self.empty.wait(&mut unverified); } } @@ -712,7 +708,7 @@ impl Drop for VerificationQueue { // acquire this lock to force threads to reach the waiting point // if they're in-between the exit check and the more_to_verify wait. 
{ - let _more = self.verification.more_to_verify.lock().unwrap(); + let _unverified = self.verification.unverified.lock(); self.more_to_verify.notify_all(); } diff --git a/util/io/src/service_mio.rs b/util/io/src/service_mio.rs index 089d54cc4..5687fe6b0 100644 --- a/util/io/src/service_mio.rs +++ b/util/io/src/service_mio.rs @@ -24,8 +24,7 @@ use crossbeam::sync::chase_lev; use slab::Slab; use {IoError, IoHandler}; use worker::{Worker, Work, WorkType}; -use parking_lot::{RwLock, Mutex}; -use std::sync::{Condvar as SCondvar, Mutex as SMutex}; +use parking_lot::{Condvar, RwLock, Mutex}; use std::time::Duration; /// Timer ID @@ -186,7 +185,7 @@ pub struct IoManager where Message: Send + Sync { handlers: Arc>>>>, workers: Vec, worker_channel: chase_lev::Worker>, - work_ready: Arc, + work_ready: Arc, } impl IoManager where Message: Send + Sync + 'static { @@ -197,8 +196,8 @@ impl IoManager where Message: Send + Sync + 'static { ) -> Result<(), IoError> { let (worker, stealer) = chase_lev::deque(); let num_workers = 4; - let work_ready_mutex = Arc::new(SMutex::new(())); - let work_ready = Arc::new(SCondvar::new()); + let work_ready_mutex = Arc::new(Mutex::new(())); + let work_ready = Arc::new(Condvar::new()); let workers = (0..num_workers).map(|i| Worker::new( i, diff --git a/util/io/src/worker.rs b/util/io/src/worker.rs index da144afea..252060848 100644 --- a/util/io/src/worker.rs +++ b/util/io/src/worker.rs @@ -22,7 +22,7 @@ use service_mio::{HandlerId, IoChannel, IoContext}; use IoHandler; use LOCAL_STACK_SIZE; -use std::sync::{Condvar as SCondvar, Mutex as SMutex}; +use parking_lot::{Condvar, Mutex}; const STACK_SIZE: usize = 16*1024*1024; @@ -45,9 +45,9 @@ pub struct Work { /// Sorts them ready for blockchain insertion. pub struct Worker { thread: Option>, - wait: Arc, + wait: Arc, deleting: Arc, - wait_mutex: Arc>, + wait_mutex: Arc>, } impl Worker { @@ -55,8 +55,8 @@ impl Worker { pub fn new(index: usize, stealer: chase_lev::Stealer>, channel: IoChannel, - wait: Arc, - wait_mutex: Arc>, + wait: Arc, + wait_mutex: Arc>, ) -> Worker where Message: Send + Sync + 'static { let deleting = Arc::new(AtomicBool::new(false)); @@ -76,17 +76,17 @@ impl Worker { } fn work_loop(stealer: chase_lev::Stealer>, - channel: IoChannel, wait: Arc, - wait_mutex: Arc>, + channel: IoChannel, wait: Arc, + wait_mutex: Arc>, deleting: Arc) where Message: Send + Sync + 'static { loop { { - let lock = wait_mutex.lock().expect("Poisoned work_loop mutex"); + let mut lock = wait_mutex.lock(); if deleting.load(AtomicOrdering::Acquire) { return; } - let _ = wait.wait(lock); + wait.wait(&mut lock); } while !deleting.load(AtomicOrdering::Acquire) { @@ -122,7 +122,7 @@ impl Worker { impl Drop for Worker { fn drop(&mut self) { trace!(target: "shutdown", "[IoWorker] Closing..."); - let _ = self.wait_mutex.lock().expect("Poisoned work_loop mutex"); + let _ = self.wait_mutex.lock(); self.deleting.store(true, AtomicOrdering::Release); self.wait.notify_all(); if let Some(thread) = self.thread.take() { From e8e0b08f17014a3f94734cfc7208d3bc493f756a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Silva?= Date: Fri, 24 Aug 2018 01:14:01 +0100 Subject: [PATCH 43/48] ethcore: kovan: delay activation of strict score validation (#9406) --- ethcore/res/ethereum/kovan.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethcore/res/ethereum/kovan.json b/ethcore/res/ethereum/kovan.json index 1268de55f..47f80082f 100644 --- a/ethcore/res/ethereum/kovan.json +++ b/ethcore/res/ethereum/kovan.json @@ -19,7 +19,7 @@ 
"0x00a0a24b9f0e5ec7aa4c7389b8302fd0123194de" ] }, - "validateScoreTransition": 1000000, + "validateScoreTransition": 4301764, "validateStepTransition": 1500000, "maximumUncleCountTransition": 5067000, "maximumUncleCount": 0 From 31291ebd3563208afbd40204f1ab74418e05abae Mon Sep 17 00:00:00 2001 From: JohnnySheffield Date: Fri, 24 Aug 2018 05:55:54 +0200 Subject: [PATCH 44/48] nonroot CentOS Docker image (#9280) * Updates CentOS Docker image build process * rename build.Dockerfile --- docker/README.md | 41 +++++++++++++++++++++++++++++- docker/centos/Dockerfile | 46 ++++++++++++++-------------------- docker/centos/Dockerfile.build | 25 ++++++++++++++++++ docker/centos/build.sh | 29 +++++++++++++++++++++ 4 files changed, 113 insertions(+), 28 deletions(-) create mode 100644 docker/centos/Dockerfile.build create mode 100755 docker/centos/build.sh diff --git a/docker/README.md b/docker/README.md index 3b79e8dd7..b2f8374b5 100644 --- a/docker/README.md +++ b/docker/README.md @@ -1,3 +1,42 @@ -Usage +## Usage ```docker build -f docker/ubuntu/Dockerfile --tag ethcore/parity:branch_or_tag_name .``` + +## Usage - CentOS + +Builds a lightweight non-root Parity docker image: + +``` +git clone https://github.com/paritytech/parity-ethereum.git +cd parity-ethereum +./docker/centos/build.sh +``` + +Fully customised build: +``` +PARITY_IMAGE_REPO=my-personal/parity \ +PARITY_BUILDER_IMAGE_TAG=build-latest \ +PARITY_RUNNER_IMAGE_TAG=centos-parity-experimental \ +./docker/centos/build.sh +``` + + +Default values: +``` +# The image name +PARITY_IMAGE_REPO - parity/parity + +# The tag to be used for builder image, git commit sha will be appended +PARITY_BUILDER_IMAGE_TAG - build + +# The tag to be used for runner image +PARITY_RUNNER_IMAGE_TAG - latest +``` + +All default ports you might use will be exposed: +``` +# secret +# ipfs store ui rpc ws listener discovery +# ↓ ↓ ↓ ↓ ↓ ↓ ↓ +EXPOSE 5001 8082 8083 8180 8545 8546 30303/tcp 30303/udp +``` diff --git a/docker/centos/Dockerfile b/docker/centos/Dockerfile index 7c944001e..22a98c003 100644 --- a/docker/centos/Dockerfile +++ b/docker/centos/Dockerfile @@ -1,36 +1,28 @@ FROM centos:latest -WORKDIR /build -# install tools and dependencies -RUN yum -y update&& \ - yum install -y git make gcc-c++ gcc file binutils cmake +RUN mkdir -p /opt/parity/data && \ + chmod g+rwX /opt/parity/data && \ + mkdir -p /opt/parity/release -# install rustup -RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh &&\ -ls&&\ - sh rustup.sh --disable-sudo +COPY parity/parity /opt/parity/release -# show backtraces -ENV RUST_BACKTRACE 1 +WORKDIR /opt/parity/data -# set compiler -ENV CXX g++ -ENV CC gcc +# exposing default ports +# +# secret +# ipfs store ui rpc ws listener discovery +# ↓ ↓ ↓ ↓ ↓ ↓ ↓ +EXPOSE 5001 8082 8083 8180 8545 8546 30303/tcp 30303/udp -# show tools -RUN rustc -vV && \ -cargo -V && \ -gcc -v &&\ -g++ -v +# switch to non-root user +USER 1001 -# build parity -ADD . 
/build/parity -RUN cd parity&&\ - cargo build --release --verbose && \ - ls /build/parity/target/release/parity && \ - strip /build/parity/target/release/parity +#if no base path provided, assume it's current workdir +CMD ["--base-path","."] +ENTRYPOINT ["/opt/parity/release/parity"] + + + -RUN file /build/parity/target/release/parity -EXPOSE 8080 8545 8180 -ENTRYPOINT ["/build/parity/target/release/parity"] diff --git a/docker/centos/Dockerfile.build b/docker/centos/Dockerfile.build new file mode 100644 index 000000000..454af403a --- /dev/null +++ b/docker/centos/Dockerfile.build @@ -0,0 +1,25 @@ +FROM centos:latest + +WORKDIR /build + +ADD . /build/parity-ethereum + +RUN yum -y update && \ + yum install -y systemd-devel git make gcc-c++ gcc file binutils && \ + curl -L "https://cmake.org/files/v3.12/cmake-3.12.0-Linux-x86_64.tar.gz" -o cmake.tar.gz && \ + tar -xzf cmake.tar.gz && \ + cp -r cmake-3.12.0-Linux-x86_64/* /usr/ && \ + curl https://sh.rustup.rs -sSf | sh -s -- -y && \ + PATH=/root/.cargo/bin:$PATH && \ + RUST_BACKTRACE=1 && \ + rustc -vV && \ + cargo -V && \ + gcc -v && \ + g++ -v && \ + cmake --version && \ + cd parity-ethereum && \ + cargo build --verbose --release --features final && \ + strip /build/parity-ethereum/target/release/parity && \ + file /build/parity-ethereum/target/release/parity + + diff --git a/docker/centos/build.sh b/docker/centos/build.sh new file mode 100755 index 000000000..7215e745f --- /dev/null +++ b/docker/centos/build.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env sh + +# The image name +PARITY_IMAGE_REPO=${PARITY_IMAGE_REPO:-parity/parity} +# The tag to be used for builder image +PARITY_BUILDER_IMAGE_TAG=${PARITY_BUILDER_IMAGE_TAG:-build} +# The tag to be used for runner image +PARITY_RUNNER_IMAGE_TAG=${PARITY_RUNNER_IMAGE_TAG:-latest} + +echo Building $PARITY_IMAGE_REPO:$PARITY_BUILDER_IMAGE_TAG-$(git log -1 --format="%H") +docker build --no-cache -t $PARITY_IMAGE_REPO:$PARITY_BUILDER_IMAGE_TAG-$(git log -1 --format="%H") . -f docker/centos/Dockerfile.build + +echo Creating $PARITY_BUILDER_IMAGE_TAG-$(git log -1 --format="%H"), extracting binary +docker create --name extract $PARITY_IMAGE_REPO:$PARITY_BUILDER_IMAGE_TAG-$(git log -1 --format="%H") +mkdir docker/centos/parity +docker cp extract:/build/parity-ethereum/target/release/parity docker/centos/parity + +echo Building $PARITY_IMAGE_REPO:$PARITY_RUNNER_IMAGE_TAG +docker build --no-cache -t $PARITY_IMAGE_REPO:$PARITY_RUNNER_IMAGE_TAG docker/centos/ -f docker/centos/Dockerfile + +echo Cleaning up ... +rm -rf docker/centos/parity +docker rm -f extract +docker rmi -f $PARITY_IMAGE_REPO:$PARITY_BUILDER_IMAGE_TAG-$(git log -1 --format="%H") + +echo Echoing Parity version: +docker run $PARITY_IMAGE_REPO:$PARITY_RUNNER_IMAGE_TAG --version + +echo Done. 
From 0b34579b044c36b3c0969b16bb38d94f662a1eef Mon Sep 17 00:00:00 2001 From: Andrew Jones Date: Fri, 24 Aug 2018 09:42:24 +0100 Subject: [PATCH 45/48] Prevent sync restart if import queue full (#9381) --- ethcore/src/client/client.rs | 36 +++++++++++----------------------- ethcore/src/error.rs | 19 ++++++++++++++++++ ethcore/sync/src/block_sync.rs | 6 +++++- 3 files changed, 35 insertions(+), 26 deletions(-) diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 84bcddfd6..e8707d6eb 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -16,7 +16,6 @@ use std::collections::{HashSet, BTreeMap, VecDeque}; use std::cmp; -use std::fmt; use std::str::FromStr; use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering}; use std::sync::{Arc, Weak}; @@ -50,13 +49,16 @@ use client::{ }; use encoded; use engines::{EthEngine, EpochTransition, ForkChoice}; -use error::{ImportErrorKind, BlockImportErrorKind, ExecutionError, CallError, BlockError, ImportResult, Error as EthcoreError}; +use error::{ + ImportErrorKind, BlockImportErrorKind, ExecutionError, CallError, BlockError, ImportResult, + QueueError, QueueErrorKind, Error as EthcoreError +}; use vm::{EnvInfo, LastHashes}; use evm::Schedule; use executive::{Executive, Executed, TransactOptions, contract_address}; use factory::{Factories, VmFactory}; use header::{BlockNumber, Header, ExtendedHeader}; -use io::{IoChannel, IoError}; +use io::IoChannel; use log_entry::LocalizedLogEntry; use miner::{Miner, MinerService}; use ethcore_miner::pool::VerifiedTransaction; @@ -2095,7 +2097,7 @@ impl IoClient for Client { let queued = self.queued_ancient_blocks.clone(); let lock = self.ancient_blocks_import_lock.clone(); - match self.queue_ancient_blocks.queue(&self.io_channel.read(), 1, move |client| { + self.queue_ancient_blocks.queue(&self.io_channel.read(), 1, move |client| { trace_time!("import_ancient_block"); // Make sure to hold the lock here to prevent importing out of order. // We use separate lock, cause we don't want to block queueing. @@ -2119,10 +2121,9 @@ impl IoClient for Client { break; } } - }) { - Ok(_) => Ok(hash), - Err(e) => bail!(BlockImportErrorKind::Other(format!("{}", e))), - } + })?; + + Ok(hash) } fn queue_consensus_message(&self, message: Bytes) { @@ -2538,21 +2539,6 @@ mod tests { } } -#[derive(Debug)] -enum QueueError { - Channel(IoError), - Full(usize), -} - -impl fmt::Display for QueueError { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - match *self { - QueueError::Channel(ref c) => fmt::Display::fmt(c, fmt), - QueueError::Full(limit) => write!(fmt, "The queue is full ({})", limit), - } - } -} - /// Queue some items to be processed by IO client. 
struct IoChannelQueue { currently_queued: Arc, @@ -2571,7 +2557,7 @@ impl IoChannelQueue { F: Fn(&Client) + Send + Sync + 'static, { let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed); - ensure!(queue_size < self.limit, QueueError::Full(self.limit)); + ensure!(queue_size < self.limit, QueueErrorKind::Full(self.limit)); let currently_queued = self.currently_queued.clone(); let result = channel.send(ClientIoMessage::execute(move |client| { @@ -2584,7 +2570,7 @@ impl IoChannelQueue { self.currently_queued.fetch_add(count, AtomicOrdering::SeqCst); Ok(()) }, - Err(e) => Err(QueueError::Channel(e)), + Err(e) => bail!(QueueErrorKind::Channel(e)), } } } diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 75b2b5175..29ef89612 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -150,6 +150,24 @@ impl error::Error for BlockError { } } +error_chain! { + types { + QueueError, QueueErrorKind, QueueErrorResultExt, QueueErrorResult; + } + + errors { + #[doc = "Queue is full"] + Full(limit: usize) { + description("Queue is full") + display("The queue is full ({})", limit) + } + } + + foreign_links { + Channel(IoError) #[doc = "Io channel error"]; + } +} + error_chain! { types { ImportError, ImportErrorKind, ImportErrorResultExt, ImportErrorResult; @@ -183,6 +201,7 @@ error_chain! { links { Import(ImportError, ImportErrorKind) #[doc = "Import error"]; + Queue(QueueError, QueueErrorKind) #[doc = "Io channel queue error"]; } foreign_links { diff --git a/ethcore/sync/src/block_sync.rs b/ethcore/sync/src/block_sync.rs index 4c229cd87..5dd1bdac2 100644 --- a/ethcore/sync/src/block_sync.rs +++ b/ethcore/sync/src/block_sync.rs @@ -25,7 +25,7 @@ use ethereum_types::H256; use rlp::{self, Rlp}; use ethcore::header::BlockNumber; use ethcore::client::{BlockStatus, BlockId, BlockImportError, BlockImportErrorKind}; -use ethcore::error::{ImportErrorKind, BlockError}; +use ethcore::error::{ImportErrorKind, QueueErrorKind, BlockError}; use sync_io::SyncIo; use blocks::{BlockCollection, SyncBody, SyncHeader}; @@ -513,6 +513,10 @@ impl BlockDownloader { debug!(target: "sync", "Block temporarily invalid, restarting sync"); break; }, + Err(BlockImportError(BlockImportErrorKind::Queue(QueueErrorKind::Full(limit)), _)) => { + debug!(target: "sync", "Block import queue full ({}), restarting sync", limit); + break; + }, Err(e) => { debug!(target: "sync", "Bad block {:?} : {:?}", h, e); bad = true; From b87c7cac5468649973fc8672f120052357062385 Mon Sep 17 00:00:00 2001 From: Marek Kotewicz Date: Fri, 24 Aug 2018 11:53:31 +0200 Subject: [PATCH 46/48] block view! 
removal in progress (#9397) --- ethcore/src/block.rs | 24 ++++++++-------- ethcore/src/verification/verification.rs | 24 ++++++++-------- ethcore/sync/src/blocks.rs | 36 ++++++++++++------------ ethcore/sync/src/chain/supplier.rs | 10 +++---- ethcore/sync/src/lib.rs | 1 - 5 files changed, 47 insertions(+), 48 deletions(-) diff --git a/ethcore/src/block.rs b/ethcore/src/block.rs index fc2873ba3..00da9fa9f 100644 --- a/ethcore/src/block.rs +++ b/ethcore/src/block.rs @@ -637,10 +637,11 @@ mod tests { use ethereum_types::Address; use std::sync::Arc; use transaction::SignedTransaction; + use verification::queue::kind::blocks::Unverified; /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header fn enact_bytes( - block_bytes: &[u8], + block_bytes: Vec, engine: &EthEngine, tracing: bool, db: StateDB, @@ -648,10 +649,10 @@ mod tests { last_hashes: Arc, factories: Factories, ) -> Result { - let block = view!(BlockView, block_bytes); - let header = block.header(); + let block = Unverified::from_rlp(block_bytes)?; + let header = block.header; let transactions: Result, Error> = block - .transactions() + .transactions .into_iter() .map(SignedTransaction::new) .map(|r| r.map_err(Into::into)) @@ -683,8 +684,8 @@ mod tests { b.populate_from(&header); b.push_transactions(transactions)?; - for u in &block.uncles() { - b.push_uncle(u.clone())?; + for u in block.uncles { + b.push_uncle(u)?; } b.close_and_lock() @@ -692,7 +693,7 @@ mod tests { /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards fn enact_and_seal( - block_bytes: &[u8], + block_bytes: Vec, engine: &EthEngine, tracing: bool, db: StateDB, @@ -700,8 +701,9 @@ mod tests { last_hashes: Arc, factories: Factories, ) -> Result { - let header = view!(BlockView, block_bytes).header_view(); - Ok(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)?.seal(engine, header.seal())?) + let header = Unverified::from_rlp(block_bytes.clone())?.header; + Ok(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, factories)? + .seal(engine, header.seal().to_vec())?) 
} #[test] @@ -731,7 +733,7 @@ mod tests { let orig_db = b.drain().state.drop().1; let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); + let e = enact_and_seal(orig_bytes.clone(), engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); assert_eq!(e.rlp_bytes(), orig_bytes); @@ -762,7 +764,7 @@ mod tests { let orig_db = b.drain().state.drop().1; let db = spec.ensure_db_good(get_temp_state_db(), &Default::default()).unwrap(); - let e = enact_and_seal(&orig_bytes, engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); + let e = enact_and_seal(orig_bytes.clone(), engine, false, db, &genesis_header, last_hashes, Default::default()).unwrap(); let bytes = e.rlp_bytes(); assert_eq!(bytes, orig_bytes); diff --git a/ethcore/src/verification/verification.rs b/ethcore/src/verification/verification.rs index a0ecf9634..b5fa95285 100644 --- a/ethcore/src/verification/verification.rs +++ b/ethcore/src/verification/verification.rs @@ -376,7 +376,6 @@ mod tests { use types::log_entry::{LogEntry, LocalizedLogEntry}; use rlp; use triehash::ordered_trie_root; - use views::BlockView; fn check_ok(result: Result<(), Error>) { result.unwrap_or_else(|e| panic!("Block verification failed: {:?}", e)); @@ -420,10 +419,10 @@ mod tests { } pub fn insert(&mut self, bytes: Bytes) { - let number = view!(BlockView, &bytes).header_view().number(); - let hash = view!(BlockView, &bytes).header_view().hash(); - self.blocks.insert(hash.clone(), bytes); - self.numbers.insert(number, hash.clone()); + let header = Unverified::from_rlp(bytes.clone()).unwrap().header; + let hash = header.hash(); + self.blocks.insert(hash, bytes); + self.numbers.insert(header.number(), hash); } } @@ -460,11 +459,11 @@ mod tests { /// Get the familial details concerning a block. 
fn block_details(&self, hash: &H256) -> Option { self.blocks.get(hash).map(|bytes| { - let header = view!(BlockView, bytes).header(); + let header = Unverified::from_rlp(bytes.to_vec()).unwrap().header; BlockDetails { number: header.number(), - total_difficulty: header.difficulty().clone(), - parent: header.parent_hash().clone(), + total_difficulty: *header.difficulty(), + parent: *header.parent_hash(), children: Vec::new(), is_finalized: false, } @@ -501,9 +500,9 @@ mod tests { } fn family_test(bytes: &[u8], engine: &EthEngine, bc: &BC) -> Result<(), Error> where BC: BlockProvider { - let view = view!(BlockView, bytes); - let header = view.header(); - let transactions: Vec<_> = view.transactions() + let block = Unverified::from_rlp(bytes.to_vec()).unwrap(); + let header = block.header; + let transactions: Vec<_> = block.transactions .into_iter() .map(SignedTransaction::new) .collect::>()?; @@ -520,7 +519,7 @@ mod tests { let block = PreverifiedBlock { header, transactions, - uncles: view.uncles(), + uncles: block.uncles, bytes: bytes.to_vec(), }; @@ -533,7 +532,6 @@ mod tests { } fn unordered_test(bytes: &[u8], engine: &EthEngine) -> Result<(), Error> { - use verification::queue::kind::blocks::Unverified; let un = Unverified::from_rlp(bytes.to_vec())?; verify_block_unordered(un, engine, false)?; Ok(()) diff --git a/ethcore/sync/src/blocks.rs b/ethcore/sync/src/blocks.rs index a502cee9c..3815084f8 100644 --- a/ethcore/sync/src/blocks.rs +++ b/ethcore/sync/src/blocks.rs @@ -31,6 +31,7 @@ known_heap_size!(0, HeaderId); type SmallHashVec = SmallVec<[H256; 1]>; +#[derive(PartialEq, Debug, Clone)] pub struct SyncHeader { pub bytes: Bytes, pub header: BlockHeader, @@ -578,7 +579,6 @@ mod test { use ethcore::client::{TestBlockChainClient, EachBlockWith, BlockId, BlockChainClient}; use ethcore::header::BlockNumber; use ethcore::verification::queue::kind::blocks::Unverified; - use ethcore::views::HeaderView; use rlp::*; fn is_empty(bc: &BlockCollection) -> bool { @@ -614,9 +614,9 @@ mod test { let blocks: Vec<_> = (0..nblocks) .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); - let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); + let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); bc.reset_to(heads); assert!(!bc.is_empty()); assert_eq!(hashes[0], bc.heads[0]); @@ -631,7 +631,7 @@ mod test { assert_eq!(bc.downloading_headers.len(), 1); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..6].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[0..6].into_iter().map(Clone::clone).collect()); assert_eq!(hashes[5], bc.heads[0]); for h in &hashes[0..6] { bc.clear_header_download(h) @@ -651,9 +651,9 @@ mod test { assert_eq!(hashes[5], h); let (h, _) = bc.needed_headers(6, false).unwrap(); assert_eq!(hashes[20], h); - bc.insert_headers(headers[10..16].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + 
bc.insert_headers(headers[10..16].into_iter().map(Clone::clone).collect()); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[5..10].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[5..10].into_iter().map(Clone::clone).collect()); assert_eq!( bc.drain().into_iter().map(|b| b.block).collect::>(), blocks[6..16].iter().map(|b| Unverified::from_rlp(b.to_vec()).unwrap()).collect::>() @@ -661,7 +661,7 @@ mod test { assert_eq!(hashes[15], bc.heads[0]); - bc.insert_headers(headers[15..].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[15..].into_iter().map(Clone::clone).collect()); bc.drain(); assert!(bc.is_empty()); } @@ -676,16 +676,16 @@ mod test { let blocks: Vec<_> = (0..nblocks) .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); - let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); + let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); bc.reset_to(heads); - bc.insert_headers(headers[2..22].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[2..22].into_iter().map(Clone::clone).collect()); assert_eq!(hashes[0], bc.heads[0]); assert_eq!(hashes[21], bc.heads[1]); assert!(bc.head.is_none()); - bc.insert_headers(headers[0..2].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[0..2].into_iter().map(Clone::clone).collect()); assert!(bc.head.is_some()); assert_eq!(hashes[21], bc.heads[0]); } @@ -700,14 +700,14 @@ mod test { let blocks: Vec<_> = (0..nblocks) .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).unwrap().into_inner()) .collect(); - let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect(); - let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(h.clone()) } else { None }).collect(); + let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); + let heads: Vec<_> = hashes.iter().enumerate().filter_map(|(i, h)| if i % 20 == 0 { Some(*h) } else { None }).collect(); bc.reset_to(heads); - bc.insert_headers(headers[1..2].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[1..2].into_iter().map(Clone::clone).collect()); assert!(bc.drain().is_empty()); - bc.insert_headers(headers[0..1].iter().map(|h| SyncHeader::from_rlp(h.to_vec()).unwrap()).collect()); + bc.insert_headers(headers[0..1].into_iter().map(Clone::clone).collect()); assert_eq!(bc.drain().len(), 2); } } diff --git a/ethcore/sync/src/chain/supplier.rs b/ethcore/sync/src/chain/supplier.rs index e8a5c93ea..e2113b0b1 100644 --- a/ethcore/sync/src/chain/supplier.rs +++ 
b/ethcore/sync/src/chain/supplier.rs @@ -307,11 +307,11 @@ mod test { use bytes::Bytes; use rlp::{Rlp, RlpStream}; use super::{*, super::tests::*}; + use blocks::SyncHeader; use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient}; #[test] fn return_block_headers() { - use ethcore::views::HeaderView; fn make_hash_req(h: &H256, count: usize, skip: usize, reverse: bool) -> Bytes { let mut rlp = RlpStream::new_list(4); rlp.append(h); @@ -329,16 +329,16 @@ mod test { rlp.append(&if reverse {1u32} else {0u32}); rlp.out() } - fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec { - Rlp::new(&rlp.unwrap().unwrap().1.out()).iter().map(|r| r.as_raw().to_vec()).collect() + fn to_header_vec(rlp: ::chain::RlpResponseResult) -> Vec { + Rlp::new(&rlp.unwrap().unwrap().1.out()).iter().map(|r| SyncHeader::from_rlp(r.as_raw().to_vec()).unwrap()).collect() } let mut client = TestBlockChainClient::new(); client.add_blocks(100, EachBlockWith::Nothing); let blocks: Vec<_> = (0 .. 100) .map(|i| (&client as &BlockChainClient).block(BlockId::Number(i as BlockNumber)).map(|b| b.into_inner()).unwrap()).collect(); - let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).unwrap().as_raw().to_vec()).collect(); - let hashes: Vec<_> = headers.iter().map(|h| view!(HeaderView, h).hash()).collect(); + let headers: Vec<_> = blocks.iter().map(|b| SyncHeader::from_rlp(Rlp::new(b).at(0).unwrap().as_raw().to_vec()).unwrap()).collect(); + let hashes: Vec<_> = headers.iter().map(|h| h.header.hash()).collect(); let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); diff --git a/ethcore/sync/src/lib.rs b/ethcore/sync/src/lib.rs index 18a185e51..9fb7da990 100644 --- a/ethcore/sync/src/lib.rs +++ b/ethcore/sync/src/lib.rs @@ -27,7 +27,6 @@ extern crate ethcore_network_devp2p as devp2p; extern crate parity_bytes as bytes; extern crate ethcore_io as io; extern crate ethcore_transaction as transaction; -#[cfg_attr(test, macro_use)] extern crate ethcore; extern crate ethereum_types; extern crate env_logger; From 5ed25276635f66450925cba3081028a36de5150d Mon Sep 17 00:00:00 2001 From: Alexey Date: Fri, 24 Aug 2018 20:03:46 +0400 Subject: [PATCH 47/48] `gasleft` extern implemented for WASM runtime (kip-6) (#9357) * Wasm gasleft extern added * wasm_gasleft_activation_transition -> kip4_transition * use kip-6 switch * gasleft_panic -> gasleft_fail rename * call_msg_gasleft test added and gas_left agustments because this https://github.com/paritytech/wasm-tests/pull/52 * change .. 
to _ * fix comment for the have_gasleft param * update tests (https://github.com/paritytech/wasm-tests/pull/54/commits/0edbf860ff7ed4b6b6336097ba44836e8c6482dd) --- ethcore/res/wasm-tests | 2 +- ethcore/src/spec/spec.rs | 9 +++ ethcore/vm/src/schedule.rs | 3 + ethcore/wasm/src/env.rs | 9 +++ ethcore/wasm/src/runtime.rs | 10 ++++ ethcore/wasm/src/tests.rs | 109 ++++++++++++++++++++++++++++++++---- json/src/spec/params.rs | 3 + 7 files changed, 134 insertions(+), 11 deletions(-) diff --git a/ethcore/res/wasm-tests b/ethcore/res/wasm-tests index 242b8d8a8..0edbf860f 160000 --- a/ethcore/res/wasm-tests +++ b/ethcore/res/wasm-tests @@ -1 +1 @@ -Subproject commit 242b8d8a89ecb3e11277f0beb8180c95792aac6b +Subproject commit 0edbf860ff7ed4b6b6336097ba44836e8c6482dd diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index a83046a72..720198114 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -127,6 +127,8 @@ pub struct CommonParams { pub wasm_activation_transition: BlockNumber, /// Number of first block where KIP-4 rules begin. Only has effect if Wasm is activated. pub kip4_transition: BlockNumber, + /// Number of first block where KIP-6 rules begin. Only has effect if Wasm is activated. + pub kip6_transition: BlockNumber, /// Gas limit bound divisor (how much gas limit can change per block) pub gas_limit_bound_divisor: U256, /// Registrar contract address. @@ -195,6 +197,9 @@ impl CommonParams { if block_number >= self.kip4_transition { wasm.have_create2 = true; } + if block_number >= self.kip6_transition { + wasm.have_gasleft = true; + } schedule.wasm = Some(wasm); } } @@ -308,6 +313,10 @@ impl From for CommonParams { BlockNumber::max_value, Into::into ), + kip6_transition: p.kip6_transition.map_or_else( + BlockNumber::max_value, + Into::into + ), } } } diff --git a/ethcore/vm/src/schedule.rs b/ethcore/vm/src/schedule.rs index ec72c4683..2d263b63e 100644 --- a/ethcore/vm/src/schedule.rs +++ b/ethcore/vm/src/schedule.rs @@ -151,6 +151,8 @@ pub struct WasmCosts { pub opcodes_div: u32, /// Whether create2 extern function is activated. pub have_create2: bool, + /// Whether gasleft extern function is activated. 
+ pub have_gasleft: bool, } impl Default for WasmCosts { @@ -169,6 +171,7 @@ impl Default for WasmCosts { opcodes_mul: 3, opcodes_div: 8, have_create2: false, + have_gasleft: false, } } } diff --git a/ethcore/wasm/src/env.rs b/ethcore/wasm/src/env.rs index a9e536f5f..fb9e93e0f 100644 --- a/ethcore/wasm/src/env.rs +++ b/ethcore/wasm/src/env.rs @@ -49,6 +49,7 @@ pub mod ids { pub const ORIGIN_FUNC: usize = 200; pub const ELOG_FUNC: usize = 210; pub const CREATE2_FUNC: usize = 220; + pub const GASLEFT_FUNC: usize = 230; pub const PANIC_FUNC: usize = 1000; pub const DEBUG_FUNC: usize = 1010; @@ -157,6 +158,11 @@ pub mod signatures { None, ); + pub const GASLEFT: StaticSignature = StaticSignature( + &[], + Some(I64), + ); + pub const GASLIMIT: StaticSignature = StaticSignature( &[I32], None, @@ -207,6 +213,7 @@ pub struct ImportResolver { memory: RefCell>, have_create2: bool, + have_gasleft: bool, } impl ImportResolver { @@ -217,6 +224,7 @@ impl ImportResolver { memory: RefCell::new(None), have_create2: schedule.have_create2, + have_gasleft: schedule.have_gasleft, } } @@ -274,6 +282,7 @@ impl wasmi::ModuleImportResolver for ImportResolver { "origin" => host(signatures::ORIGIN, ids::ORIGIN_FUNC), "elog" => host(signatures::ELOG, ids::ELOG_FUNC), "create2" if self.have_create2 => host(signatures::CREATE2, ids::CREATE2_FUNC), + "gasleft" if self.have_gasleft => host(signatures::GASLEFT, ids::GASLEFT_FUNC), _ => { return Err(wasmi::Error::Instantiation( format!("Export {} not found", field_name), diff --git a/ethcore/wasm/src/runtime.rs b/ethcore/wasm/src/runtime.rs index 1c814ab7c..3c5d27d5c 100644 --- a/ethcore/wasm/src/runtime.rs +++ b/ethcore/wasm/src/runtime.rs @@ -666,6 +666,15 @@ impl<'a> Runtime<'a> { self.return_u256_ptr(args.nth_checked(0)?, difficulty) } + /// Signature: `fn gasleft() -> i64` + pub fn gasleft(&mut self) -> Result { + Ok(RuntimeValue::from( + self.gas_left()? 
* self.ext.schedule().wasm().opcodes_mul as u64 + / self.ext.schedule().wasm().opcodes_div as u64 + ) + ) + } + /// Signature: `fn gaslimit(dest: *mut u8)` pub fn gaslimit(&mut self, args: RuntimeArgs) -> Result<()> { let gas_limit = self.ext.env_info().gas_limit; @@ -782,6 +791,7 @@ mod ext_impl { ORIGIN_FUNC => void!(self.origin(args)), ELOG_FUNC => void!(self.elog(args)), CREATE2_FUNC => some!(self.create2(args)), + GASLEFT_FUNC => some!(self.gasleft()), _ => panic!("env module doesn't provide function at index {}", index), } } diff --git a/ethcore/wasm/src/tests.rs b/ethcore/wasm/src/tests.rs index fdbb54590..b1a773cb4 100644 --- a/ethcore/wasm/src/tests.rs +++ b/ethcore/wasm/src/tests.rs @@ -303,7 +303,7 @@ fn create() { &FakeCall { call_type: FakeCallType::Create, create_scheme: Some(CreateContractAddress::FromSenderAndCodeHash), - gas: U256::from(52_017), + gas: U256::from(49_674), sender_address: None, receive_address: None, value: Some((1_000_000_000 / 2).into()), @@ -315,7 +315,7 @@ fn create() { &FakeCall { call_type: FakeCallType::Create, create_scheme: Some(CreateContractAddress::FromSenderSaltAndCodeHash(H256::from([5u8].as_ref()))), - gas: U256::from(10_740), + gas: U256::from(6039), sender_address: None, receive_address: None, value: Some((1_000_000_000 / 2).into()), @@ -323,7 +323,7 @@ fn create() { code_address: None, } )); - assert_eq!(gas_left, U256::from(10_675)); + assert_eq!(gas_left, U256::from(5974)); } #[test] @@ -371,6 +371,54 @@ fn call_msg() { assert_eq!(gas_left, U256::from(91_672)); } +// The same as `call_msg`, but send a `pwasm_ethereum::gasleft` +// value as `gas` argument to the inner pwasm_ethereum::call +#[test] +fn call_msg_gasleft() { + ::ethcore_logger::init_log(); + + let sender: Address = "01030507090b0d0f11131517191b1d1f21232527".parse().unwrap(); + let receiver: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap(); + let contract_address: Address = "0d461d4174b4ae35775c4a342f1e5e1e4e6c4db5".parse().unwrap(); + + let mut params = ActionParams::default(); + params.sender = sender.clone(); + params.address = receiver.clone(); + params.code_address = contract_address.clone(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("call_gasleft.wasm"))); + params.data = Some(Vec::new()); + + let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; + ext.balances.insert(receiver.clone(), U256::from(10000000000u64)); + + let gas_left = { + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(gas_left) => gas_left, + GasLeft::NeedsReturn { .. 
} => { panic!("Call test should not return payload"); }, + } + }; + + trace!(target: "wasm", "fake_calls: {:?}", &ext.calls); + assert!(ext.calls.contains( + &FakeCall { + call_type: FakeCallType::Call, + create_scheme: None, + gas: U256::from(91_165), + sender_address: Some(receiver), + receive_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), + value: Some(1000000000.into()), + data: vec![129u8, 123, 113, 107, 101, 97], + code_address: Some(Address::from([99, 88, 77, 66, 55, 44, 33, 22, 11, 0, 11, 22, 33, 44, 55, 66, 77, 88, 99, 0])), + } + )); + + assert_eq!(gas_left, U256::from(91_671)); +} + #[test] fn call_code() { ::ethcore_logger::init_log(); @@ -591,7 +639,7 @@ fn math_add() { U256::from_dec_str("1888888888888888888888888888887").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(92_095)); + assert_eq!(gas_left, U256::from(92_072)); } // multiplication @@ -613,7 +661,7 @@ fn math_mul() { U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(91_423)); + assert_eq!(gas_left, U256::from(91_400)); } // subtraction @@ -635,7 +683,7 @@ fn math_sub() { U256::from_dec_str("111111111111111111111111111111").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(92_095)); + assert_eq!(gas_left, U256::from(92_072)); } // subtraction with overflow @@ -677,7 +725,7 @@ fn math_div() { U256::from_dec_str("1125000").unwrap(), (&result[..]).into() ); - assert_eq!(gas_left, U256::from(87_379)); + assert_eq!(gas_left, U256::from(85_700)); } #[test] @@ -705,7 +753,7 @@ fn storage_metering() { }; // 0 -> not 0 - assert_eq!(gas_left, U256::from(72_395)); + assert_eq!(gas_left, U256::from(72_164)); // #2 @@ -724,7 +772,7 @@ fn storage_metering() { }; // not 0 -> not 0 - assert_eq!(gas_left, U256::from(87_395)); + assert_eq!(gas_left, U256::from(87_164)); } // This test checks the ability of wasm contract to invoke @@ -815,6 +863,47 @@ fn externs() { assert_eq!(gas_left, U256::from(90_428)); } +// This test checks the ability of wasm contract to invoke gasleft +#[test] +fn gasleft() { + ::ethcore_logger::init_log(); + + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); + + let mut ext = FakeExt::new().with_wasm(); + ext.schedule.wasm.as_mut().unwrap().have_gasleft = true; + + let mut interpreter = wasm_interpreter(params); + let result = interpreter.exec(&mut ext).expect("Interpreter to execute without any errors"); + match result { + GasLeft::Known(_) => {}, + GasLeft::NeedsReturn { gas_left, data, .. 
} => { + let gas = LittleEndian::read_u64(data.as_ref()); + assert_eq!(gas, 93_423); + assert_eq!(gas_left, U256::from(93_349)); + }, + } +} + +// This test should fail because +// ext.schedule.wasm.as_mut().unwrap().have_gasleft = false; +#[test] +fn gasleft_fail() { + ::ethcore_logger::init_log(); + + let mut params = ActionParams::default(); + params.gas = U256::from(100_000); + params.code = Some(Arc::new(load_sample!("gasleft.wasm"))); + let mut ext = FakeExt::new().with_wasm(); + let mut interpreter = wasm_interpreter(params); + match interpreter.exec(&mut ext) { + Err(_) => {}, + Ok(_) => panic!("interpreter.exec should return Err if ext.schedule.wasm.have_gasleft = false") + } +} + #[test] fn embedded_keccak() { ::ethcore_logger::init_log(); @@ -873,7 +962,7 @@ fn events() { assert_eq!(&log_entry.data, b"gnihtemos"); assert_eq!(&result, b"gnihtemos"); - assert_eq!(gas_left, U256::from(83_158)); + assert_eq!(gas_left, U256::from(83_161)); } #[test] diff --git a/json/src/spec/params.rs b/json/src/spec/params.rs index cf57e9af4..d3319f7c4 100644 --- a/json/src/spec/params.rs +++ b/json/src/spec/params.rs @@ -152,6 +152,9 @@ pub struct Params { /// KIP4 activiation block height. #[serde(rename="kip4Transition")] pub kip4_transition: Option, + /// KIP6 activiation block height. + #[serde(rename="kip6Transition")] + pub kip6_transition: Option, } #[cfg(test)] From 7abe9ec4ccb3136052cea56f36022c3f8e9c0fd6 Mon Sep 17 00:00:00 2001 From: Christopher Purta Date: Fri, 24 Aug 2018 09:14:07 -0700 Subject: [PATCH 48/48] Add update docs script to CI (#9219) * Add update docs script to CI Added a script to CI that will use the jsonrpc tool to update rpc documentation then commit and push those to the wiki repo. * fix gitlab ci lint * Only apply jsonrpc docs update on tags * Update gitlab-rpc-docs.sh * Copy correct parity repo to jsonrpc folder Copy correct parity repo to jsonrpc folder before attempting to build docs since the CI runner clones the repo as parity and not parity-ethereum. * Fix JSONRPC docs CI job Update remote config in wiki repo before pushing changes using a github token for authentication. Add message to wiki tag when pushing changes. Use project directory to correctly copy parity code base into the jsonrpc repo for doc generation. 
* Fix set_remote_wiki function call in CI --- .gitlab-ci.yml | 10 +++++++ scripts/gitlab-rpc-docs.sh | 53 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100755 scripts/gitlab-rpc-docs.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d73d494b2..ac3e44892 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -2,6 +2,7 @@ stages: - test - push-release - build + - docs variables: RUST_BACKTRACE: "1" RUSTFLAGS: "" @@ -220,6 +221,15 @@ test-rust-nightly: - rust - rust-nightly allow_failure: true +json-rpc-docs: + stage: docs + only: + - tags + image: parity/rust:gitlab-ci + script: + - scripts/gitlab-rpc-docs.sh + tags: + - docs push-release: stage: push-release only: diff --git a/scripts/gitlab-rpc-docs.sh b/scripts/gitlab-rpc-docs.sh new file mode 100755 index 000000000..de03fc69f --- /dev/null +++ b/scripts/gitlab-rpc-docs.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +clone_repos() { + git clone https://github.com/parity-js/jsonrpc.git jsonrpc + git clone https://github.com/paritytech/wiki.git wiki +} + +build_docs() { + npm install + npm run build:markdown +} + +update_wiki_docs() { + for file in $(ls jsonrpc/docs); do + module_name=${file:0:-3} + mv jsonrpc/docs/$file wiki/JSONRPC-$module_name-module.md + done +} + +set_remote_wiki() { + git config remote.origin.url "https://${GITHUB_TOKEN}@github.com/paritytech/wiki.git" +} + +setup_git() { + git config --global user.email "devops@parity.com" + git config --global user.name "Devops Parity" +} + +commit_files() { + git checkout -b rpcdoc-update-${CI_COMMIT_REF_NAME} + git add . + git commit -m "Update docs to ${CI_COMMIT_REF_NAME}" + git tag -a "${CI_COMMIT_REF_NAME}" -m "Updated to ${CI_COMMIT_REF_NAME}" +} + +upload_files() { + git push --tags +} + +PROJECT_DIR=$(pwd) + +setup_git +cd .. +clone_repos +cp -r $PROJECT_DIR jsonrpc/.parity +cd jsonrpc +build_docs +cd .. +update_wiki_docs +cd wiki +set_remote_wiki +commit_files +upload_files
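
The `gasleft` extern added in PATCH 47 returns the runtime's remaining gas counter scaled by the schedule's `opcodes_mul` / `opcodes_div` ratio, and it only resolves once `have_gasleft` is switched on (otherwise module instantiation fails, as exercised by the `gasleft_fail` test). Below is a minimal sketch of the scaling arithmetic only, assuming the default `WasmCosts` values (`opcodes_mul = 3`, `opcodes_div = 8`) and a hypothetical free-standing helper rather than the actual `Runtime::gasleft` method:

// Sketch only: mirrors the expression added in Runtime::gasleft,
// i.e. gas_left * opcodes_mul / opcodes_div, in u64 integer arithmetic.
fn scaled_gas_left(gas_left: u64, opcodes_mul: u64, opcodes_div: u64) -> u64 {
    gas_left * opcodes_mul / opcodes_div
}

fn main() {
    // With the default 3/8 ratio, an internal counter of 100_000
    // is reported back to the contract as 37_500.
    assert_eq!(scaled_gas_left(100_000, 3, 8), 37_500);
}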