ethabi version 5 (#7723)

* Refactor updater to use ethabi-derive

* Grumble: do_call type alias

* Empty commit to trigger test re-run

* migration to ethabi-5.0 (see the call-pattern sketch after this list)

* migration to ethabi-5.0 in progress

* use ethabi_derive to generate TransactAcl contract

* use ethabi_derive to generate Registry contract

* hash-fetch uses ethabi_derive, removed retain cycle from updater, fixed #7720

* node-filter crate uses ethabi_derive to generate peer_set contract interface

* use LruCache in node-filter instead of HashMap

* validator_set engine uses ethabi_derive

* ethcore does not depend on native_contracts

* miner does not depend on native_contracts

* secret_store does not use native_contracts (in progress)

* removed native-contracts

* ethcore and updater do not depend on futures

* updated ethereum-types

* fixed all warnings caused by using new version of ethereum-types

* updated ethabi_derive && ethabi_contract to get rid of warnings

* removed another retain cycle in updater, fixed following the minor version on update

* moved contracts out of native_contracts res

* updated ethabi_contract

* fixed failing test

* fixed failing test

* there is no need to create two contracts of the same kind any more

* simplify updater::ReleaseTrack conversion into u8 and add several tests for it

* applied review suggestions

* applied review suggestions
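
For orientation, here is a minimal sketch of the call pattern this migration moves to, modelled on the node-filter hunk further down in the diff. The PeerSet contract name, the res/peer_set.json ABI path and the shape of the call closure are taken from that hunk; the free-standing connection_allowed helper and its signature are illustrative only, not part of the actual change.

extern crate ethabi;
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
extern crate ethereum_types;

use ethereum_types::H256;

// Generates a typed `peer_set` module from the JSON ABI at compile time,
// replacing the old build.rs-driven native-contracts codegen.
use_contract!(peer_set, "PeerSet", "res/peer_set.json");

// Ask the on-chain peer-set contract whether a connection is allowed.
// `eth_call` performs the raw read-only call (in Parity this is
// BlockChainClient::call_contract) and returns the ABI-encoded output bytes.
fn connection_allowed<F>(eth_call: F, own_low: H256, own_high: H256, id_low: H256, id_high: H256) -> bool
where
    F: Fn(Vec<u8>) -> Result<Vec<u8>, String>,
{
    let contract = peer_set::PeerSet::default();
    // The generated binding encodes the inputs, hands the call data to the
    // closure once, and decodes the single `bool` output; errors fall back to `false`.
    contract.functions()
        .connection_allowed()
        .call(own_low, own_high, id_low, id_high, &eth_call)
        .unwrap_or(false)
}

The removed native-contracts bindings needed a futures-based dispatch closure and a .wait() on every call; with ethabi-contract the caller only supplies a synchronous closure that performs the raw call, which is what lets ethcore and the updater drop their futures dependency.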
Marek Kotewicz 2018-02-09 09:32:06 +01:00 committed by GitHub
parent 2c60a53fef
commit c060d9584d
129 changed files with 1678 additions and 2173 deletions

Cargo.lock (generated), 495 changes. File diff suppressed because it is too large.

View File

@ -45,7 +45,7 @@ ethcore-miner = { path = "miner" }
ethcore-network = { path = "util/network" }
ethcore-stratum = { path = "stratum" }
ethcore-transaction = { path = "ethcore/transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
node-filter = { path = "ethcore/node_filter" }
ethkey = { path = "ethkey" }
node-health = { path = "dapps/node-health" }

View File

@ -16,7 +16,7 @@ log = "0.3"
parity-dapps-glue = "1.9"
parking_lot = "0.5"
mime_guess = "2.0.0-alpha.2"
rand = "0.3"
rand = "0.4"
rustc-hex = "1.0"
serde = "1.0"
serde_derive = "1.0"
@ -29,7 +29,7 @@ jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "pa
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.10" }
ethcore-bytes = { path = "../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
fetch = { path = "../util/fetch" }
node-health = { path = "./node-health" }
parity-hash-fetch = { path = "../hash-fetch" }

View File

@ -23,7 +23,6 @@ mod installers;
use std::{fs, env};
use std::path::PathBuf;
use std::sync::Arc;
use rustc_hex::FromHex;
use futures::{future, Future};
use futures_cpupool::CpuPool;
use fetch::{Client as FetchClient, Fetch};
@ -31,6 +30,7 @@ use hash_fetch::urlhint::{URLHintContract, URLHint, URLHintResult};
use hyper::StatusCode;
use ethereum_types::H256;
use {Embeddable, SyncStatus, random_filename};
use parking_lot::Mutex;
use page::local;
@ -132,7 +132,7 @@ impl<R: URLHint + 'static, F: Fetch> ContentFetcher<F, R> {
// resolve contract call synchronously.
// TODO: port to futures-based hyper and make it all async.
fn resolve(&self, content_id: Vec<u8>) -> Option<URLHintResult> {
fn resolve(&self, content_id: H256) -> Option<URLHintResult> {
self.resolver.resolve(content_id)
.wait()
.unwrap_or_else(|e| { warn!("Error resolving content-id: {}", e); None })
@ -149,7 +149,7 @@ impl<R: URLHint + 'static, F: Fetch> Fetcher for ContentFetcher<F, R> {
}
}
// fallback to resolver
if let Ok(content_id) = content_id.from_hex() {
if let Ok(content_id) = content_id.parse() {
// if there is content or we are syncing return true
self.sync.is_major_importing() || self.resolve(content_id).is_some()
} else {
@ -178,7 +178,7 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
// We need to start fetching the content
_ => {
trace!(target: "dapps", "Content unavailable. Fetching... {:?}", content_id);
let content_hex = content_id.from_hex().expect("to_handler is called only when `contains` returns true.");
let content_hex = content_id.parse().expect("to_handler is called only when `contains` returns true.");
let content = self.resolve(content_hex);
let cache = self.cache.clone();
@ -280,10 +280,10 @@ impl<R: URLHint + 'static, F: Fetch> Endpoint for ContentFetcher<F, R> {
mod tests {
use std::env;
use std::sync::Arc;
use bytes::Bytes;
use fetch::{Fetch, Client};
use futures::future;
use hash_fetch::urlhint::{URLHint, URLHintResult, BoxFuture};
use futures::{future, Future};
use hash_fetch::urlhint::{URLHint, URLHintResult};
use ethereum_types::H256;
use apps::cache::ContentStatus;
use endpoint::EndpointInfo;
@ -294,7 +294,7 @@ mod tests {
#[derive(Clone)]
struct FakeResolver;
impl URLHint for FakeResolver {
fn resolve(&self, _id: Bytes) -> BoxFuture<Option<URLHintResult>, String> {
fn resolve(&self, _id: H256) -> Box<Future<Item = Option<URLHintResult>, Error = String> + Send> {
Box::new(future::ok(None))
}
}

View File

@ -18,9 +18,10 @@ use std::str;
use std::sync::Arc;
use std::collections::HashMap;
use futures::Future;
use ethereum_types::{H256, Address};
use bytes::{Bytes, ToPretty};
use hash_fetch::urlhint::{ContractClient, BoxFuture};
use hash_fetch::urlhint::ContractClient;
use parking_lot::Mutex;
use rustc_hex::FromHex;
@ -66,7 +67,7 @@ impl ContractClient for FakeRegistrar {
Ok(REGISTRAR.parse().unwrap())
}
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item = Bytes, Error = String> + Send> {
let call = (address.to_hex(), data.to_hex());
self.calls.lock().push(call.clone());
let res = self.responses.lock().get(&call).cloned().expect(&format!("No response for call: {:?}", call));

View File

@ -24,27 +24,28 @@ ethcore-logger = { path = "../logger" }
ethcore-miner = { path = "../miner" }
ethcore-stratum = { path = "../stratum" }
ethcore-transaction = { path = "./transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
memory-cache = { path = "../util/memory_cache" }
ethabi = "5.1"
ethabi-derive = "5.0"
ethabi-contract = "5.0"
ethjson = { path = "../json" }
ethkey = { path = "../ethkey" }
ethstore = { path = "../ethstore" }
evm = { path = "evm" }
futures = "0.1"
hardware-wallet = { path = "../hw" }
heapsize = "0.4"
itertools = "0.5"
lazy_static = "1.0"
log = "0.3"
lru-cache = "0.1"
native-contracts = { path = "native_contracts" }
num = "0.1"
num_cpus = "1.2"
parity-machine = { path = "../machine" }
parking_lot = "0.5"
price-info = { path = "../price-info" }
rayon = "0.8"
rand = "0.3"
rand = "0.4"
rlp = { path = "../util/rlp" }
rlp_derive = { path = "../util/rlp_derive" }
kvdb = { path = "../util/kvdb" }
@ -69,9 +70,6 @@ unexpected = { path = "../util/unexpected" }
journaldb = { path = "../util/journaldb" }
tempdir = "0.3"
[dev-dependencies]
native-contracts = { path = "native_contracts", features = ["test_contracts"] }
[features]
jit = ["evm/jit"]
evm-debug = ["slow-blocks"]

View File

@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
bit-set = "0.4"
ethereum-types = "0.1"
ethereum-types = "0.2"
evmjit = { path = "../../evmjit", optional = true }
heapsize = "0.4"
lazy_static = "1.0"

View File

@ -11,7 +11,7 @@ log = "0.3"
ethcore = { path = ".."}
ethcore-bytes = { path = "../../util/bytes" }
ethcore-transaction = { path = "../transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
memorydb = { path = "../../util/memorydb" }
patricia-trie = { path = "../../util/patricia_trie" }
ethcore-network = { path = "../../util/network" }
@ -26,7 +26,7 @@ rlp_derive = { path = "../../util/rlp_derive" }
time = "0.1"
smallvec = "0.4"
futures = "0.1"
rand = "0.3"
rand = "0.4"
itertools = "0.5"
bincode = "0.8.0"
serde = "1.0"

View File

@ -724,6 +724,7 @@ mod tests {
use super::HeaderChain;
use std::sync::Arc;
use ethereum_types::U256;
use ethcore::ids::BlockId;
use ethcore::header::Header;
use ethcore::spec::Spec;
@ -755,7 +756,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -788,7 +789,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -807,7 +808,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -831,7 +832,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * (i * i).into());
header.set_difficulty(*genesis_header.difficulty() * U256::from(i * i));
parent_hash = header.hash();
let mut tx = db.transaction();
@ -884,7 +885,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -922,7 +923,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -939,7 +940,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into() * 1000.into());
header.set_difficulty(*genesis_header.difficulty() * U256::from(i as u32 * 1000u32));
parent_hash = header.hash();
let mut tx = db.transaction();
@ -989,7 +990,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();
@ -1023,7 +1024,7 @@ mod tests {
header.set_parent_hash(parent_hash);
header.set_number(i);
header.set_timestamp(rolling_timestamp);
header.set_difficulty(*genesis_header.difficulty() * i.into());
header.set_difficulty(*genesis_header.difficulty() * i as u32);
parent_hash = header.hash();
let mut tx = db.transaction();

View File

@ -320,7 +320,7 @@ impl FlowParams {
/// and number of requests made.
pub fn compute_cost(&self, request: &Request) -> Option<U256> {
match *request {
Request::Headers(ref req) => self.costs.headers.map(|c| c * req.max.into()),
Request::Headers(ref req) => self.costs.headers.map(|c| c * U256::from(req.max)),
Request::HeaderProof(_) => self.costs.header_proof,
Request::TransactionIndex(_) => self.costs.transaction_index,
Request::Body(_) => self.costs.body,
@ -444,6 +444,6 @@ mod tests {
);
assert_eq!(flow_params2.costs, flow_params3.costs);
assert_eq!(flow_params.costs.headers.unwrap(), flow_params2.costs.headers.unwrap() * 2.into());
assert_eq!(flow_params.costs.headers.unwrap(), flow_params2.costs.headers.unwrap() * 2u32);
}
}

View File

@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethcore-bytes = { path = "../../util/bytes" }
ethereum-types = "0.1.4"
ethereum-types = "0.2"
keccak-hash = { path = "../../util/hash" }
kvdb = { path = "../../util/kvdb" }
kvdb-rocksdb = { path = "../../util/kvdb-rocksdb" }

View File

@ -1,19 +0,0 @@
[package]
name = "native-contracts"
description = "Generated Rust code for Ethereum contract ABIs"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
build = "build.rs"
[dependencies]
ethabi = "4.0"
futures = "0.1"
byteorder = "1.0"
ethereum-types = "0.1"
[build-dependencies]
native-contract-generator = { path = "generator" }
[features]
default = []
test_contracts = []

View File

@ -1,64 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate native_contract_generator;
use std::path::Path;
use std::fs::File;
use std::io::Write;
// TODO: just walk the "res" directory and generate whole crate automatically.
const KEY_SERVER_SET_ABI: &'static str = include_str!("res/key_server_set.json");
const REGISTRY_ABI: &'static str = include_str!("res/registrar.json");
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
const SECRETSTORE_SERVICE_ABI: &'static str = include_str!("res/secretstore_service.json");
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json");
const TX_ACL_ABI: &'static str = include_str!("res/tx_acl.json");
const TEST_VALIDATOR_SET_ABI: &'static str = include_str!("res/test_validator_set.json");
fn build_file(name: &str, abi: &str, filename: &str) {
let code = ::native_contract_generator::generate_module(name, abi).unwrap();
let out_dir = ::std::env::var("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join(filename);
let mut f = File::create(&dest_path).unwrap();
f.write_all(code.as_bytes()).unwrap();
}
fn build_test_contracts() {
build_file("ValidatorSet", TEST_VALIDATOR_SET_ABI, "test_validator_set.rs");
}
fn main() {
build_file("KeyServerSet", KEY_SERVER_SET_ABI, "key_server_set.rs");
build_file("Registry", REGISTRY_ABI, "registry.rs");
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
build_file("SecretStoreService", SECRETSTORE_SERVICE_ABI, "secretstore_service.rs");
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
build_file("PeerSet", PEER_SET_ABI, "peer_set.rs");
build_file("TransactAcl", TX_ACL_ABI, "tx_acl.rs");
build_test_contracts();
}

View File

@ -1,9 +0,0 @@
[package]
name = "native-contract-generator"
description = "Generates Rust code for ethereum contract ABIs"
version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethabi = "4.0"
heck = "0.3"

View File

@ -1,373 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Rust code contract generator.
//! The code generated will require a dependence on the `ethcore-ethereum_types`,
//! `ethabi`, `byteorder`, and `futures` crates.
//! This currently isn't hygienic, so compilation of generated code may fail
//! due to missing crates or name collisions. This will change when
//! it can be ported to a procedural macro.
extern crate ethabi;
extern crate heck;
use ethabi::{Contract, ParamType};
use heck::SnakeCase;
/// Errors in generation.
#[derive(Debug)]
pub enum Error {
/// Bad ABI.
Abi(ethabi::Error),
/// Unsupported parameter type in given function.
UnsupportedType(String, ParamType),
}
/// Given an ABI string, generate code for a a Rust module containing
/// a struct which can be used to call it.
// TODO: make this a proc macro when that's possible.
pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
let contract = Contract::load(abi.as_bytes()).map_err(Error::Abi)?;
let functions = generate_functions(&contract)?;
Ok(format!(r##"
use byteorder::{{BigEndian, ByteOrder}};
use futures::{{future, Future, IntoFuture}};
use ethabi::{{Bytes, Contract, Token, Event}};
use ethereum_types;
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
/// Generated Rust bindings to an Ethereum contract.
#[derive(Clone, Debug)]
pub struct {name} {{
contract: Contract,
/// Address to make calls to.
pub address: ethereum_types::H160,
}}
const ABI: &'static str = r#"{abi_str}"#;
impl {name} {{
/// Create a new instance of `{name}` with an address.
/// Calls can be made, given a callback for dispatching calls asynchronously.
pub fn new(address: ethereum_types::H160) -> Self {{
let contract = Contract::load(ABI.as_bytes())
.expect("ABI checked at generation-time; qed");
{name} {{
contract: contract,
address: address,
}}
}}
/// Access the underlying `ethabi` contract.
pub fn contract(this: &Self) -> &Contract {{
&this.contract
}}
{functions}
}}
"##,
name = struct_name,
abi_str = abi,
functions = functions,
))
}
// generate function bodies from the ABI.
fn generate_functions(contract: &Contract) -> Result<String, Error> {
let mut functions = String::new();
for function in contract.functions() {
let name = &function.name;
let snake_name = name.to_snake_case();
let inputs: Vec<_> = function.inputs.iter().map(|i| i.kind.clone()).collect();
let outputs: Vec<_> = function.outputs.iter().map(|i| i.kind.clone()).collect();
let (input_params, input_names, to_tokens) = input_params_codegen(&inputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
let (output_type, decode_outputs) = output_params_codegen(&outputs)
.map_err(|bad_type| Error::UnsupportedType(name.clone(), bad_type))?;
functions.push_str(&format!(r##"
/// Call the function "{abi_name}" on the contract.
///
/// Inputs: {abi_inputs:?}
/// Outputs: {abi_outputs:?}
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
where
F: FnOnce(ethereum_types::H160, Vec<u8>) -> U,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static
{{
let call_addr = self.address;
let call_future = match self.encode_{snake_name}_input({params_names}) {{
Ok(call_data) => (call)(call_addr, call_data),
Err(e) => return Box::new(future::err(e)),
}};
let function = self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed").clone();
Box::new(call_future
.into_future()
.and_then(move |out| function.decode_output(&out).map_err(|e| format!("{{:?}}", e)))
.map(Vec::into_iter)
.and_then(|mut outputs| {decode_outputs}))
}}
/// Encode "{abi_name}" function arguments.
/// Arguments: {abi_inputs:?}
pub fn encode_{snake_name}_input(&self, {params}) -> Result<Vec<u8>, String> {{
self.contract.function(r#"{abi_name}"#)
.expect("function existence checked at compile-time; qed")
.encode_input(&{to_tokens})
.map_err(|e| format!("Error encoding call: {{:?}}", e))
}}
"##,
abi_name = name,
abi_inputs = inputs,
abi_outputs = outputs,
snake_name = snake_name,
params = input_params,
params_names = input_names,
output_type = output_type,
to_tokens = to_tokens,
decode_outputs = decode_outputs,
))
}
Ok(functions)
}
// generate code for params in function signature and turning them into tokens.
//
// three pieces of code are generated: the first gives input types for the function signature,
// the second one gives input parameter names to pass to another method,
// and the third gives code to tokenize those inputs.
//
// params of form `param_0: type_0, param_1: type_1, ...`
// tokenizing code of form `{let mut tokens = Vec::new(); tokens.push({param_X}); tokens }`
//
// returns any unsupported param type encountered.
fn input_params_codegen(inputs: &[ParamType]) -> Result<(String, String, String), ParamType> {
let mut params = String::new();
let mut params_names = String::new();
let mut to_tokens = "{ let mut tokens = Vec::new();".to_string();
for (index, param_type) in inputs.iter().enumerate() {
let param_name = format!("param_{}", index);
let rust_type = rust_type(param_type.clone())?;
let (needs_mut, tokenize_code) = tokenize(&param_name, param_type.clone());
params.push_str(&format!("{}{}: {}, ",
if needs_mut { "mut " } else { "" }, param_name, rust_type));
params_names.push_str(&format!("{}, ", param_name));
to_tokens.push_str(&format!("tokens.push({{ {} }});", tokenize_code));
}
to_tokens.push_str(" tokens }");
Ok((params, params_names, to_tokens))
}
// generate code for outputs of the function and detokenizing them.
//
// two pieces of code are generated: the first gives an output type for the function signature
// as a tuple, and the second gives code to get that tuple from a deque of tokens.
//
// produce output type of the form (type_1, type_2, ...) without trailing comma.
// produce code for getting this output type from `outputs: Vec<Token>::IntoIter`, where
// an `Err(String)` can be returned.
//
// returns any unsupported param type encountered.
fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), ParamType> {
let mut output_type = "(".to_string();
let mut decode_outputs = "Ok((".to_string();
for (index, output) in outputs.iter().cloned().enumerate() {
let rust_type = rust_type(output.clone())?;
output_type.push_str(&rust_type);
decode_outputs.push_str(&format!(
r#"
outputs
.next()
.and_then(|output| {{ {} }})
.ok_or_else(|| "Wrong output type".to_string())?
"#,
detokenize("output", output)
));
// don't append trailing commas for the last element
// so we can reuse the same code for single-output contracts,
// since T == (T) != (T,)
if index < outputs.len() - 1 {
output_type.push_str(", ");
decode_outputs.push_str(", ");
}
}
output_type.push_str(")");
decode_outputs.push_str("))");
Ok((output_type, decode_outputs))
}
// create code for an argument type from param type.
fn rust_type(input: ParamType) -> Result<String, ParamType> {
Ok(match input {
ParamType::Address => "ethereum_types::H160".into(),
ParamType::FixedBytes(len) if len <= 32 => format!("ethereum_types::H{}", len * 8),
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
ParamType::Int(width) => match width {
8 | 16 | 32 | 64 => format!("i{}", width),
_ => return Err(ParamType::Int(width)),
},
ParamType::Uint(width) => match width {
8 | 16 | 32 | 64 => format!("u{}", width),
128 | 160 | 256 => format!("ethereum_types::U{}", width),
_ => return Err(ParamType::Uint(width)),
},
ParamType::Bool => "bool".into(),
ParamType::String => "String".into(),
ParamType::Array(kind) => format!("Vec<{}>", rust_type(*kind)?),
other => return Err(other),
})
}
// create code for tokenizing this parameter.
// returns (needs_mut, code), where needs_mut indicates mutability required.
// panics on unsupported types.
fn tokenize(name: &str, input: ParamType) -> (bool, String) {
let mut needs_mut = false;
let code = match input {
ParamType::Address => format!("Token::Address({}.0)", name),
ParamType::Bytes => format!("Token::Bytes({})", name),
ParamType::FixedBytes(len) if len <= 32 =>
format!("Token::FixedBytes({}.0.to_vec())", name),
ParamType::FixedBytes(len) => {
needs_mut = true;
format!("{}.resize({}, 0); Token::FixedBytes({})", name, len, name)
}
ParamType::Int(width) => match width {
8 => format!("let mut r = [0xff; 32]; r[31] = {}; Token::Int(r)", name),
16 | 32 | 64 =>
format!("let mut r = [0xff; 32]; BigEndian::write_i{}(&mut r[{}..], {}); Token::Int(r))",
width, 32 - (width / 8), name),
_ => panic!("Signed int with more than 64 bits not supported."),
},
ParamType::Uint(width) => format!(
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
if width <= 64 { format!("ethereum_types::U256::from({} as u64)", name) }
else { format!("ethereum_types::U256::from({})", name) }
),
ParamType::Bool => format!("Token::Bool({})", name),
ParamType::String => format!("Token::String({})", name),
ParamType::Array(kind) => {
let (needs_mut, code) = tokenize("i", *kind);
format!("Token::Array({}.into_iter().map(|{}i| {{ {} }}).collect())",
name, if needs_mut { "mut " } else { "" }, code)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported."),
};
(needs_mut, code)
}
// create code for detokenizing this parameter.
// takes an output type and the identifier of a token.
// expands to code that evaluates to a Option<concrete type>
// panics on unsupported types.
fn detokenize(name: &str, output_type: ParamType) -> String {
match output_type {
ParamType::Address => format!("{}.to_address().map(ethereum_types::H160)", name),
ParamType::Bytes => format!("{}.to_bytes()", name),
ParamType::FixedBytes(len) if len <= 32 => {
// ensure no panic on slice too small.
let read_hash = format!("b.resize({}, 0); ethereum_types::H{}::from_slice(&b[..{}])",
len, len * 8, len);
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
name, read_hash)
}
ParamType::FixedBytes(_) => format!("{}.to_fixed_bytes()", name),
ParamType::Int(width) => {
let read_int = match width {
8 => "i[31] as i8".into(),
16 | 32 | 64 => format!("BigEndian::read_i{}(&i[{}..])", width, 32 - (width / 8)),
_ => panic!("Signed integers over 64 bytes not allowed."),
};
format!("{}.to_int().map(|i| {})", name, read_int)
}
ParamType::Uint(width) => {
let read_uint = match width {
8 => "u[31] as u8".into(),
16 | 32 | 64 => format!("BigEndian::read_u{}(&u[{}..])", width, 32 - (width / 8)),
_ => format!("ethereum_types::U{}::from(&u[..])", width),
};
format!("{}.to_uint().map(|u| {})", name, read_uint)
}
ParamType::Bool => format!("{}.to_bool()", name),
ParamType::String => format!("{}.to_string()", name),
ParamType::Array(kind) => {
let read_array = format!("x.into_iter().map(|a| {{ {} }}).collect::<Option<Vec<_>>>()",
detokenize("a", *kind));
format!("{}.to_array().and_then(|x| {{ {} }})",
name, read_array)
}
ParamType::FixedArray(_, _) => panic!("Fixed-length arrays not supported.")
}
}
#[cfg(test)]
mod tests {
use ethabi::ParamType;
#[test]
fn input_types() {
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: ethereum_types::H160, ");
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
"param_0: ethereum_types::H160, param_1: Vec<u8>, ");
}
#[test]
fn output_types() {
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(ethereum_types::H160)");
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
"(ethereum_types::H160, Vec<Vec<u8>>)");
}
#[test]
fn rust_type() {
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "ethereum_types::H256");
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
"Vec<ethereum_types::H256>");
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
assert!(::rust_type(ParamType::Uint(63)).is_err());
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "ethereum_types::U256");
}
// codegen tests will need bootstrapping of some kind.
}

View File

@ -1 +0,0 @@
[{"constant":false,"inputs":[{"name":"sender","type":"address"}],"name":"allowedTxTypes","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]

View File

@ -1,6 +0,0 @@
[
{"constant":false,"inputs":[{"name":"_content","type":"bytes32"},{"name":"_url","type":"string"}],"name":"hintURL","outputs":[],"type":"function"},
{"constant":false,"inputs":[{"name":"_content","type":"bytes32"},{"name":"_accountSlashRepo","type":"string"},{"name":"_commit","type":"bytes20"}],"name":"hint","outputs":[],"type":"function"},
{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"entries","outputs":[{"name":"accountSlashRepo","type":"string"},{"name":"commit","type":"bytes20"},{"name":"owner","type":"address"}],"type":"function"},
{"constant":false,"inputs":[{"name":"_content","type":"bytes32"}],"name":"unhint","outputs":[],"type":"function"}
]

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Secret store Key Server set contract.
include!(concat!(env!("OUT_DIR"), "/key_server_set.rs"));

View File

@ -1,48 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Native contracts useful for Parity. These are type-safe wrappers
//! autogenerated at compile-time from Ethereum ABIs, and can be instantiated
//! given any closure which can dispatch calls to them asynchronously.
extern crate futures;
extern crate byteorder;
extern crate ethabi;
extern crate ethereum_types;
mod key_server_set;
mod registry;
mod urlhint;
mod service_transaction;
mod secretstore_acl_storage;
mod secretstore_service;
mod validator_set;
mod validator_report;
mod peer_set;
mod tx_acl;
pub mod test_contracts;
pub use self::key_server_set::KeyServerSet;
pub use self::registry::Registry;
pub use self::urlhint::Urlhint;
pub use self::service_transaction::ServiceTransactionChecker;
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
pub use self::secretstore_service::SecretStoreService;
pub use self::validator_set::ValidatorSet;
pub use self::validator_report::ValidatorReport;
pub use self::peer_set::PeerSet;
pub use self::tx_acl::TransactAcl;

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Peer set contract.
include!(concat!(env!("OUT_DIR"), "/peer_set.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Registrar contract: maps names to addresses and data.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/registry.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Secret store ACL storage contract.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/secretstore_acl_storage.rs"));

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Secret store service contract.
include!(concat!(env!("OUT_DIR"), "/secretstore_service.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Service transaction contract.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/service_transaction.rs"));

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Contracts used for testing.
pub mod validator_set;
pub use self::validator_set::ValidatorSet;

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Test validator set contract.
include!(concat!(env!("OUT_DIR"), "/test_validator_set.rs"));

View File

@ -1,21 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Transact permissions contract.
include!(concat!(env!("OUT_DIR"), "/tx_acl.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Registrar contract: maps names to arbitrary URL.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/urlhint.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports)]
//! Validator reporting.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/validator_report.rs"));

View File

@ -1,22 +0,0 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
#![allow(unused_mut, unused_variables, unused_imports, unused_parens)]
//! Validator set contract.
// TODO: testing.
include!(concat!(env!("OUT_DIR"), "/validator_set.rs"));

View File

@ -10,11 +10,13 @@ authors = ["Parity Technologies <admin@parity.io>"]
ethcore = { path = ".."}
ethcore-bytes = { path = "../../util/bytes" }
ethcore-network = { path = "../../util/network" }
ethereum-types = "0.1"
native-contracts = { path = "../native_contracts" }
futures = "0.1"
ethereum-types = "0.2"
log = "0.3"
parking_lot = "0.5"
ethabi = "5.1"
ethabi-derive = "5.0"
ethabi-contract = "5.0"
lru-cache = "0.1"
[dev-dependencies]
kvdb-memorydb = { path = "../../util/kvdb-memorydb" }

View File

@ -16,50 +16,55 @@
//! Smart contract based node filter.
extern crate ethabi;
extern crate ethcore;
extern crate ethcore_bytes as bytes;
extern crate ethcore_network as network;
extern crate ethereum_types;
extern crate native_contracts;
extern crate futures;
extern crate lru_cache;
extern crate parking_lot;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate kvdb_memorydb;
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[cfg(test)]
extern crate ethcore_io as io;
#[cfg(test)]
extern crate kvdb_memorydb;
#[macro_use]
extern crate log;
use std::sync::Weak;
use std::collections::HashMap;
use native_contracts::PeerSet as Contract;
use network::{NodeId, ConnectionFilter, ConnectionDirection};
use lru_cache::LruCache;
use parking_lot::Mutex;
use bytes::Bytes;
use ethcore::client::{BlockChainClient, BlockId, ChainNotify};
use ethereum_types::{H256, Address};
use bytes::Bytes;
use parking_lot::Mutex;
use futures::Future;
use network::{NodeId, ConnectionFilter, ConnectionDirection};
use_contract!(peer_set, "PeerSet", "res/peer_set.json");
const MAX_CACHE_SIZE: usize = 4096;
/// Connection filter that uses a contract to manage permissions.
pub struct NodeFilter {
contract: Mutex<Option<Contract>>,
contract: peer_set::PeerSet,
client: Weak<BlockChainClient>,
contract_address: Address,
permission_cache: Mutex<HashMap<NodeId, bool>>,
permission_cache: Mutex<LruCache<NodeId, bool>>,
}
impl NodeFilter {
/// Create a new instance. Accepts a contract address.
pub fn new(client: Weak<BlockChainClient>, contract_address: Address) -> NodeFilter {
NodeFilter {
contract: Mutex::new(None),
contract: peer_set::PeerSet::default(),
client: client,
contract_address: contract_address,
permission_cache: Mutex::new(HashMap::new()),
permission_cache: Mutex::new(LruCache::new(MAX_CACHE_SIZE)),
}
}
@ -73,40 +78,30 @@ impl ConnectionFilter for NodeFilter {
fn connection_allowed(&self, own_id: &NodeId, connecting_id: &NodeId, _direction: ConnectionDirection) -> bool {
let mut cache = self.permission_cache.lock();
if let Some(res) = cache.get(connecting_id) {
if let Some(res) = cache.get_mut(connecting_id) {
return *res;
}
let mut contract = self.contract.lock();
if contract.is_none() {
*contract = Some(Contract::new(self.contract_address));
}
let allowed = match (self.client.upgrade(), &*contract) {
(Some(ref client), &Some(ref contract)) => {
let own_low = H256::from_slice(&own_id[0..32]);
let own_high = H256::from_slice(&own_id[32..64]);
let id_low = H256::from_slice(&connecting_id[0..32]);
let id_high = H256::from_slice(&connecting_id[32..64]);
let allowed = contract.connection_allowed(
|addr, data| futures::done(client.call_contract(BlockId::Latest, addr, data)),
own_low,
own_high,
id_low,
id_high,
).wait().unwrap_or_else(|e| {
debug!("Error callling peer set contract: {:?}", e);
false
});
allowed
}
_ => false,
let client = match self.client.upgrade() {
Some(client) => client,
None => return false,
};
if cache.len() < MAX_CACHE_SIZE {
cache.insert(*connecting_id, allowed);
}
let address = self.contract_address;
let own_low = H256::from_slice(&own_id[0..32]);
let own_high = H256::from_slice(&own_id[32..64]);
let id_low = H256::from_slice(&connecting_id[0..32]);
let id_high = H256::from_slice(&connecting_id[32..64]);
let allowed = self.contract.functions()
.connection_allowed()
.call(own_low, own_high, id_low, id_high, &|data| client.call_contract(BlockId::Latest, address, data))
.unwrap_or_else(|e| {
debug!("Error callling peer set contract: {:?}", e);
false
});
cache.insert(*connecting_id, allowed);
allowed
}
}

View File

@ -0,0 +1 @@
[{"constant":true,"inputs":[{"name":"sender","type":"address"}],"name":"allowedTxTypes","outputs":[{"name":"","type":"uint32"}],"payable":false,"stateMutability":"nonpayable","type":"function"}]

View File

@ -50,13 +50,11 @@ use vm::{EnvInfo, LastHashes};
use evm::{Factory as EvmFactory, Schedule};
use executive::{Executive, Executed, TransactOptions, contract_address};
use factory::Factories;
use futures::{future, Future};
use header::{BlockNumber, Header, Seal};
use io::*;
use log_entry::LocalizedLogEntry;
use miner::{Miner, MinerService};
use native_contracts::Registry;
use parking_lot::{Mutex, RwLock, MutexGuard};
use parking_lot::{Mutex, RwLock};
use rand::OsRng;
use receipt::{Receipt, LocalizedReceipt};
use rlp::UntrustedRlp;
@ -82,6 +80,8 @@ pub use types::block_status::BlockStatus;
pub use blockchain::CacheSize as BlockChainCacheSize;
pub use verification::queue::QueueInfo as BlockQueueInfo;
use_contract!(registry, "Registry", "res/contracts/registrar.json");
const MAX_TX_QUEUE_SIZE: usize = 4096;
const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;
const MIN_HISTORY_SIZE: u64 = 8;
@ -165,7 +165,8 @@ pub struct Client {
history: u64,
ancient_verifier: Mutex<Option<AncientVerifier>>,
on_user_defaults_change: Mutex<Option<Box<FnMut(Option<Mode>) + 'static + Send>>>,
registrar: Mutex<Option<Registry>>,
registrar: registry::Registry,
registrar_address: Option<Address>,
exit_handler: Mutex<Option<Box<Fn(bool, Option<String>) + 'static + Send>>>,
}
@ -217,7 +218,7 @@ impl Client {
};
if !chain.block_header(&chain.best_block_hash()).map_or(true, |h| state_db.journal_db().contains(h.state_root())) {
warn!("State root not found for block #{} ({})", chain.best_block_number(), chain.best_block_hash().hex());
warn!("State root not found for block #{} ({:x})", chain.best_block_number(), chain.best_block_hash());
}
let engine = spec.engine.clone();
@ -226,6 +227,11 @@ impl Client {
let awake = match config.mode { Mode::Dark(..) | Mode::Off => false, _ => true };
let registrar_address = engine.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok());
if let Some(ref addr) = registrar_address {
trace!(target: "client", "Found registrar at {}", addr);
}
let client = Arc::new(Client {
enabled: AtomicBool::new(true),
sleep_state: Mutex::new(SleepState::new(awake)),
@ -251,7 +257,8 @@ impl Client {
history: history,
ancient_verifier: Mutex::new(None),
on_user_defaults_change: Mutex::new(None),
registrar: Mutex::new(None),
registrar: registry::Registry::default(),
registrar_address,
exit_handler: Mutex::new(None),
});
@ -294,12 +301,6 @@ impl Client {
}
}
if let Some(reg_addr) = client.additional_params().get("registrar").and_then(|s| Address::from_str(s).ok()) {
trace!(target: "client", "Found registrar at {}", reg_addr);
let registrar = Registry::new(reg_addr);
*client.registrar.lock() = Some(registrar);
}
// ensure buffered changes are flushed.
client.db.read().flush().map_err(ClientError::Database)?;
Ok(client)
@ -340,11 +341,6 @@ impl Client {
}
}
/// Get the Registry object - useful for looking up names.
pub fn registrar(&self) -> MutexGuard<Option<Registry>> {
self.registrar.lock()
}
/// Register an action to be done if a mode/spec_name change happens.
pub fn on_user_defaults_change<F>(&self, f: F) where F: 'static + FnMut(Option<Mode>) + Send {
*self.on_user_defaults_change.lock() = Some(Box::new(f));
@ -1828,18 +1824,24 @@ impl BlockChainClient for Client {
}
fn registrar_address(&self) -> Option<Address> {
self.registrar.lock().as_ref().map(|r| r.address)
self.registrar_address.clone()
}
fn registry_address(&self, name: String) -> Option<Address> {
self.registrar.lock().as_ref()
.and_then(|r| {
let dispatch = move |reg_addr, data| {
future::done(self.call_contract(BlockId::Latest, reg_addr, data))
};
r.get_address(dispatch, keccak(name.as_bytes()), "A".to_string()).wait().ok()
let address = match self.registrar_address {
Some(address) => address,
None => return None,
};
self.registrar.functions()
.get_address()
.call(keccak(name.as_bytes()), "A", &|data| self.call_contract(BlockId::Latest, address, data))
.ok()
.and_then(|a| if a.is_zero() {
None
} else {
Some(a)
})
.and_then(|a| if a.is_zero() { None } else { Some(a) })
}
fn eip86_transition(&self) -> u64 {

View File

@ -18,12 +18,10 @@
/// It can also report validators for misbehaviour with two levels: `reportMalicious` and `reportBenign`.
use std::sync::Weak;
use bytes::Bytes;
use ethereum_types::{H256, Address};
use parking_lot::RwLock;
use bytes::Bytes;
use futures::Future;
use native_contracts::ValidatorReport as Provider;
use client::EngineClient;
use header::{Header, BlockNumber};
@ -32,40 +30,41 @@ use machine::{AuxiliaryData, Call, EthereumMachine};
use super::{ValidatorSet, SimpleList, SystemCall};
use super::safe_contract::ValidatorSafeContract;
use_contract!(validator_report, "ValidatorReport", "res/contracts/validator_report.json");
/// A validator contract with reporting.
pub struct ValidatorContract {
contract_address: Address,
validators: ValidatorSafeContract,
provider: Provider,
provider: validator_report::ValidatorReport,
client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
}
impl ValidatorContract {
pub fn new(contract_address: Address) -> Self {
ValidatorContract {
contract_address,
validators: ValidatorSafeContract::new(contract_address),
provider: Provider::new(contract_address),
provider: validator_report::ValidatorReport::default(),
client: RwLock::new(None),
}
}
}
impl ValidatorContract {
// could be `impl Trait`.
// note: dispatches transactions to network as well as execute.
// TODO [keorn]: Make more general.
fn transact(&self) -> Box<Fn(Address, Bytes) -> Result<Bytes, String>> {
let client = self.client.read().clone();
Box::new(move |a, d| client.as_ref()
fn transact(&self, data: Bytes) -> Result<(), String> {
let client = self.client.read().as_ref()
.and_then(Weak::upgrade)
.ok_or_else(|| "No client!".into())
.and_then(|c| {
match c.as_full_client() {
Some(c) => c.transact_contract(a, d)
.map_err(|e| format!("Transaction import error: {}", e)),
None => Err("No full client!".into()),
}
})
.map(|_| Default::default()))
.ok_or_else(|| "No client!")?;
match client.as_full_client() {
Some(c) => {
c.transact_contract(self.contract_address, data)
.map_err(|e| format!("Transaction import error: {}", e))?;
Ok(())
},
None => Err("No full client!".into()),
}
}
}
@ -112,14 +111,16 @@ impl ValidatorSet for ValidatorContract {
}
fn report_malicious(&self, address: &Address, _set_block: BlockNumber, block: BlockNumber, proof: Bytes) {
match self.provider.report_malicious(&*self.transact(), *address, block.into(), proof).wait() {
let data = self.provider.functions().report_malicious().input(*address, block, proof);
match self.transact(data) {
Ok(_) => warn!(target: "engine", "Reported malicious validator {}", address),
Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s),
}
}
fn report_benign(&self, address: &Address, _set_block: BlockNumber, block: BlockNumber) {
match self.provider.report_benign(&*self.transact(), *address, block.into()).wait() {
let data = self.provider.functions().report_benign().input(*address, block);
match self.transact(data) {
Ok(_) => warn!(target: "engine", "Reported benign validator misbehaviour {}", address),
Err(s) => warn!(target: "engine", "Validator {} could not be reported {}", address, s),
}

View File

@ -17,11 +17,9 @@
/// Validator set maintained in a contract, updated using `getValidators` method.
use std::sync::{Weak, Arc};
use futures::Future;
use native_contracts::ValidatorSet as Provider;
use hash::keccak;
use ethereum_types::{H160, H256, U256, Address, Bloom};
use ethereum_types::{H256, U256, Address, Bloom};
use parking_lot::{Mutex, RwLock};
use bytes::Bytes;
@ -40,6 +38,8 @@ use receipt::Receipt;
use super::{SystemCall, ValidatorSet};
use super::simple_list::SimpleList;
use_contract!(validator_set, "ValidatorSet", "res/contracts/validator_set.json");
const MEMOIZE_CAPACITY: usize = 500;
// TODO: ethabi should be able to generate this.
@ -52,13 +52,14 @@ lazy_static! {
// state-dependent proofs for the safe contract:
// only "first" proofs are such.
struct StateProof {
contract_address: Address,
header: Mutex<Header>,
provider: Provider,
provider: validator_set::ValidatorSet,
}
impl ::engines::StateDependentProof<EthereumMachine> for StateProof {
fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String> {
prove_initial(&self.provider, &*self.header.lock(), caller)
prove_initial(&self.provider, self.contract_address, &*self.header.lock(), caller)
}
fn check_proof(&self, machine: &EthereumMachine, proof: &[u8]) -> Result<(), String> {
@ -68,15 +69,15 @@ impl ::engines::StateDependentProof<EthereumMachine> for StateProof {
return Err("wrong header in proof".into());
}
check_first_proof(machine, &self.provider, header, &state_items).map(|_| ())
check_first_proof(machine, &self.provider, self.contract_address, header, &state_items).map(|_| ())
}
}
/// The validator contract should have the following interface:
pub struct ValidatorSafeContract {
pub address: Address,
contract_address: Address,
validators: RwLock<MemoryLruCache<H256, SimpleList>>,
provider: Provider,
provider: validator_set::ValidatorSet,
client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
}
@ -92,7 +93,7 @@ fn encode_first_proof(header: &Header, state_items: &[Vec<u8>]) -> Bytes {
}
// check a first proof: fetch the validator set at the given block.
fn check_first_proof(machine: &EthereumMachine, provider: &Provider, old_header: Header, state_items: &[DBValue])
fn check_first_proof(machine: &EthereumMachine, provider: &validator_set::ValidatorSet, contract_address: Address, old_header: Header, state_items: &[DBValue])
-> Result<Vec<Address>, String>
{
use transaction::{Action, Transaction};
@ -117,15 +118,15 @@ fn check_first_proof(machine: &EthereumMachine, provider: &Provider, old_header:
// check state proof using given machine.
let number = old_header.number();
provider.get_validators(move |a, d| {
provider.functions().get_validators().call(&|data| {
let from = Address::default();
let tx = Transaction {
nonce: machine.account_start_nonce(number),
action: Action::Call(a),
action: Action::Call(contract_address),
gas: PROVIDED_GAS.into(),
gas_price: U256::default(),
value: U256::default(),
data: d,
data,
}.fake_sign(from);
let res = ::state::check_proof(
@ -141,7 +142,7 @@ fn check_first_proof(machine: &EthereumMachine, provider: &Provider, old_header:
::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)),
::state::ProvedExecution::Complete(e) => Ok(e.output),
}
}).wait()
}).map_err(|err| err.to_string())
}
fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<DBValue>), ::error::Error> {
@ -170,18 +171,19 @@ fn decode_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<Receipt>), ::error::E
// given a provider and caller, generate proof. this will just be a state proof
// of `getValidators`.
fn prove_initial(provider: &Provider, header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
fn prove_initial(provider: &validator_set::ValidatorSet, contract_address: Address, header: &Header, caller: &Call) -> Result<Vec<u8>, String> {
use std::cell::RefCell;
let epoch_proof = RefCell::new(None);
let res = {
let caller = |a, d| {
let (result, proof) = caller(a, d)?;
let caller = |data| {
let (result, proof) = caller(contract_address, data)?;
*epoch_proof.borrow_mut() = Some(encode_first_proof(header, &proof));
Ok(result)
};
provider.get_validators(caller).wait()
provider.functions().get_validators().call(&caller)
.map_err(|err| err.to_string())
};
res.map(|validators| {
@ -200,17 +202,18 @@ fn prove_initial(provider: &Provider, header: &Header, caller: &Call) -> Result<
impl ValidatorSafeContract {
pub fn new(contract_address: Address) -> Self {
ValidatorSafeContract {
address: contract_address,
contract_address,
validators: RwLock::new(MemoryLruCache::new(MEMOIZE_CAPACITY)),
provider: Provider::new(contract_address),
provider: validator_set::ValidatorSet::default(),
client: RwLock::new(None),
}
}
/// Queries the state and gets the set of validators.
fn get_list(&self, caller: &Call) -> Option<SimpleList> {
let caller = move |a, d| caller(a, d).map(|x| x.0);
match self.provider.get_validators(caller).wait() {
let contract_address = self.contract_address;
let caller = move |data| caller(contract_address, data).map(|x| x.0);
match self.provider.functions().get_validators().call(&caller) {
Ok(new) => {
debug!(target: "engine", "Set of validators obtained: {:?}", new);
Some(SimpleList::new(new))
@ -244,7 +247,7 @@ impl ValidatorSafeContract {
header.hash(), topics);
LogEntry {
address: self.address,
address: self.contract_address,
topics: topics,
data: Vec::new(), // irrelevant for bloom.
}.bloom()
@ -254,52 +257,29 @@ impl ValidatorSafeContract {
// header the receipts correspond to.
fn extract_from_event(&self, bloom: Bloom, header: &Header, receipts: &[Receipt]) -> Option<SimpleList> {
let check_log = |log: &LogEntry| {
log.address == self.address &&
log.address == self.contract_address &&
log.topics.len() == 2 &&
log.topics[0] == *EVENT_NAME_HASH &&
log.topics[1] == *header.parent_hash()
};
let event = Provider::contract(&self.provider)
.event("InitiateChange".into())
.expect("Contract known ahead of time to have `InitiateChange` event; qed");
// iterate in reverse because only the _last_ change in a given
// block actually has any effect.
// the contract should only increment the nonce once.
let event = self.provider.events().initiate_change();
//// iterate in reverse because only the _last_ change in a given
//// block actually has any effect.
//// the contract should only increment the nonce once.
let mut decoded_events = receipts.iter()
.rev()
.filter(|r| &bloom & &r.log_bloom == bloom)
.filter(|r| r.log_bloom.contains_bloom(&bloom))
.flat_map(|r| r.logs.iter())
.filter(move |l| check_log(l))
.filter_map(|log| {
let topics = log.topics.iter().map(|x| x.0.clone()).collect();
event.parse_log((topics, log.data.clone()).into()).ok()
event.parse_log((log.topics.clone(), log.data.clone()).into()).ok()
});
// only last log is taken into account
match decoded_events.next() {
None => None,
Some(matched_event) => {
// decode log manually until the native contract generator is
// good enough to do it for us.
let validators_token = &matched_event.params[1].value;
let validators = validators_token.clone().to_array()
.and_then(|a| a.into_iter()
.map(|x| x.to_address().map(H160))
.collect::<Option<Vec<_>>>()
)
.map(SimpleList::new);
if validators.is_none() {
debug!(target: "engine", "Successfully decoded log turned out to be bad.");
}
trace!(target: "engine", "decoded log. validators: {:?}", validators);
validators
}
Some(matched_event) => Some(SimpleList::new(matched_event.new_set))
}
}
}
@ -320,14 +300,15 @@ impl ValidatorSet for ValidatorSafeContract {
}
fn on_epoch_begin(&self, _first: bool, _header: &Header, caller: &mut SystemCall) -> Result<(), ::error::Error> {
self.provider.finalize_change(caller)
.wait()
let data = self.provider.functions().finalize_change().input();
caller(self.contract_address, data)
.map(|_| ())
.map_err(::engines::EngineError::FailedSystemCall)
.map_err(Into::into)
}
fn genesis_epoch_data(&self, header: &Header, call: &Call) -> Result<Vec<u8>, String> {
prove_initial(&self.provider, header, call)
prove_initial(&self.provider, self.contract_address, header, call)
}
fn is_epoch_end(&self, _first: bool, _chain_head: &Header) -> Option<Vec<u8>> {
@ -343,8 +324,9 @@ impl ValidatorSet for ValidatorSafeContract {
if first {
debug!(target: "engine", "signalling transition to fresh contract.");
let state_proof = Arc::new(StateProof {
contract_address: self.contract_address,
header: Mutex::new(header.clone()),
provider: self.provider.clone(),
provider: validator_set::ValidatorSet::default(),
});
return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>));
}
@ -383,7 +365,7 @@ impl ValidatorSet for ValidatorSafeContract {
let (old_header, state_items) = decode_first_proof(&rlp)?;
let number = old_header.number();
let old_hash = old_header.hash();
let addresses = check_first_proof(machine, &self.provider, old_header, &state_items)
let addresses = check_first_proof(machine, &self.provider, self.contract_address, old_header, &state_items)
.map_err(::engines::EngineError::InsufficientProof)?;
trace!(target: "engine", "extracted epoch set at #{}: {} addresses",

View File

@ -175,8 +175,8 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
if header.seal().len() == self.seal_fields() {
map![
"nonce".to_owned() => format!("0x{}", header.nonce().hex()),
"mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())
"nonce".to_owned() => format!("0x{:x}", header.nonce()),
"mixHash".to_owned() => format!("0x{:x}", header.mix_hash())
]
} else {
BTreeMap::default()
@ -384,9 +384,9 @@ impl Ethash {
let diff_inc = (header.timestamp() - parent.timestamp()) / increment_divisor;
if diff_inc <= threshold {
*parent.difficulty() + *parent.difficulty() / difficulty_bound_divisor * (threshold - diff_inc).into()
*parent.difficulty() + *parent.difficulty() / difficulty_bound_divisor * U256::from(threshold - diff_inc)
} else {
let multiplier = cmp::min(diff_inc - threshold, 99).into();
let multiplier: U256 = cmp::min(diff_inc - threshold, 99).into();
parent.difficulty().saturating_sub(
*parent.difficulty() / difficulty_bound_divisor * multiplier
)
@ -474,6 +474,7 @@ mod tests {
use error::{BlockError, Error};
use header::Header;
use spec::Spec;
use engines::Engine;
use super::super::{new_morden, new_mcip3_test, new_homestead_test_machine};
use super::{Ethash, EthashParams, ecip1017_eras_block_reward};
use rlp;
@ -849,4 +850,16 @@ mod tests {
let difficulty = ethash.calculate_difficulty(&header, &parent_header);
assert_eq!(U256::from(12543204905719u64), difficulty);
}
#[test]
fn test_extra_info() {
let machine = new_homestead_test_machine();
let ethparams = get_default_ethash_params();
let ethash = Ethash::new(&::std::env::temp_dir(), ethparams, machine, None);
let mut header = Header::default();
header.set_seal(vec![rlp::encode(&H256::from("b251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d")).into_vec(), rlp::encode(&H64::zero()).into_vec()]);
let info = ethash.extra_info(&header);
assert_eq!(info["nonce"], "0x0000000000000000");
assert_eq!(info["mixHash"], "0xb251bd2e0283d0658f2cadfdc8ca619b5de94eca5742725e2e757dd13ed7503d");
}
}
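The new assertions work because ethereum-types 0.2 implements LowerHex for its hash and uint types, which is what lets format!("0x{:x}", ..) replace the removed .hex() calls throughout this change. A small self-contained sketch, with values chosen purely for illustration:

use ethereum_types::{H64, U256};

fn main() {
    // fixed-size hashes print zero-padded to their full width
    assert_eq!(format!("0x{:x}", H64::zero()), "0x0000000000000000");
    // uints print without leading zeros, so small values stay short
    assert_eq!(format!("0x{:x}", U256::from(255u64)), "0xff");
}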

View File

@ -71,12 +71,10 @@ extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate ethjson;
extern crate ethkey;
extern crate futures;
extern crate hardware_wallet;
extern crate hashdb;
extern crate itertools;
extern crate lru_cache;
extern crate native_contracts;
extern crate num_cpus;
extern crate num;
extern crate parity_machine;
@ -99,6 +97,12 @@ extern crate util_error;
extern crate snappy;
extern crate migration;
extern crate ethabi;
#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[macro_use]
extern crate rlp_derive;
extern crate rustc_hex;

View File

@ -220,7 +220,7 @@ impl EthereumMachine {
let total_lower_limit = cmp::max(lower_limit, gas_floor_target);
let total_upper_limit = cmp::min(upper_limit, gas_ceil_target);
let gas_limit = cmp::max(gas_floor_target, cmp::min(total_upper_limit,
lower_limit + (header.gas_used().clone() * 6.into() / 5.into()) / bound_divisor));
lower_limit + (header.gas_used().clone() * 6u32 / 5.into()) / bound_divisor));
round_block_gas_limit(gas_limit, total_lower_limit, total_upper_limit)
};
// ensure that we are not violating protocol limits
@ -384,7 +384,7 @@ impl EthereumMachine {
/// Additional params.
pub fn additional_params(&self) -> HashMap<String, String> {
hash_map![
"registrar".to_owned() => self.params.registrar.hex()
"registrar".to_owned() => format!("{:x}", self.params.registrar)
]
}
}
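The gas-limit hunk above also exercises the new U256 * u32 arithmetic from ethereum-types 0.2. Below is a hedged sketch of just the unclamped target it computes; the clamping against floor, ceiling and protocol limits is omitted, and the function name is an invention for illustration.

use ethereum_types::U256;

// moves the limit towards 6/5 (120%) of the parent's gas usage,
// by at most gas_limit / bound_divisor per block
fn unclamped_target(lower_limit: U256, gas_used: U256, bound_divisor: U256) -> U256 {
    lower_limit + (gas_used * 6u32 / U256::from(5)) / bound_divisor
}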

View File

@ -647,7 +647,7 @@ impl Miner {
queue.set_gas_limit(gas_limit);
if let GasLimit::Auto = self.options.tx_queue_gas_limit {
// Set total tx queue gas limit to be 20x the block gas limit.
queue.set_total_gas_limit(gas_limit * 20.into());
queue.set_total_gas_limit(gas_limit * 20u32);
}
}
@ -720,9 +720,6 @@ impl Miner {
}
}).unwrap_or(default_origin);
// try to install service transaction checker before appending transactions
self.service_transaction_action.update_from_chain_client(client);
let details_provider = TransactionDetailsProvider::new(client, &self.service_transaction_action);
match origin {
TransactionOrigin::Local | TransactionOrigin::RetractedBlock => {
@ -838,7 +835,7 @@ impl MinerService for Miner {
fn sensible_gas_price(&self) -> U256 {
// 10% above our minimum.
*self.transaction_queue.read().minimal_gas_price() * 110.into() / 100.into()
*self.transaction_queue.read().minimal_gas_price() * 110u32 / 100.into()
}
fn sensible_gas_limit(&self) -> U256 {
@ -1169,7 +1166,7 @@ impl MinerService for Miner {
let n = sealed.header().number();
let h = sealed.header().hash();
chain.import_sealed_block(sealed)?;
info!(target: "miner", "Submitted block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(h.hex()));
info!(target: "miner", "Submitted block imported OK. #{}: {}", Colour::White.bold().paint(format!("{}", n)), Colour::White.bold().paint(format!("{:x}", h)));
Ok(())
})
}
@ -1231,12 +1228,6 @@ enum ServiceTransactionAction {
}
impl ServiceTransactionAction {
pub fn update_from_chain_client(&self, client: &MiningBlockChainClient) {
if let ServiceTransactionAction::Check(ref checker) = *self {
checker.update_from_chain_client(&client);
}
}
pub fn check(&self, client: &MiningBlockChainClient, tx: &SignedTransaction) -> Result<bool, String> {
match *self {
ServiceTransactionAction::Refuse => Err("configured to refuse service transactions".to_owned()),
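Likewise, the "10% above our minimum" gas-price heuristic touched above is plain U256 arithmetic; a worked sketch with an assumed 20 gwei minimum:

use ethereum_types::U256;

fn sensible_gas_price(minimal: U256) -> U256 {
    // 10% above the configured minimum, using the new u32 multiplication
    minimal * 110u32 / U256::from(100)
}

fn main() {
    let minimum = U256::from(20_000_000_000u64); // 20 gwei, assumed
    assert_eq!(sensible_gas_price(minimum), U256::from(22_000_000_000u64));
}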

View File

@ -173,8 +173,8 @@ impl StratumJobDispatcher {
let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
format!(
r#"["0x", "0x{}","0x{}","0x{}","0x{:x}"]"#,
pow_hash.hex(), seed_hash.hex(), target.hex(), number
r#"["0x", "0x{:x}","0x{:x}","0x{:x}","0x{:x}"]"#,
pow_hash, seed_hash, target, number
)
}

View File

@ -156,12 +156,9 @@ impl LooseWriter {
// writing logic is the same for both kinds of chunks.
fn write_chunk(&mut self, hash: H256, chunk: &[u8]) -> io::Result<()> {
let mut file_path = self.dir.clone();
file_path.push(hash.hex());
let file_path = self.dir.join(format!("{:x}", hash));
let mut file = File::create(file_path)?;
file.write_all(chunk)?;
Ok(())
}
}
@ -327,14 +324,10 @@ impl SnapshotReader for LooseReader {
}
fn chunk(&self, hash: H256) -> io::Result<Bytes> {
let mut path = self.dir.clone();
path.push(hash.hex());
let path = self.dir.join(format!("{:x}", hash));
let mut buf = Vec::new();
let mut file = File::open(&path)?;
file.read_to_end(&mut buf)?;
Ok(buf)
}
}

View File

@ -181,8 +181,8 @@ pub fn chunk_secondary<'a>(mut chunker: Box<SnapshotComponents>, chain: &'a Bloc
let size = compressed.len();
writer.lock().write_block_chunk(hash, compressed)?;
trace!(target: "snapshot", "wrote secondary chunk. hash: {}, size: {}, uncompressed size: {}",
hash.hex(), size, raw_data.len());
trace!(target: "snapshot", "wrote secondary chunk. hash: {:x}, size: {}, uncompressed size: {}",
hash, size, raw_data.len());
progress.size.fetch_add(size, Ordering::SeqCst);
chunk_hashes.push(hash);

View File

@ -23,8 +23,6 @@ use std::str::FromStr;
use account_provider::AccountProvider;
use client::{Client, BlockChainClient};
use ethkey::Secret;
use futures::Future;
use native_contracts::test_contracts::ValidatorSet;
use snapshot::tests::helpers as snapshot_helpers;
use spec::Spec;
use tests::helpers;
@ -33,6 +31,8 @@ use transaction::{Transaction, Action, SignedTransaction};
use ethereum_types::Address;
use kvdb_memorydb;
use_contract!(test_validator_set, "ValidatorSet", "res/contracts/test_validator_set.json");
const PASS: &'static str = "";
const TRANSITION_BLOCK_1: usize = 2; // block at which the contract becomes activated.
const TRANSITION_BLOCK_2: usize = 10; // block at which the second contract activates.
@ -56,7 +56,7 @@ lazy_static! {
/// Account with secrets keccak("1") is initially the validator.
/// Transitions to the contract at block 2, initially same validator set.
/// Create a new Spec with AuthorityRound which uses a contract at address 5 to determine the current validators using `getValidators`.
/// `native_contracts::test_contracts::ValidatorSet` provides a native wrapper for the ABI.
/// `test_validator_set::ValidatorSet` provides a native wrapper for the ABI.
fn spec_fixed_to_contract() -> Spec {
let data = include_bytes!("test_validator_contract.json");
Spec::load(&::std::env::temp_dir(), &data[..]).unwrap()
@ -136,8 +136,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
vec![transaction]
};
let contract_1 = ValidatorSet::new(*CONTRACT_ADDR_1);
let contract_2 = ValidatorSet::new(*CONTRACT_ADDR_2);
let contract = test_validator_set::ValidatorSet::default();
// apply all transitions.
for transition in transitions {
@ -160,34 +159,24 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
let pending = if manual {
trace!(target: "snapshot", "applying set transition at block #{}", num);
let contract = match num >= TRANSITION_BLOCK_2 {
true => &contract_2,
false => &contract_1,
let address = match num >= TRANSITION_BLOCK_2 {
true => &CONTRACT_ADDR_2 as &Address,
false => &CONTRACT_ADDR_1 as &Address,
};
let mut pending = Vec::new();
{
let mut exec = |addr, data| {
let mut nonce = nonce.borrow_mut();
let transaction = Transaction {
nonce: *nonce,
gas_price: 0.into(),
gas: 1_000_000.into(),
action: Action::Call(addr),
value: 0.into(),
data: data,
}.sign(&*RICH_SECRET, client.signing_chain_id());
let data = contract.functions().set_validators().input(new_set.clone());
let mut nonce = nonce.borrow_mut();
let transaction = Transaction {
nonce: *nonce,
gas_price: 0.into(),
gas: 1_000_000.into(),
action: Action::Call(*address),
value: 0.into(),
data,
}.sign(&*RICH_SECRET, client.signing_chain_id());
pending.push(transaction);
*nonce = *nonce + 1.into();
Ok(Vec::new())
};
contract.set_validators(&mut exec, new_set.clone()).wait().unwrap();
}
pending
*nonce = *nonce + 1.into();
vec![transaction]
} else {
make_useless_transactions()
};

View File

@ -486,7 +486,7 @@ impl fmt::Debug for Account {
#[cfg(test)]
mod tests {
use rlp::{UntrustedRlp, RlpType, Compressible};
use ethereum_types::{H256, U256, Address};
use ethereum_types::{H256, Address};
use memorydb::MemoryDB;
use bytes::Bytes;
use super::*;
@ -508,7 +508,7 @@ mod tests {
let mut db = AccountDBMut::new(&mut db, &Address::new());
let rlp = {
let mut a = Account::new_contract(69.into(), 0.into());
a.set_storage(H256::from(&U256::from(0x00u64)), H256::from(&U256::from(0x1234u64)));
a.set_storage(0x00u64.into(), 0x1234u64.into());
a.commit_storage(&Default::default(), &mut db).unwrap();
a.init_code(vec![]);
a.commit_code(&mut db);
@ -516,9 +516,9 @@ mod tests {
};
let a = Account::from_rlp(&rlp);
assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x00u64))).unwrap(), H256::from(&U256::from(0x1234u64)));
assert_eq!(a.storage_at(&db.immutable(), &H256::from(&U256::from(0x01u64))).unwrap(), H256::new());
assert_eq!(*a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into());
assert_eq!(a.storage_at(&db.immutable(), &0x00u64.into()).unwrap(), 0x1234u64.into());
assert_eq!(a.storage_at(&db.immutable(), &0x01u64.into()).unwrap(), H256::default());
}
#[test]
@ -548,7 +548,7 @@ mod tests {
a.set_storage(0.into(), 0x1234.into());
assert_eq!(a.storage_root(), None);
a.commit_storage(&Default::default(), &mut db).unwrap();
assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
assert_eq!(*a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into());
}
#[test]
@ -562,7 +562,7 @@ mod tests {
a.commit_storage(&Default::default(), &mut db).unwrap();
a.set_storage(1.into(), 0.into());
a.commit_storage(&Default::default(), &mut db).unwrap();
assert_eq!(a.storage_root().unwrap().hex(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2");
assert_eq!(*a.storage_root().unwrap(), "c57e1afb758b07f8d2c8f13a3b6e44fa5ff94ab266facc5a4fd3f062426e50b2".into());
}
#[test]
@ -574,7 +574,7 @@ mod tests {
assert_eq!(a.code_filth, Filth::Dirty);
assert_eq!(a.code_size(), Some(3));
a.commit_code(&mut db);
assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
assert_eq!(a.code_hash(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into());
}
#[test]
@ -586,16 +586,16 @@ mod tests {
assert_eq!(a.code_filth, Filth::Dirty);
a.commit_code(&mut db);
assert_eq!(a.code_filth, Filth::Clean);
assert_eq!(a.code_hash().hex(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb");
assert_eq!(a.code_hash(), "af231e631776a517ca23125370d542873eca1fb4d613ed9b5d5335a46ae5b7eb".into());
a.reset_code(vec![0x55]);
assert_eq!(a.code_filth, Filth::Dirty);
a.commit_code(&mut db);
assert_eq!(a.code_hash().hex(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be");
assert_eq!(a.code_hash(), "37bf2238b11b68cdc8382cece82651b59d3c3988873b6e0f33d79694aa45f1be".into());
}
#[test]
fn rlpio() {
let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new());
let b = Account::from_rlp(&a.rlp());
assert_eq!(a.balance(), b.balance());
assert_eq!(a.nonce(), b.nonce());
@ -605,17 +605,17 @@ mod tests {
#[test]
fn new_account() {
let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new());
assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
assert_eq!(a.balance(), &U256::from(69u8));
assert_eq!(a.nonce(), &U256::from(0u8));
assert_eq!(*a.balance(), 69u8.into());
assert_eq!(*a.nonce(), 0u8.into());
assert_eq!(a.code_hash(), KECCAK_EMPTY);
assert_eq!(a.storage_root().unwrap(), &KECCAK_NULL_RLP);
}
#[test]
fn create_account() {
let a = Account::new(U256::from(69u8), U256::from(0u8), HashMap::new(), Bytes::new());
let a = Account::new(69u8.into(), 0u8.into(), HashMap::new(), Bytes::new());
assert_eq!(a.rlp().to_hex(), "f8448045a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470");
}

View File

@ -644,7 +644,7 @@ impl<B: Backend> State<B> {
/// Mutate storage of account `a` so that it is `value` for `key`.
pub fn set_storage(&mut self, a: &Address, key: H256, value: H256) -> trie::Result<()> {
trace!(target: "state", "set_storage({}:{} to {})", a, key.hex(), value.hex());
trace!(target: "state", "set_storage({}:{:x} to {:x})", a, key, value);
if self.storage_at(a, &key)? != value {
self.require(a, false)?.set_storage(key, value)
}
@ -2085,7 +2085,7 @@ mod tests {
let a = Address::zero();
state.require(&a, false).unwrap();
state.commit().unwrap();
assert_eq!(state.root().hex(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785");
assert_eq!(*state.root(), "0ce23f3c809de377b008a4a3ee94a0834aac8bec1f86e28ffe4fdb5a15b0c785".into());
}
#[test]
@ -2122,7 +2122,7 @@ mod tests {
fn create_empty() {
let mut state = get_temp_state();
state.commit().unwrap();
assert_eq!(state.root().hex(), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421");
assert_eq!(*state.root(), "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421".into());
}
#[test]

View File

@ -19,15 +19,15 @@
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use ethereum_types::{H256, Address};
use native_contracts::TransactAcl as Contract;
use client::{BlockChainClient, BlockId, ChainNotify};
use bytes::Bytes;
use parking_lot::Mutex;
use futures::{self, Future};
use spec::CommonParams;
use transaction::{Action, SignedTransaction};
use hash::KECCAK_EMPTY;
use_contract!(transact_acl, "TransactAcl", "res/contracts/tx_acl.json");
const MAX_CACHE_SIZE: usize = 4096;
mod tx_permissions {
@ -41,7 +41,7 @@ mod tx_permissions {
/// Connection filter that uses a contract to manage permissions.
pub struct TransactionFilter {
contract: Mutex<Option<Contract>>,
contract: transact_acl::TransactAcl,
contract_address: Address,
permission_cache: Mutex<HashMap<(H256, Address), u32>>,
}
@ -51,7 +51,7 @@ impl TransactionFilter {
pub fn from_params(params: &CommonParams) -> Option<TransactionFilter> {
params.transaction_permission_contract.map(|address|
TransactionFilter {
contract: Mutex::new(None),
contract: transact_acl::TransactAcl::default(),
contract_address: address,
permission_cache: Mutex::new(HashMap::new()),
}
@ -79,23 +79,15 @@ impl TransactionFilter {
match cache.entry((*parent_hash, sender)) {
Entry::Occupied(entry) => *entry.get() & tx_type != 0,
Entry::Vacant(entry) => {
let mut contract = self.contract.lock();
if contract.is_none() {
*contract = Some(Contract::new(self.contract_address));
}
let permissions = match &*contract {
&Some(ref contract) => {
contract.allowed_tx_types(
|addr, data| futures::done(client.call_contract(BlockId::Hash(*parent_hash), addr, data)),
sender,
).wait().unwrap_or_else(|e| {
debug!("Error callling tx permissions contract: {:?}", e);
tx_permissions::NONE
})
}
_ => tx_permissions::NONE,
};
let contract_address = self.contract_address;
let permissions = self.contract.functions()
.allowed_tx_types()
.call(sender, &|data| client.call_contract(BlockId::Hash(*parent_hash), contract_address, data))
.map(|p| p.low_u32())
.unwrap_or_else(|e| {
debug!("Error callling tx permissions contract: {:?}", e);
tx_permissions::NONE
});
if len < MAX_CACHE_SIZE {
entry.insert(permissions);
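The value cached above is the contract's return truncated to a bitmask with low_u32(), and membership is tested with a bitwise AND. A sketch of that test follows; the flag values are assumptions, since only NONE is visible in this hunk.

// assumed flag layout; only NONE actually appears in the hunk above
const NONE: u32 = 0x0;
const BASIC: u32 = 0x1;
const CALL: u32 = 0x2;
const CREATE: u32 = 0x4;

fn is_allowed(cached_permissions: u32, tx_type: u32) -> bool {
    // a transaction type is allowed when its bit is set in the cached mask
    cached_permissions & tx_type != 0
}

fn main() {
    assert!(is_allowed(BASIC | CALL, CALL));
    assert!(!is_allowed(NONE, CREATE));
}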

View File

@ -12,7 +12,7 @@ heapsize = "0.4"
keccak-hash = { path = "../../util/hash" }
rlp = { path = "../../util/rlp" }
unexpected = { path = "../../util/unexpected" }
ethereum-types = "0.1"
ethereum-types = "0.2"
[dev-dependencies]
rustc-hex= "1.0"

View File

@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
rlp = { path = "../../util/rlp" }
rlp_derive = { path = "../../util/rlp_derive" }
ethcore-bytes = { path = "../../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
ethjson = { path = "../../json" }
keccak-hash = { path = "../../util/hash" }
heapsize = "0.4"

View File

@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
byteorder = "1.0"
ethcore-bytes = { path = "../../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
patricia-trie = { path = "../../util/patricia_trie" }
log = "0.3"
common-types = { path = "../types" }

View File

@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
byteorder = "1.0"
ethereum-types = "0.1"
ethereum-types = "0.2"
log = "0.3"
parity-wasm = "0.23"
libc = "0.2"

View File

@ -7,7 +7,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
serde = "1"
serde_json = "1"
serde_derive = "1"
ethereum-types = "0.1"
ethereum-types = "0.2"
ethjson = { path = "../../../json" }
vm = { path = "../../vm" }
wasm = { path = "../" }

View File

@ -8,5 +8,5 @@ rust-crypto = "0.2.36"
tiny-keccak = "1.3"
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
ethkey = { path = "../ethkey" }
ethereum-types = "0.1"
ethereum-types = "0.2"
subtle = "0.1"

View File

@ -7,11 +7,11 @@ authors = ["Parity Technologies <admin@parity.io>"]
byteorder = "1.0"
edit-distance = "2.0"
eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
ethereum-types = "0.1"
ethereum-types = "0.2"
lazy_static = "1.0"
log = "0.3"
parity-wordlist = "1.2"
rand = "0.3.14"
rand = "0.4"
rust-crypto = "0.2"
rustc-hex = "1.0"
tiny-keccak = "1.3"

View File

@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
log = "0.3"
libc = "0.2"
rand = "0.3"
rand = "0.4"
ethkey = { path = "../ethkey" }
serde = "1.0"
serde_json = "1.0"
@ -18,7 +18,7 @@ time = "0.1.34"
itertools = "0.5"
parking_lot = "0.5"
ethcrypto = { path = "../ethcrypto" }
ethereum-types = "0.1"
ethereum-types = "0.2"
dir = { path = "../util/dir" }
smallvec = "0.4"
parity-wordlist = "1.0"

View File

@ -14,7 +14,7 @@ ethcore = { path = "../ethcore" }
ethjson = { path = "../json" }
ethcore-bytes = { path = "../util/bytes" }
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
evm = { path = "../ethcore/evm" }
panic_hook = { path = "../util/panic_hook" }
rustc-hex = "1.0"

View File

@ -49,7 +49,7 @@ impl Informant {
}
fn stack(&self) -> String {
let items = self.stack.iter().map(display::u256_as_str).collect::<Vec<_>>();
let items = self.stack.iter().map(|i| format!("\"0x{:x}\"", i)).collect::<Vec<_>>();
format!("[{}]", items.join(","))
}
@ -124,12 +124,12 @@ impl trace::VMTracer for Informant {
let info = ::evm::INSTRUCTIONS[self.instruction as usize];
let trace = format!(
"{{\"pc\":{pc},\"op\":{op},\"opName\":\"{name}\",\"gas\":{gas},\"gasCost\":{gas_cost},\"memory\":{memory},\"stack\":{stack},\"storage\":{storage},\"depth\":{depth}}}",
"{{\"pc\":{pc},\"op\":{op},\"opName\":\"{name}\",\"gas\":\"0x{gas:x}\",\"gasCost\":\"0x{gas_cost:x}\",\"memory\":{memory},\"stack\":{stack},\"storage\":{storage},\"depth\":{depth}}}",
pc = self.pc,
op = self.instruction,
name = info.name,
gas = display::u256_as_str(&(gas_used.saturating_add(self.gas_cost))),
gas_cost = display::u256_as_str(&self.gas_cost),
gas = gas_used.saturating_add(self.gas_cost),
gas_cost = self.gas_cost,
memory = self.memory(),
stack = self.stack(),
storage = self.storage(),

View File

@ -17,7 +17,6 @@
//! VM Output display utils.
use std::time::Duration;
use ethereum_types::U256;
pub mod json;
pub mod std_json;
@ -32,13 +31,3 @@ pub fn format_time(time: &Duration) -> String {
pub fn as_micros(time: &Duration) -> u64 {
time.as_secs() * 1_000_000 + time.subsec_nanos() as u64 / 1_000
}
/// Converts U256 into string.
/// TODO Overcomes: https://github.com/paritytech/bigint/issues/13
pub fn u256_as_str(v: &U256) -> String {
if v.is_zero() {
"\"0x0\"".into()
} else {
format!("\"{:x}\"", v)
}
}

View File

@ -67,7 +67,7 @@ impl<T: Writer> Informant<T> {
impl<T: Writer> Informant<T> {
fn stack(&self) -> String {
let items = self.stack.iter().map(display::u256_as_str).collect::<Vec<_>>();
let items = self.stack.iter().map(|i| format!("\"0x{:x}\"", i)).collect::<Vec<_>>();
format!("[{}]", items.join(","))
}
@ -125,11 +125,11 @@ impl<T: Writer> trace::VMTracer for Informant<T> {
writeln!(
&mut self.sink,
"{{\"pc\":{pc},\"op\":{op},\"opName\":\"{name}\",\"gas\":{gas},\"stack\":{stack},\"storage\":{storage},\"depth\":{depth}}}",
"{{\"pc\":{pc},\"op\":{op},\"opName\":\"{name}\",\"gas\":\"0x{gas:x}\",\"stack\":{stack},\"storage\":{storage},\"depth\":{depth}}}",
pc = pc,
op = instruction,
name = info.name,
gas = display::u256_as_str(&current_gas),
gas = current_gas,
stack = stack,
storage = storage,
depth = self.depth,

View File

@ -11,15 +11,17 @@ futures = "0.1"
log = "0.3"
mime = "0.3"
mime_guess = "2.0.0-alpha.2"
rand = "0.3"
rand = "0.4"
rustc-hex = "1.0"
fetch = { path = "../util/fetch" }
ethcore-bytes = { path = "../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
parity-reactor = { path = "../util/reactor" }
native-contracts = { path = "../ethcore/native_contracts" }
keccak-hash = { path = "../util/hash" }
ethabi = "5.1"
ethabi-derive = "5.0"
ethabi-contract = "5.0"
[dev-dependencies]
ethabi = "4.0"
parking_lot = "0.5"

View File

@ -23,7 +23,7 @@ use std::path::PathBuf;
use hash::keccak_buffer;
use fetch::{Fetch, Response, Error as FetchError, Client as FetchClient};
use futures::Future;
use futures::{Future, IntoFuture};
use parity_reactor::Remote;
use urlhint::{ContractClient, URLHintContract, URLHint, URLHintResult};
use ethereum_types::H256;
@ -143,7 +143,7 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
let random_path = self.random_path.clone();
let remote_fetch = self.fetch.clone();
let future = self.contract.resolve(hash.to_vec())
let future = self.contract.resolve(hash)
.map_err(|e| { warn!("Error resolving URL: {}", e); Error::NoResolution })
.and_then(|maybe_url| maybe_url.ok_or(Error::NoResolution))
.map(|content| match content {
@ -157,6 +157,7 @@ impl<F: Fetch + 'static> HashFetch for Client<F> {
content.url
},
})
.into_future()
.and_then(move |url| {
debug!(target: "fetch", "Resolved {:?} to {:?}. Fetching...", hash, url);
let future = remote_fetch.fetch(&url).then(move |result| {

View File

@ -21,23 +21,25 @@
#[macro_use]
extern crate log;
extern crate ethabi;
extern crate ethcore_bytes as bytes;
extern crate ethereum_types;
extern crate futures;
extern crate keccak_hash as hash;
extern crate mime;
extern crate mime_guess;
extern crate native_contracts;
extern crate parity_reactor;
extern crate rand;
extern crate rustc_hex;
pub extern crate fetch;
#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[cfg(test)]
extern crate parking_lot;
#[cfg(test)]
extern crate ethabi;
mod client;

View File

@ -23,13 +23,12 @@ use mime_guess;
use hash::keccak;
use futures::{future, Future};
use native_contracts::{Registry, Urlhint};
use ethereum_types::{H160, H256, Address};
use futures::future::Either;
use ethereum_types::{H256, Address};
use bytes::Bytes;
/// Boxed future that can be shared between threads.
/// TODO [ToDr] Use concrete types!
pub type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
use_contract!(registry, "Registry", "res/registrar.json");
use_contract!(urlhint, "Urlhint", "res/urlhint.json");
const COMMIT_LEN: usize = 20;
/// GithubHint entries with commit set as `0x0..01` should be treated
@ -43,7 +42,7 @@ pub trait ContractClient: Send + Sync {
/// Get registrar address
fn registrar(&self) -> Result<Address, String>;
/// Call Contract
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String>;
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item = Bytes, Error = String> + Send>;
}
/// Github-hosted dapp.
@ -106,14 +105,13 @@ pub enum URLHintResult {
/// URLHint Contract interface
pub trait URLHint: Send + Sync {
/// Resolves given id to registrar entry.
fn resolve(&self, id: Bytes) -> BoxFuture<Option<URLHintResult>, String>;
fn resolve(&self, id: H256) -> Box<Future<Item = Option<URLHintResult>, Error = String> + Send>;
}
/// `URLHintContract` API
#[derive(Clone)]
pub struct URLHintContract {
urlhint: Arc<Urlhint>,
registrar: Registry,
urlhint: urlhint::Urlhint,
registrar: registry::Registry,
client: Arc<ContractClient>,
}
@ -121,9 +119,9 @@ impl URLHintContract {
/// Creates new `URLHintContract`
pub fn new(client: Arc<ContractClient>) -> Self {
URLHintContract {
urlhint: Arc::new(Urlhint::new(Default::default())),
registrar: Registry::new(Default::default()),
client: client,
urlhint: urlhint::Urlhint::default(),
registrar: registry::Registry::default(),
client,
}
}
}
@ -137,7 +135,7 @@ fn get_urlhint_content(account_slash_repo: String, owner: Address) -> Content {
}
}
fn decode_urlhint_output(output: (String, H160, Address)) -> Option<URLHintResult> {
fn decode_urlhint_output(output: (String, [u8; 20], Address)) -> Option<URLHintResult> {
let (account_slash_repo, commit, owner) = output;
if owner == Address::default() {
@ -173,36 +171,30 @@ fn decode_urlhint_output(output: (String, H160, Address)) -> Option<URLHintResul
}
impl URLHint for URLHintContract {
fn resolve(&self, id: Bytes) -> BoxFuture<Option<URLHintResult>, String> {
use futures::future::Either;
let do_call = |_, data| {
let addr = match self.client.registrar() {
Ok(addr) => addr,
Err(e) => return Box::new(future::err(e))
as BoxFuture<Vec<u8>, _>,
};
self.client.call(addr, data)
fn resolve(&self, id: H256) -> Box<Future<Item = Option<URLHintResult>, Error = String> + Send> {
let address = match self.client.registrar() {
Ok(a) => a,
Err(e) => return Box::new(future::err(e)),
};
let urlhint = self.urlhint.clone();
let client = self.client.clone();
Box::new(self.registrar.get_address(do_call, keccak("githubhint"), "A".into())
.map(|addr| if addr == Address::default() { None } else { Some(addr) })
.and_then(move |address| {
let mut fixed_id = [0; 32];
let len = ::std::cmp::min(32, id.len());
fixed_id[..len].copy_from_slice(&id[..len]);
let get_address = self.registrar.functions().get_address();
let entries = self.urlhint.functions().entries();
let data = get_address.input(keccak("githubhint"), "A");
match address {
None => Either::A(future::ok(None)),
Some(address) => {
let do_call = move |_, data| client.call(address, data);
Either::B(urlhint.entries(do_call, H256(fixed_id)).map(decode_urlhint_output))
}
}
}))
let future = client.call(address, data)
.and_then(move |output| get_address.output(&output).map_err(|e| e.to_string()))
.and_then(move |addr| if !addr.is_zero() {
let data = entries.input(id);
let result = client.call(addr, data)
.and_then(move |output| entries.output(&output).map_err(|e| e.to_string()))
.map(decode_urlhint_output);
Either::B(result)
} else {
Either::A(future::ok(None))
});
Box::new(future)
}
}
@ -270,7 +262,7 @@ pub mod tests {
Ok(REGISTRAR.parse().unwrap())
}
fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item = Bytes, Error = String> + Send> {
self.calls.lock().push((address.to_hex(), data.to_hex()));
let res = self.responses.lock().remove(0);
Box::new(res.into_future())
@ -283,7 +275,7 @@ pub mod tests {
let registrar = FakeRegistrar::new();
let resolve_result = {
use ethabi::{encode, Token};
encode(&[Token::String(String::new()), Token::FixedBytes(vec![0; 20]), Token::Address([0; 20])])
encode(&[Token::String(String::new()), Token::FixedBytes(vec![0; 20]), Token::Address([0; 20].into())])
};
registrar.responses.lock()[1] = Ok(resolve_result);
@ -293,7 +285,7 @@ pub mod tests {
// when
let res = urlhint.resolve("test".bytes().collect()).wait().unwrap();
let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap();
let calls = calls.lock();
let call0 = calls.get(0).expect("Registrar resolve called");
let call1 = calls.get(1).expect("URLHint Resolve called");
@ -321,7 +313,7 @@ pub mod tests {
let urlhint = URLHintContract::new(Arc::new(registrar));
// when
let res = urlhint.resolve("test".bytes().collect()).wait().unwrap();
let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap();
// then
assert_eq!(res, Some(URLHintResult::Dapp(GithubApp {
@ -343,7 +335,7 @@ pub mod tests {
let urlhint = URLHintContract::new(Arc::new(registrar));
// when
let res = urlhint.resolve("test".bytes().collect()).wait().unwrap();
let res = urlhint.resolve("test".as_bytes().into()).wait().unwrap();
// then
assert_eq!(res, Some(URLHintResult::Content(Content {
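The rewritten resolver chains two plain client.call futures and short-circuits with future::Either when the registry reports a zero address. The sketch below captures only that shape; the call signature and the placeholder payloads are assumptions and do not reflect the real ABI encoding.

use futures::{future, Future};
use futures::future::Either;

fn resolve_two_step<F>(call: F) -> Box<Future<Item = Option<Vec<u8>>, Error = String> + Send>
    where F: Fn(Vec<u8>) -> Box<Future<Item = Vec<u8>, Error = String> + Send> + Send + 'static
{
    // first call: ask the registry for the hint contract's address
    let lookup = call(b"getAddress".to_vec());
    Box::new(lookup.and_then(move |addr| {
        if addr.iter().all(|byte| *byte == 0) {
            // zero address: nothing registered, resolve to None without a second call
            Either::A(future::ok(None))
        } else {
            // second call: fetch the entry from the hint contract itself
            Either::B(call(b"entries".to_vec()).map(Some))
        }
    }))
}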

View File

@ -14,7 +14,7 @@ hidapi = { git = "https://github.com/paritytech/hidapi-rs" }
libusb = { git = "https://github.com/paritytech/libusb-rs" }
trezor-sys = { git = "https://github.com/paritytech/trezor-sys" }
ethkey = { path = "../ethkey" }
ethereum-types = "0.1"
ethereum-types = "0.2"
[dev-dependencies]
rustc-hex = "1.0"

View File

@ -8,7 +8,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethcore = { path = "../ethcore" }
ethcore-bytes = { path = "../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.10" }
jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.10" }
rlp = { path = "../util/rlp" }

View File

@ -4,7 +4,7 @@ version = "0.1.0"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.1"
ethereum-types = "0.2"
rustc-hex = "1.0"
serde = "1.0"
serde_json = "1.0"

View File

@ -5,4 +5,4 @@ description = "Generalization of a state machine for consensus engines"
authors = ["Parity Technologies <admin@parity.io>"]
[dependencies]
ethereum-types = "0.1"
ethereum-types = "0.2"

View File

@ -11,16 +11,18 @@ authors = ["Parity Technologies <admin@parity.io>"]
hyper = { git = "https://github.com/paritytech/hyper", default-features = false }
common-types = { path = "../ethcore/types" }
ethabi = "5.1"
ethabi-contract = "5.0"
ethabi-derive = "5.0"
ethash = { path = "../ethash" }
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
ethkey = { path = "../ethkey" }
futures = "0.1"
heapsize = "0.4"
keccak-hash = { path = "../util/hash" }
linked-hash-map = "0.5"
log = "0.3"
native-contracts = { path = "../ethcore/native_contracts" }
parking_lot = "0.5"
rustc-hex = "1.0"
table = { path = "../util/table" }

View File

@ -20,24 +20,27 @@
//! Keeps track of transactions and mined block.
extern crate common_types as types;
extern crate ethereum_types;
extern crate ethabi;
extern crate ethcore_transaction as transaction;
extern crate ethereum_types;
extern crate futures;
extern crate heapsize;
extern crate keccak_hash as hash;
extern crate linked_hash_map;
extern crate native_contracts;
extern crate parking_lot;
extern crate table;
extern crate transient_hashmap;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate rustc_hex;
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[cfg(test)]
extern crate ethkey;
#[macro_use]
extern crate log;
#[cfg(test)]
extern crate rustc_hex;
pub mod banning_queue;
pub mod external;

View File

@ -16,13 +16,12 @@
//! A service transactions contract checker.
use futures::{future, Future};
use native_contracts::ServiceTransactionChecker as Contract;
use ethereum_types::{U256, Address};
use parking_lot::Mutex;
use ethereum_types::Address;
use transaction::SignedTransaction;
use types::ids::BlockId;
use_contract!(service_transaction, "ServiceTransaction", "res/service_transaction.json");
const SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME: &'static str = "service_transaction_checker";
/// A contract calling interface.
@ -37,34 +36,22 @@ pub trait ContractCaller {
/// Service transactions checker.
#[derive(Default)]
pub struct ServiceTransactionChecker {
contract: Mutex<Option<Contract>>,
contract: service_transaction::ServiceTransaction,
}
impl ServiceTransactionChecker {
/// Try to create instance, reading contract address from given chain client.
pub fn update_from_chain_client(&self, client: &ContractCaller) {
let mut contract = self.contract.lock();
if contract.is_none() {
*contract = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME)
.and_then(|contract_addr| {
trace!(target: "txqueue", "Configuring for service transaction checker contract from {}", contract_addr);
Some(Contract::new(contract_addr))
})
}
}
/// Checks if service transaction can be appended to the transaction queue.
pub fn check(&self, client: &ContractCaller, tx: &SignedTransaction) -> Result<bool, String> {
debug_assert_eq!(tx.gas_price, U256::zero());
assert!(tx.gas_price.is_zero());
if let Some(ref contract) = *self.contract.lock() {
contract.certified(
|addr, data| future::done(client.call_contract(BlockId::Latest, addr, data)),
tx.sender()
).wait()
} else {
Err("contract is not configured".to_owned())
}
let address = client.registry_address(SERVICE_TRANSACTION_CONTRACT_REGISTRY_NAME)
.ok_or_else(|| "contract is not configured")?;
trace!(target: "txqueue", "Checking service transaction checker contract from {}", address);
self.contract.functions()
.certified()
.call(tx.sender(), &|data| client.call_contract(BlockId::Latest, address, data))
.map_err(|e| e.to_string())
}
}

View File

@ -88,8 +88,8 @@ impl NotifyWork for WorkPoster {
let seed_hash = &self.seed_compute.lock().hash_block_number(number);
let seed_hash = H256::from_slice(&seed_hash[..]);
let body = format!(
r#"{{ "result": ["0x{}","0x{}","0x{}","0x{:x}"] }}"#,
pow_hash.hex(), seed_hash.hex(), target.hex(), number
r#"{{ "result": ["0x{:x}","0x{:x}","0x{:x}","0x{:x}"] }}"#,
pow_hash, seed_hash, target, number
);
let mut client = self.client.lock();
for u in &self.urls {

View File

@ -645,17 +645,17 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> {
if i != 0 {
out.write(b",").expect("Write error");
}
out.write_fmt(format_args!("\n\"0x{}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account.hex(), balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error");
out.write_fmt(format_args!("\n\"0x{:x}\": {{\"balance\": \"{:x}\", \"nonce\": \"{:x}\"", account, balance, client.nonce(&account, at).unwrap_or_else(U256::zero))).expect("Write error");
let code = client.code(&account, at).unwrap_or(None).unwrap_or_else(Vec::new);
if !code.is_empty() {
out.write_fmt(format_args!(", \"code_hash\": \"0x{}\"", keccak(&code).hex())).expect("Write error");
out.write_fmt(format_args!(", \"code_hash\": \"0x{:x}\"", keccak(&code))).expect("Write error");
if cmd.code {
out.write_fmt(format_args!(", \"code\": \"{}\"", code.to_hex())).expect("Write error");
}
}
let storage_root = client.storage_root(&account, at).unwrap_or(KECCAK_NULL_RLP);
if storage_root != KECCAK_NULL_RLP {
out.write_fmt(format_args!(", \"storage_root\": \"0x{}\"", storage_root.hex())).expect("Write error");
out.write_fmt(format_args!(", \"storage_root\": \"0x{:x}\"", storage_root)).expect("Write error");
if cmd.storage {
out.write_fmt(format_args!(", \"storage\": {{")).expect("Write error");
let mut last_storage: Option<H256> = None;
@ -669,7 +669,7 @@ fn execute_export_state(cmd: ExportState) -> Result<(), String> {
if last_storage.is_some() {
out.write(b",").expect("Write error");
}
out.write_fmt(format_args!("\n\t\"0x{}\": \"0x{}\"", key.hex(), client.storage_at(&account, &key, at).unwrap_or_else(Default::default).hex())).expect("Write error");
out.write_fmt(format_args!("\n\t\"0x{:x}\": \"0x{:x}\"", key, client.storage_at(&account, &key, at).unwrap_or_else(Default::default))).expect("Write error");
last_storage = Some(key);
}
}

View File

@ -1384,7 +1384,13 @@ mod tests {
acc_conf: Default::default(),
gas_pricer_conf: Default::default(),
miner_extras: Default::default(),
update_policy: UpdatePolicy { enable_downloading: true, require_consensus: true, filter: UpdateFilter::Critical, track: ReleaseTrack::Unknown, path: default_hypervisor_path() },
update_policy: UpdatePolicy {
enable_downloading: true,
require_consensus: true,
filter: UpdateFilter::Critical,
track: ReleaseTrack::Unknown,
path: default_hypervisor_path()
},
mode: Default::default(),
tracing: Default::default(),
compaction: Default::default(),

View File

@ -22,7 +22,7 @@ use dir::default_data_path;
use dir::helpers::replace_home;
use ethcore::client::{Client, BlockChainClient, BlockId};
use ethsync::LightSync;
use futures::{future, IntoFuture, Future};
use futures::{Future, future, IntoFuture};
use hash_fetch::fetch::Client as FetchClient;
use hash_fetch::urlhint::ContractClient;
use light::client::LightChainClient;
@ -70,18 +70,22 @@ pub struct FullRegistrar {
pub client: Arc<Client>,
}
impl FullRegistrar {
pub fn new(client: Arc<Client>) -> Self {
FullRegistrar {
client,
}
}
}
impl ContractClient for FullRegistrar {
fn registrar(&self) -> Result<Address, String> {
self.client.additional_params().get("registrar")
self.client.registrar_address()
.ok_or_else(|| "Registrar not defined.".into())
.and_then(|registrar| {
registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e))
})
}
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item=Bytes, Error=String> + Send> {
Box::new(self.client.call_contract(BlockId::Latest, address, data)
.into_future())
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item = Bytes, Error = String> + Send> {
Box::new(self.client.call_contract(BlockId::Latest, address, data).into_future())
}
}
@ -104,13 +108,13 @@ impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
})
}
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item=Bytes, Error=String> + Send> {
fn call(&self, address: Address, data: Bytes) -> Box<Future<Item = Bytes, Error = String> + Send> {
let header = self.client.best_block_header();
let env_info = self.client.env_info(BlockId::Hash(header.hash()))
.ok_or_else(|| format!("Cannot fetch env info for header {}", header.hash()));
let env_info = match env_info {
Ok(x) => x,
Ok(e) => e,
Err(e) => return Box::new(future::err(e)),
};

View File

@ -144,7 +144,7 @@ fn print_hash_of(maybe_file: Option<String>) -> Result<String, String> {
if let Some(file) = maybe_file {
let mut f = BufReader::new(File::open(&file).map_err(|_| "Unable to open file".to_owned())?);
let hash = keccak_buffer(&mut f).map_err(|_| "Unable to read from file".to_owned())?;
Ok(hash.hex())
Ok(format!("{:x}", hash))
} else {
Err("Streaming from standard input not yet supported. Specify a file.".to_owned())
}

View File

@ -35,6 +35,7 @@ use ethcore_logger::{Config as LogConfig, RotatingLogger};
use ethsync::{self, SyncConfig};
use fdlimit::raise_fd_limit;
use hash_fetch::fetch::{Fetch, Client as FetchClient};
use hash_fetch;
use informant::{Informant, LightNodeInformantData, FullNodeInformantData};
use journaldb::Algorithm;
use light::Cache as LightDataCache;
@ -304,11 +305,11 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
// the dapps server
let signer_service = Arc::new(signer::new_service(&cmd.ws_conf, &cmd.ui_conf, &cmd.logger_config));
let (node_health, dapps_deps) = {
let contract_client = Arc::new(::dapps::LightRegistrar {
let contract_client = ::dapps::LightRegistrar {
client: client.clone(),
sync: light_sync.clone(),
on_demand: on_demand.clone(),
});
};
struct LightSyncStatus(Arc<LightSync>);
impl fmt::Debug for LightSyncStatus {
@ -334,7 +335,7 @@ fn execute_light_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger
(node_health.clone(), dapps::Dependencies {
sync_status,
node_health,
contract_client: contract_client,
contract_client: Arc::new(contract_client),
fetch: fetch.clone(),
signer: signer_service.clone(),
ui_address: cmd.ui_conf.redirection_address(),
@ -676,13 +677,14 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
// spin up event loop
let event_loop = EventLoop::spawn();
let contract_client = Arc::new(::dapps::FullRegistrar::new(client.clone()));
// the updater service
let updater = Updater::new(
Arc::downgrade(&(service.client() as Arc<BlockChainClient>)),
Arc::downgrade(&sync_provider),
update_policy,
fetch.clone(),
event_loop.remote(),
hash_fetch::Client::with_fetch(contract_client.clone(), fetch.clone(), event_loop.remote())
);
service.add_notify(updater.clone());
@ -698,7 +700,6 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
// the dapps server
let (node_health, dapps_deps) = {
let (sync, client) = (sync_provider.clone(), client.clone());
let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() });
struct SyncStatus(Arc<ethsync::SyncProvider>, Arc<Client>, ethsync::NetworkConfiguration);
impl fmt::Debug for SyncStatus {
@ -725,7 +726,7 @@ pub fn execute_impl(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>)
(node_health.clone(), dapps::Dependencies {
sync_status,
node_health,
contract_client: contract_client,
contract_client,
fetch: fetch.clone(),
signer: signer_service.clone(),
ui_address: cmd.ui_conf.redirection_address(),

View File

@ -16,7 +16,7 @@ log = "0.3"
multihash ="0.7"
order-stat = "0.1"
parking_lot = "0.5"
rand = "0.3"
rand = "0.4"
rust-crypto = "0.2"
rustc-hex = "1.0"
semver = "0.6"
@ -46,7 +46,7 @@ ethcore-light = { path = "../ethcore/light" }
ethcore-logger = { path = "../logger" }
ethcore-miner = { path = "../miner" }
ethcore-transaction = { path = "../ethcore/transaction" }
ethereum-types = "0.1"
ethereum-types = "0.2"
ethcrypto = { path = "../ethcrypto" }
ethjson = { path = "../json" }

View File

@ -455,7 +455,7 @@ fn execute_tx(gas_known: bool, params: ExecuteParams) -> BoxFuture<ExecutionResu
// exception?
if executed.exception.is_some() {
let old_gas = params.tx.gas;
params.tx.gas = params.tx.gas * 2.into();
params.tx.gas = params.tx.gas * 2u32;
if params.tx.gas > params.hdr.gas_limit() {
params.tx.gas = old_gas;
} else {

View File

@ -874,12 +874,12 @@ fn rpc_eth_sign_transaction() {
r#""input":"0x","# +
r#""nonce":"0x1","# +
&format!("\"publicKey\":\"0x{:?}\",", t.recover_public().unwrap()) +
&format!("\"r\":\"0x{}\",", U256::from(signature.r()).to_hex()) +
&format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) +
&format!("\"raw\":\"0x{}\",", rlp.to_hex()) +
&format!("\"s\":\"0x{}\",", U256::from(signature.s()).to_hex()) +
&format!("\"standardV\":\"0x{}\",", U256::from(t.standard_v()).to_hex()) +
&format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) +
&format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) +
r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# +
&format!("\"v\":\"0x{}\",", U256::from(t.original_v()).to_hex()) +
&format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) +
r#""value":"0x9184e72a""# +
r#"}},"id":1}"#;

View File

@ -134,14 +134,14 @@ fn rpc_parity_accounts_info() {
deps.accounts.set_account_meta(address.clone(), "{foo: 69}".into()).unwrap();
let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#;
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{}\":{{\"name\":\"Test\"}}}},\"id\":1}}", address.hex());
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"Test\"}}}},\"id\":1}}", address);
assert_eq!(io.handle_request_sync(request), Some(response));
// Change the whitelist
let address = Address::from(1);
deps.accounts.set_new_dapps_addresses(Some(vec![address.clone()])).unwrap();
let request = r#"{"jsonrpc": "2.0", "method": "parity_accountsInfo", "params": [], "id": 1}"#;
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{}\":{{\"name\":\"XX\"}}}},\"id\":1}}", address.hex());
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"name\":\"XX\"}}}},\"id\":1}}", address);
assert_eq!(io.handle_request_sync(request), Some(response));
}
@ -154,7 +154,7 @@ fn rpc_parity_default_account() {
// Check empty
let address = Address::default();
let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#;
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{}\",\"id\":1}}", address.hex());
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", address);
assert_eq!(io.handle_request_sync(request), Some(response));
// With account
@ -164,7 +164,7 @@ fn rpc_parity_default_account() {
let address = accounts[0];
let request = r#"{"jsonrpc": "2.0", "method": "parity_defaultAccount", "params": [], "id": 1}"#;
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{}\",\"id\":1}}", address.hex());
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":\"0x{:x}\",\"id\":1}}", address);
assert_eq!(io.handle_request_sync(request), Some(response));
}

View File

@ -75,7 +75,7 @@ fn should_be_able_to_get_account_info() {
let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#;
let res = tester.io.handle_request_sync(request);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address.hex(), uuid);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid);
assert_eq!(res, Some(response));
}
@ -87,7 +87,7 @@ fn should_be_able_to_set_name() {
assert_eq!(accounts.len(), 1);
let address = accounts[0];
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x{}", "Test"], "id": 1}}"#, address.hex());
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountName", "params": ["0x{:x}", "Test"], "id": 1}}"#, address);
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let res = tester.io.handle_request_sync(&request);
assert_eq!(res, Some(response.into()));
@ -96,7 +96,7 @@ fn should_be_able_to_set_name() {
let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#;
let res = tester.io.handle_request_sync(request);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address.hex(), uuid);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{}}\",\"name\":\"Test\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid);
assert_eq!(res, Some(response));
}
@ -108,7 +108,7 @@ fn should_be_able_to_set_meta() {
assert_eq!(accounts.len(), 1);
let address = accounts[0];
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountMeta", "params": ["0x{}", "{{foo: 69}}"], "id": 1}}"#, address.hex());
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_setAccountMeta", "params": ["0x{:x}", "{{foo: 69}}"], "id": 1}}"#, address);
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let res = tester.io.handle_request_sync(&request);
assert_eq!(res, Some(response.into()));
@ -117,7 +117,7 @@ fn should_be_able_to_set_meta() {
let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params": [], "id": 1}"#;
let res = tester.io.handle_request_sync(request);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", address.hex(), uuid);
let response = format!("{{\"jsonrpc\":\"2.0\",\"result\":{{\"0x{:x}\":{{\"meta\":\"{{foo: 69}}\",\"name\":\"\",\"uuid\":\"{}\"}}}},\"id\":1}}", address, uuid);
assert_eq!(res, Some(response));
}
@ -235,7 +235,7 @@ fn should_be_able_to_kill_account() {
let res = tester.io.handle_request_sync(&request);
assert_eq!(res, Some(response.into()));
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0x{}", "password"], "id": 1}}"#, address.hex());
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_killAccount", "params": ["0x{:x}", "password"], "id": 1}}"#, address);
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
let res = tester.io.handle_request_sync(&request);
assert_eq!(res, Some(response.into()));
@ -334,7 +334,7 @@ fn rpc_parity_change_vault() {
let (address, _) = tester.accounts.new_account_and_public("root_password").unwrap();
assert!(tester.accounts.create_vault("vault1", "password1").is_ok());
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_changeVault", "params":["0x{}", "vault1"], "id": 1}}"#, address.hex());
let request = format!(r#"{{"jsonrpc": "2.0", "method": "parity_changeVault", "params":["0x{:x}", "vault1"], "id": 1}}"#, address);
let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
assert_eq!(tester.io.handle_request_sync(&request), Some(response.to_owned()));
@ -351,7 +351,7 @@ fn rpc_parity_vault_adds_vault_field_to_acount_meta() {
assert!(tester.accounts.change_vault(address1, "vault1").is_ok());
let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#;
let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{}":{{"meta":"{{\"vault\":\"vault1\"}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1.hex(), uuid1);
let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{\"vault\":\"vault1\"}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1, uuid1);
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
@ -359,7 +359,7 @@ fn rpc_parity_vault_adds_vault_field_to_acount_meta() {
assert!(tester.accounts.change_vault(address1, "").is_ok());
let request = r#"{"jsonrpc": "2.0", "method": "parity_allAccountsInfo", "params":[], "id": 1}"#;
let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{}":{{"meta":"{{}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1.hex(), uuid1);
let response = format!(r#"{{"jsonrpc":"2.0","result":{{"0x{:x}":{{"meta":"{{}}","name":"","uuid":"{}"}}}},"id":1}}"#, address1, uuid1);
assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned()));
}


@ -466,12 +466,12 @@ fn should_confirm_sign_transaction_with_rlp() {
r#""input":"0x","# +
r#""nonce":"0x0","# +
&format!("\"publicKey\":\"0x{:?}\",", t.public_key().unwrap()) +
&format!("\"r\":\"0x{}\",", U256::from(signature.r()).to_hex()) +
&format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) +
&format!("\"raw\":\"0x{}\",", rlp.to_hex()) +
&format!("\"s\":\"0x{}\",", U256::from(signature.s()).to_hex()) +
&format!("\"standardV\":\"0x{}\",", U256::from(t.standard_v()).to_hex()) +
&format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) +
&format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) +
r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# +
&format!("\"v\":\"0x{}\",", U256::from(t.original_v()).to_hex()) +
&format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) +
r#""value":"0x1""# +
r#"}},"id":1}"#;


@ -317,12 +317,12 @@ fn should_add_sign_transaction_to_the_queue() {
r#""input":"0x","# +
r#""nonce":"0x1","# +
&format!("\"publicKey\":\"0x{:?}\",", t.public_key().unwrap()) +
&format!("\"r\":\"0x{}\",", U256::from(signature.r()).to_hex()) +
&format!("\"r\":\"0x{:x}\",", U256::from(signature.r())) +
&format!("\"raw\":\"0x{}\",", rlp.to_hex()) +
&format!("\"s\":\"0x{}\",", U256::from(signature.s()).to_hex()) +
&format!("\"standardV\":\"0x{}\",", U256::from(t.standard_v()).to_hex()) +
&format!("\"s\":\"0x{:x}\",", U256::from(signature.s())) +
&format!("\"standardV\":\"0x{:x}\",", U256::from(t.standard_v())) +
r#""to":"0xd46e8dd67c5d32be8058bb8eb970870f07244567","transactionIndex":null,"# +
&format!("\"v\":\"0x{}\",", U256::from(t.original_v()).to_hex()) +
&format!("\"v\":\"0x{:x}\",", U256::from(t.original_v())) +
r#""value":"0x9184e72a""# +
r#"}},"id":1}"#;


@ -103,7 +103,7 @@ impl From<ethsync::EthProtocolInfo> for EthProtocolInfo {
EthProtocolInfo {
version: info.version,
difficulty: info.difficulty.map(Into::into),
head: info.head.hex(),
head: format!("{:x}", info.head),
}
}
}
@ -124,7 +124,7 @@ impl From<ethsync::PipProtocolInfo> for PipProtocolInfo {
PipProtocolInfo {
version: info.version,
difficulty: info.difficulty.into(),
head: info.head.hex(),
head: format!("{:x}", info.head),
}
}
}


@ -102,13 +102,13 @@ impl_uint!(U64, u64, 1);
impl serde::Serialize for U128 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
serializer.serialize_str(&format!("0x{}", self.0.to_hex()))
serializer.serialize_str(&format!("0x{:x}", self.0))
}
}
impl serde::Serialize for U256 {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
serializer.serialize_str(&format!("0x{}", self.0.to_hex()))
serializer.serialize_str(&format!("0x{:x}", self.0))
}
}

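The new serializers lean on the LowerHex implementations in ethereum-types 0.2 instead of the hex()/to_hex() helpers used before. A minimal sketch of the resulting formatting, assuming that crate's behavior (quantities print as minimal hex, fixed-width types print every nibble); not part of this diff:

extern crate ethereum_types;

use ethereum_types::{Address, U256};

fn main() {
	// Quantities (U256/U128) format as minimal lowercase hex, so zero is "0x0".
	assert_eq!(format!("0x{:x}", U256::from(4660u64)), "0x1234");
	assert_eq!(format!("0x{:x}", U256::zero()), "0x0");
	// Fixed-width types such as Address keep all 40 nibbles, matching the RPC tests above.
	assert_eq!(format!("0x{:x}", Address::default()), format!("0x{}", "0".repeat(40)));
}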

@ -24,7 +24,7 @@ tokio-proto = "0.1"
url = "1.0"
ethcore = { path = "../ethcore" }
ethcore-bytes = { path = "../util/bytes" }
ethereum-types = "0.1"
ethereum-types = "0.2"
ethsync = { path = "../sync" }
kvdb = { path = "../util/kvdb" }
kvdb-rocksdb = { path = "../util/kvdb-rocksdb" }
@ -32,8 +32,10 @@ keccak-hash = { path = "../util/hash" }
ethcore-logger = { path = "../logger" }
ethcrypto = { path = "../ethcrypto" }
ethkey = { path = "../ethkey" }
native-contracts = { path = "../ethcore/native_contracts" }
lazy_static = "1.0"
ethabi = "5.1"
ethabi-derive = "5.0"
ethabi-contract = "5.0"
[dev-dependencies]
tempdir = "0.3"


@ -16,16 +16,16 @@
use std::sync::Arc;
use std::collections::{HashMap, HashSet};
use futures::{future, Future};
use parking_lot::{Mutex, RwLock};
use ethkey::public_to_address;
use ethcore::client::{BlockChainClient, BlockId, ChainNotify};
use native_contracts::SecretStoreAclStorage;
use ethereum_types::{H256, Address};
use bytes::Bytes;
use trusted_client::TrustedClient;
use types::all::{Error, ServerKeyId, Public};
use_contract!(acl_storage, "AclStorage", "res/acl_storage.json");
const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker";
/// ACL storage of Secret Store
@ -47,7 +47,7 @@ struct CachedContract {
/// Contract address.
contract_addr: Option<Address>,
/// Contract at given address.
contract: Option<SecretStoreAclStorage>,
contract: acl_storage::AclStorage,
}
/// Dummy ACL storage implementation (check always passed).
@ -86,23 +86,20 @@ impl ChainNotify for OnChainAclStorage {
impl CachedContract {
pub fn new(client: TrustedClient) -> Self {
CachedContract {
client: client,
client,
contract_addr: None,
contract: None,
contract: acl_storage::AclStorage::default(),
}
}
pub fn update(&mut self) {
if let Some(client) = self.client.get() {
let new_contract_addr = client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned());
if self.contract_addr.as_ref() != new_contract_addr.as_ref() {
self.contract = new_contract_addr.map(|contract_addr| {
trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr);
SecretStoreAclStorage::new(contract_addr)
});
self.contract_addr = new_contract_addr;
match client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) {
Some(new_contract_addr) if Some(new_contract_addr).as_ref() != self.contract_addr.as_ref() => {
trace!(target: "secretstore", "Configuring for ACL checker contract from {}", new_contract_addr);
self.contract_addr = Some(new_contract_addr);
},
Some(_) | None => ()
}
}
}
@ -110,13 +107,14 @@ impl CachedContract {
pub fn check(&mut self, public: &Public, document: &ServerKeyId) -> Result<bool, Error> {
if let Some(client) = self.client.get() {
// call contract to check access
match self.contract.as_ref() {
Some(contract) => {
match self.contract_addr {
Some(contract_address) => {
let address = public_to_address(&public);
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
contract.check_permissions(do_call, address, document.clone())
.map_err(|err| Error::Internal(err))
.wait()
let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
self.contract.functions()
.check_permissions()
.call(address, document.clone(), &do_call)
.map_err(|e| Error::Internal(e.to_string()))
},
None => Err(Error::Internal("ACL checker contract is not configured".to_owned())),
}

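For reference, a hedged sketch of how the generated acl_storage module above is driven end to end; the free function and the closure are illustrative only, and the res/acl_storage.json ABI is assumed to exist at build time:

#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
extern crate ethereum_types;

use ethereum_types::{Address, H256};

// Expands into an `acl_storage` module with typed wrappers for every ABI function.
use_contract!(acl_storage, "AclStorage", "res/acl_storage.json");

// `do_call` stands in for `client.call_contract(BlockId::Latest, contract_address, data)`:
// it takes encoded call data and returns the raw return data.
fn is_allowed<F>(requester: Address, key: H256, do_call: F) -> Result<bool, String>
	where F: Fn(Vec<u8>) -> Result<Vec<u8>, String>
{
	let contract = acl_storage::AclStorage::default();
	contract.functions()
		.check_permissions()
		.call(requester, key, &do_call)
		.map_err(|e| e.to_string())
}

The generated module only handles ABI encoding and decoding; address discovery via the registry and the actual eth_call plumbing stay with the caller, which is why the cached contract above only needs to track the address.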

@ -17,12 +17,10 @@
use std::sync::Arc;
use std::net::SocketAddr;
use std::collections::{BTreeMap, HashSet};
use futures::{future, Future, IntoFuture};
use parking_lot::Mutex;
use ethcore::filter::Filter;
use ethcore::client::{Client, BlockChainClient, BlockId, ChainNotify};
use ethkey::public_to_address;
use native_contracts::KeyServerSet as KeyServerSetContract;
use hash::keccak;
use ethereum_types::{H256, Address};
use bytes::Bytes;
@ -30,7 +28,7 @@ use types::all::{Error, Public, NodeAddress, NodeId};
use trusted_client::TrustedClient;
use {NodeKeyPair};
type BoxFuture<A, B> = Box<Future<Item = A, Error = B> + Send>;
use_contract!(key_server, "KeyServerSet", "res/key_server_set.json");
/// Name of KeyServerSet contract in registry.
const KEY_SERVER_SET_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_server_set";
@ -118,7 +116,9 @@ struct CachedContract {
/// Blockchain client.
client: TrustedClient,
/// Contract address.
contract: Option<KeyServerSetContract>,
contract_address: Option<Address>,
/// Contract interface.
contract: key_server::KeyServerSet,
/// Is auto-migrate enabled?
auto_migrate_enabled: bool,
/// Current contract state.
@ -168,6 +168,77 @@ impl ChainNotify for OnChainKeyServerSet {
}
}
trait KeyServerSubset<F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String>;
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String>;
fn read_address(&self, address: Address, f: &F) -> Result<String, String>;
}
#[derive(Default)]
struct CurrentKeyServerSubset {
read_list: key_server::functions::GetCurrentKeyServers,
read_public: key_server::functions::GetCurrentKeyServerPublic,
read_address: key_server::functions::GetCurrentKeyServerAddress,
}
impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for CurrentKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
self.read_list.call(f).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
self.read_public.call(address, f).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
self.read_address.call(address, f).map_err(|e| e.to_string())
}
}
#[derive(Default)]
struct MigrationKeyServerSubset {
read_list: key_server::functions::GetMigrationKeyServers,
read_public: key_server::functions::GetMigrationKeyServerPublic,
read_address: key_server::functions::GetMigrationKeyServerAddress,
}
impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for MigrationKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
self.read_list.call(f).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
self.read_public.call(address, f).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
self.read_address.call(address, f).map_err(|e| e.to_string())
}
}
#[derive(Default)]
struct NewKeyServerSubset {
read_list: key_server::functions::GetNewKeyServers,
read_public: key_server::functions::GetNewKeyServerPublic,
read_address: key_server::functions::GetNewKeyServerAddress,
}
impl <F: Fn(Vec<u8>) -> Result<Vec<u8>, String>> KeyServerSubset<F> for NewKeyServerSubset {
fn read_list(&self, f: &F) -> Result<Vec<Address>, String> {
self.read_list.call(f).map_err(|e| e.to_string())
}
fn read_public(&self, address: Address, f: &F) -> Result<Bytes, String> {
self.read_public.call(address, f).map_err(|e| e.to_string())
}
fn read_address(&self, address: Address, f: &F) -> Result<String, String> {
self.read_address.call(address, f).map_err(|e| e.to_string())
}
}
impl CachedContract {
pub fn new(client: TrustedClient, self_key_pair: Arc<NodeKeyPair>, auto_migrate_enabled: bool, key_servers: BTreeMap<Public, NodeAddress>) -> Result<Self, Error> {
let server_set = key_servers.into_iter()
@ -179,7 +250,8 @@ impl CachedContract {
.collect::<Result<BTreeMap<_, _>, Error>>()?;
Ok(CachedContract {
client: client,
contract: None,
contract_address: None,
contract: key_server::KeyServerSet::default(),
auto_migrate_enabled: auto_migrate_enabled,
future_new_set: None,
confirm_migration_tx: None,
@ -209,53 +281,39 @@ impl CachedContract {
fn start_migration(&mut self, migration_id: H256) {
// trust is not needed here, because it is the reaction to the read of the trusted client
if let (Some(client), Some(contract)) = (self.client.get_untrusted(), self.contract.as_ref()) {
if let (Some(client), Some(contract_address)) = (self.client.get_untrusted(), self.contract_address) {
// check if we need to send start migration transaction
if !update_last_transaction_block(&*client, &migration_id, &mut self.start_migration_tx) {
return;
}
// prepare transaction data
let transaction_data = match contract.encode_start_migration_input(migration_id) {
Ok(transaction_data) => transaction_data,
Err(error) => {
warn!(target: "secretstore_net", "{}: failed to prepare auto-migration start transaction: {}",
self.self_key_pair.public(), error);
return;
},
};
let transaction_data = self.contract.functions().start_migration().input(migration_id);
// send transaction
if let Err(error) = client.transact_contract(contract.address.clone(), transaction_data) {
if let Err(error) = client.transact_contract(contract_address, transaction_data) {
warn!(target: "secretstore_net", "{}: failed to submit auto-migration start transaction: {}",
self.self_key_pair.public(), error);
} else {
trace!(target: "secretstore_net", "{}: sent auto-migration start transaction",
self.self_key_pair.public(), );
self.self_key_pair.public());
}
}
}
fn confirm_migration(&mut self, migration_id: H256) {
// trust is not needed here, because we have already completed the action
if let (Some(client), Some(contract)) = (self.client.get(), self.contract.as_ref()) {
if let (Some(client), Some(contract_address)) = (self.client.get(), self.contract_address) {
// check if we need to send start migration transaction
if !update_last_transaction_block(&*client, &migration_id, &mut self.confirm_migration_tx) {
return;
}
// prepare transaction data
let transaction_data = match contract.encode_confirm_migration_input(migration_id) {
Ok(transaction_data) => transaction_data,
Err(error) => {
warn!(target: "secretstore_net", "{}: failed to prepare auto-migration confirmation transaction: {}",
self.self_key_pair.public(), error);
return;
},
};
let transaction_data = self.contract.functions().confirm_migration().input(migration_id);
// send transaction
if let Err(error) = client.transact_contract(contract.address.clone(), transaction_data) {
if let Err(error) = client.transact_contract(contract_address, transaction_data) {
warn!(target: "secretstore_net", "{}: failed to submit auto-migration confirmation transaction: {}",
self.self_key_pair.public(), error);
} else {
@ -270,18 +328,18 @@ impl CachedContract {
let new_contract_addr = client.registry_address(KEY_SERVER_SET_CONTRACT_REGISTRY_NAME.to_owned());
// new contract installed => read nodes set from the contract
if self.contract.as_ref().map(|c| &c.address) != new_contract_addr.as_ref() {
if self.contract_address.as_ref() != new_contract_addr.as_ref() {
self.read_from_registry(&*client, new_contract_addr);
return;
}
// check for contract events
let is_set_changed = self.contract.is_some() && enacted.iter()
let is_set_changed = self.contract_address.is_some() && enacted.iter()
.chain(retracted.iter())
.any(|block_hash| !client.logs(Filter {
from_block: BlockId::Hash(block_hash.clone()),
to_block: BlockId::Hash(block_hash.clone()),
address: self.contract.as_ref().map(|c| vec![c.address.clone()]),
address: self.contract_address.map(|address| vec![address]),
topics: vec![
Some(vec![*ADDED_EVENT_NAME_HASH, *REMOVED_EVENT_NAME_HASH,
*MIGRATION_STARTED_EVENT_NAME_HASH, *MIGRATION_COMPLETED_EVENT_NAME_HASH]),
@ -299,14 +357,13 @@ impl CachedContract {
}
fn read_from_registry(&mut self, client: &Client, new_contract_address: Option<Address>) {
self.contract = new_contract_address.map(|contract_addr| {
if let Some(ref contract_addr) = new_contract_address {
trace!(target: "secretstore", "Configuring for key server set contract from {}", contract_addr);
}
self.contract_address = new_contract_address;
KeyServerSetContract::new(contract_addr)
});
let contract = match self.contract.as_ref() {
Some(contract) => contract,
let contract_address = match self.contract_address {
Some(contract_address) => contract_address,
None => {
// no contract installed => empty snapshot
// WARNING: after restart current_set will be reset to the set from configuration file
@ -318,28 +375,25 @@ impl CachedContract {
},
};
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
let current_set = Self::read_key_server_set(&contract, &do_call, &KeyServerSetContract::get_current_key_servers,
&KeyServerSetContract::get_current_key_server_public, &KeyServerSetContract::get_current_key_server_address);
let current_set = Self::read_key_server_set(CurrentKeyServerSubset::default(), &do_call);
// read migration-related data if auto migration is enabled
let (new_set, migration) = match self.auto_migrate_enabled {
true => {
let new_set = Self::read_key_server_set(&contract, &do_call, &KeyServerSetContract::get_new_key_servers,
&KeyServerSetContract::get_new_key_server_public, &KeyServerSetContract::get_new_key_server_address);
let migration_set = Self::read_key_server_set(&contract, &do_call, &KeyServerSetContract::get_migration_key_servers,
&KeyServerSetContract::get_migration_key_server_public, &KeyServerSetContract::get_migration_key_server_address);
let new_set = Self::read_key_server_set(NewKeyServerSubset::default(), &do_call);
let migration_set = Self::read_key_server_set(MigrationKeyServerSubset::default(), &do_call);
let migration_id = match migration_set.is_empty() {
false => contract.get_migration_id(&do_call).wait()
false => self.contract.functions().get_migration_id().call(&do_call)
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration id from contract", err); err })
.ok(),
true => None,
};
let migration_master = match migration_set.is_empty() {
false => contract.get_migration_master(&do_call).wait()
false => self.contract.functions().get_migration_master().call(&do_call)
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration master from contract", err); err })
.ok()
.and_then(|address| current_set.keys().chain(migration_set.keys())
@ -350,7 +404,7 @@ impl CachedContract {
let is_migration_confirmed = match migration_set.is_empty() {
false if current_set.contains_key(self.self_key_pair.public()) || migration_set.contains_key(self.self_key_pair.public()) =>
contract.is_migration_confirmed(&do_call, self.self_key_pair.address()).wait()
self.contract.functions().is_migration_confirmed().call(self.self_key_pair.address(), &do_call)
.map_err(|err| { trace!(target: "secretstore", "Error {} reading migration confirmation from contract", err); err })
.ok(),
_ => None,
@ -387,23 +441,19 @@ impl CachedContract {
self.snapshot = new_snapshot;
}
fn read_key_server_set<F, U, FL, FP, FA>(contract: &KeyServerSetContract, do_call: F, read_list: FL, read_public: FP, read_address: FA) -> BTreeMap<Public, SocketAddr>
fn read_key_server_set<T, F>(subset: T, do_call: F) -> BTreeMap<Public, SocketAddr>
where
F: FnOnce(Address, Vec<u8>) -> U + Copy,
U: IntoFuture<Item=Vec<u8>, Error=String>,
U::Future: Send + 'static,
FL: Fn(&KeyServerSetContract, F) -> BoxFuture<Vec<Address>, String>,
FP: Fn(&KeyServerSetContract, F, Address) -> BoxFuture<Vec<u8>, String>,
FA: Fn(&KeyServerSetContract, F, Address) -> BoxFuture<String, String> {
T: KeyServerSubset<F>,
F: Fn(Vec<u8>) -> Result<Vec<u8>, String> {
let mut key_servers = BTreeMap::new();
let mut key_servers_addresses = HashSet::new();
let key_servers_list = read_list(contract, do_call).wait()
let key_servers_list = subset.read_list(&do_call)
.map_err(|err| { warn!(target: "secretstore_net", "error {} reading list of key servers from contract", err); err })
.unwrap_or_default();
for key_server in key_servers_list {
let key_server_public = read_public(contract, do_call, key_server).wait()
let key_server_public = subset.read_public(key_server, &do_call)
.and_then(|p| if p.len() == 64 { Ok(Public::from_slice(&p)) } else { Err(format!("Invalid public length {}", p.len())) });
let key_server_address: Result<SocketAddr, _> = read_address(contract, do_call, key_server).wait()
let key_server_address: Result<SocketAddr, _> = subset.read_address(key_server, &do_call)
.and_then(|a| a.parse().map_err(|e| format!("Invalid ip address: {}", e)));
// only add successfully parsed nodes

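The same generated interface covers both directions above: call(..) encodes the arguments, executes them through the supplied closure and decodes the return value, while input(..) only produces calldata for client.transact_contract. A hedged sketch with names mirroring the diff (the res/key_server_set.json ABI is assumed to be available; the function is illustrative):

#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
extern crate ethereum_types;

use ethereum_types::H256;

use_contract!(key_server, "KeyServerSet", "res/key_server_set.json");

fn read_and_write<F>(migration_id: H256, do_call: F) -> Result<(), String>
	where F: Fn(Vec<u8>) -> Result<Vec<u8>, String>
{
	let contract = key_server::KeyServerSet::default();

	// Read path: encode the call, execute it via `do_call`, decode the result.
	let _migration_id = contract.functions()
		.get_migration_id()
		.call(&do_call)
		.map_err(|e| e.to_string())?;

	// Write path: only encode the calldata; the caller submits it with
	// `client.transact_contract(contract_address, transaction_data)`.
	let _transaction_data = contract.functions()
		.start_migration()
		.input(migration_id);

	Ok(())
}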

@ -15,38 +15,42 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
extern crate byteorder;
#[macro_use]
extern crate log;
#[macro_use]
extern crate futures;
extern crate ethabi;
extern crate ethcore;
extern crate ethcore_bytes as bytes;
extern crate ethcore_logger as logger;
extern crate ethcrypto;
extern crate ethereum_types;
extern crate ethkey;
extern crate ethsync;
extern crate futures_cpupool;
extern crate hyper;
#[macro_use]
extern crate lazy_static;
extern crate keccak_hash as hash;
extern crate kvdb;
extern crate kvdb_rocksdb;
extern crate parking_lot;
extern crate rustc_hex;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
extern crate tiny_keccak;
extern crate tokio_io;
extern crate tokio_core;
extern crate tokio_service;
extern crate tokio_io;
extern crate tokio_proto;
extern crate tokio_service;
extern crate url;
extern crate ethcore;
extern crate ethcore_bytes as bytes;
extern crate ethcore_logger as logger;
extern crate ethereum_types;
extern crate ethcrypto;
extern crate ethkey;
extern crate ethsync;
extern crate native_contracts;
extern crate keccak_hash as hash;
extern crate kvdb;
extern crate kvdb_rocksdb;
#[macro_use]
extern crate ethabi_derive;
#[macro_use]
extern crate ethabi_contract;
#[macro_use]
extern crate futures;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
mod key_server_cluster;
mod types;


@ -15,18 +15,18 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::sync::Arc;
use futures::{future, Future};
use parking_lot::RwLock;
use ethcore::filter::Filter;
use ethcore::client::{Client, BlockChainClient, BlockId};
use ethkey::{Public, Signature, public_to_address};
use native_contracts::SecretStoreService;
use hash::keccak;
use ethereum_types::{H256, U256};
use ethereum_types::{H256, U256, Address};
use listener::service_contract_listener::ServiceTask;
use trusted_client::TrustedClient;
use {ServerKeyId, NodeKeyPair, ContractAddress};
use_contract!(service, "Service", "res/service.json");
/// Name of the SecretStore contract in the registry.
const SERVICE_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_service";
@ -61,13 +61,15 @@ pub struct OnChainServiceContract {
/// Contract address.
address: ContractAddress,
/// Contract.
data: RwLock<SecretStoreServiceData>,
data: RwLock<ServiceData>,
}
/// On-chain service contract data.
struct SecretStoreServiceData {
struct ServiceData {
/// Contract.
pub contract: Arc<SecretStoreService>,
pub contract: service::Service,
/// Contract address.
pub contract_address: Address,
/// Last block we have read logs from.
pub last_log_block: Option<H256>,
}
@ -77,7 +79,9 @@ struct PendingRequestsIterator {
/// Blockchain client.
client: Arc<Client>,
/// Contract.
contract: Arc<SecretStoreService>,
contract: service::Service,
/// Contract address.
contract_address: Address,
/// This node key pair.
self_key_pair: Arc<NodeKeyPair>,
/// Block, this iterator is created for.
@ -110,8 +114,9 @@ impl OnChainServiceContract {
client: client,
self_key_pair: self_key_pair,
address: address,
data: RwLock::new(SecretStoreServiceData {
contract: Arc::new(SecretStoreService::new(contract_addr)),
data: RwLock::new(ServiceData {
contract: service::Service::default(),
contract_address: contract_addr,
last_log_block: None,
}),
}
@ -126,15 +131,15 @@ impl ServiceContract for OnChainServiceContract {
if let Some(client) = self.client.get() {
// update contract address from registry
let service_contract_addr = client.registry_address(SERVICE_CONTRACT_REGISTRY_NAME.to_owned()).unwrap_or_default();
if self.data.read().contract.address != service_contract_addr {
if self.data.read().contract_address != service_contract_addr {
trace!(target: "secretstore", "{}: installing service contract from address {}",
self.self_key_pair.public(), service_contract_addr);
self.data.write().contract = Arc::new(SecretStoreService::new(service_contract_addr));
self.data.write().contract_address = service_contract_addr;
}
}
}
self.data.read().contract.address != Default::default()
self.data.read().contract_address != Default::default()
&& self.client.get().is_some()
}
@ -151,7 +156,7 @@ impl ServiceContract for OnChainServiceContract {
// prepare range of blocks to read logs from
let (address, first_block, last_block) = {
let mut data = self.data.write();
let address = data.contract.address.clone();
let address = data.contract_address;
let confirmed_block = match get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED) {
Some(confirmed_block) => confirmed_block,
None => return Box::new(::std::iter::empty()), // no block with enough confirmations
@ -197,12 +202,13 @@ impl ServiceContract for OnChainServiceContract {
// we only need requests that are here for more than REQUEST_CONFIRMATIONS_REQUIRED blocks
// => we're reading from Latest - (REQUEST_CONFIRMATIONS_REQUIRED + 1) block
let data = self.data.read();
match data.contract.address == Default::default() {
match data.contract_address == Default::default() {
true => Box::new(::std::iter::empty()),
false => get_confirmed_block_hash(&*client, REQUEST_CONFIRMATIONS_REQUIRED + 1)
.and_then(|b| {
let do_call = |a, d| future::done(client.call_contract(BlockId::Hash(b.clone()), a, d));
data.contract.server_key_generation_requests_count(&do_call).wait()
let contract_address = data.contract_address;
let do_call = |data| client.call_contract(BlockId::Hash(b), contract_address, data);
data.contract.functions().server_key_generation_requests_count().call(&do_call)
.map_err(|error| {
warn!(target: "secretstore", "{}: call to server_key_generation_requests_count failed: {}",
self.self_key_pair.public(), error);
@ -213,7 +219,8 @@ impl ServiceContract for OnChainServiceContract {
})
.map(|(b, l)| Box::new(PendingRequestsIterator {
client: client,
contract: data.contract.clone(),
contract: service::Service::default(),
contract_address: data.contract_address,
self_key_pair: self.self_key_pair.clone(),
block: b,
index: 0.into(),
@ -226,7 +233,7 @@ impl ServiceContract for OnChainServiceContract {
fn publish_server_key(&self, server_key_id: &ServerKeyId, server_key: &Public) -> Result<(), String> {
// only publish if contract address is set && client is online
let data = self.data.read();
if data.contract.address == Default::default() {
if data.contract_address == Default::default() {
// it is not an error, because key could be generated even without contract
return Ok(());
}
@ -239,9 +246,13 @@ impl ServiceContract for OnChainServiceContract {
// only publish key if contract waits for publication
// failing is ok here - it could be that enough confirmations have been received
// or key has been requested using HTTP API
let do_call = |a, d| future::done(client.call_contract(BlockId::Latest, a, d));
let contract_address = data.contract_address;
let do_call = |data| client.call_contract(BlockId::Latest, contract_address, data);
let self_address = public_to_address(self.self_key_pair.public());
if data.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait().unwrap_or(false) {
if data.contract.functions()
.get_server_key_confirmation_status()
.call(*server_key_id, self_address, &do_call)
.unwrap_or(false) {
return Ok(());
}
@ -249,16 +260,18 @@ impl ServiceContract for OnChainServiceContract {
let server_key_hash = keccak(server_key);
let signed_server_key = self.self_key_pair.sign(&server_key_hash).map_err(|e| format!("{}", e))?;
let signed_server_key: Signature = signed_server_key.into_electrum().into();
let transaction_data = data.contract.encode_server_key_generated_input(server_key_id.clone(),
server_key.to_vec(),
signed_server_key.v(),
signed_server_key.r().into(),
signed_server_key.s().into()
)?;
let transaction_data = data.contract.functions()
.server_key_generated()
.input(*server_key_id,
server_key.to_vec(),
signed_server_key.v(),
signed_server_key.r(),
signed_server_key.s(),
);
// send transaction
client.transact_contract(
data.contract.address.clone(),
data.contract_address,
transaction_data
).map_err(|e| format!("{}", e))?;
@ -278,13 +291,14 @@ impl Iterator for PendingRequestsIterator {
self.index = self.index + 1.into();
let self_address = public_to_address(self.self_key_pair.public());
let do_call = |a, d| future::done(self.client.call_contract(BlockId::Hash(self.block.clone()), a, d));
self.contract.get_server_key_id(&do_call, index).wait()
let contract_address = self.contract_address;
let do_call = |data| self.client.call_contract(BlockId::Hash(self.block.clone()), contract_address, data);
self.contract.functions().get_server_key_id().call(index, &do_call)
.and_then(|server_key_id|
self.contract.get_server_key_threshold(&do_call, server_key_id.clone()).wait()
self.contract.functions().get_server_key_threshold().call(server_key_id, &do_call)
.map(|threshold| (server_key_id, threshold)))
.and_then(|(server_key_id, threshold)|
self.contract.get_server_key_confirmation_status(&do_call, server_key_id.clone(), self_address).wait()
self.contract.functions().get_server_key_confirmation_status().call(server_key_id, self_address, &do_call)
.map(|is_confirmed| (server_key_id, threshold, is_confirmed)))
.map(|(server_key_id, threshold, is_confirmed)|
Some((is_confirmed, ServiceTask::GenerateServerKey(server_key_id, threshold.into()))))


@ -376,7 +376,7 @@ fn is_processed_by_this_key_server(key_server_set: &KeyServerSet, self_key_pair:
let server_key_id_value: U256 = server_key_id.into();
let range_interval = U256::max_value() / total_servers_count.into();
let range_begin = (range_interval + 1.into()) * this_server_index.into();
let range_begin = (range_interval + 1.into()) * this_server_index as u32;
let range_end = range_begin.saturating_add(range_interval);
server_key_id_value >= range_begin && server_key_id_value <= range_end
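The conversion above feeds the key-id partitioning: each key server owns one contiguous slice of the 256-bit id space. A small sketch of that arithmetic with illustrative numbers, assuming ethereum-types' U256; not part of this diff:

extern crate ethereum_types;

use ethereum_types::U256;

fn main() {
	// Illustrative values: 4 key servers in total, this node is index 1.
	let total_servers_count = 4u32;
	let this_server_index = 1u32;
	let server_key_id_value = U256::from(12345678u64);

	// Split the id space into equal intervals and take the one owned by this server.
	let range_interval = U256::max_value() / U256::from(total_servers_count);
	let range_begin = (range_interval + 1.into()) * this_server_index;
	let range_end = range_begin.saturating_add(range_interval);

	// The key is processed by this server only if its id falls inside the owned range.
	let is_mine = server_key_id_value >= range_begin && server_key_id_value <= range_end;
	println!("processed by this server: {}", is_mine);
}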

Some files were not shown because too many files have changed in this diff.