Merge branch 'client-ipc-refact' into client-ipc-tests

commit 1f1b420230

Cargo.lock (generated):

@@ -70,7 +70,6 @@ dependencies = [
  "heapsize 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -149,15 +148,6 @@ dependencies = [
  "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "cookie"
-version = "0.1.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "url 0.2.38 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "cookie"
 version = "0.2.4"
@@ -225,15 +215,13 @@ dependencies = [
 [[package]]
 name = "eth-secp256k1"
 version = "0.5.4"
-source = "git+https://github.com/ethcore/rust-secp256k1#b6fdd43bbcf6d46adb72a92dd1632a0fc834cbf5"
+source = "git+https://github.com/ethcore/rust-secp256k1#a9a0b1be1f39560ca86e8fc8e55e205a753ff25c"
 dependencies = [
  "arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)",
  "gcc 0.3.28 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -391,9 +379,8 @@ dependencies = [
  "eth-secp256k1 0.5.4 (git+https://github.com/ethcore/rust-secp256k1)",
  "ethcore-devtools 1.3.0",
  "heapsize 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "igd 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "igd 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
- "json-tests 0.1.0",
  "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -404,12 +391,13 @@ dependencies = [
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "sha3 0.1.0",
  "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "table 0.1.0",
  "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "tiny-keccak 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "using_queue 0.1.0",
  "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -509,26 +497,6 @@ name = "httparse"
 version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "hyper"
-version = "0.6.16"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cookie 0.1.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "language-tags 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "mime 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
- "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "solicit 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
- "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "traitobject 0.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "typeable 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "url 0.2.38 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "hyper"
 version = "0.8.1"
@@ -582,10 +550,10 @@ dependencies = [
 
 [[package]]
 name = "igd"
-version = "0.4.2"
+version = "0.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "hyper 0.6.16 (registry+https://github.com/rust-lang/crates.io-index)",
+ "hyper 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)",
  "xml-rs 0.1.26 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -612,14 +580,6 @@ dependencies = [
  "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "json-tests"
-version = "0.1.0"
-dependencies = [
- "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "jsonrpc-core"
 version = "2.0.7"
@@ -650,11 +610,6 @@ dependencies = [
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "language-tags"
-version = "0.0.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
 [[package]]
 name = "language-tags"
 version = "0.2.2"
@@ -688,15 +643,6 @@ dependencies = [
  "libc 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "mime"
-version = "0.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 0.6.15 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "mime"
 version = "0.2.0"
@@ -1157,14 +1103,6 @@ dependencies = [
  "nom 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "serde"
-version = "0.6.15"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "num 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "serde"
 version = "0.7.9"
@@ -1256,6 +1194,10 @@ dependencies = [
  "unicode-xid 0.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "table"
+version = "0.1.0"
+
 [[package]]
 name = "target_info"
 version = "0.1.0"
@@ -1362,16 +1304,6 @@ name = "unicode-xid"
 version = "0.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "url"
-version = "0.2.38"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
- "uuid 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "url"
 version = "0.5.9"
@@ -1393,20 +1325,15 @@ dependencies = [
  "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "using_queue"
+version = "0.1.0"
+
 [[package]]
 name = "utf8-ranges"
 version = "0.1.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "uuid"
-version = "0.1.18"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "uuid"
 version = "0.2.1"

README.md:

@@ -1,7 +1,11 @@
 # [Parity](https://ethcore.io/parity.html)
 ### Fast, light, and robust Ethereum implementation
 
-[![Build Status][travis-image]][travis-url] [![Coverage Status][coveralls-image]][coveralls-url] [![Join the chat at https://gitter.im/trogdoro/xiki][gitter-image]][gitter-url] [![GPLv3][license-image]][license-url]
+[![Build Status][travis-image]][travis-url] [![Coverage Status][coveralls-image]][coveralls-url] [![Join the chat at https://gitter.im/ethcore/parity][gitter-image]][gitter-url] [![GPLv3][license-image]][license-url]
 
+[Internal Documentation][doc-url]
+
+Be sure to check out [our wiki][wiki-url] for more information.
+
 [travis-image]: https://travis-ci.org/ethcore/parity.svg?branch=master
 [travis-url]: https://travis-ci.org/ethcore/parity
@@ -11,8 +15,8 @@
 [gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
 [license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg
 [license-url]: http://www.gnu.org/licenses/gpl-3.0.en.html
-[Internal Documentation](http://ethcore.github.io/parity/ethcore/index.html)
+[doc-url]: http://ethcore.github.io/parity/ethcore/index.html
+[wiki-url]: https://github.com/ethcore/parity/wiki
 
 ----
 
@@ -25,8 +25,8 @@ mod inner {
 	pub fn main() {
 		let out_dir = env::var_os("OUT_DIR").unwrap();
 
-		let src = Path::new("./src/api/mod.rs.in");
-		let dst = Path::new(&out_dir).join("mod.rs");
+		let src = Path::new("./src/api/types.rs.in");
+		let dst = Path::new(&out_dir).join("types.rs");
 
 		let mut registry = syntex::Registry::new();
 
@@ -15,38 +15,16 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::sync::Arc;
-use endpoint::{Endpoint, Endpoints, EndpointInfo, Handler, EndpointPath};
-use api::response::as_json;
+use endpoint::{Endpoint, Endpoints, Handler, EndpointPath};
+use api::types::{App, ApiError};
+use api::response::{as_json, as_json_error};
+use hyper::{server, net, Decoder, Encoder, Next};
 
+#[derive(Clone)]
 pub struct RestApi {
 	endpoints: Arc<Endpoints>,
 }
 
-#[derive(Debug, PartialEq, Serialize, Deserialize)]
-pub struct App {
-	pub id: String,
-	pub name: String,
-	pub description: String,
-	pub version: String,
-	pub author: String,
-	#[serde(rename="iconUrl")]
-	pub icon_url: String,
-}
-
-impl App {
-	fn from_info(id: &str, info: &EndpointInfo) -> Self {
-		App {
-			id: id.to_owned(),
-			name: info.name.to_owned(),
-			description: info.description.to_owned(),
-			version: info.version.to_owned(),
-			author: info.author.to_owned(),
-			icon_url: info.icon_url.to_owned(),
-		}
-	}
-}
-
 impl RestApi {
 	pub fn new(endpoints: Arc<Endpoints>) -> Box<Endpoint> {
 		Box::new(RestApi {
@@ -63,7 +41,39 @@ impl RestApi {
 
 impl Endpoint for RestApi {
 	fn to_handler(&self, _path: EndpointPath) -> Box<Handler> {
-		as_json(&self.list_apps())
+		Box::new(RestApiRouter {
+			api: self.clone(),
+			handler: as_json_error(&ApiError {
+				code: "404".into(),
+				title: "Not Found".into(),
+				detail: "Resource you requested has not been found.".into(),
+			}),
+		})
 	}
 }
 
+struct RestApiRouter {
+	api: RestApi,
+	handler: Box<Handler>,
+}
+
+impl server::Handler<net::HttpStream> for RestApiRouter {
+
+	fn on_request(&mut self, _request: server::Request<net::HttpStream>) -> Next {
+		self.handler = as_json(&self.api.list_apps());
+		Next::write()
+	}
+
+	fn on_request_readable(&mut self, decoder: &mut Decoder<net::HttpStream>) -> Next {
+		self.handler.on_request_readable(decoder)
+	}
+
+	fn on_response(&mut self, res: &mut server::Response) -> Next {
+		self.handler.on_response(res)
+	}
+
+	fn on_response_writable(&mut self, encoder: &mut Encoder<net::HttpStream>) -> Next {
+		self.handler.on_response_writable(encoder)
+	}
+
+}

@@ -16,13 +16,12 @@
 
 //! REST API
 
-#![warn(missing_docs)]
 #![cfg_attr(feature="nightly", feature(custom_derive, custom_attribute, plugin))]
 #![cfg_attr(feature="nightly", plugin(serde_macros, clippy))]
 
-#[cfg(feature = "serde_macros")]
-include!("mod.rs.in");
-#[cfg(not(feature = "serde_macros"))]
-include!(concat!(env!("OUT_DIR"), "/mod.rs"));
+mod api;
+mod response;
+mod types;
 
+pub use self::api::RestApi;
+pub use self::types::App;

@@ -16,8 +16,13 @@
 
 use serde::Serialize;
 use serde_json;
-use endpoint::{ContentHandler, Handler};
+use endpoint::Handler;
+use handlers::ContentHandler;
 
 pub fn as_json<T : Serialize>(val: &T) -> Box<Handler> {
-	Box::new(ContentHandler::new(serde_json::to_string(val).unwrap(), "application/json".to_owned()))
+	Box::new(ContentHandler::ok(serde_json::to_string(val).unwrap(), "application/json".to_owned()))
+}
+
+pub fn as_json_error<T : Serialize>(val: &T) -> Box<Handler> {
+	Box::new(ContentHandler::not_found(serde_json::to_string(val).unwrap(), "application/json".to_owned()))
 }

@@ -14,11 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use rustc_serialize::hex::FromHex;
+#[cfg(feature = "serde_macros")]
+include!("types.rs.in");
+
+#[cfg(not(feature = "serde_macros"))]
+include!(concat!(env!("OUT_DIR"), "/types.rs"));
 
-pub fn hex_or_string(s: &str) -> Vec<u8> {
-	match s.starts_with("0x") {
-		true => s[2..].from_hex().unwrap(),
-		false => From::from(s)
-	}
-}

dapps/src/api/types.rs.in (new file):

@@ -0,0 +1,51 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use endpoint::EndpointInfo;
+
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+pub struct App {
+	pub id: String,
+	pub name: String,
+	pub description: String,
+	pub version: String,
+	pub author: String,
+	#[serde(rename="iconUrl")]
+	pub icon_url: String,
+}
+
+impl App {
+	/// Creates `App` instance from `EndpointInfo` and `id`.
+	pub fn from_info(id: &str, info: &EndpointInfo) -> Self {
+		App {
+			id: id.to_owned(),
+			name: info.name.to_owned(),
+			description: info.description.to_owned(),
+			version: info.version.to_owned(),
+			author: info.author.to_owned(),
+			icon_url: info.icon_url.to_owned(),
+		}
+	}
+}
+
+#[derive(Debug, PartialEq, Serialize, Deserialize)]
+pub struct ApiError {
+	pub code: String,
+	pub title: String,
+	pub detail: String,
+}
+

@@ -16,11 +16,7 @@
 
 //! URL Endpoint traits
 
-use hyper::status::StatusCode;
-use hyper::{header, server, Decoder, Encoder, Next};
-use hyper::net::HttpStream;
-
-use std::io::Write;
+use hyper::{server, net};
 use std::collections::BTreeMap;
 
 #[derive(Debug, PartialEq, Default, Clone)]
@@ -42,58 +38,8 @@ pub struct EndpointInfo {
 pub trait Endpoint : Send + Sync {
 	fn info(&self) -> Option<&EndpointInfo> { None }
 
-	fn to_handler(&self, path: EndpointPath) -> Box<server::Handler<HttpStream> + Send>;
+	fn to_handler(&self, path: EndpointPath) -> Box<server::Handler<net::HttpStream> + Send>;
 }
 
 pub type Endpoints = BTreeMap<String, Box<Endpoint>>;
-pub type Handler = server::Handler<HttpStream> + Send;
-
-pub struct ContentHandler {
-	content: String,
-	mimetype: String,
-	write_pos: usize,
-}
-
-impl ContentHandler {
-	pub fn new(content: String, mimetype: String) -> Self {
-		ContentHandler {
-			content: content,
-			mimetype: mimetype,
-			write_pos: 0
-		}
-	}
-}
-
-impl server::Handler<HttpStream> for ContentHandler {
-	fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_response(&mut self, res: &mut server::Response) -> Next {
-		res.set_status(StatusCode::Ok);
-		res.headers_mut().set(header::ContentType(self.mimetype.parse().unwrap()));
-		Next::write()
-	}
-
-	fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
-		let bytes = self.content.as_bytes();
-		if self.write_pos == bytes.len() {
-			return Next::end();
-		}
-
-		match encoder.write(&bytes[self.write_pos..]) {
-			Ok(bytes) => {
-				self.write_pos += bytes;
-				Next::write()
-			},
-			Err(e) => match e.kind() {
-				::std::io::ErrorKind::WouldBlock => Next::write(),
-				_ => Next::end()
-			},
-		}
-	}
-}
+pub type Handler = server::Handler<net::HttpStream> + Send;

dapps/src/handlers/auth.rs (new file):

@@ -0,0 +1,44 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Authorization Handlers
+
+use hyper::{server, Decoder, Encoder, Next};
+use hyper::net::HttpStream;
+use hyper::status::StatusCode;
+
+pub struct AuthRequiredHandler;
+
+impl server::Handler<HttpStream> for AuthRequiredHandler {
+	fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
+		Next::write()
+	}
+
+	fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
+		Next::write()
+	}
+
+	fn on_response(&mut self, res: &mut server::Response) -> Next {
+		res.set_status(StatusCode::Unauthorized);
+		res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]);
+		Next::write()
+	}
+
+	fn on_response_writable(&mut self, _encoder: &mut Encoder<HttpStream>) -> Next {
+		Next::end()
+	}
+}

dapps/src/handlers/content.rs (new file):

@@ -0,0 +1,92 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Simple Content Handler
+
+use std::io::Write;
+use hyper::{header, server, Decoder, Encoder, Next};
+use hyper::net::HttpStream;
+use hyper::status::StatusCode;
+
+pub struct ContentHandler {
+	code: StatusCode,
+	content: String,
+	mimetype: String,
+	write_pos: usize,
+}
+
+impl ContentHandler {
+	pub fn ok(content: String, mimetype: String) -> Self {
+		ContentHandler {
+			code: StatusCode::Ok,
+			content: content,
+			mimetype: mimetype,
+			write_pos: 0
+		}
+	}
+
+	pub fn not_found(content: String, mimetype: String) -> Self {
+		ContentHandler {
+			code: StatusCode::NotFound,
+			content: content,
+			mimetype: mimetype,
+			write_pos: 0
+		}
+	}
+
+	pub fn new(code: StatusCode, content: String, mimetype: String) -> Self {
+		ContentHandler {
+			code: code,
+			content: content,
+			mimetype: mimetype,
+			write_pos: 0,
+		}
+	}
+}
+
+impl server::Handler<HttpStream> for ContentHandler {
+	fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
+		Next::write()
+	}
+
+	fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
+		Next::write()
+	}
+
+	fn on_response(&mut self, res: &mut server::Response) -> Next {
+		res.set_status(self.code);
+		res.headers_mut().set(header::ContentType(self.mimetype.parse().unwrap()));
+		Next::write()
+	}
+
+	fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
+		let bytes = self.content.as_bytes();
+		if self.write_pos == bytes.len() {
+			return Next::end();
+		}
+
+		match encoder.write(&bytes[self.write_pos..]) {
+			Ok(bytes) => {
+				self.write_pos += bytes;
+				Next::write()
+			},
+			Err(e) => match e.kind() {
+				::std::io::ErrorKind::WouldBlock => Next::write(),
+				_ => Next::end()
+			},
+		}
+	}
+}

dapps/src/handlers/mod.rs (new file):

@@ -0,0 +1,25 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Hyper handlers implementations.
+
+mod auth;
+mod content;
+mod redirect;
+
+pub use self::auth::AuthRequiredHandler;
+pub use self::content::ContentHandler;
+pub use self::redirect::Redirection;

@@ -59,6 +59,7 @@ mod endpoint;
 mod apps;
 mod page;
 mod router;
+mod handlers;
 mod rpc;
 mod api;
 mod proxypac;

@@ -16,7 +16,8 @@
 
 //! Serving ProxyPac file
 
-use endpoint::{Endpoint, Handler, ContentHandler, EndpointPath};
+use endpoint::{Endpoint, Handler, EndpointPath};
+use handlers::ContentHandler;
 use apps::DAPPS_DOMAIN;
 
 pub struct ProxyPac;
@@ -41,7 +42,7 @@ function FindProxyForURL(url, host) {{
 }}
 "#,
 		DAPPS_DOMAIN, path.host, path.port);
-		Box::new(ContentHandler::new(content, "application/javascript".to_owned()))
+		Box::new(ContentHandler::ok(content, "application/javascript".to_owned()))
 	}
 }
 

@@ -16,24 +16,23 @@
 
 //! HTTP Authorization implementations
 
-use std::io::Write;
 use std::collections::HashMap;
-use hyper::{header, server, Decoder, Encoder, Next};
-use hyper::net::HttpStream;
-use hyper::status::StatusCode;
+use hyper::{server, net, header, status};
+use endpoint::Handler;
+use handlers::{AuthRequiredHandler, ContentHandler};
 
 /// Authorization result
 pub enum Authorized {
 	/// Authorization was successful.
 	Yes,
 	/// Unsuccessful authorization. Handler for further work is returned.
-	No(Box<server::Handler<HttpStream> + Send>),
+	No(Box<Handler>),
 }
 
 /// Authorization interface
 pub trait Authorization : Send + Sync {
 	/// Checks if authorization is valid.
-	fn is_authorized(&self, req: &server::Request<HttpStream>)-> Authorized;
+	fn is_authorized(&self, req: &server::Request<net::HttpStream>)-> Authorized;
 }
 
 /// HTTP Basic Authorization handler
@@ -45,18 +44,22 @@ pub struct HttpBasicAuth {
 pub struct NoAuth;
 
 impl Authorization for NoAuth {
-	fn is_authorized(&self, _req: &server::Request<HttpStream>)-> Authorized {
+	fn is_authorized(&self, _req: &server::Request<net::HttpStream>)-> Authorized {
 		Authorized::Yes
 	}
 }
 
 impl Authorization for HttpBasicAuth {
-	fn is_authorized(&self, req: &server::Request<HttpStream>) -> Authorized {
+	fn is_authorized(&self, req: &server::Request<net::HttpStream>) -> Authorized {
 		let auth = self.check_auth(&req);
 
 		match auth {
 			Access::Denied => {
-				Authorized::No(Box::new(UnauthorizedHandler { write_pos: 0 }))
+				Authorized::No(Box::new(ContentHandler::new(
+					status::StatusCode::Unauthorized,
+					"<h1>Unauthorized</h1>".into(),
+					"text/html".into(),
+				)))
 			},
 			Access::AuthRequired => {
 				Authorized::No(Box::new(AuthRequiredHandler))
@@ -89,7 +92,7 @@ impl HttpBasicAuth {
 		self.users.get(&username.to_owned()).map_or(false, |pass| pass == password)
 	}
 
-	fn check_auth(&self, req: &server::Request<HttpStream>) -> Access {
+	fn check_auth(&self, req: &server::Request<net::HttpStream>) -> Access {
 		match req.headers().get::<header::Authorization<header::Basic>>() {
 			Some(&header::Authorization(
 				header::Basic { ref username, password: Some(ref password) }
@@ -99,63 +102,3 @@ impl HttpBasicAuth {
 		}
 	}
 }
-
-pub struct UnauthorizedHandler {
-	write_pos: usize,
-}
-
-impl server::Handler<HttpStream> for UnauthorizedHandler {
-	fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_response(&mut self, res: &mut server::Response) -> Next {
-		res.set_status(StatusCode::Unauthorized);
-		Next::write()
-	}
-
-	fn on_response_writable(&mut self, encoder: &mut Encoder<HttpStream>) -> Next {
-		let response = "Unauthorized".as_bytes();
-
-		if self.write_pos == response.len() {
-			return Next::end();
-		}
-
-		match encoder.write(&response[self.write_pos..]) {
-			Ok(bytes) => {
-				self.write_pos += bytes;
-				Next::write()
-			},
-			Err(e) => match e.kind() {
-				::std::io::ErrorKind::WouldBlock => Next::write(),
-				_ => Next::end()
-			},
-		}
-	}
-}
-
-pub struct AuthRequiredHandler;
-
-impl server::Handler<HttpStream> for AuthRequiredHandler {
-	fn on_request(&mut self, _request: server::Request<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_request_readable(&mut self, _decoder: &mut Decoder<HttpStream>) -> Next {
-		Next::write()
-	}
-
-	fn on_response(&mut self, res: &mut server::Response) -> Next {
-		res.set_status(StatusCode::Unauthorized);
-		res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]);
-		Next::write()
-	}
-
-	fn on_response_writable(&mut self, _encoder: &mut Encoder<HttpStream>) -> Next {
-		Next::end()
-	}
-}

@@ -18,7 +18,6 @@
 //! Processes request handling authorization and dispatching it to proper application.
 
 mod url;
-mod redirect;
 pub mod auth;
 
 use DAPPS_DOMAIN;
@@ -33,7 +32,7 @@ use apps;
 use endpoint::{Endpoint, Endpoints, EndpointPath};
 use self::url::Url;
 use self::auth::{Authorization, Authorized};
-use self::redirect::Redirection;
+use handlers::Redirection;
 
 /// Special endpoints are accessible on every domain (every dapp)
 #[derive(Debug, PartialEq, Hash, Eq)]
|
|||||||
test-heavy = []
|
test-heavy = []
|
||||||
dev = ["clippy"]
|
dev = ["clippy"]
|
||||||
default = []
|
default = []
|
||||||
|
benches = []
|
||||||
|
@ -33,7 +33,7 @@ fn main() {
|
|||||||
|
|
||||||
// client interface
|
// client interface
|
||||||
{
|
{
|
||||||
let src = Path::new("src/client/client_ipc.rs");
|
let src = Path::new("src/client/client.rs");
|
||||||
let intermediate = Path::new(&out_dir).join("client.intermediate.rs.in");
|
let intermediate = Path::new(&out_dir).join("client.intermediate.rs.in");
|
||||||
let mut registry = syntex::Registry::new();
|
let mut registry = syntex::Registry::new();
|
||||||
codegen::register(&mut registry);
|
codegen::register(&mut registry);
|
||||||
|
@ -9,8 +9,7 @@
|
|||||||
"durationLimit": "0x0d",
|
"durationLimit": "0x0d",
|
||||||
"blockReward": "0x4563918244F40000",
|
"blockReward": "0x4563918244F40000",
|
||||||
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
|
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
|
||||||
"frontierCompatibilityModeLimit": "0x118c30",
|
"frontierCompatibilityModeLimit": "0x118c30"
|
||||||
"daoRescueSoftFork": false
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
File diff suppressed because it is too large
Load Diff

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-				"frontierCompatibilityModeLimit": "0x118c30",
-				"daoRescueSoftFork": false
+				"frontierCompatibilityModeLimit": "0x118c30"
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-				"frontierCompatibilityModeLimit": "0x118c30",
-				"daoRescueSoftFork": true
+				"frontierCompatibilityModeLimit": "0x118c30"
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-				"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-				"daoRescueSoftFork": false
+				"frontierCompatibilityModeLimit": "0xffffffffffffffff"
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-				"frontierCompatibilityModeLimit": 0,
-				"daoRescueSoftFork": false
+				"frontierCompatibilityModeLimit": 0
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-				"frontierCompatibilityModeLimit": 0,
-				"daoRescueSoftFork": true
+				"frontierCompatibilityModeLimit": 0
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x0d",
 				"blockReward": "0x4563918244F40000",
 				"registrar": "",
-				"frontierCompatibilityModeLimit": "0x789b0",
-				"daoRescueSoftFork": false
+				"frontierCompatibilityModeLimit": "0x789b0"
 			}
 		}
 	},

@@ -9,8 +9,7 @@
 				"durationLimit": "0x08",
 				"blockReward": "0x14D1120D7B160000",
 				"registrar": "5e70c0bbcd5636e0f9f9316e9f8633feb64d4050",
-				"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-				"daoRescueSoftFork": false
+				"frontierCompatibilityModeLimit": "0xffffffffffffffff"
 			}
 		}
 	},
|
|||||||
SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
|
SecTrieDBMut would not set it to an invalid state root. Therefore the root is valid and DB creation \
|
||||||
using it will not fail.");
|
using it will not fail.");
|
||||||
|
|
||||||
(Filth::Clean, H256::from(db.get(key.bytes()).map_or(U256::zero(), |v| -> U256 {decode(v)})))
|
(Filth::Clean, H256::from(db.get(key).map_or(U256::zero(), |v| -> U256 {decode(v)})))
|
||||||
}).1.clone()
|
}).1.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
|

@@ -203,7 +203,6 @@ mod tests {
 			timestamp: 0,
 			difficulty: 0.into(),
 			last_hashes: vec![],
-			dao_rescue_block_gas_limit: None,
 			gas_used: 0.into(),
 			gas_limit: 0.into(),
 		});
@@ -254,7 +253,7 @@ mod tests {
 		spec.ensure_db_good(db.as_hashdb_mut());
 		let last_hashes = vec![genesis_header.hash()];
 		let vm_factory = Default::default();
-		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
+		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let b = b.close_and_lock();
 		let seal = engine.generate_seal(b.block(), Some(&tap)).unwrap();
 		assert!(b.try_seal(engine.deref(), seal).is_ok());

@@ -183,7 +183,6 @@ pub struct OpenBlock<'x> {
 	engine: &'x Engine,
 	vm_factory: &'x EvmFactory,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 }
 
 /// Just like `OpenBlock`, except that we've applied `Engine::on_close_block`, finished up the non-seal header fields,
@@ -195,7 +194,6 @@ pub struct ClosedBlock {
 	block: ExecutedBlock,
 	uncle_bytes: Bytes,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 	unclosed_state: State,
 }
 
@@ -227,7 +225,6 @@ impl<'x> OpenBlock<'x> {
 		db: Box<JournalDB>,
 		parent: &Header,
 		last_hashes: LastHashes,
-		dao_rescue_block_gas_limit: Option<U256>,
 		author: Address,
 		gas_range_target: (U256, U256),
 		extra_data: Bytes,
@@ -238,7 +235,6 @@ impl<'x> OpenBlock<'x> {
 			engine: engine,
 			vm_factory: vm_factory,
 			last_hashes: last_hashes,
-			dao_rescue_block_gas_limit: dao_rescue_block_gas_limit,
 		};
 
 		r.block.base.header.parent_hash = parent.hash();
@@ -295,7 +291,6 @@ impl<'x> OpenBlock<'x> {
 	/// Get the environment info concerning this block.
 	pub fn env_info(&self) -> EnvInfo {
 		// TODO: memoise.
-		const SOFT_FORK_BLOCK: u64 = 1_800_000;
 		EnvInfo {
 			number: self.block.base.header.number,
 			author: self.block.base.header.author.clone(),
@@ -304,7 +299,6 @@ impl<'x> OpenBlock<'x> {
 			last_hashes: self.last_hashes.clone(), // TODO: should be a reference.
 			gas_used: self.block.receipts.last().map_or(U256::zero(), |r| r.gas_used),
 			gas_limit: self.block.base.header.gas_limit.clone(),
-			dao_rescue_block_gas_limit: if self.block.base.header.number == SOFT_FORK_BLOCK { Some(self.block.base.header.gas_limit) } else { self.dao_rescue_block_gas_limit },
 		}
 	}
 
@@ -351,7 +345,6 @@ impl<'x> OpenBlock<'x> {
 			block: s.block,
 			uncle_bytes: uncle_bytes,
 			last_hashes: s.last_hashes,
-			dao_rescue_block_gas_limit: s.dao_rescue_block_gas_limit,
 			unclosed_state: unclosed_state,
 		}
 	}
@@ -411,7 +404,6 @@ impl ClosedBlock {
 			engine: engine,
 			vm_factory: vm_factory,
 			last_hashes: self.last_hashes,
-			dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit,
 		}
 	}
 }
@@ -481,7 +473,6 @@ pub fn enact(
 	db: Box<JournalDB>,
 	parent: &Header,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 	vm_factory: &EvmFactory,
 	trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
@@ -492,7 +483,7 @@ pub fn enact(
 		}
 	}
 
-	let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
+	let mut b = try!(OpenBlock::new(engine, vm_factory, trie_factory, tracing, db, parent, last_hashes, header.author().clone(), (3141562.into(), 31415620.into()), header.extra_data().clone()));
 	b.set_difficulty(*header.difficulty());
 	b.set_gas_limit(*header.gas_limit());
 	b.set_timestamp(header.timestamp());
@@ -510,13 +501,12 @@ pub fn enact_bytes(
 	db: Box<JournalDB>,
 	parent: &Header,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 	vm_factory: &EvmFactory,
 	trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
 	let block = BlockView::new(block_bytes);
 	let header = block.header();
-	enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
+	enact(&header, &block.transactions(), &block.uncles(), engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header
@@ -528,12 +518,11 @@ pub fn enact_verified(
 	db: Box<JournalDB>,
 	parent: &Header,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 	vm_factory: &EvmFactory,
 	trie_factory: TrieFactory,
 ) -> Result<LockedBlock, Error> {
 	let view = BlockView::new(&block.bytes);
-	enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)
+	enact(&block.header, &block.transactions, &view.uncles(), engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)
 }
 
 /// Enact the block given by `block_bytes` using `engine` on the database `db` with given `parent` block header. Seal the block aferwards
@@ -545,12 +534,11 @@ pub fn enact_and_seal(
 	db: Box<JournalDB>,
 	parent: &Header,
 	last_hashes: LastHashes,
-	dao_rescue_block_gas_limit: Option<U256>,
 	vm_factory: &EvmFactory,
 	trie_factory: TrieFactory,
 ) -> Result<SealedBlock, Error> {
 	let header = BlockView::new(block_bytes).header_view();
-	Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, dao_rescue_block_gas_limit, vm_factory, trie_factory)).seal(engine, header.seal())))
+	Ok(try!(try!(enact_bytes(block_bytes, engine, tracing, db, parent, last_hashes, vm_factory, trie_factory)).seal(engine, header.seal())))
 }
 
 #[cfg(test)]
@@ -570,7 +558,7 @@ mod tests {
 		spec.ensure_db_good(db.as_hashdb_mut());
 		let last_hashes = vec![genesis_header.hash()];
 		let vm_factory = Default::default();
-		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let b = b.close_and_lock();
 		let _ = b.seal(engine.deref(), vec![]);
 	}
@@ -586,7 +574,7 @@ mod tests {
 		let mut db = db_result.take();
 		spec.ensure_db_good(db.as_hashdb_mut());
 		let vm_factory = Default::default();
-		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
+		let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap()
 			.close_and_lock().seal(engine.deref(), vec![]).unwrap();
 		let orig_bytes = b.rlp_bytes();
 		let orig_db = b.drain();
@@ -594,7 +582,7 @@ mod tests {
 		let mut db_result = get_temp_journal_db();
 		let mut db = db_result.take();
 		spec.ensure_db_good(db.as_hashdb_mut());
-		let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();
+		let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default(), Default::default()).unwrap();
 
 		assert_eq!(e.rlp_bytes(), orig_bytes);
 
@@ -614,7 +602,7 @@ mod tests {
 		let mut db = db_result.take();
 		spec.ensure_db_good(db.as_hashdb_mut());
 		let vm_factory = Default::default();
-		let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+		let mut open_block = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, vec![genesis_header.hash()], Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
 		let mut uncle1_header = Header::new();
 		uncle1_header.extra_data = b"uncle1".to_vec();
 		let mut uncle2_header = Header::new();
@@ -629,7 +617,7 @@ mod tests {
 		let mut db_result = get_temp_journal_db();
 		let mut db = db_result.take();
 		spec.ensure_db_good(db.as_hashdb_mut());
-		let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], None, &Default::default(), Default::default()).unwrap();
+		let e = enact_and_seal(&orig_bytes, engine.deref(), false, db, &genesis_header, vec![genesis_header.hash()], &Default::default(), Default::default()).unwrap();
 
 		let bytes = e.rlp_bytes();
 		assert_eq!(bytes, orig_bytes);
@ -14,8 +14,982 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

-//! Blockchain database client.
use std::path::PathBuf;
use std::collections::{HashSet, HashMap};
use std::ops::Deref;
use std::mem;
use std::collections::VecDeque;
use std::sync::*;
use std::path::Path;
use std::fmt;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
use std::time::Instant;

-#![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
// util
-include!(concat!(env!("OUT_DIR"), "/client.ipc.rs"));
use util::numbers::*;
use util::panics::*;
use util::network::*;
use util::io::*;
use util::rlp;
use util::sha3::*;
use util::{Bytes};
use util::rlp::{RlpStream, Rlp, UntrustedRlp};
use util::journaldb;
use util::journaldb::JournalDB;
use util::kvdb::*;
use util::{Applyable, Stream, View, PerfTimer, Itertools, Colour};

// other
use views::BlockView;
use error::{ImportError, ExecutionError, BlockError, ImportResult};
use header::BlockNumber;
use state::State;
use spec::Spec;
use engine::Engine;
use views::HeaderView;
use service::{NetSyncMessage, SyncMessage};
use env_info::LastHashes;
use verification;
use verification::{PreverifiedBlock, Verifier};
use block::*;
use transaction::{LocalizedTransaction, SignedTransaction, Action};
use blockchain::extras::TransactionAddress;
use types::filter::Filter;
use log_entry::LocalizedLogEntry;
use block_queue::{BlockQueue, BlockQueueInfo};
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig,
	DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient,
	TraceFilter, CallAnalytics, BlockImportError, Mode};
use client::Error as ClientError;
use env_info::EnvInfo;
use executive::{Executive, Executed, TransactOptions, contract_address};
use receipt::LocalizedReceipt;
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
use trace;
use evm::Factory as EvmFactory;
use miner::{Miner, MinerService};
use util::TrieFactory;
use ipc::IpcConfig;
use ipc::binary::{BinaryConvertError};

// re-export
pub use types::blockchain_info::BlockChainInfo;
pub use types::block_status::BlockStatus;
pub use blockchain::CacheSize as BlockChainCacheSize;

const MAX_TX_QUEUE_SIZE: usize = 4096;
const MAX_QUEUE_SIZE_TO_SLEEP_ON: usize = 2;

impl fmt::Display for BlockChainInfo {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
	}
}

/// Report on the status of a client.
#[derive(Default, Clone, Debug, Eq, PartialEq)]
pub struct ClientReport {
	/// How many blocks have been imported so far.
	pub blocks_imported: usize,
	/// How many transactions have been applied so far.
	pub transactions_applied: usize,
	/// How much gas has been processed so far.
	pub gas_processed: U256,
	/// Memory used by state DB
	pub state_db_mem: usize,
}

impl ClientReport {
	/// Alter internal reporting to reflect the additional `block` has been processed.
	pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
		self.blocks_imported += 1;
		self.transactions_applied += block.transactions.len();
		self.gas_processed = self.gas_processed + block.header.gas_used;
	}
}

struct SleepState {
	last_activity: Option<Instant>,
	last_autosleep: Option<Instant>,
}

impl SleepState {
	fn new(awake: bool) -> Self {
		SleepState {
			last_activity: match awake { false => None, true => Some(Instant::now()) },
			last_autosleep: match awake { false => Some(Instant::now()), true => None },
		}
	}
}

/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
pub struct Client {
	mode: Mode,
	chain: Arc<BlockChain>,
	tracedb: Arc<TraceDB<BlockChain>>,
	engine: Arc<Box<Engine>>,
	state_db: Mutex<Box<JournalDB>>,
	block_queue: BlockQueue,
	report: RwLock<ClientReport>,
	import_lock: Mutex<()>,
	panic_handler: Arc<PanicHandler>,
	verifier: Box<Verifier>,
	vm_factory: Arc<EvmFactory>,
	trie_factory: TrieFactory,
	miner: Arc<Miner>,
	sleep_state: Mutex<SleepState>,
	liveness: AtomicBool,
	io_channel: IoChannel<NetSyncMessage>,
	queue_transactions: AtomicUsize,
	previous_enode: Mutex<Option<String>>,
}

const HISTORY: u64 = 1200;
// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
// Altering it will force a blanket DB update for *all* JournalDB-derived
// databases.
// Instead, add/upgrade the version string of the individual JournalDB-derived database
// of which you actually want to force an upgrade.
const CLIENT_DB_VER_STR: &'static str = "5.3";

/// Get the path for the databases given the root path and information on the databases.
pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf {
	let mut dir = path.to_path_buf();
	dir.push(H64::from(genesis_hash).hex());
	//TODO: sec/fat: pruned/full versioning
	// version here is a bit useless now, since it's controlled only by the pruning algo.
	dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
	dir
}

/// Append a path element to the given path and return the string.
pub fn append_path(path: &Path, item: &str) -> String {
	let mut p = path.to_path_buf();
	p.push(item);
	p.to_str().unwrap().to_owned()
}
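
As an aside, a minimal sketch of how these two path helpers compose; the root path, directory name and pruning label below are made-up placeholders, not values from this diff:

use std::path::Path;

// Illustrative only: stands in for get_db_path + append_path with toy arguments.
fn example_state_db_path() -> String {
	let root = Path::new("/tmp/parity");
	let mut dir = root.to_path_buf();
	dir.push("1a2b3c4d");                              // placeholder for H64::from(genesis_hash).hex()
	dir.push(format!("v{}-sec-{}", "5.3", "archive")); // placeholder pruning algorithm name
	// append_path: push a sub-database name and return the path as a String.
	dir.push("state");
	dir.to_str().unwrap().to_owned()
}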
|
|
||||||
|
impl Client {
|
||||||
|
/// Create a new client with given spec and DB path and custom verifier.
|
||||||
|
pub fn new(
|
||||||
|
config: ClientConfig,
|
||||||
|
spec: Spec,
|
||||||
|
path: &Path,
|
||||||
|
miner: Arc<Miner>,
|
||||||
|
message_channel: IoChannel<NetSyncMessage>
|
||||||
|
) -> Result<Arc<Client>, ClientError> {
|
||||||
|
let path = get_db_path(path, config.pruning, spec.genesis_header().hash());
|
||||||
|
let gb = spec.genesis_block();
|
||||||
|
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
|
||||||
|
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
|
||||||
|
|
||||||
|
let mut state_db_config = match config.db_cache_size {
|
||||||
|
None => DatabaseConfig::default(),
|
||||||
|
Some(cache_size) => DatabaseConfig::with_cache(cache_size),
|
||||||
|
};
|
||||||
|
|
||||||
|
if config.db_compaction == DatabaseCompactionProfile::HDD {
|
||||||
|
state_db_config = state_db_config.compaction(CompactionProfile::hdd());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut state_db = journaldb::new(
|
||||||
|
&append_path(&path, "state"),
|
||||||
|
config.pruning,
|
||||||
|
state_db_config
|
||||||
|
);
|
||||||
|
|
||||||
|
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
|
||||||
|
state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
|
||||||
|
}
|
||||||
|
|
||||||
|
let engine = Arc::new(spec.engine);
|
||||||
|
|
||||||
|
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
|
||||||
|
let panic_handler = PanicHandler::new_in_arc();
|
||||||
|
panic_handler.forward_from(&block_queue);
|
||||||
|
|
||||||
|
let awake = match config.mode { Mode::Dark(..) => false, _ => true };
|
||||||
|
let client = Client {
|
||||||
|
sleep_state: Mutex::new(SleepState::new(awake)),
|
||||||
|
liveness: AtomicBool::new(awake),
|
||||||
|
mode: config.mode,
|
||||||
|
chain: chain,
|
||||||
|
tracedb: tracedb,
|
||||||
|
engine: engine,
|
||||||
|
state_db: Mutex::new(state_db),
|
||||||
|
block_queue: block_queue,
|
||||||
|
report: RwLock::new(Default::default()),
|
||||||
|
import_lock: Mutex::new(()),
|
||||||
|
panic_handler: panic_handler,
|
||||||
|
verifier: verification::new(config.verifier_type),
|
||||||
|
vm_factory: Arc::new(EvmFactory::new(config.vm_type)),
|
||||||
|
trie_factory: TrieFactory::new(config.trie_spec),
|
||||||
|
miner: miner,
|
||||||
|
io_channel: message_channel,
|
||||||
|
queue_transactions: AtomicUsize::new(0),
|
||||||
|
previous_enode: Mutex::new(None),
|
||||||
|
};
|
||||||
|
Ok(Arc::new(client))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Flush the block import queue.
|
||||||
|
pub fn flush_queue(&self) {
|
||||||
|
self.block_queue.flush();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
|
||||||
|
let mut last_hashes = LastHashes::new();
|
||||||
|
last_hashes.resize(256, H256::new());
|
||||||
|
last_hashes[0] = parent_hash;
|
||||||
|
for i in 0..255 {
|
||||||
|
match self.chain.block_details(&last_hashes[i]) {
|
||||||
|
Some(details) => {
|
||||||
|
last_hashes[i + 1] = details.parent.clone();
|
||||||
|
},
|
||||||
|
None => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
last_hashes
|
||||||
|
}
|
||||||
|
|
||||||
|
fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<LockedBlock, ()> {
|
||||||
|
let engine = self.engine.deref().deref();
|
||||||
|
let header = &block.header;
|
||||||
|
|
||||||
|
// Check the block isn't so old we won't be able to enact it.
|
||||||
|
let best_block_number = self.chain.best_block_number();
|
||||||
|
if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY {
|
||||||
|
warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify Block Family
|
||||||
|
let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref());
|
||||||
|
if let Err(e) = verify_family_result {
|
||||||
|
warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
|
return Err(());
|
||||||
|
};
|
||||||
|
|
||||||
|
// Check if Parent is in chain
|
||||||
|
let chain_has_parent = self.chain.block_header(&header.parent_hash);
|
||||||
|
if let None = chain_has_parent {
|
||||||
|
warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
|
||||||
|
return Err(());
|
||||||
|
};
|
||||||
|
|
||||||
|
// Enact Verified Block
|
||||||
|
let parent = chain_has_parent.unwrap();
|
||||||
|
let last_hashes = self.build_last_hashes(header.parent_hash.clone());
|
||||||
|
let db = self.state_db.lock().unwrap().boxed_clone();
|
||||||
|
|
||||||
|
let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, &self.vm_factory, self.trie_factory.clone());
|
||||||
|
if let Err(e) = enact_result {
|
||||||
|
warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
|
return Err(());
|
||||||
|
};
|
||||||
|
|
||||||
|
// Final Verification
|
||||||
|
let locked_block = enact_result.unwrap();
|
||||||
|
if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) {
|
||||||
|
warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(locked_block)
|
||||||
|
}
|
||||||
|
|
||||||
|
	fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
		fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
			map.into_iter().map(|(k, _v)| k).collect()
		}

		// In ImportRoute we get all the blocks that have been enacted and retracted by single insert.
		// Because we are doing multiple inserts some of the blocks that were enacted in import `k`
		// could be retracted in import `k+1`. This is why to understand if after all inserts
		// the block is enacted or retracted we iterate over all routes and at the end final state
		// will be in the hashmap
		let map = import_results.iter().fold(HashMap::new(), |mut map, route| {
			for hash in &route.enacted {
				map.insert(hash.clone(), true);
			}
			for hash in &route.retracted {
				map.insert(hash.clone(), false);
			}
			map
		});

		// Split to enacted retracted (using hashmap value)
		let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
		// And convert tuples to keys
		(map_to_vec(enacted), map_to_vec(retracted))
	}
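
The comment above explains why the routes are folded into one map before splitting; a self-contained sketch of that idea, with plain integers standing in for H256 hashes (illustrative, not the diff's types):

use std::collections::HashMap;

// Each route is (enacted, retracted) for one insert; a later route may retract a block
// an earlier one enacted, so only the final value stored per hash matters.
fn fold_routes(routes: &[(Vec<u64>, Vec<u64>)]) -> (Vec<u64>, Vec<u64>) {
	let map = routes.iter().fold(HashMap::new(), |mut map, &(ref enacted, ref retracted)| {
		for h in enacted { map.insert(*h, true); }
		for h in retracted { map.insert(*h, false); }
		map
	});
	let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_, v)| v);
	(enacted.into_iter().map(|(k, _)| k).collect(), retracted.into_iter().map(|(k, _)| k).collect())
}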
|
|
||||||
|
/// This is triggered by a message coming from a block queue when the block is ready for insertion
|
||||||
|
pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
|
||||||
|
let max_blocks_to_import = 64;
|
||||||
|
|
||||||
|
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
|
||||||
|
let mut invalid_blocks = HashSet::new();
|
||||||
|
let mut import_results = Vec::with_capacity(max_blocks_to_import);
|
||||||
|
|
||||||
|
let _import_lock = self.import_lock.lock();
|
||||||
|
let _timer = PerfTimer::new("import_verified_blocks");
|
||||||
|
let blocks = self.block_queue.drain(max_blocks_to_import);
|
||||||
|
|
||||||
|
let original_best = self.chain_info().best_block_hash;
|
||||||
|
|
||||||
|
for block in blocks {
|
||||||
|
let header = &block.header;
|
||||||
|
|
||||||
|
if invalid_blocks.contains(&header.parent_hash) {
|
||||||
|
invalid_blocks.insert(header.hash());
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let closed_block = self.check_and_close_block(&block);
|
||||||
|
if let Err(_) = closed_block {
|
||||||
|
invalid_blocks.insert(header.hash());
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
let closed_block = closed_block.unwrap();
|
||||||
|
imported_blocks.push(header.hash());
|
||||||
|
|
||||||
|
let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
|
||||||
|
import_results.push(route);
|
||||||
|
|
||||||
|
self.report.write().unwrap().accrue_block(&block);
|
||||||
|
trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
|
||||||
|
}
|
||||||
|
|
||||||
|
let imported = imported_blocks.len();
|
||||||
|
let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();
|
||||||
|
|
||||||
|
{
|
||||||
|
if !invalid_blocks.is_empty() {
|
||||||
|
self.block_queue.mark_as_bad(&invalid_blocks);
|
||||||
|
}
|
||||||
|
if !imported_blocks.is_empty() {
|
||||||
|
self.block_queue.mark_as_good(&imported_blocks);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
|
||||||
|
let (enacted, retracted) = self.calculate_enacted_retracted(&import_results);
|
||||||
|
|
||||||
|
if self.queue_info().is_empty() {
|
||||||
|
self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted);
|
||||||
|
}
|
||||||
|
|
||||||
|
io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
|
||||||
|
imported: imported_blocks,
|
||||||
|
invalid: invalid_blocks,
|
||||||
|
enacted: enacted,
|
||||||
|
retracted: retracted,
|
||||||
|
sealed: Vec::new(),
|
||||||
|
})).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.chain_info().best_block_hash != original_best {
|
||||||
|
self.miner.update_sealing(self);
|
||||||
|
}
|
||||||
|
|
||||||
|
imported
|
||||||
|
}
|
||||||
|
|
||||||
|
fn commit_block<B>(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain {
|
||||||
|
let number = block.header().number();
|
||||||
|
// Are we committing an era?
|
||||||
|
let ancient = if number >= HISTORY {
|
||||||
|
let n = number - HISTORY;
|
||||||
|
Some((n, self.chain.block_hash(n).unwrap()))
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
// Commit results
|
||||||
|
let receipts = block.receipts().clone();
|
||||||
|
let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));
|
||||||
|
|
||||||
|
// CHECK! I *think* this is fine, even if the state_root is equal to another
|
||||||
|
// already-imported block of the same number.
|
||||||
|
// TODO: Prove it with a test.
|
||||||
|
block.drain().commit(number, hash, ancient).expect("State DB commit failed.");
|
||||||
|
|
||||||
|
// And update the chain after commit to prevent race conditions
|
||||||
|
// (when something is in chain but you are not able to fetch details)
|
||||||
|
let route = self.chain.insert_block(block_data, receipts);
|
||||||
|
self.tracedb.import(TraceImportRequest {
|
||||||
|
traces: traces,
|
||||||
|
block_hash: hash.clone(),
|
||||||
|
block_number: number,
|
||||||
|
enacted: route.enacted.clone(),
|
||||||
|
retracted: route.retracted.len()
|
||||||
|
});
|
||||||
|
route
|
||||||
|
}
|
||||||
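
A small worked example of the era arithmetic above: with HISTORY = 1200, committing block N ends the era of block N - 1200, and nothing becomes ancient before that depth is reached (illustrative sketch only):

const HISTORY: u64 = 1200;

// Mirrors the `number >= HISTORY` guard above: the era that can be ended when
// block `number` is committed, if any.
fn ancient_era(number: u64) -> Option<u64> {
	if number >= HISTORY { Some(number - HISTORY) } else { None }
}

// ancient_era(1199) == None, ancient_era(1200) == Some(0), ancient_era(5000) == Some(3800).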
|
|
||||||
|
/// Import transactions from the IO queue
|
||||||
|
pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize {
|
||||||
|
let _timer = PerfTimer::new("import_queued_transactions");
|
||||||
|
self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
|
||||||
|
let txs = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
|
||||||
|
let results = self.miner.import_external_transactions(self, txs);
|
||||||
|
results.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempt to get a copy of a specific block's state.
|
||||||
|
///
|
||||||
|
/// This will not fail if given BlockID::Latest.
|
||||||
|
/// Otherwise, this can fail (but may not) if the DB prunes state.
|
||||||
|
pub fn state_at(&self, id: BlockID) -> Option<State> {
|
||||||
|
// fast path for latest state.
|
||||||
|
if let BlockID::Latest = id.clone() {
|
||||||
|
return Some(self.state())
|
||||||
|
}
|
||||||
|
|
||||||
|
let block_number = match self.block_number(id.clone()) {
|
||||||
|
Some(num) => num,
|
||||||
|
None => return None,
|
||||||
|
};
|
||||||
|
|
||||||
|
self.block_header(id).and_then(|header| {
|
||||||
|
let db = self.state_db.lock().unwrap().boxed_clone();
|
||||||
|
|
||||||
|
// early exit for pruned blocks
|
||||||
|
if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let root = HeaderView::new(&header).state_root();
|
||||||
|
|
||||||
|
State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok()
|
||||||
|
})
|
||||||
|
}
|
||||||
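
The early exit above means a pruning database only serves state within HISTORY blocks of the chain head; a tiny sketch of that window test with plain integers (illustrative only):

const HISTORY: u64 = 1200;

// State for `block_number` is treated as unavailable once the best block is
// HISTORY or more blocks ahead of it and the journal prunes old state.
fn state_available(is_pruned: bool, best_block_number: u64, block_number: u64) -> bool {
	!(is_pruned && best_block_number >= block_number + HISTORY)
}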
|
|
||||||
|
/// Get a copy of the best block's state.
|
||||||
|
pub fn state(&self) -> State {
|
||||||
|
State::from_existing(
|
||||||
|
self.state_db.lock().unwrap().boxed_clone(),
|
||||||
|
HeaderView::new(&self.best_block_header()).state_root(),
|
||||||
|
self.engine.account_start_nonce(),
|
||||||
|
self.trie_factory.clone())
|
||||||
|
.expect("State root of best block header always valid.")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get info on the cache.
|
||||||
|
pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
|
||||||
|
self.chain.cache_size()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the report.
|
||||||
|
pub fn report(&self) -> ClientReport {
|
||||||
|
let mut report = self.report.read().unwrap().clone();
|
||||||
|
report.state_db_mem = self.state_db.lock().unwrap().mem_used();
|
||||||
|
report
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Tick the client.
|
||||||
|
// TODO: manage by real events.
|
||||||
|
pub fn tick(&self) {
|
||||||
|
self.chain.collect_garbage();
|
||||||
|
self.block_queue.collect_garbage();
|
||||||
|
|
||||||
|
match self.mode {
|
||||||
|
Mode::Dark(timeout) => {
|
||||||
|
let mut ss = self.sleep_state.lock().unwrap();
|
||||||
|
if let Some(t) = ss.last_activity {
|
||||||
|
if Instant::now() > t + timeout {
|
||||||
|
self.sleep();
|
||||||
|
ss.last_activity = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Mode::Passive(timeout, wakeup_after) => {
|
||||||
|
let mut ss = self.sleep_state.lock().unwrap();
|
||||||
|
let now = Instant::now();
|
||||||
|
if let Some(t) = ss.last_activity {
|
||||||
|
if now > t + timeout {
|
||||||
|
self.sleep();
|
||||||
|
ss.last_activity = None;
|
||||||
|
ss.last_autosleep = Some(now);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if let Some(t) = ss.last_autosleep {
|
||||||
|
if now > t + wakeup_after {
|
||||||
|
self.wake_up();
|
||||||
|
ss.last_activity = Some(now);
|
||||||
|
ss.last_autosleep = None;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => {}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Set up the cache behaviour.
|
||||||
|
pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
|
||||||
|
self.chain.configure_cache(pref_cache_size, max_cache_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Look up the block number for the given block ID.
|
||||||
|
pub fn block_number(&self, id: BlockID) -> Option<BlockNumber> {
|
||||||
|
match id {
|
||||||
|
BlockID::Number(number) => Some(number),
|
||||||
|
BlockID::Hash(ref hash) => self.chain.block_number(hash),
|
||||||
|
BlockID::Earliest => Some(0),
|
||||||
|
BlockID::Latest => Some(self.chain.best_block_number())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_hash(chain: &BlockChain, id: BlockID) -> Option<H256> {
|
||||||
|
match id {
|
||||||
|
BlockID::Hash(hash) => Some(hash),
|
||||||
|
BlockID::Number(number) => chain.block_hash(number),
|
||||||
|
BlockID::Earliest => chain.block_hash(0),
|
||||||
|
BlockID::Latest => Some(chain.best_block_hash())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transaction_address(&self, id: TransactionID) -> Option<TransactionAddress> {
|
||||||
|
match id {
|
||||||
|
TransactionID::Hash(ref hash) => self.chain.transaction_address(hash),
|
||||||
|
TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress {
|
||||||
|
block_hash: hash,
|
||||||
|
index: index,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn wake_up(&self) {
|
||||||
|
if !self.liveness.load(AtomicOrdering::Relaxed) {
|
||||||
|
self.liveness.store(true, AtomicOrdering::Relaxed);
|
||||||
|
self.io_channel.send(NetworkIoMessage::User(SyncMessage::StartNetwork)).unwrap();
|
||||||
|
trace!(target: "mode", "wake_up: Waking.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sleep(&self) {
|
||||||
|
if self.liveness.load(AtomicOrdering::Relaxed) {
|
||||||
|
// only sleep if the import queue is mostly empty.
|
||||||
|
if self.queue_info().total_queue_size() <= MAX_QUEUE_SIZE_TO_SLEEP_ON {
|
||||||
|
self.liveness.store(false, AtomicOrdering::Relaxed);
|
||||||
|
self.io_channel.send(NetworkIoMessage::User(SyncMessage::StopNetwork)).unwrap();
|
||||||
|
trace!(target: "mode", "sleep: Sleeping.");
|
||||||
|
} else {
|
||||||
|
trace!(target: "mode", "sleep: Cannot sleep - syncing ongoing.");
|
||||||
|
// TODO: Consider uncommenting.
|
||||||
|
//*self.last_activity.lock().unwrap() = Some(Instant::now());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Notify us that the network has been started.
|
||||||
|
pub fn network_started(&self, url: &String) {
|
||||||
|
let mut previous_enode = self.previous_enode.lock().unwrap();
|
||||||
|
if let Some(ref u) = *previous_enode {
|
||||||
|
if u == url {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
*previous_enode = Some(url.clone());
|
||||||
|
info!(target: "mode", "Public node URL: {}", url.apply(Colour::White.bold()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Ipc)]
|
||||||
|
#[ipc(client_ident="RemoteClient")]
|
||||||
|
impl BlockChainClient for Client {
|
||||||
|
fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError> {
|
||||||
|
let header = self.block_header(BlockID::Latest).unwrap();
|
||||||
|
let view = HeaderView::new(&header);
|
||||||
|
let last_hashes = self.build_last_hashes(view.hash());
|
||||||
|
let env_info = EnvInfo {
|
||||||
|
number: view.number(),
|
||||||
|
author: view.author(),
|
||||||
|
timestamp: view.timestamp(),
|
||||||
|
difficulty: view.difficulty(),
|
||||||
|
last_hashes: last_hashes,
|
||||||
|
gas_used: U256::zero(),
|
||||||
|
gas_limit: U256::max_value(),
|
||||||
|
};
|
||||||
|
// that's just a copy of the state.
|
||||||
|
let mut state = self.state();
|
||||||
|
let sender = try!(t.sender().map_err(|e| {
|
||||||
|
let message = format!("Transaction malformed: {:?}", e);
|
||||||
|
ExecutionError::TransactionMalformed(message)
|
||||||
|
}));
|
||||||
|
let balance = state.balance(&sender);
|
||||||
|
let needed_balance = t.value + t.gas * t.gas_price;
|
||||||
|
if balance < needed_balance {
|
||||||
|
// give the sender a sufficient balance
|
||||||
|
state.add_balance(&sender, &(needed_balance - balance));
|
||||||
|
}
|
||||||
|
let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
|
||||||
|
let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options);
|
||||||
|
|
||||||
|
// TODO gav move this into Executive.
|
||||||
|
if analytics.state_diffing {
|
||||||
|
if let Ok(ref mut x) = ret {
|
||||||
|
x.state_diff = Some(state.diff_from(self.state()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ret
|
||||||
|
}
|
||||||
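
The balance top-up above lets a read-only call succeed even when the sender cannot afford it; the amount required is the transferred value plus the full gas allowance. A sketch with u64 in place of U256 (illustrative only):

// Minimum balance the sender needs before any top-up: value + gas * gas_price.
fn needed_balance(value: u64, gas: u64, gas_price: u64) -> u64 {
	value + gas * gas_price
}

// e.g. needed_balance(10, 21_000, 5) == 105_010; if the account holds less,
// the state copy is credited with the difference before executing.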
|
|
||||||
|
fn keep_alive(&self) {
|
||||||
|
if self.mode != Mode::Active {
|
||||||
|
self.wake_up();
|
||||||
|
(*self.sleep_state.lock().unwrap()).last_activity = Some(Instant::now());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_header(&self, id: BlockID) -> Option<Bytes> {
|
||||||
|
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_body(&self, id: BlockID) -> Option<Bytes> {
|
||||||
|
Self::block_hash(&self.chain, id).and_then(|hash| {
|
||||||
|
self.chain.block(&hash).map(|bytes| {
|
||||||
|
let rlp = Rlp::new(&bytes);
|
||||||
|
let mut body = RlpStream::new_list(2);
|
||||||
|
body.append_raw(rlp.at(1).as_raw(), 1);
|
||||||
|
body.append_raw(rlp.at(2).as_raw(), 1);
|
||||||
|
body.out()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block(&self, id: BlockID) -> Option<Bytes> {
|
||||||
|
Self::block_hash(&self.chain, id).and_then(|hash| {
|
||||||
|
self.chain.block(&hash)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_status(&self, id: BlockID) -> BlockStatus {
|
||||||
|
match Self::block_hash(&self.chain, id) {
|
||||||
|
Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain,
|
||||||
|
Some(hash) => self.block_queue.block_status(&hash),
|
||||||
|
None => BlockStatus::Unknown
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_total_difficulty(&self, id: BlockID) -> Option<U256> {
|
||||||
|
Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn nonce(&self, address: &Address, id: BlockID) -> Option<U256> {
|
||||||
|
self.state_at(id).map(|s| s.nonce(address))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_hash(&self, id: BlockID) -> Option<H256> {
|
||||||
|
Self::block_hash(&self.chain, id)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn code(&self, address: &Address) -> Option<Bytes> {
|
||||||
|
self.state().code(address)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn balance(&self, address: &Address, id: BlockID) -> Option<U256> {
|
||||||
|
self.state_at(id).map(|s| s.balance(address))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option<H256> {
|
||||||
|
self.state_at(id).map(|s| s.storage_at(address, position))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction> {
|
||||||
|
self.transaction_address(id).and_then(|address| self.chain.transaction(&address))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn uncle(&self, id: UncleID) -> Option<Bytes> {
|
||||||
|
let index = id.position;
|
||||||
|
self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
|
||||||
|
self.transaction_address(id).and_then(|address| {
|
||||||
|
let t = self.chain.block(&address.block_hash)
|
||||||
|
.and_then(|block| BlockView::new(&block).localized_transaction_at(address.index));
|
||||||
|
|
||||||
|
match (t, self.chain.transaction_receipt(&address)) {
|
||||||
|
(Some(tx), Some(receipt)) => {
|
||||||
|
let block_hash = tx.block_hash.clone();
|
||||||
|
let block_number = tx.block_number.clone();
|
||||||
|
let transaction_hash = tx.hash();
|
||||||
|
let transaction_index = tx.transaction_index;
|
||||||
|
let prior_gas_used = match tx.transaction_index {
|
||||||
|
0 => U256::zero(),
|
||||||
|
i => {
|
||||||
|
let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 };
|
||||||
|
let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed");
|
||||||
|
prior_receipt.gas_used
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Some(LocalizedReceipt {
|
||||||
|
transaction_hash: tx.hash(),
|
||||||
|
transaction_index: tx.transaction_index,
|
||||||
|
block_hash: tx.block_hash,
|
||||||
|
block_number: tx.block_number,
|
||||||
|
cumulative_gas_used: receipt.gas_used,
|
||||||
|
gas_used: receipt.gas_used - prior_gas_used,
|
||||||
|
contract_address: match tx.action {
|
||||||
|
Action::Call(_) => None,
|
||||||
|
Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce))
|
||||||
|
},
|
||||||
|
logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry {
|
||||||
|
entry: log,
|
||||||
|
block_hash: block_hash.clone(),
|
||||||
|
block_number: block_number,
|
||||||
|
transaction_hash: transaction_hash.clone(),
|
||||||
|
transaction_index: transaction_index,
|
||||||
|
log_index: i
|
||||||
|
}).collect()
|
||||||
|
})
|
||||||
|
},
|
||||||
|
_ => None
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
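
Receipts carry cumulative gas for the block, so the per-transaction figure above is a difference of neighbouring receipts; a self-contained sketch (illustrative only):

// cumulative[i] is the gas used by transactions 0..=i of a block; transaction i
// alone used the difference with its predecessor (or the raw value for i == 0).
fn gas_used_by(cumulative: &[u64], i: usize) -> u64 {
	if i == 0 { cumulative[0] } else { cumulative[i] - cumulative[i - 1] }
}

// gas_used_by(&[21_000, 74_000, 95_000], 1) == 53_000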
|
|
||||||
|
fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
|
||||||
|
match self.chain.is_known(from) && self.chain.is_known(to) {
|
||||||
|
true => Some(self.chain.tree_route(from.clone(), to.clone())),
|
||||||
|
false => None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn find_uncles(&self, hash: &H256) -> Option<Vec<H256>> {
|
||||||
|
self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn state_data(&self, hash: &H256) -> Option<Bytes> {
|
||||||
|
self.state_db.lock().unwrap().state(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
|
||||||
|
self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn import_block(&self, bytes: Bytes) -> Result<H256, BlockImportError> {
|
||||||
|
{
|
||||||
|
let header = BlockView::new(&bytes).header_view();
|
||||||
|
if self.chain.is_known(&header.sha3()) {
|
||||||
|
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
|
||||||
|
}
|
||||||
|
if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown {
|
||||||
|
return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Ok(try!(self.block_queue.import_block(bytes)))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
|
self.block_queue.queue_info()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn clear_queue(&self) {
|
||||||
|
self.block_queue.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn chain_info(&self) -> BlockChainInfo {
|
||||||
|
BlockChainInfo {
|
||||||
|
total_difficulty: self.chain.best_block_total_difficulty(),
|
||||||
|
pending_total_difficulty: self.chain.best_block_total_difficulty(),
|
||||||
|
genesis_hash: self.chain.genesis_hash(),
|
||||||
|
best_block_hash: self.chain.best_block_hash(),
|
||||||
|
best_block_number: From::from(self.chain.best_block_number())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>> {
|
||||||
|
match (self.block_number(from_block), self.block_number(to_block)) {
|
||||||
|
(Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)),
|
||||||
|
_ => None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
|
||||||
|
// TODO: lock blockchain only once
|
||||||
|
|
||||||
|
let mut blocks = filter.bloom_possibilities().iter()
|
||||||
|
.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
|
||||||
|
.flat_map(|m| m)
|
||||||
|
// remove duplicate elements
|
||||||
|
.collect::<HashSet<u64>>()
|
||||||
|
.into_iter()
|
||||||
|
.collect::<Vec<u64>>();
|
||||||
|
|
||||||
|
blocks.sort();
|
||||||
|
|
||||||
|
blocks.into_iter()
|
||||||
|
.filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
|
||||||
|
.filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
|
||||||
|
.filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
|
||||||
|
.flat_map(|(number, hash, receipts, hashes)| {
|
||||||
|
let mut log_index = 0;
|
||||||
|
receipts.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.flat_map(|(index, receipt)| {
|
||||||
|
log_index += receipt.logs.len();
|
||||||
|
receipt.logs.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.filter(|tuple| filter.matches(&tuple.1))
|
||||||
|
.map(|(i, log)| LocalizedLogEntry {
|
||||||
|
entry: log,
|
||||||
|
block_hash: hash.clone(),
|
||||||
|
block_number: number,
|
||||||
|
transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new),
|
||||||
|
transaction_index: index,
|
||||||
|
log_index: log_index + i
|
||||||
|
})
|
||||||
|
.collect::<Vec<LocalizedLogEntry>>()
|
||||||
|
})
|
||||||
|
.collect::<Vec<LocalizedLogEntry>>()
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
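
For orientation, block-wide log numbering can also be stated directly from per-receipt log counts, as below; the closure above keeps a running counter while it iterates instead (sketch only, not the diff's code):

// A log's block-wide index: logs emitted by earlier receipts in the block
// plus its position within its own receipt.
fn block_log_index(logs_per_receipt: &[usize], receipt_index: usize, position_in_receipt: usize) -> usize {
	logs_per_receipt[..receipt_index].iter().sum::<usize>() + position_in_receipt
}

// block_log_index(&[2, 3, 1], 1, 0) == 2  (the first log of the second receipt)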
|
|
||||||
|
fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
|
||||||
|
let start = self.block_number(filter.range.start);
|
||||||
|
let end = self.block_number(filter.range.end);
|
||||||
|
|
||||||
|
if start.is_some() && end.is_some() {
|
||||||
|
let filter = trace::Filter {
|
||||||
|
range: start.unwrap() as usize..end.unwrap() as usize,
|
||||||
|
from_address: From::from(filter.from_address),
|
||||||
|
to_address: From::from(filter.to_address),
|
||||||
|
};
|
||||||
|
|
||||||
|
let traces = self.tracedb.filter(&filter);
|
||||||
|
Some(traces)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn trace(&self, trace: TraceId) -> Option<LocalizedTrace> {
|
||||||
|
let trace_address = trace.address;
|
||||||
|
self.transaction_address(trace.transaction)
|
||||||
|
.and_then(|tx_address| {
|
||||||
|
self.block_number(BlockID::Hash(tx_address.block_hash))
|
||||||
|
.and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transaction_traces(&self, transaction: TransactionID) -> Option<Vec<LocalizedTrace>> {
|
||||||
|
self.transaction_address(transaction)
|
||||||
|
.and_then(|tx_address| {
|
||||||
|
self.block_number(BlockID::Hash(tx_address.block_hash))
|
||||||
|
.and_then(|number| self.tracedb.transaction_traces(number, tx_address.index))
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_traces(&self, block: BlockID) -> Option<Vec<LocalizedTrace>> {
|
||||||
|
self.block_number(block)
|
||||||
|
.and_then(|number| self.tracedb.block_traces(number))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn last_hashes(&self) -> LastHashes {
|
||||||
|
self.build_last_hashes(self.chain.best_block_hash())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn queue_transactions(&self, transactions: Vec<Bytes>) {
|
||||||
|
if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE {
|
||||||
|
debug!("Ignoring {} transactions: queue is full", transactions.len());
|
||||||
|
} else {
|
||||||
|
let len = transactions.len();
|
||||||
|
match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) {
|
||||||
|
Ok(_) => {
|
||||||
|
self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst);
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
debug!("Ignoring {} transactions: error queueing: {}", len, e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pending_transactions(&self) -> Vec<SignedTransaction> {
|
||||||
|
self.miner.pending_transactions()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MiningBlockChainClient for Client {
|
||||||
|
fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
|
||||||
|
let engine = self.engine.deref().deref();
|
||||||
|
let h = self.chain.best_block_hash();
|
||||||
|
|
||||||
|
let mut open_block = OpenBlock::new(
|
||||||
|
engine,
|
||||||
|
&self.vm_factory,
|
||||||
|
self.trie_factory.clone(),
|
||||||
|
false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
|
||||||
|
self.state_db.lock().unwrap().boxed_clone(),
|
||||||
|
&self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
|
||||||
|
self.build_last_hashes(h.clone()),
|
||||||
|
author,
|
||||||
|
gas_range_target,
|
||||||
|
extra_data,
|
||||||
|
).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed");
|
||||||
|
|
||||||
|
// Add uncles
|
||||||
|
self.chain
|
||||||
|
.find_uncle_headers(&h, engine.maximum_uncle_age())
|
||||||
|
.unwrap()
|
||||||
|
.into_iter()
|
||||||
|
.take(engine.maximum_uncle_count())
|
||||||
|
.foreach(|h| {
|
||||||
|
open_block.push_uncle(h).unwrap();
|
||||||
|
});
|
||||||
|
|
||||||
|
open_block
|
||||||
|
}
|
||||||
|
|
||||||
|
fn vm_factory(&self) -> &EvmFactory {
|
||||||
|
&self.vm_factory
|
||||||
|
}
|
||||||
|
|
||||||
|
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
|
||||||
|
let _import_lock = self.import_lock.lock();
|
||||||
|
let _timer = PerfTimer::new("import_sealed_block");
|
||||||
|
|
||||||
|
let original_best = self.chain_info().best_block_hash;
|
||||||
|
|
||||||
|
let h = block.header().hash();
|
||||||
|
let number = block.header().number();
|
||||||
|
|
||||||
|
let block_data = block.rlp_bytes();
|
||||||
|
let route = self.commit_block(block, &h, &block_data);
|
||||||
|
trace!(target: "client", "Imported sealed block #{} ({})", number, h);
|
||||||
|
|
||||||
|
{
|
||||||
|
let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
|
||||||
|
self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);
|
||||||
|
|
||||||
|
self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
|
||||||
|
imported: vec![h.clone()],
|
||||||
|
invalid: vec![],
|
||||||
|
enacted: enacted,
|
||||||
|
retracted: retracted,
|
||||||
|
sealed: vec![h.clone()],
|
||||||
|
})).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.chain_info().best_block_hash != original_best {
|
||||||
|
self.miner.update_sealing(self);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(h)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MayPanic for Client {
|
||||||
|
fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
|
||||||
|
self.panic_handler.on_panic(closure);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IpcConfig for Client { }
|
||||||
|
@ -1,921 +0,0 @@
|
|||||||
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
|
||||||
// This file is part of Parity.
|
|
||||||
|
|
||||||
// Parity is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
|
|
||||||
// Parity is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
use std::path::PathBuf;
|
|
||||||
use std::collections::{HashSet, HashMap};
|
|
||||||
use std::ops::Deref;
|
|
||||||
use std::mem;
|
|
||||||
use std::collections::VecDeque;
|
|
||||||
use std::sync::*;
|
|
||||||
use std::path::Path;
|
|
||||||
use std::fmt;
|
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
|
|
||||||
|
|
||||||
// util
|
|
||||||
use util::numbers::*;
|
|
||||||
use util::panics::*;
|
|
||||||
use util::network::*;
|
|
||||||
use util::io::*;
|
|
||||||
use util::rlp;
|
|
||||||
use util::sha3::*;
|
|
||||||
use util::{Bytes};
|
|
||||||
use util::rlp::{RlpStream, Rlp, UntrustedRlp};
|
|
||||||
use util::journaldb;
|
|
||||||
use util::journaldb::JournalDB;
|
|
||||||
use util::kvdb::*;
|
|
||||||
use util::Itertools;
|
|
||||||
use util::PerfTimer;
|
|
||||||
use util::View;
|
|
||||||
use util::Stream;
|
|
||||||
|
|
||||||
// other
|
|
||||||
use views::BlockView;
|
|
||||||
use error::{ImportError, ExecutionError, BlockError, ImportResult};
|
|
||||||
use header::BlockNumber;
|
|
||||||
use state::State;
|
|
||||||
use spec::Spec;
|
|
||||||
use engine::Engine;
|
|
||||||
use views::HeaderView;
|
|
||||||
use service::{NetSyncMessage, SyncMessage};
|
|
||||||
use env_info::LastHashes;
|
|
||||||
use verification;
|
|
||||||
use verification::{PreverifiedBlock, Verifier};
|
|
||||||
use block::*;
|
|
||||||
use transaction::{LocalizedTransaction, SignedTransaction, Action};
|
|
||||||
use blockchain::extras::TransactionAddress;
|
|
||||||
use types::filter::Filter;
|
|
||||||
use log_entry::LocalizedLogEntry;
|
|
||||||
use block_queue::{BlockQueue, BlockQueueInfo};
|
|
||||||
use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
|
|
||||||
use client::{BlockID, TransactionID, UncleID, TraceId, ClientConfig,
|
|
||||||
DatabaseCompactionProfile, BlockChainClient, MiningBlockChainClient,
|
|
||||||
TraceFilter, CallAnalytics, BlockImportError, TransactionImportError, TransactionImportResult};
|
|
||||||
use client::Error as ClientError;
|
|
||||||
use env_info::EnvInfo;
|
|
||||||
use executive::{Executive, Executed, TransactOptions, contract_address};
|
|
||||||
use receipt::LocalizedReceipt;
|
|
||||||
use trace::{TraceDB, ImportRequest as TraceImportRequest, LocalizedTrace, Database as TraceDatabase};
|
|
||||||
use trace;
|
|
||||||
use evm::Factory as EvmFactory;
|
|
||||||
use miner::{Miner, MinerService, AccountDetails};
|
|
||||||
use util::TrieFactory;
|
|
||||||
use ipc::IpcConfig;
|
|
||||||
use ipc::binary::{BinaryConvertError};
|
|
||||||
|
|
||||||
// re-export
|
|
||||||
pub use types::blockchain_info::BlockChainInfo;
|
|
||||||
pub use types::block_status::BlockStatus;
|
|
||||||
pub use blockchain::CacheSize as BlockChainCacheSize;
|
|
||||||
|
|
||||||
const MAX_TX_QUEUE_SIZE: usize = 4096;
|
|
||||||
|
|
||||||
impl fmt::Display for BlockChainInfo {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
write!(f, "#{}.{}", self.best_block_number, self.best_block_hash)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Report on the status of a client.
|
|
||||||
#[derive(Default, Clone, Debug, Eq, PartialEq)]
|
|
||||||
pub struct ClientReport {
|
|
||||||
/// How many blocks have been imported so far.
|
|
||||||
pub blocks_imported: usize,
|
|
||||||
/// How many transactions have been applied so far.
|
|
||||||
pub transactions_applied: usize,
|
|
||||||
/// How much gas has been processed so far.
|
|
||||||
pub gas_processed: U256,
|
|
||||||
/// Memory used by state DB
|
|
||||||
pub state_db_mem: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClientReport {
|
|
||||||
/// Alter internal reporting to reflect the additional `block` has been processed.
|
|
||||||
pub fn accrue_block(&mut self, block: &PreverifiedBlock) {
|
|
||||||
self.blocks_imported += 1;
|
|
||||||
self.transactions_applied += block.transactions.len();
|
|
||||||
self.gas_processed = self.gas_processed + block.header.gas_used;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Blockchain database client backed by a persistent database. Owns and manages a blockchain and a block queue.
|
|
||||||
/// Call `import_block()` to import a block asynchronously; `flush_queue()` flushes the queue.
|
|
||||||
pub struct Client {
|
|
||||||
chain: Arc<BlockChain>,
|
|
||||||
tracedb: Arc<TraceDB<BlockChain>>,
|
|
||||||
engine: Arc<Box<Engine>>,
|
|
||||||
state_db: Mutex<Box<JournalDB>>,
|
|
||||||
block_queue: BlockQueue,
|
|
||||||
report: RwLock<ClientReport>,
|
|
||||||
import_lock: Mutex<()>,
|
|
||||||
panic_handler: Arc<PanicHandler>,
|
|
||||||
verifier: Box<Verifier>,
|
|
||||||
vm_factory: Arc<EvmFactory>,
|
|
||||||
trie_factory: TrieFactory,
|
|
||||||
miner: Arc<Miner>,
|
|
||||||
io_channel: IoChannel<NetSyncMessage>,
|
|
||||||
queue_transactions: AtomicUsize,
|
|
||||||
}
|
|
||||||
|
|
||||||
const HISTORY: u64 = 1200;
|
|
||||||
// DO NOT TOUCH THIS ANY MORE UNLESS YOU REALLY KNOW WHAT YOU'RE DOING.
|
|
||||||
// Altering it will force a blanket DB update for *all* JournalDB-derived
|
|
||||||
// databases.
|
|
||||||
// Instead, add/upgrade the version string of the individual JournalDB-derived database
|
|
||||||
// of which you actually want force an upgrade.
|
|
||||||
const CLIENT_DB_VER_STR: &'static str = "5.3";
|
|
||||||
|
|
||||||
/// Get the path for the databases given the root path and information on the databases.
|
|
||||||
pub fn get_db_path(path: &Path, pruning: journaldb::Algorithm, genesis_hash: H256) -> PathBuf {
|
|
||||||
let mut dir = path.to_path_buf();
|
|
||||||
dir.push(H64::from(genesis_hash).hex());
|
|
||||||
//TODO: sec/fat: pruned/full versioning
|
|
||||||
// version here is a bit useless now, since it's controlled only be the pruning algo.
|
|
||||||
dir.push(format!("v{}-sec-{}", CLIENT_DB_VER_STR, pruning));
|
|
||||||
dir
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Append a path element to the given path and return the string.
|
|
||||||
pub fn append_path(path: &Path, item: &str) -> String {
|
|
||||||
let mut p = path.to_path_buf();
|
|
||||||
p.push(item);
|
|
||||||
p.to_str().unwrap().to_owned()
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Client {
|
|
||||||
/// Create a new client with given spec and DB path and custom verifier.
|
|
||||||
pub fn new(
|
|
||||||
config: ClientConfig,
|
|
||||||
spec: Spec,
|
|
||||||
path: &Path,
|
|
||||||
miner: Arc<Miner>,
|
|
||||||
message_channel: IoChannel<NetSyncMessage>)
|
|
||||||
-> Result<Arc<Client>, ClientError>
|
|
||||||
{
|
|
||||||
let path = get_db_path(path, config.pruning, spec.genesis_header().hash());
|
|
||||||
let gb = spec.genesis_block();
|
|
||||||
let chain = Arc::new(BlockChain::new(config.blockchain, &gb, &path));
|
|
||||||
let tracedb = Arc::new(try!(TraceDB::new(config.tracing, &path, chain.clone())));
|
|
||||||
|
|
||||||
let mut state_db_config = match config.db_cache_size {
|
|
||||||
None => DatabaseConfig::default(),
|
|
||||||
Some(cache_size) => DatabaseConfig::with_cache(cache_size),
|
|
||||||
};
|
|
||||||
|
|
||||||
if config.db_compaction == DatabaseCompactionProfile::HDD {
|
|
||||||
state_db_config = state_db_config.compaction(CompactionProfile::hdd());
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut state_db = journaldb::new(
|
|
||||||
&append_path(&path, "state"),
|
|
||||||
config.pruning,
|
|
||||||
state_db_config
|
|
||||||
);
|
|
||||||
|
|
||||||
if state_db.is_empty() && spec.ensure_db_good(state_db.as_hashdb_mut()) {
|
|
||||||
state_db.commit(0, &spec.genesis_header().hash(), None).expect("Error commiting genesis state to state DB");
|
|
||||||
}
|
|
||||||
|
|
||||||
let engine = Arc::new(spec.engine);
|
|
||||||
|
|
||||||
let block_queue = BlockQueue::new(config.queue, engine.clone(), message_channel.clone());
|
|
||||||
let panic_handler = PanicHandler::new_in_arc();
|
|
||||||
panic_handler.forward_from(&block_queue);
|
|
||||||
|
|
||||||
let client = Client {
|
|
||||||
chain: chain,
|
|
||||||
tracedb: tracedb,
|
|
||||||
engine: engine,
|
|
||||||
state_db: Mutex::new(state_db),
|
|
||||||
block_queue: block_queue,
|
|
||||||
report: RwLock::new(Default::default()),
|
|
||||||
import_lock: Mutex::new(()),
|
|
||||||
panic_handler: panic_handler,
|
|
||||||
verifier: verification::new(config.verifier_type),
|
|
||||||
vm_factory: Arc::new(EvmFactory::new(config.vm_type)),
|
|
||||||
trie_factory: TrieFactory::new(config.trie_spec),
|
|
||||||
miner: miner,
|
|
||||||
io_channel: message_channel,
|
|
||||||
queue_transactions: AtomicUsize::new(0),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(Arc::new(client))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Flush the block import queue.
|
|
||||||
pub fn flush_queue(&self) {
|
|
||||||
self.block_queue.flush();
|
|
||||||
}
|
|
||||||
|
|
||||||
fn build_last_hashes(&self, parent_hash: H256) -> LastHashes {
|
|
||||||
let mut last_hashes = LastHashes::new();
|
|
||||||
last_hashes.resize(256, H256::new());
|
|
||||||
last_hashes[0] = parent_hash;
|
|
||||||
for i in 0..255 {
|
|
||||||
match self.chain.block_details(&last_hashes[i]) {
|
|
||||||
Some(details) => {
|
|
||||||
last_hashes[i + 1] = details.parent.clone();
|
|
||||||
},
|
|
||||||
None => break,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
last_hashes
|
|
||||||
}
|
|
||||||
|
|
||||||
    fn check_and_close_block(&self, block: &PreverifiedBlock) -> Result<LockedBlock, ()> {
        let engine = self.engine.deref().deref();
        let header = &block.header;

        // Check the block isn't so old we won't be able to enact it.
        let best_block_number = self.chain.best_block_number();
        if best_block_number >= HISTORY && header.number() <= best_block_number - HISTORY {
            warn!(target: "client", "Block import failed for #{} ({})\nBlock is ancient (current best block: #{}).", header.number(), header.hash(), best_block_number);
            return Err(());
        }

        // Verify Block Family
        let verify_family_result = self.verifier.verify_block_family(&header, &block.bytes, engine, self.chain.deref());
        if let Err(e) = verify_family_result {
            warn!(target: "client", "Stage 3 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
            return Err(());
        };

        // Check if Parent is in chain
        let chain_has_parent = self.chain.block_header(&header.parent_hash);
        if let None = chain_has_parent {
            warn!(target: "client", "Block import failed for #{} ({}): Parent not found ({}) ", header.number(), header.hash(), header.parent_hash);
            return Err(());
        };

        // Enact Verified Block
        let parent = chain_has_parent.unwrap();
        let last_hashes = self.build_last_hashes(header.parent_hash.clone());
        let db = self.state_db.lock().unwrap().boxed_clone();

        let enact_result = enact_verified(&block, engine, self.tracedb.tracing_enabled(), db, &parent, last_hashes, self.dao_rescue_block_gas_limit(header.parent_hash.clone()), &self.vm_factory, self.trie_factory.clone());
        if let Err(e) = enact_result {
            warn!(target: "client", "Block import failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
            return Err(());
        };

        // Final Verification
        let locked_block = enact_result.unwrap();
        if let Err(e) = self.verifier.verify_block_final(&header, locked_block.block().header()) {
            warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e);
            return Err(());
        }

        Ok(locked_block)
    }

    fn calculate_enacted_retracted(&self, import_results: &[ImportRoute]) -> (Vec<H256>, Vec<H256>) {
        fn map_to_vec(map: Vec<(H256, bool)>) -> Vec<H256> {
            map.into_iter().map(|(k, _v)| k).collect()
        }

        // Each ImportRoute lists the blocks enacted and retracted by a single insert.
        // Because we perform multiple inserts, a block enacted by import `k` may be
        // retracted again by import `k + 1`. To determine whether a block ends up
        // enacted or retracted overall, we fold every route into a map; the value
        // left for each hash after the fold is its final state.
        let map = import_results.iter().fold(HashMap::new(), |mut map, route| {
            for hash in &route.enacted {
                map.insert(hash.clone(), true);
            }
            for hash in &route.retracted {
                map.insert(hash.clone(), false);
            }
            map
        });

        // Split into enacted and retracted (using the hashmap value)
        let (enacted, retracted) = map.into_iter().partition(|&(_k, v)| v);
        // And convert the tuples to keys
        (map_to_vec(enacted), map_to_vec(retracted))
    }

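The fold above is easier to follow in isolation. Below is a minimal, self-contained sketch of the same resolution logic, using a toy `Route` struct and plain `u64` values in place of the real `ImportRoute` and `H256` types (those names and values are illustrative only): later routes overwrite earlier ones, so only each block's final status survives.

```rust
use std::collections::HashMap;

// Toy stand-in for ImportRoute: block hashes are just u64s here.
struct Route {
    enacted: Vec<u64>,
    retracted: Vec<u64>,
}

fn enacted_retracted(routes: &[Route]) -> (Vec<u64>, Vec<u64>) {
    // Later routes overwrite earlier ones, so only the final status survives.
    let map = routes.iter().fold(HashMap::new(), |mut map, route| {
        for hash in &route.enacted {
            map.insert(*hash, true);
        }
        for hash in &route.retracted {
            map.insert(*hash, false);
        }
        map
    });
    let (enacted, retracted): (Vec<_>, Vec<_>) = map.into_iter().partition(|&(_, v)| v);
    (
        enacted.into_iter().map(|(k, _)| k).collect(),
        retracted.into_iter().map(|(k, _)| k).collect(),
    )
}

fn main() {
    // Block 2 is enacted by the first insert, then retracted by the second.
    let routes = vec![
        Route { enacted: vec![1, 2], retracted: vec![] },
        Route { enacted: vec![3], retracted: vec![2] },
    ];
    let (enacted, retracted) = enacted_retracted(&routes);
    assert!(enacted.contains(&1) && enacted.contains(&3));
    assert_eq!(retracted, vec![2]);
}
```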
    /// This is triggered by a message coming from a block queue when the block is ready for insertion
    pub fn import_verified_blocks(&self, io: &IoChannel<NetSyncMessage>) -> usize {
        let max_blocks_to_import = 64;

        let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
        let mut invalid_blocks = HashSet::new();
        let mut import_results = Vec::with_capacity(max_blocks_to_import);

        let _import_lock = self.import_lock.lock();
        let _timer = PerfTimer::new("import_verified_blocks");
        let blocks = self.block_queue.drain(max_blocks_to_import);

        let original_best = self.chain_info().best_block_hash;

        for block in blocks {
            let header = &block.header;

            if invalid_blocks.contains(&header.parent_hash) {
                invalid_blocks.insert(header.hash());
                continue;
            }
            let closed_block = self.check_and_close_block(&block);
            if let Err(_) = closed_block {
                invalid_blocks.insert(header.hash());
                continue;
            }
            let closed_block = closed_block.unwrap();
            imported_blocks.push(header.hash());

            let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
            import_results.push(route);

            self.report.write().unwrap().accrue_block(&block);
            trace!(target: "client", "Imported #{} ({})", header.number(), header.hash());
        }

        let imported = imported_blocks.len();
        let invalid_blocks = invalid_blocks.into_iter().collect::<Vec<H256>>();

        {
            if !invalid_blocks.is_empty() {
                self.block_queue.mark_as_bad(&invalid_blocks);
            }
            if !imported_blocks.is_empty() {
                self.block_queue.mark_as_good(&imported_blocks);
            }
        }

        {
            if !imported_blocks.is_empty() && self.block_queue.queue_info().is_empty() {
                let (enacted, retracted) = self.calculate_enacted_retracted(&import_results);

                if self.queue_info().is_empty() {
                    self.miner.chain_new_blocks(self, &imported_blocks, &invalid_blocks, &enacted, &retracted);
                }

                io.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
                    imported: imported_blocks,
                    invalid: invalid_blocks,
                    enacted: enacted,
                    retracted: retracted,
                    sealed: Vec::new(),
                })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
            }
        }

        if self.chain_info().best_block_hash != original_best {
            self.miner.update_sealing(self);
        }

        imported
    }

    fn commit_block<B>(&self, block: B, hash: &H256, block_data: &[u8]) -> ImportRoute where B: IsBlock + Drain {
        let number = block.header().number();
        // Are we committing an era?
        let ancient = if number >= HISTORY {
            let n = number - HISTORY;
            Some((n, self.chain.block_hash(n).unwrap()))
        } else {
            None
        };

        // Commit results
        let receipts = block.receipts().clone();
        let traces = From::from(block.traces().clone().unwrap_or_else(Vec::new));

        // CHECK! I *think* this is fine, even if the state_root is equal to another
        // already-imported block of the same number.
        // TODO: Prove it with a test.
        block.drain().commit(number, hash, ancient).expect("State DB commit failed.");

        // And update the chain after commit to prevent race conditions
        // (when something is in chain but you are not able to fetch details)
        let route = self.chain.insert_block(block_data, receipts);
        self.tracedb.import(TraceImportRequest {
            traces: traces,
            block_hash: hash.clone(),
            block_number: number,
            enacted: route.enacted.clone(),
            retracted: route.retracted.len()
        });
        route
    }

    /// Import transactions from the IO queue
    pub fn import_queued_transactions(&self, transactions: &[Bytes]) -> usize {
        let _timer = PerfTimer::new("import_queued_transactions");
        self.queue_transactions.fetch_sub(transactions.len(), AtomicOrdering::SeqCst);
        let fetch_account = |a: &Address| AccountDetails {
            nonce: self.latest_nonce(a),
            balance: self.latest_balance(a),
        };
        let tx = transactions.iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
        let results = self.miner.import_transactions(self, tx, fetch_account);
        results.len()
    }

    /// Attempt to get a copy of a specific block's state.
    ///
    /// This will not fail if given BlockID::Latest.
    /// Otherwise, this can fail (but may not) if the DB prunes state.
    pub fn state_at(&self, id: BlockID) -> Option<State> {
        // fast path for latest state.
        if let BlockID::Latest = id.clone() {
            return Some(self.state())
        }

        let block_number = match self.block_number(id.clone()) {
            Some(num) => num,
            None => return None,
        };

        self.block_header(id).and_then(|header| {
            let db = self.state_db.lock().unwrap().boxed_clone();

            // early exit for pruned blocks
            if db.is_pruned() && self.chain.best_block_number() >= block_number + HISTORY {
                return None;
            }

            let root = HeaderView::new(&header).state_root();

            State::from_existing(db, root, self.engine.account_start_nonce(), self.trie_factory.clone()).ok()
        })
    }

    /// Get a copy of the best block's state.
    pub fn state(&self) -> State {
        State::from_existing(
            self.state_db.lock().unwrap().boxed_clone(),
            HeaderView::new(&self.best_block_header()).state_root(),
            self.engine.account_start_nonce(),
            self.trie_factory.clone())
        .expect("State root of best block header always valid.")
    }

    /// Get info on the cache.
    pub fn blockchain_cache_info(&self) -> BlockChainCacheSize {
        self.chain.cache_size()
    }

    /// Get the report.
    pub fn report(&self) -> ClientReport {
        let mut report = self.report.read().unwrap().clone();
        report.state_db_mem = self.state_db.lock().unwrap().mem_used();
        report
    }

    /// Tick the client.
    pub fn tick(&self) {
        self.chain.collect_garbage();
        self.block_queue.collect_garbage();
    }

    /// Set up the cache behaviour.
    pub fn configure_cache(&self, pref_cache_size: usize, max_cache_size: usize) {
        self.chain.configure_cache(pref_cache_size, max_cache_size);
    }

    /// Look up the block number for the given block ID.
    pub fn block_number(&self, id: BlockID) -> Option<BlockNumber> {
        match id {
            BlockID::Number(number) => Some(number),
            BlockID::Hash(ref hash) => self.chain.block_number(hash),
            BlockID::Earliest => Some(0),
            BlockID::Latest => Some(self.chain.best_block_number())
        }
    }

    fn block_hash(chain: &BlockChain, id: BlockID) -> Option<H256> {
        match id {
            BlockID::Hash(hash) => Some(hash),
            BlockID::Number(number) => chain.block_hash(number),
            BlockID::Earliest => chain.block_hash(0),
            BlockID::Latest => Some(chain.best_block_hash())
        }
    }

    fn transaction_address(&self, id: TransactionID) -> Option<TransactionAddress> {
        match id {
            TransactionID::Hash(ref hash) => self.chain.transaction_address(hash),
            TransactionID::Location(id, index) => Self::block_hash(&self.chain, id).map(|hash| TransactionAddress {
                block_hash: hash,
                index: index,
            })
        }
    }
}

#[derive(Ipc)]
#[ipc(client_ident="RemoteClient")]
impl BlockChainClient for Client {
    fn call(&self, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, ExecutionError> {
        let header = self.block_header(BlockID::Latest).unwrap();
        let view = HeaderView::new(&header);
        let last_hashes = self.build_last_hashes(view.hash());
        let env_info = EnvInfo {
            number: view.number(),
            author: view.author(),
            timestamp: view.timestamp(),
            difficulty: view.difficulty(),
            last_hashes: last_hashes,
            gas_used: U256::zero(),
            gas_limit: U256::max_value(),
            dao_rescue_block_gas_limit: self.dao_rescue_block_gas_limit(view.parent_hash()),
        };
        // that's just a copy of the state.
        let mut state = self.state();
        let sender = try!(t.sender().map_err(|e| {
            let message = format!("Transaction malformed: {:?}", e);
            ExecutionError::TransactionMalformed(message)
        }));
        let balance = state.balance(&sender);
        let needed_balance = t.value + t.gas * t.gas_price;
        if balance < needed_balance {
            // give the sender a sufficient balance
            state.add_balance(&sender, &(needed_balance - balance));
        }
        let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
        let mut ret = Executive::new(&mut state, &env_info, self.engine.deref().deref(), &self.vm_factory).transact(t, options);

        // TODO gav move this into Executive.
        if analytics.state_diffing {
            if let Ok(ref mut x) = ret {
                x.state_diff = Some(state.diff_from(self.state()));
            }
        }
        ret
    }

    fn block_header(&self, id: BlockID) -> Option<Bytes> {
        Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block(&hash).map(|bytes| BlockView::new(&bytes).rlp().at(0).as_raw().to_vec()))
    }

    fn block_body(&self, id: BlockID) -> Option<Bytes> {
        Self::block_hash(&self.chain, id).and_then(|hash| {
            self.chain.block(&hash).map(|bytes| {
                let rlp = Rlp::new(&bytes);
                let mut body = RlpStream::new_list(2);
                body.append_raw(rlp.at(1).as_raw(), 1);
                body.append_raw(rlp.at(2).as_raw(), 1);
                body.out()
            })
        })
    }

    fn block(&self, id: BlockID) -> Option<Bytes> {
        Self::block_hash(&self.chain, id).and_then(|hash| {
            self.chain.block(&hash)
        })
    }

    fn block_status(&self, id: BlockID) -> BlockStatus {
        match Self::block_hash(&self.chain, id) {
            Some(ref hash) if self.chain.is_known(hash) => BlockStatus::InChain,
            Some(hash) => self.block_queue.block_status(&hash),
            None => BlockStatus::Unknown
        }
    }

    fn block_total_difficulty(&self, id: BlockID) -> Option<U256> {
        Self::block_hash(&self.chain, id).and_then(|hash| self.chain.block_details(&hash)).map(|d| d.total_difficulty)
    }

    fn nonce(&self, address: &Address, id: BlockID) -> Option<U256> {
        self.state_at(id).map(|s| s.nonce(address))
    }

    fn block_hash(&self, id: BlockID) -> Option<H256> {
        Self::block_hash(&self.chain, id)
    }

    fn code(&self, address: &Address) -> Option<Bytes> {
        self.state().code(address)
    }

    fn balance(&self, address: &Address, id: BlockID) -> Option<U256> {
        self.state_at(id).map(|s| s.balance(address))
    }

    fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option<H256> {
        self.state_at(id).map(|s| s.storage_at(address, position))
    }

    fn transaction(&self, id: TransactionID) -> Option<LocalizedTransaction> {
        self.transaction_address(id).and_then(|address| self.chain.transaction(&address))
    }

    fn uncle(&self, id: UncleID) -> Option<Bytes> {
        let index = id.position;
        self.block(id.block).and_then(|block| BlockView::new(&block).uncle_rlp_at(index))
    }

    fn transaction_receipt(&self, id: TransactionID) -> Option<LocalizedReceipt> {
        self.transaction_address(id).and_then(|address| {
            let t = self.chain.block(&address.block_hash)
                .and_then(|block| BlockView::new(&block).localized_transaction_at(address.index));

            match (t, self.chain.transaction_receipt(&address)) {
                (Some(tx), Some(receipt)) => {
                    let block_hash = tx.block_hash.clone();
                    let block_number = tx.block_number.clone();
                    let transaction_hash = tx.hash();
                    let transaction_index = tx.transaction_index;
                    let prior_gas_used = match tx.transaction_index {
                        0 => U256::zero(),
                        i => {
                            let prior_address = TransactionAddress { block_hash: address.block_hash, index: i - 1 };
                            let prior_receipt = self.chain.transaction_receipt(&prior_address).expect("Transaction receipt at `address` exists; `prior_address` has lower index in same block; qed");
                            prior_receipt.gas_used
                        }
                    };
                    Some(LocalizedReceipt {
                        transaction_hash: tx.hash(),
                        transaction_index: tx.transaction_index,
                        block_hash: tx.block_hash,
                        block_number: tx.block_number,
                        cumulative_gas_used: receipt.gas_used,
                        gas_used: receipt.gas_used - prior_gas_used,
                        contract_address: match tx.action {
                            Action::Call(_) => None,
                            Action::Create => Some(contract_address(&tx.sender().unwrap(), &tx.nonce))
                        },
                        logs: receipt.logs.into_iter().enumerate().map(|(i, log)| LocalizedLogEntry {
                            entry: log,
                            block_hash: block_hash.clone(),
                            block_number: block_number,
                            transaction_hash: transaction_hash.clone(),
                            transaction_index: transaction_index,
                            log_index: i
                        }).collect()
                    })
                },
                _ => None
            }
        })
    }

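The receipt handling above derives per-transaction gas from cumulative values: each receipt records the gas used up to and including its transaction, so a transaction's own cost is the difference from the previous receipt in the same block. A tiny standalone sketch of that arithmetic with toy `u64` values (the real code works on `U256` receipts):

```rust
// Cumulative gas from a block's receipts, in transaction order.
fn gas_used_by_tx(cumulative: &[u64], index: usize) -> u64 {
    // The receipt at `index` covers transactions 0..=index, so subtract the
    // previous receipt's cumulative value (zero for the first transaction).
    let prior = if index == 0 { 0 } else { cumulative[index - 1] };
    cumulative[index] - prior
}

fn main() {
    let cumulative = [21_000u64, 74_000, 95_000];
    assert_eq!(gas_used_by_tx(&cumulative, 0), 21_000);
    assert_eq!(gas_used_by_tx(&cumulative, 1), 53_000);
    assert_eq!(gas_used_by_tx(&cumulative, 2), 21_000);
}
```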
    fn tree_route(&self, from: &H256, to: &H256) -> Option<TreeRoute> {
        match self.chain.is_known(from) && self.chain.is_known(to) {
            true => Some(self.chain.tree_route(from.clone(), to.clone())),
            false => None
        }
    }

    fn find_uncles(&self, hash: &H256) -> Option<Vec<H256>> {
        self.chain.find_uncle_hashes(hash, self.engine.maximum_uncle_age())
    }

    fn state_data(&self, hash: &H256) -> Option<Bytes> {
        self.state_db.lock().unwrap().state(hash)
    }

    fn block_receipts(&self, hash: &H256) -> Option<Bytes> {
        self.chain.block_receipts(hash).map(|receipts| rlp::encode(&receipts).to_vec())
    }

    fn import_block(&self, bytes: Bytes) -> Result<H256, BlockImportError> {
        {
            let header = BlockView::new(&bytes).header_view();
            if self.chain.is_known(&header.sha3()) {
                return Err(BlockImportError::Import(ImportError::AlreadyInChain));
            }
            if self.block_status(BlockID::Hash(header.parent_hash())) == BlockStatus::Unknown {
                return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
            }
        }
        Ok(try!(self.block_queue.import_block(bytes)))
    }

    fn queue_info(&self) -> BlockQueueInfo {
        self.block_queue.queue_info()
    }

    fn clear_queue(&self) {
        self.block_queue.clear();
    }

    fn chain_info(&self) -> BlockChainInfo {
        BlockChainInfo {
            total_difficulty: self.chain.best_block_total_difficulty(),
            pending_total_difficulty: self.chain.best_block_total_difficulty(),
            genesis_hash: self.chain.genesis_hash(),
            best_block_hash: self.chain.best_block_hash(),
            best_block_number: From::from(self.chain.best_block_number())
        }
    }

    fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockID, to_block: BlockID) -> Option<Vec<BlockNumber>> {
        match (self.block_number(from_block), self.block_number(to_block)) {
            (Some(from), Some(to)) => Some(self.chain.blocks_with_bloom(bloom, from, to)),
            _ => None
        }
    }

    fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
        // TODO: lock blockchain only once

        let mut blocks = filter.bloom_possibilities().iter()
            .filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
            .flat_map(|m| m)
            // remove duplicate elements
            .collect::<HashSet<u64>>()
            .into_iter()
            .collect::<Vec<u64>>();

        blocks.sort();

        blocks.into_iter()
            .filter_map(|number| self.chain.block_hash(number).map(|hash| (number, hash)))
            .filter_map(|(number, hash)| self.chain.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
            .filter_map(|(number, hash, receipts)| self.chain.block(&hash).map(|ref b| (number, hash, receipts, BlockView::new(b).transaction_hashes())))
            .flat_map(|(number, hash, receipts, hashes)| {
                let mut log_index = 0;
                receipts.into_iter()
                    .enumerate()
                    .flat_map(|(index, receipt)| {
                        log_index += receipt.logs.len();
                        receipt.logs.into_iter()
                            .enumerate()
                            .filter(|tuple| filter.matches(&tuple.1))
                            .map(|(i, log)| LocalizedLogEntry {
                                entry: log,
                                block_hash: hash.clone(),
                                block_number: number,
                                transaction_hash: hashes.get(index).cloned().unwrap_or_else(H256::new),
                                transaction_index: index,
                                log_index: log_index + i
                            })
                            .collect::<Vec<LocalizedLogEntry>>()
                    })
                    .collect::<Vec<LocalizedLogEntry>>()
            })
            .collect()
    }

    fn filter_traces(&self, filter: TraceFilter) -> Option<Vec<LocalizedTrace>> {
        let start = self.block_number(filter.range.start);
        let end = self.block_number(filter.range.end);

        if start.is_some() && end.is_some() {
            let filter = trace::Filter {
                range: start.unwrap() as usize..end.unwrap() as usize,
                from_address: From::from(filter.from_address),
                to_address: From::from(filter.to_address),
            };

            let traces = self.tracedb.filter(&filter);
            Some(traces)
        } else {
            None
        }
    }

    fn trace(&self, trace: TraceId) -> Option<LocalizedTrace> {
        let trace_address = trace.address;
        self.transaction_address(trace.transaction)
            .and_then(|tx_address| {
                self.block_number(BlockID::Hash(tx_address.block_hash))
                    .and_then(|number| self.tracedb.trace(number, tx_address.index, trace_address))
            })
    }

    fn transaction_traces(&self, transaction: TransactionID) -> Option<Vec<LocalizedTrace>> {
        self.transaction_address(transaction)
            .and_then(|tx_address| {
                self.block_number(BlockID::Hash(tx_address.block_hash))
                    .and_then(|number| self.tracedb.transaction_traces(number, tx_address.index))
            })
    }

    fn block_traces(&self, block: BlockID) -> Option<Vec<LocalizedTrace>> {
        self.block_number(block)
            .and_then(|number| self.tracedb.block_traces(number))
    }

    fn last_hashes(&self) -> LastHashes {
        self.build_last_hashes(self.chain.best_block_hash())
    }

    fn import_transactions(&self, transactions: Vec<SignedTransaction>) -> Vec<Result<TransactionImportResult, TransactionImportError>> {
        let fetch_account = |a: &Address| AccountDetails {
            nonce: self.latest_nonce(a),
            balance: self.latest_balance(a),
        };

        self.miner.import_transactions(self, transactions, &fetch_account)
            .into_iter()
            .map(|res| res.map_err(|e| e.into()))
            .collect()
    }

    fn queue_transactions(&self, transactions: Vec<Bytes>) {
        if self.queue_transactions.load(AtomicOrdering::Relaxed) > MAX_TX_QUEUE_SIZE {
            debug!("Ignoring {} transactions: queue is full", transactions.len());
        } else {
            let len = transactions.len();
            match self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewTransactions(transactions))) {
                Ok(_) => {
                    self.queue_transactions.fetch_add(len, AtomicOrdering::SeqCst);
                }
                Err(e) => {
                    debug!("Ignoring {} transactions: error queueing: {}", len, e);
                }
            }
        }
    }

    fn pending_transactions(&self) -> Vec<SignedTransaction> {
        self.miner.pending_transactions()
    }
}

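`queue_transactions` and `import_queued_transactions` above share a simple backpressure counter: the producer checks and bumps an atomic count before handing bytes to the IO channel, and the consumer decrements it once a batch has been imported. A standalone sketch of that pattern, with an illustrative `MAX_QUEUED` constant and type names that are not from the codebase:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Hypothetical cap, standing in for MAX_TX_QUEUE_SIZE.
const MAX_QUEUED: usize = 1024;

struct TxBackpressure {
    queued: AtomicUsize,
}

impl TxBackpressure {
    // Returns true if `n` items were accepted for queueing.
    fn try_enqueue(&self, n: usize) -> bool {
        if self.queued.load(Ordering::Relaxed) > MAX_QUEUED {
            return false; // queue full, drop the batch
        }
        self.queued.fetch_add(n, Ordering::SeqCst);
        true
    }

    // Called by the consumer once a batch has been imported.
    fn mark_imported(&self, n: usize) {
        self.queued.fetch_sub(n, Ordering::SeqCst);
    }
}

fn main() {
    let bp = TxBackpressure { queued: AtomicUsize::new(0) };
    assert!(bp.try_enqueue(10));
    bp.mark_imported(10);
    assert_eq!(bp.queued.load(Ordering::Relaxed), 0);
}
```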
impl MiningBlockChainClient for Client {
    fn prepare_open_block(&self, author: Address, gas_range_target: (U256, U256), extra_data: Bytes) -> OpenBlock {
        let engine = self.engine.deref().deref();
        let h = self.chain.best_block_hash();

        let mut open_block = OpenBlock::new(
            engine,
            &self.vm_factory,
            self.trie_factory.clone(),
            false, // TODO: this will need to be parameterised once we want to do immediate mining insertion.
            self.state_db.lock().unwrap().boxed_clone(),
            &self.chain.block_header(&h).expect("h is best block hash: so it's header must exist: qed"),
            self.build_last_hashes(h.clone()),
            self.dao_rescue_block_gas_limit(h.clone()),
            author,
            gas_range_target,
            extra_data,
        ).expect("OpenBlock::new only fails if parent state root invalid; state root of best block's header is never invalid; qed");

        // Add uncles
        self.chain
            .find_uncle_headers(&h, engine.maximum_uncle_age())
            .unwrap()
            .into_iter()
            .take(engine.maximum_uncle_count())
            .foreach(|h| {
                open_block.push_uncle(h).unwrap();
            });

        open_block
    }

    fn vm_factory(&self) -> &EvmFactory {
        &self.vm_factory
    }

    fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
        let _import_lock = self.import_lock.lock();
        let _timer = PerfTimer::new("import_sealed_block");

        let original_best = self.chain_info().best_block_hash;

        let h = block.header().hash();
        let number = block.header().number();

        let block_data = block.rlp_bytes();
        let route = self.commit_block(block, &h, &block_data);
        trace!(target: "client", "Imported sealed block #{} ({})", number, h);

        {
            let (enacted, retracted) = self.calculate_enacted_retracted(&[route]);
            self.miner.chain_new_blocks(self, &[h.clone()], &[], &enacted, &retracted);

            self.io_channel.send(NetworkIoMessage::User(SyncMessage::NewChainBlocks {
                imported: vec![h.clone()],
                invalid: vec![],
                enacted: enacted,
                retracted: retracted,
                sealed: vec![h.clone()],
            })).unwrap_or_else(|e| warn!("Error sending IO notification: {:?}", e));
        }

        if self.chain_info().best_block_hash != original_best {
            self.miner.update_sealing(self);
        }

        Ok(h)
    }
}

impl MayPanic for Client {
    fn on_panic<F>(&self, closure: F) where F: OnPanicListener {
        self.panic_handler.on_panic(closure);
    }
}

impl IpcConfig for Client { }

@@ -14,6 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
+pub use std::time::Duration;
 pub use block_queue::BlockQueueConfig;
 pub use blockchain::Config as BlockChainConfig;
 pub use trace::{Config as TraceConfig, Switch};
@@ -35,6 +36,23 @@ impl Default for DatabaseCompactionProfile {
     fn default() -> Self { DatabaseCompactionProfile::Default }
 }
 
+/// Operating mode for the client.
+#[derive(Debug, Eq, PartialEq)]
+pub enum Mode {
+    /// Always on.
+    Active,
+    /// Goes offline after RLP is inactive for some (given) time, but
+    /// comes back online after a while of inactivity.
+    Passive(Duration, Duration),
+    /// Goes offline after RLP is inactive for some (given) time and
+    /// stays inactive.
+    Dark(Duration),
+}
+
+impl Default for Mode {
+    fn default() -> Self { Mode::Active }
+}
+
 /// Client configuration. Includes configs for all sub-systems.
 #[derive(Debug, Default)]
 pub struct ClientConfig {
@@ -56,6 +74,8 @@ pub struct ClientConfig {
     pub db_cache_size: Option<usize>,
     /// State db compaction profile
     pub db_compaction: DatabaseCompactionProfile,
+    /// Operating mode
+    pub mode: Mode,
     /// Type of block verifier used by client.
     pub verifier_type: VerifierType,
 }
@@ -16,14 +16,13 @@
 
 //! Blockchain database client.
 
-mod client;
 mod config;
 mod error;
 mod test_client;
 mod trace;
 
 pub use self::client::*;
-pub use self::config::{ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
+pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockQueueConfig, BlockChainConfig, Switch, VMType};
 pub use self::error::Error;
 pub use types::ids::*;
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};
@@ -42,17 +41,30 @@ use header::{BlockNumber};
 use transaction::{LocalizedTransaction, SignedTransaction};
 use log_entry::LocalizedLogEntry;
 use filter::Filter;
-use views::{HeaderView, BlockView};
+use views::{BlockView};
 use error::{ImportResult, ExecutionError};
 use receipt::LocalizedReceipt;
 use trace::LocalizedTrace;
 use evm::Factory as EvmFactory;
 pub use types::call_analytics::CallAnalytics;
 pub use block_import_error::BlockImportError;
-pub use transaction_import::{TransactionImportResult, TransactionImportError};
+pub use transaction_import::TransactionImportResult;
+pub use transaction_import::TransactionImportError;
 
+pub mod client {
+    //! Blockchain database client.
+
+    #![allow(dead_code, unused_assignments, unused_variables, missing_docs)] // codegen issues
+    include!(concat!(env!("OUT_DIR"), "/client.ipc.rs"));
+}
+
 /// Blockchain database client. Owns and manages a blockchain and a block queue.
 pub trait BlockChainClient : Sync + Send {
+
+    /// Should be called by any external-facing interface when actively using the client.
+    /// To minimise chatter, there's no need to call more than once every 30s.
+    fn keep_alive(&self) {}
+
     /// Get raw block header data by block id.
     fn block_header(&self, id: BlockID) -> Option<Bytes>;
 
@@ -177,9 +189,6 @@ pub trait BlockChainClient : Sync + Send {
     /// Get last hashes starting from best block.
     fn last_hashes(&self) -> LastHashes;
 
-    /// import transactions from network/other 3rd party
-    fn import_transactions(&self, transactions: Vec<SignedTransaction>) -> Vec<Result<TransactionImportResult, TransactionImportError>>;
-
     /// Queue transactions for importing.
     fn queue_transactions(&self, transactions: Vec<Bytes>);
 
@@ -211,28 +220,6 @@ pub trait BlockChainClient : Sync + Send {
             Err(())
         }
     }
-
-    /// Get `Some` gas limit of SOFT_FORK_BLOCK, or `None` if chain is not yet that long.
-    fn dao_rescue_block_gas_limit(&self, chain_hash: H256) -> Option<U256> {
-        const SOFT_FORK_BLOCK: u64 = 1800000;
-        // shortcut if the canon chain is already known.
-        if self.chain_info().best_block_number > SOFT_FORK_BLOCK + 1000 {
-            return self.block_header(BlockID::Number(SOFT_FORK_BLOCK)).map(|header| HeaderView::new(&header).gas_limit());
-        }
-        // otherwise check according to `chain_hash`.
-        if let Some(mut header) = self.block_header(BlockID::Hash(chain_hash)) {
-            if HeaderView::new(&header).number() < SOFT_FORK_BLOCK {
-                None
-            } else {
-                while HeaderView::new(&header).number() != SOFT_FORK_BLOCK {
-                    header = self.block_header(BlockID::Hash(HeaderView::new(&header).parent_hash())).expect("chain is complete; parent of chain entry must be in chain; qed");
-                }
-                Some(HeaderView::new(&header).gas_limit())
-            }
-        } else {
-            None
-        }
-    }
 }
 
 /// Extended client interface used for mining
@@ -22,7 +22,7 @@ use transaction::{Transaction, LocalizedTransaction, SignedTransaction, Action};
 use blockchain::TreeRoute;
 use client::{BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockID,
     TransactionID, UncleID, TraceId, TraceFilter, LastHashes, CallAnalytics,
-    TransactionImportError, BlockImportError};
+    BlockImportError};
 use header::{Header as BlockHeader, BlockNumber};
 use filter::Filter;
 use log_entry::LocalizedLogEntry;
@@ -39,8 +39,6 @@ use executive::Executed;
 use error::ExecutionError;
 use trace::LocalizedTrace;
-
-use miner::{TransactionImportResult, AccountDetails};
 
 /// Test client.
 pub struct TestBlockChainClient {
     /// Blocks.
@@ -275,6 +273,10 @@ impl BlockChainClient for TestBlockChainClient {
         }
     }
 
+    fn latest_nonce(&self, address: &Address) -> U256 {
+        self.nonce(address, BlockID::Latest).unwrap()
+    }
+
     fn code(&self, address: &Address) -> Option<Bytes> {
         self.code.read().unwrap().get(address).cloned()
     }
@@ -287,6 +289,10 @@ impl BlockChainClient for TestBlockChainClient {
         }
     }
 
+    fn latest_balance(&self, address: &Address) -> U256 {
+        self.balance(address, BlockID::Latest).unwrap()
+    }
+
     fn storage_at(&self, address: &Address, position: &H256, id: BlockID) -> Option<H256> {
         if let BlockID::Latest = id {
             Some(self.storage.read().unwrap().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new))
@@ -488,24 +494,10 @@ impl BlockChainClient for TestBlockChainClient {
         unimplemented!();
     }
 
-    fn import_transactions(&self, transactions: Vec<SignedTransaction>) -> Vec<Result<TransactionImportResult, TransactionImportError>> {
-        let nonces = self.nonces.read().unwrap();
-        let balances = self.balances.read().unwrap();
-        let fetch_account = |a: &Address| AccountDetails {
-            nonce: nonces[a],
-            balance: balances[a],
-        };
-
-        self.miner.import_transactions(self, transactions, &fetch_account)
-            .into_iter()
-            .map(|res| res.map_err(|e| e.into()))
-            .collect()
-    }
-
     fn queue_transactions(&self, transactions: Vec<Bytes>) {
         // import right here
-        let tx = transactions.into_iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
-        self.import_transactions(tx);
+        let txs = transactions.into_iter().filter_map(|bytes| UntrustedRlp::new(&bytes).as_val().ok()).collect();
+        self.miner.import_external_transactions(self, txs);
     }
 
     fn pending_transactions(&self) -> Vec<SignedTransaction> {
@@ -39,9 +39,6 @@ pub struct EnvInfo {
     pub last_hashes: LastHashes,
     /// The gas used.
     pub gas_used: U256,
-
-    /// Block gas limit at DAO rescue block SOFT_FORK_BLOCK or None if not yet there.
-    pub dao_rescue_block_gas_limit: Option<U256>,
 }
 
 impl Default for EnvInfo {
@@ -54,7 +51,6 @@ impl Default for EnvInfo {
             gas_limit: 0.into(),
             last_hashes: vec![],
             gas_used: 0.into(),
-            dao_rescue_block_gas_limit: None,
         }
     }
 }
@@ -70,7 +66,6 @@ impl From<ethjson::vm::Env> for EnvInfo {
             timestamp: e.timestamp.into(),
             last_hashes: (1..cmp::min(number + 1, 257)).map(|i| format!("{}", number - i).as_bytes().sha3()).collect(),
             gas_used: U256::zero(),
-            dao_rescue_block_gas_limit: None,
         }
     }
 }
@@ -39,8 +39,6 @@ pub struct EthashParams {
     pub registrar: Address,
     /// Homestead transition block number.
     pub frontier_compatibility_mode_limit: u64,
-    /// Enable the soft-fork logic.
-    pub dao_rescue_soft_fork: bool,
 }
 
 impl From<ethjson::spec::EthashParams> for EthashParams {
@@ -53,7 +51,6 @@ impl From<ethjson::spec::EthashParams> for EthashParams {
             block_reward: p.block_reward.into(),
             registrar: p.registrar.into(),
             frontier_compatibility_mode_limit: p.frontier_compatibility_mode_limit.into(),
-            dao_rescue_soft_fork: p.dao_rescue_soft_fork.into(),
         }
     }
 }
@@ -102,11 +99,7 @@ impl Engine for Ethash {
         if env_info.number < self.ethash_params.frontier_compatibility_mode_limit {
             Schedule::new_frontier()
         } else {
-            let mut s = Schedule::new_homestead();
-            if self.ethash_params.dao_rescue_soft_fork {
-                s.reject_dao_transactions = env_info.dao_rescue_block_gas_limit.map_or(false, |x| x <= 4_000_000.into());
-            }
-            s
+            Schedule::new_homestead()
         }
     }
 
@@ -325,7 +318,7 @@ mod tests {
         spec.ensure_db_good(db.as_hashdb_mut());
         let last_hashes = vec![genesis_header.hash()];
         let vm_factory = Default::default();
-        let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+        let b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
         let b = b.close();
         assert_eq!(b.state().balance(&Address::zero()), U256::from_str("4563918244f40000").unwrap());
     }
@@ -340,7 +333,7 @@ mod tests {
        spec.ensure_db_good(db.as_hashdb_mut());
        let last_hashes = vec![genesis_header.hash()];
        let vm_factory = Default::default();
-       let mut b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, None, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
+       let mut b = OpenBlock::new(engine.deref(), &vm_factory, Default::default(), false, db, &genesis_header, last_hashes, Address::zero(), (3141562.into(), 31415620.into()), vec![]).unwrap();
        let mut uncle = Header::new();
        let uncle_author = address_from_hex("ef2d6d194084c2de36e0dabfce45d046b37d1106");
        uncle.author = uncle_author.clone();
@@ -369,7 +362,6 @@ mod tests {
            last_hashes: vec![],
            gas_used: 0.into(),
            gas_limit: 0.into(),
-           dao_rescue_block_gas_limit: None,
        });
 
        assert!(schedule.stack_limit > 0);
@@ -382,7 +374,6 @@ mod tests {
            last_hashes: vec![],
            gas_used: 0.into(),
            gas_limit: 0.into(),
-           dao_rescue_block_gas_limit: None,
        });
 
        assert!(!schedule.have_delegate_call);
@@ -33,11 +33,8 @@ use super::spec::*;
 pub fn new_olympic() -> Spec { Spec::load(include_bytes!("../../res/ethereum/olympic.json")) }
 
 /// Create a new Frontier mainnet chain spec.
-pub fn new_frontier(dao_rescue: bool) -> Spec {
-    Spec::load(match dao_rescue {
-        true => include_bytes!("../../res/ethereum/frontier_dao_rescue.json"),
-        false => include_bytes!("../../res/ethereum/frontier.json"),
-    })
+pub fn new_frontier() -> Spec {
+    Spec::load(include_bytes!("../../res/ethereum/frontier.json"))
 }
 
 /// Create a new Frontier chain spec as though it never changes to Homestead.
@@ -89,7 +86,7 @@ mod tests {
 
     #[test]
     fn frontier() {
-        let frontier = new_frontier(true);
+        let frontier = new_frontier();
 
         assert_eq!(frontier.state_root(), H256::from_str("d7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544").unwrap());
         let genesis = frontier.genesis_block();
126 ethcore/src/evm/benches/mod.rs (new file)
@@ -0,0 +1,126 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! benchmarking for EVM
//! should be started with:
//! ```bash
//! multirust run nightly cargo bench
//! ```

extern crate test;

use self::test::{Bencher, black_box};

use common::*;
use evm::{self, Factory, VMType};
use evm::tests::FakeExt;

#[bench]
fn simple_loop_log0_usize(b: &mut Bencher) {
    simple_loop_log0(U256::from(::std::usize::MAX), b)
}

#[bench]
fn simple_loop_log0_u256(b: &mut Bencher) {
    simple_loop_log0(!U256::zero(), b)
}

fn simple_loop_log0(gas: U256, b: &mut Bencher) {
    let mut vm = Factory::new(VMType::Interpreter).create(gas);
    let mut ext = FakeExt::new();

    let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();
    let code = black_box(
        "62ffffff5b600190036000600fa0600357".from_hex().unwrap()
    );

    b.iter(|| {
        let mut params = ActionParams::default();
        params.address = address.clone();
        params.gas = gas;
        params.code = Some(code.clone());

        result(vm.exec(params, &mut ext))
    });
}

#[bench]
fn mem_gas_calculation_same_usize(b: &mut Bencher) {
    mem_gas_calculation_same(U256::from(::std::usize::MAX), b)
}

#[bench]
fn mem_gas_calculation_same_u256(b: &mut Bencher) {
    mem_gas_calculation_same(!U256::zero(), b)
}

fn mem_gas_calculation_same(gas: U256, b: &mut Bencher) {
    let mut vm = Factory::new(VMType::Interpreter).create(gas);
    let mut ext = FakeExt::new();

    let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();

    b.iter(|| {
        let code = black_box(
            "6110006001556001546000555b610fff805560016000540380600055600c57".from_hex().unwrap()
        );

        let mut params = ActionParams::default();
        params.address = address.clone();
        params.gas = gas;
        params.code = Some(code.clone());

        result(vm.exec(params, &mut ext))
    });
}

#[bench]
fn mem_gas_calculation_increasing_usize(b: &mut Bencher) {
    mem_gas_calculation_increasing(U256::from(::std::usize::MAX), b)
}

#[bench]
fn mem_gas_calculation_increasing_u256(b: &mut Bencher) {
    mem_gas_calculation_increasing(!U256::zero(), b)
}

fn mem_gas_calculation_increasing(gas: U256, b: &mut Bencher) {
    let mut vm = Factory::new(VMType::Interpreter).create(gas);
    let mut ext = FakeExt::new();

    let address = Address::from_str("0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6").unwrap();

    b.iter(|| {
        let code = black_box(
            "6110006001556001546000555b610fff60005401805560016000540380600055600c57".from_hex().unwrap()
        );

        let mut params = ActionParams::default();
        params.address = address.clone();
        params.gas = gas;
        params.code = Some(code.clone());

        result(vm.exec(params, &mut ext))
    });
}

fn result(r: evm::Result<evm::GasLeft>) -> U256 {
    match r {
        Ok(evm::GasLeft::Known(v)) => v,
        Ok(evm::GasLeft::NeedsReturn(v, _)) => v,
        _ => U256::zero(),
    }
}
@@ -95,6 +95,61 @@ impl<'a> Finalize for Result<GasLeft<'a>> {
     }
 }
 
+pub trait CostType: ops::Mul<Output=Self> + ops::Div<Output=Self> + ops::Add<Output=Self> + ops::Sub<Output=Self> + ops::Shr<usize, Output=Self> + ops::Shl<usize, Output=Self> + cmp::Ord + Sized + From<usize> + Copy {
+    fn as_u256(&self) -> U256;
+    fn from_u256(val: U256) -> Result<Self>;
+    fn as_usize(&self) -> usize;
+    fn overflow_add(self, other: Self) -> (Self, bool);
+    fn overflow_mul(self, other: Self) -> (Self, bool);
+}
+
+impl CostType for U256 {
+    fn as_u256(&self) -> U256 {
+        *self
+    }
+
+    fn from_u256(val: U256) -> Result<Self> {
+        Ok(val)
+    }
+
+    fn as_usize(&self) -> usize {
+        self.as_u64() as usize
+    }
+
+    fn overflow_add(self, other: Self) -> (Self, bool) {
+        Uint::overflowing_add(self, other)
+    }
+
+    fn overflow_mul(self, other: Self) -> (Self, bool) {
+        Uint::overflowing_mul(self, other)
+    }
+}
+
+impl CostType for usize {
+    fn as_u256(&self) -> U256 {
+        U256::from(*self)
+    }
+
+    fn from_u256(val: U256) -> Result<Self> {
+        if U256::from(val.low_u64()) != val {
+            return Err(Error::OutOfGas);
+        }
+        Ok(val.low_u64() as usize)
+    }
+
+    fn as_usize(&self) -> usize {
+        *self
+    }
+
+    fn overflow_add(self, other: Self) -> (Self, bool) {
+        self.overflowing_add(other)
+    }
+
+    fn overflow_mul(self, other: Self) -> (Self, bool) {
+        self.overflowing_mul(other)
+    }
+}
+
 /// Evm interface
 pub trait Evm {
     /// This function should be used to execute transaction.
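The `CostType` abstraction above exists so gas arithmetic can run on `usize` when the gas limit fits, falling back to the 256-bit type otherwise; a failed narrowing is treated the same way as running out of gas. A standalone sketch of that narrowing and overflow checking, using `u128` as a stand-in for `U256` (names here are illustrative, not the crate's API):

```rust
// Narrow a wide gas value to usize, failing (as OutOfGas would) if it doesn't fit.
fn cost_from_wide(val: u128) -> Result<usize, &'static str> {
    if val > usize::MAX as u128 {
        return Err("out of gas: cost does not fit in usize");
    }
    Ok(val as usize)
}

// Overflow-aware addition, mirroring the role of CostType::overflow_add.
fn overflow_add(a: usize, b: usize) -> (usize, bool) {
    a.overflowing_add(b)
}

fn main() {
    assert_eq!(cost_from_wide(21_000), Ok(21_000));
    assert!(cost_from_wide(u128::MAX).is_err());
    assert_eq!(overflow_add(1, 2), (3, false));
    assert!(overflow_add(usize::MAX, 1).1); // overflow detected
}
```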
@@ -19,6 +19,7 @@
 //! TODO: consider spliting it into two separate files.
 use std::fmt;
 use evm::Evm;
+use util::{U256, Uint};
 
 #[derive(Debug, Clone)]
 /// Type of EVM to use.
@@ -85,24 +86,30 @@ pub struct Factory {
 
 impl Factory {
     /// Create fresh instance of VM
+    /// Might choose implementation depending on supplied gas.
     #[cfg(feature = "jit")]
-    pub fn create(&self) -> Box<Evm> {
+    pub fn create(&self, gas: U256) -> Box<Evm> {
         match self.evm {
             VMType::Jit => {
                 Box::new(super::jit::JitEvm::default())
             },
-            VMType::Interpreter => {
-                Box::new(super::interpreter::Interpreter::default())
+            VMType::Interpreter => if Self::can_fit_in_usize(gas) {
+                Box::new(super::interpreter::Interpreter::<usize>::default())
+            } else {
+                Box::new(super::interpreter::Interpreter::<U256>::default())
             }
         }
     }
 
     /// Create fresh instance of VM
+    /// Might choose implementation depending on supplied gas.
     #[cfg(not(feature = "jit"))]
-    pub fn create(&self) -> Box<Evm> {
+    pub fn create(&self, gas: U256) -> Box<Evm> {
         match self.evm {
-            VMType::Interpreter => {
-                Box::new(super::interpreter::Interpreter::default())
+            VMType::Interpreter => if Self::can_fit_in_usize(gas) {
+                Box::new(super::interpreter::Interpreter::<usize>::default())
+            } else {
+                Box::new(super::interpreter::Interpreter::<U256>::default())
             }
         }
     }
@@ -113,6 +120,10 @@ impl Factory {
             evm: evm
         }
     }
+
+    fn can_fit_in_usize(gas: U256) -> bool {
+        gas == U256::from(gas.low_u64() as usize)
+    }
 }
 
 impl Default for Factory {
@@ -135,7 +146,7 @@ impl Default for Factory {
 
     #[test]
     fn test_create_vm() {
-        let _vm = Factory::default().create();
+        let _vm = Factory::default().create(U256::zero());
    }
 
     /// Create tests by injecting different VM factories
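With the factory change above, callers pass the gas limit into `create` and the interpreter's cost width is chosen from it. A sketch of the same dispatch decision in isolation, with a toy `Interpreter<Cost>` type and `u128` standing in for `U256` (none of these names are the real ones):

```rust
use std::marker::PhantomData;

// Toy interpreter parameterised by its gas/cost type.
struct Interpreter<Cost> {
    _cost: PhantomData<Cost>,
}

enum AnyInterpreter {
    Narrow(Interpreter<usize>),
    Wide(Interpreter<u128>), // u128 standing in for U256
}

// Mirrors the can_fit_in_usize check: use the narrow interpreter when gas fits.
fn create(gas: u128) -> AnyInterpreter {
    if gas <= usize::MAX as u128 {
        AnyInterpreter::Narrow(Interpreter { _cost: PhantomData })
    } else {
        AnyInterpreter::Wide(Interpreter { _cost: PhantomData })
    }
}

fn main() {
    assert!(matches!(create(1_000_000), AnyInterpreter::Narrow(_)));
    assert!(matches!(create(u128::MAX), AnyInterpreter::Wide(_)));
}
```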
@@ -79,7 +79,7 @@ fn test_get_log_topics() {
     assert_eq!(get_log_topics(LOG4), 4);
 }
 
-#[derive(PartialEq)]
+#[derive(PartialEq, Clone, Copy)]
 pub enum GasPriceTier {
     /// 0 Zero
     Zero,
ethcore/src/evm/interpreter/gasometer.rs (new file, 261 lines):

// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use common::*;
use super::u256_to_address;
use evm::{self, CostType};
use evm::instructions::{self, Instruction, InstructionInfo};
use evm::interpreter::stack::Stack;

macro_rules! overflowing {
	($x: expr) => {{
		let (v, overflow) = $x;
		if overflow { return Err(evm::Error::OutOfGas); }
		v
	}}
}

#[cfg_attr(feature="dev", allow(enum_variant_names))]
enum InstructionCost<Cost: CostType> {
	Gas(Cost),
	GasMem(Cost, Cost),
	GasMemCopy(Cost, Cost, Cost)
}

pub struct Gasometer<Gas: CostType> {
	pub current_gas: Gas,
}

impl<Gas: CostType> Gasometer<Gas> {

	pub fn new(current_gas: Gas) -> Self {
		Gasometer {
			current_gas: current_gas,
		}
	}

	pub fn verify_gas(&self, gas_cost: &Gas) -> evm::Result<()> {
		match &self.current_gas < gas_cost {
			true => Err(evm::Error::OutOfGas),
			false => Ok(())
		}
	}

	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
	pub fn get_gas_cost_mem(
		&mut self,
		ext: &evm::Ext,
		instruction: Instruction,
		info: &InstructionInfo,
		stack: &Stack<U256>,
		current_mem_size: usize,
	) -> evm::Result<(Gas, usize)> {
		let schedule = ext.schedule();
		let tier = instructions::get_tier_idx(info.tier);
		let default_gas = Gas::from(schedule.tier_step_gas[tier]);

		let cost = match instruction {
			instructions::SSTORE => {
				let address = H256::from(stack.peek(0));
				let newval = stack.peek(1);
				let val = U256::from(ext.storage_at(&address).as_slice());

				let gas = if U256::zero() == val && &U256::zero() != newval {
					schedule.sstore_set_gas
				} else {
					// Refund for below case is added when actually executing sstore
					// !self.is_zero(&val) && self.is_zero(newval)
					schedule.sstore_reset_gas
				};
				InstructionCost::Gas(Gas::from(gas))
			},
			instructions::SLOAD => {
				InstructionCost::Gas(Gas::from(schedule.sload_gas))
			},
			instructions::MSTORE | instructions::MLOAD => {
				InstructionCost::GasMem(default_gas, try!(self.mem_needed_const(stack.peek(0), 32)))
			},
			instructions::MSTORE8 => {
				InstructionCost::GasMem(default_gas, try!(self.mem_needed_const(stack.peek(0), 1)))
			},
			instructions::RETURN => {
				InstructionCost::GasMem(default_gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
			},
			instructions::SHA3 => {
				let w = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(1))), 31));
				let words = w >> 5;
				let gas = Gas::from(schedule.sha3_gas) + (Gas::from(schedule.sha3_word_gas) * words);
				InstructionCost::GasMem(gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
			},
			instructions::CALLDATACOPY | instructions::CODECOPY => {
				InstructionCost::GasMemCopy(default_gas, try!(self.mem_needed(stack.peek(0), stack.peek(2))), try!(Gas::from_u256(*stack.peek(2))))
			},
			instructions::EXTCODECOPY => {
				InstructionCost::GasMemCopy(default_gas, try!(self.mem_needed(stack.peek(1), stack.peek(3))), try!(Gas::from_u256(*stack.peek(3))))
			},
			instructions::JUMPDEST => {
				InstructionCost::Gas(Gas::from(1))
			},
			instructions::LOG0...instructions::LOG4 => {
				let no_of_topics = instructions::get_log_topics(instruction);
				let log_gas = schedule.log_gas + schedule.log_topic_gas * no_of_topics;

				let data_gas = overflowing!(try!(Gas::from_u256(*stack.peek(1))).overflow_mul(Gas::from(schedule.log_data_gas)));
				let gas = overflowing!(data_gas.overflow_add(Gas::from(log_gas)));
				InstructionCost::GasMem(gas, try!(self.mem_needed(stack.peek(0), stack.peek(1))))
			},
			instructions::CALL | instructions::CALLCODE => {
				let mut gas = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(0))), schedule.call_gas));
				let mem = cmp::max(
					try!(self.mem_needed(stack.peek(5), stack.peek(6))),
					try!(self.mem_needed(stack.peek(3), stack.peek(4)))
				);

				let address = u256_to_address(stack.peek(1));

				if instruction == instructions::CALL && !ext.exists(&address) {
					gas = overflowing!(gas.overflow_add(Gas::from(schedule.call_new_account_gas)));
				};

				if stack.peek(2) > &U256::zero() {
					gas = overflowing!(gas.overflow_add(Gas::from(schedule.call_value_transfer_gas)));
				};

				InstructionCost::GasMem(gas,mem)
			},
			instructions::DELEGATECALL => {
				let gas = overflowing!(add_gas_usize(try!(Gas::from_u256(*stack.peek(0))), schedule.call_gas));
				let mem = cmp::max(
					try!(self.mem_needed(stack.peek(4), stack.peek(5))),
					try!(self.mem_needed(stack.peek(2), stack.peek(3)))
				);
				InstructionCost::GasMem(gas, mem)
			},
			instructions::CREATE => {
				let gas = Gas::from(schedule.create_gas);
				let mem = try!(self.mem_needed(stack.peek(1), stack.peek(2)));
				InstructionCost::GasMem(gas, mem)
			},
			instructions::EXP => {
				let expon = stack.peek(1);
				let bytes = ((expon.bits() + 7) / 8) as usize;
				let gas = Gas::from(schedule.exp_gas + schedule.exp_byte_gas * bytes);
				InstructionCost::Gas(gas)
			},
			_ => InstructionCost::Gas(default_gas)
		};

		match cost {
			InstructionCost::Gas(gas) => {
				Ok((gas, 0))
			},
			InstructionCost::GasMem(gas, mem_size) => {
				let (mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
				let gas = overflowing!(gas.overflow_add(mem_gas));
				Ok((gas, new_mem_size))
			},
			InstructionCost::GasMemCopy(gas, mem_size, copy) => {
				let (mem_gas, new_mem_size) = try!(self.mem_gas_cost(schedule, current_mem_size, &mem_size));
				let copy = overflowing!(add_gas_usize(copy, 31));
				let copy_gas = Gas::from(schedule.copy_gas) * (copy / Gas::from(32 as usize));
				let gas = overflowing!(gas.overflow_add(copy_gas));
				let gas = overflowing!(gas.overflow_add(mem_gas));
				Ok((gas, new_mem_size))
			}
		}
	}

	fn is_zero(&self, val: &Gas) -> bool {
		&Gas::from(0) == val
	}

	fn mem_needed_const(&self, mem: &U256, add: usize) -> evm::Result<Gas> {
		Gas::from_u256(overflowing!(mem.overflowing_add(U256::from(add))))
	}

	fn mem_needed(&self, offset: &U256, size: &U256) -> evm::Result<Gas> {
		if self.is_zero(&try!(Gas::from_u256(*size))) {
			return Ok(Gas::from(0));
		}

		Gas::from_u256(overflowing!(offset.overflowing_add(*size)))
	}

	fn mem_gas_cost(&self, schedule: &evm::Schedule, current_mem_size: usize, mem_size: &Gas) -> evm::Result<(Gas, usize)> {
		let gas_for_mem = |mem_size: Gas| {
			let s = mem_size >> 5;
			// s * memory_gas + s * s / quad_coeff_div
			let a = overflowing!(s.overflow_mul(Gas::from(schedule.memory_gas)));
			// We need to go to U512 to calculate s*s/quad_coeff_div
			let b = U512::from(s.as_u256()) * U512::from(s.as_u256()) / U512::from(schedule.quad_coeff_div);
			if b > U512::from(!U256::zero()) {
				Err(evm::Error::OutOfGas)
			} else {
				Ok(overflowing!(a.overflow_add(try!(Gas::from_u256(U256::from(b))))))
			}
		};
		let current_mem_size = Gas::from(current_mem_size);
		let req_mem_size_rounded = (overflowing!(mem_size.overflow_add(Gas::from(31 as usize))) >> 5) << 5;

		let mem_gas_cost = if req_mem_size_rounded > current_mem_size {
			let new_mem_gas = try!(gas_for_mem(req_mem_size_rounded));
			let current_mem_gas = try!(gas_for_mem(current_mem_size));
			new_mem_gas - current_mem_gas
		} else {
			Gas::from(0)
		};

		Ok((mem_gas_cost, req_mem_size_rounded.as_usize()))
	}
}

#[inline]
fn add_gas_usize<Gas: CostType>(value: Gas, num: usize) -> (Gas, bool) {
	value.overflow_add(Gas::from(num))
}

#[test]
fn test_mem_gas_cost() {
	// given
	let gasometer = Gasometer::<U256>::new(U256::zero());
	let schedule = evm::Schedule::default();
	let current_mem_size = 5;
	let mem_size = !U256::zero();

	// when
	let result = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size);

	// then
	if let Ok(_) = result {
		assert!(false, "Should fail with OutOfGas");
	}
}

#[test]
fn test_calculate_mem_cost() {
	// given
	let gasometer = Gasometer::<usize>::new(0);
	let schedule = evm::Schedule::default();
	let current_mem_size = 0;
	let mem_size = 5;

	// when
	let (mem_cost, mem_size) = gasometer.mem_gas_cost(&schedule, current_mem_size, &mem_size).unwrap();

	// then
	assert_eq!(mem_cost, 3);
	assert_eq!(mem_size, 32);
}
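
The gasometer above never does unchecked arithmetic: every multiplication and addition goes through the CostType overflow primitives, and the overflowing! macro converts a reported overflow into evm::Error::OutOfGas before the charge is applied. The following is a minimal standalone sketch of that pattern, not code from this commit: it substitutes plain u64 for CostType and its own OutOfGas type for evm::Error, and the charge helper and its parameters are the sketch's own assumptions.

// Standalone sketch of overflow-checked gas charging in the style of the
// `overflowing!` macro above. `OutOfGas` and `charge` are illustrative only.
#[derive(Debug, PartialEq)]
struct OutOfGas;

macro_rules! overflowing {
	($x: expr) => {{
		let (v, overflow) = $x;
		if overflow { return Err(OutOfGas); }
		v
	}}
}

// cost = base + words * per_word, with every step checked for overflow;
// returns the remaining gas or OutOfGas.
fn charge(current_gas: u64, base: u64, words: u64, per_word: u64) -> Result<u64, OutOfGas> {
	let word_gas = overflowing!(words.overflowing_mul(per_word));
	let cost = overflowing!(base.overflowing_add(word_gas));
	if current_gas < cost { return Err(OutOfGas); }
	Ok(current_gas - cost)
}

fn main() {
	assert_eq!(charge(100, 30, 2, 6), Ok(58));
	assert_eq!(charge(10, 30, 2, 6), Err(OutOfGas));
	// The multiplication overflows, so the whole charge is rejected.
	assert_eq!(charge(100, 1, u64::MAX, 2), Err(OutOfGas));
}
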
ethcore/src/evm/interpreter/memory.rs (new file, 150 lines):

// [14-line Parity copyright / GPL-3 header, identical to gasometer.rs above]

use util::{U256, Uint};

pub trait Memory {
	/// Retrieve current size of the memory
	fn size(&self) -> usize;
	/// Resize (shrink or expand) the memory to specified size (fills 0)
	fn resize(&mut self, new_size: usize);
	/// Resize the memory only if its smaller
	fn expand(&mut self, new_size: usize);
	/// Write single byte to memory
	fn write_byte(&mut self, offset: U256, value: U256);
	/// Write a word to memory. Does not resize memory!
	fn write(&mut self, offset: U256, value: U256);
	/// Read a word from memory
	fn read(&self, offset: U256) -> U256;
	/// Write slice of bytes to memory. Does not resize memory!
	fn write_slice(&mut self, offset: U256, &[u8]);
	/// Retrieve part of the memory between offset and offset + size
	fn read_slice(&self, offset: U256, size: U256) -> &[u8];
	/// Retrieve writeable part of memory
	fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut[u8];
	fn dump(&self);
}

/// Checks whether offset and size is valid memory range
fn is_valid_range(off: usize, size: usize) -> bool {
	// When size is zero we haven't actually expanded the memory
	let overflow = off.overflowing_add(size).1;
	size > 0 && !overflow
}

impl Memory for Vec<u8> {
	fn dump(&self) {
		println!("MemoryDump:");
		for i in self.iter() {
			println!("{:02x} ", i);
		}
		println!("");
	}

	fn size(&self) -> usize {
		self.len()
	}

	fn read_slice(&self, init_off_u: U256, init_size_u: U256) -> &[u8] {
		let off = init_off_u.low_u64() as usize;
		let size = init_size_u.low_u64() as usize;
		if !is_valid_range(off, size) {
			&self[0..0]
		} else {
			&self[off..off+size]
		}
	}

	fn read(&self, offset: U256) -> U256 {
		let off = offset.low_u64() as usize;
		U256::from(&self[off..off+32])
	}

	fn writeable_slice(&mut self, offset: U256, size: U256) -> &mut [u8] {
		let off = offset.low_u64() as usize;
		let s = size.low_u64() as usize;
		if !is_valid_range(off, s) {
			&mut self[0..0]
		} else {
			&mut self[off..off+s]
		}
	}

	fn write_slice(&mut self, offset: U256, slice: &[u8]) {
		let off = offset.low_u64() as usize;

		// TODO [todr] Optimize?
		for pos in off..off+slice.len() {
			self[pos] = slice[pos - off];
		}
	}

	fn write(&mut self, offset: U256, value: U256) {
		let off = offset.low_u64() as usize;
		let mut val = value;

		let end = off + 32;
		for pos in 0..32 {
			self[end - pos - 1] = val.low_u64() as u8;
			val = val >> 8;
		}
	}

	fn write_byte(&mut self, offset: U256, value: U256) {
		let off = offset.low_u64() as usize;
		let val = value.low_u64() as u64;
		self[off] = val as u8;
	}

	fn resize(&mut self, new_size: usize) {
		self.resize(new_size, 0);
	}

	fn expand(&mut self, size: usize) {
		if size > self.len() {
			Memory::resize(self, size)
		}
	}
}


#[test]
fn test_memory_read_and_write() {
	// given
	let mem: &mut Memory = &mut vec![];
	mem.resize(0x80 + 32);

	// when
	mem.write(U256::from(0x80), U256::from(0xabcdef));

	// then
	assert_eq!(mem.read(U256::from(0x80)), U256::from(0xabcdef));
}

#[test]
fn test_memory_read_and_write_byte() {
	// given
	let mem: &mut Memory = &mut vec![];
	mem.resize(32);

	// when
	mem.write_byte(U256::from(0x1d), U256::from(0xab));
	mem.write_byte(U256::from(0x1e), U256::from(0xcd));
	mem.write_byte(U256::from(0x1f), U256::from(0xef));

	// then
	assert_eq!(mem.read(U256::from(0x00)), U256::from(0xabcdef));
}
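
Memory::write above stores a 256-bit word big-endian: it walks 32 positions from the end of the word backwards, emitting the low byte of the shrinking value each time, and read folds the same 32 bytes back together. Below is a standalone sketch of those two loops, narrowed to u64 (8 bytes) so it runs without the util crate; the helper names and the 8-byte width are assumptions of the sketch, not part of the commit.

// Standalone sketch of the big-endian word write/read loops above,
// using u64 words instead of 256-bit U256. Names are illustrative only.
fn write_word(mem: &mut Vec<u8>, off: usize, value: u64) {
	let mut val = value;
	let end = off + 8;
	// Store the least significant byte last, mirroring `Memory::write`.
	for pos in 0..8 {
		mem[end - pos - 1] = val as u8;
		val >>= 8;
	}
}

fn read_word(mem: &[u8], off: usize) -> u64 {
	// Fold the bytes back together, most significant byte first.
	mem[off..off + 8].iter().fold(0u64, |acc, b| (acc << 8) | *b as u64)
}

fn main() {
	let mut mem = vec![0u8; 0x20];
	write_word(&mut mem, 0x10, 0xabcdef);
	assert_eq!(read_word(&mem, 0x10), 0xabcdef);
	// The value occupies the low-order end of the 8-byte word.
	assert_eq!(&mem[0x10..0x18], &[0u8, 0, 0, 0, 0, 0xab, 0xcd, 0xef][..]);
}
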
@@ -16,12 +16,6 @@
 
 ///! Rust VM implementation
 
-use common::*;
-use super::instructions as instructions;
-use super::instructions::{Instruction, get_info};
-use std::marker::Copy;
-use evm::{self, MessageCallResult, ContractCreateResult, GasLeft};
-
 #[cfg(not(feature = "evm-debug"))]
 macro_rules! evm_debug {
 	($x: expr) => {}
@@ -34,6 +28,19 @@ macro_rules! evm_debug {
 	}
 }
 
+mod gasometer;
+mod stack;
+mod memory;
+
+use self::gasometer::Gasometer;
+use self::stack::{Stack, VecStack};
+use self::memory::Memory;
+
+use std::marker::PhantomData;
+use common::*;
+use super::instructions::{self, Instruction, InstructionInfo};
+use evm::{self, MessageCallResult, ContractCreateResult, GasLeft, CostType};
+
 #[cfg(feature = "evm-debug")]
 fn color(instruction: Instruction, name: &'static str) -> String {
 	let c = instruction as usize % 6;
@@ -41,209 +48,9 @@ fn color(instruction: Instruction, name: &'static str) -> String {
 	format!("\x1B[1;{}m{}\x1B[0m", colors[c], name)
 }
 
-macro_rules! overflowing {
-	($x: expr) => {{
-		let (v, overflow) = $x;
-		if overflow { return Err(evm::Error::OutOfGas); }
-		v
-	}}
-}
-
 type CodePosition = usize;
-type Gas = U256;
 type ProgramCounter = usize;
 
 [... roughly 185 further removed lines elided: the old Stack trait with VecStack and the old Memory trait with its Vec<u8> implementation, which move essentially unchanged into the new interpreter/stack.rs and interpreter/memory.rs shown in this diff ...]
 
 /// Abstraction over raw vector of Bytes. Easier state management of PC.
 struct CodeReader<'a> {
 	position: ProgramCounter,
@@ -265,38 +72,33 @@ impl<'a> CodeReader<'a> {
 	}
 }
 
-#[cfg_attr(feature="dev", allow(enum_variant_names))]
-enum InstructionCost {
-	Gas(U256),
-	GasMem(U256, U256),
-	GasMemCopy(U256, U256, U256)
-}
-
-enum InstructionResult {
+enum InstructionResult<Gas> {
 	Ok,
 	UseAllGas,
-	GasLeft(U256),
-	UnusedGas(U256),
+	GasLeft(Gas),
+	UnusedGas(Gas),
 	JumpToPosition(U256),
 	// gas left, init_orf, init_size
-	StopExecutionNeedsReturn(U256, U256, U256),
+	StopExecutionNeedsReturn(Gas, U256, U256),
 	StopExecution,
 }
 
 /// Intepreter EVM implementation
 #[derive(Default)]
-pub struct Interpreter {
+pub struct Interpreter<Cost: CostType> {
 	mem: Vec<u8>,
+	_type: PhantomData<Cost>,
 }
 
-impl evm::Evm for Interpreter {
+impl<Cost: CostType> evm::Evm for Interpreter<Cost> {
 	fn exec(&mut self, params: ActionParams, ext: &mut evm::Ext) -> evm::Result<GasLeft> {
 		self.mem.clear();
 
 		let code = &params.code.as_ref().unwrap();
 		let valid_jump_destinations = self.find_jump_destinations(&code);
 
-		let mut current_gas = params.gas;
+		let mut gasometer = Gasometer::<Cost>::new(try!(Cost::from_u256(params.gas)));
 		let mut stack = VecStack::with_capacity(ext.schedule().stack_limit, U256::zero());
 		let mut reader = CodeReader {
 			position: 0,
@@ -305,26 +107,27 @@ impl evm::Evm for Interpreter {
 
 		while reader.position < code.len() {
 			let instruction = code[reader.position];
-
-			// Calculate gas cost
-			let (gas_cost, mem_size) = try!(self.get_gas_cost_mem(ext, instruction, &stack));
-
-			// TODO: make compile-time removable if too much of a performance hit.
-			let trace_executed = ext.trace_prepare_execute(reader.position, instruction, &gas_cost);
-
 			reader.position += 1;
 
-			try!(self.verify_gas(&current_gas, &gas_cost));
+			let info = instructions::get_info(instruction);
+			try!(self.verify_instruction(ext, instruction, &info, &stack));
+
+			// Calculate gas cost
+			let (gas_cost, mem_size) = try!(gasometer.get_gas_cost_mem(ext, instruction, &info, &stack, self.mem.size()));
+			// TODO: make compile-time removable if too much of a performance hit.
+			let trace_executed = ext.trace_prepare_execute(reader.position - 1, instruction, &gas_cost.as_u256());
+
+			try!(gasometer.verify_gas(&gas_cost));
 			self.mem.expand(mem_size);
-			current_gas = current_gas - gas_cost; //TODO: use operator -=
+			gasometer.current_gas = gasometer.current_gas - gas_cost;
 
 			evm_debug!({
 				println!("[0x{:x}][{}(0x{:x}) Gas: {:x}\n Gas Before: {:x}",
 					reader.position,
-					color(instruction, instructions::get_info(instruction).name),
+					color(instruction, info.name),
 					instruction,
 					gas_cost,
-					current_gas + gas_cost
+					gasometer.current_gas + gas_cost
 				);
 			});
 
@@ -335,50 +138,44 @@ impl evm::Evm for Interpreter {
 
 			// Execute instruction
 			let result = try!(self.exec_instruction(
-				current_gas, &params, ext, instruction, &mut reader, &mut stack
+				gasometer.current_gas, &params, ext, instruction, &mut reader, &mut stack
 			));
 
 			if trace_executed {
-				ext.trace_executed(current_gas, stack.peek_top(get_info(instruction).ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written);
+				ext.trace_executed(gasometer.current_gas.as_u256(), stack.peek_top(info.ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written);
 			}
 
 			// Advance
 			match result {
 				InstructionResult::Ok => {},
 				InstructionResult::UnusedGas(gas) => {
-					current_gas = current_gas + gas; //TODO: use operator +=
+					gasometer.current_gas = gasometer.current_gas + gas;
 				},
 				InstructionResult::UseAllGas => {
-					current_gas = U256::zero();
+					gasometer.current_gas = Cost::from(0);
 				},
 				InstructionResult::GasLeft(gas_left) => {
-					current_gas = gas_left;
+					gasometer.current_gas = gas_left;
 				},
 				InstructionResult::JumpToPosition(position) => {
 					let pos = try!(self.verify_jump(position, &valid_jump_destinations));
 					reader.position = pos;
 				},
 				InstructionResult::StopExecutionNeedsReturn(gas, off, size) => {
-					return Ok(GasLeft::NeedsReturn(gas, self.mem.read_slice(off, size)));
+					return Ok(GasLeft::NeedsReturn(gas.as_u256(), self.mem.read_slice(off, size)));
 				},
 				InstructionResult::StopExecution => break,
 			}
 		}
 
-		Ok(GasLeft::Known(current_gas))
+		Ok(GasLeft::Known(gasometer.current_gas.as_u256()))
 	}
 }
 
-impl Interpreter {
-	#[cfg_attr(feature="dev", allow(cyclomatic_complexity))]
-	fn get_gas_cost_mem(
-		&mut self,
-		ext: &evm::Ext,
-		instruction: Instruction,
-		stack: &Stack<U256>
-	) -> evm::Result<(U256, usize)> {
+impl<Cost: CostType> Interpreter<Cost> {
+	fn verify_instruction(&self, ext: &evm::Ext, instruction: Instruction, info: &InstructionInfo, stack: &Stack<U256>) -> evm::Result<()> {
 		let schedule = ext.schedule();
-		let info = instructions::get_info(instruction);
 
 		if !schedule.have_delegate_call && instruction == instructions::DELEGATECALL {
 			return Err(evm::Error::BadInstruction {
@@ -391,119 +188,20 @@ impl Interpreter {
 			});
 		}
 
-		try!(self.verify_instructions_requirements(&info, schedule.stack_limit, stack));
-
-		let tier = instructions::get_tier_idx(info.tier);
-		let default_gas = U256::from(schedule.tier_step_gas[tier]);
-
 [... roughly 95 removed lines elided: the old U256-based `let cost = match instruction { ... }` table and the `match cost { ... }` memory/copy gas accounting, superseded by Gasometer::get_gas_cost_mem in the new interpreter/gasometer.rs ...]
-		}
-	}
+		if !stack.has(info.args) {
+			Err(evm::Error::StackUnderflow {
+				instruction: info.name,
+				wanted: info.args,
+				on_stack: stack.size()
+			})
+		} else if stack.size() - info.args + info.ret > schedule.stack_limit {
+			Err(evm::Error::OutOfStack {
+				instruction: info.name,
+				wanted: info.ret - info.args,
+				limit: schedule.stack_limit
+			})
+		} else {
+			Ok(())
+		}
+	}
 
@@ -532,53 +230,16 @@ impl Interpreter {
 		}
 	}
 
 [... roughly 37 removed lines elided: the old U256-based `mem_gas_cost`, `mem_needed_const` and `mem_needed` helpers, now generic over CostType in interpreter/gasometer.rs ...]
 	#[cfg_attr(feature="dev", allow(too_many_arguments))]
 	fn exec_instruction(
 		&mut self,
-		gas: Gas,
+		gas: Cost,
 		params: &ActionParams,
 		ext: &mut evm::Ext,
 		instruction: Instruction,
 		code: &mut CodeReader,
 		stack: &mut Stack<U256>
-	) -> evm::Result<InstructionResult> {
+	) -> evm::Result<InstructionResult<Cost>> {
 		match instruction {
 			instructions::JUMP => {
 				let jump = stack.pop_back();
@@ -611,11 +272,11 @@ impl Interpreter {
 					return Ok(InstructionResult::Ok);
 				}
 
-				let create_result = ext.create(&gas, &endowment, &contract_code);
+				let create_result = ext.create(&gas.as_u256(), &endowment, &contract_code);
 				return match create_result {
 					ContractCreateResult::Created(address, gas_left) => {
 						stack.push(address_to_u256(address));
-						Ok(InstructionResult::GasLeft(gas_left))
+						Ok(InstructionResult::GasLeft(Cost::from_u256(gas_left).expect("Gas left cannot be greater.")))
 					},
 					ContractCreateResult::Failed => {
 						stack.push(U256::zero());
@@ -626,7 +287,7 @@ impl Interpreter {
 			},
 			instructions::CALL | instructions::CALLCODE | instructions::DELEGATECALL => {
 				assert!(ext.schedule().call_value_transfer_gas > ext.schedule().call_stipend, "overflow possible");
-				let call_gas = stack.pop_back();
+				let call_gas = Cost::from_u256(stack.pop_back()).expect("Gas is already validated.");
 				let code_address = stack.pop_back();
 				let code_address = u256_to_address(&code_address);
 
@@ -642,9 +303,9 @@ impl Interpreter {
 				let out_size = stack.pop_back();
 
 				// Add stipend (only CALL|CALLCODE when value > 0)
-				let call_gas = call_gas + value.map_or_else(U256::zero, |val| match val > U256::zero() {
-					true => U256::from(ext.schedule().call_stipend),
-					false => U256::zero()
+				let call_gas = call_gas + value.map_or_else(|| Cost::from(0), |val| match val > U256::zero() {
+					true => Cost::from(ext.schedule().call_stipend),
+					false => Cost::from(0)
 				});
 
 				// Get sender & receive addresses, check if we have balance
@@ -672,13 +333,13 @@ impl Interpreter {
 					// and we don't want to copy
 					let input = unsafe { ::std::mem::transmute(self.mem.read_slice(in_off, in_size)) };
 					let output = self.mem.writeable_slice(out_off, out_size);
-					ext.call(&call_gas, sender_address, receive_address, value, input, &code_address, output)
+					ext.call(&call_gas.as_u256(), sender_address, receive_address, value, input, &code_address, output)
 				};
 
 				return match call_result {
 					MessageCallResult::Success(gas_left) => {
 						stack.push(U256::one());
-						Ok(InstructionResult::UnusedGas(gas_left))
+						Ok(InstructionResult::UnusedGas(Cost::from_u256(gas_left).expect("Gas left cannot be greater then current one")))
 					},
 					MessageCallResult::Failed => {
 						stack.push(U256::zero());
@@ -759,7 +420,7 @@ impl Interpreter {
 				stack.push(U256::from(code.position - 1));
 			},
 			instructions::GAS => {
-				stack.push(gas.clone());
+				stack.push(gas.as_u256());
 			},
 			instructions::ADDRESS => {
 				stack.push(address_to_u256(params.address.clone()));
@@ -876,36 +537,6 @@ impl Interpreter {
 		}
 	}
 
 [... roughly 30 removed lines elided: the old `verify_instructions_requirements` and `verify_gas` helpers; their checks now live in `verify_instruction` above and in Gasometer::verify_gas ...]
 	fn verify_jump(&self, jump_u: U256, valid_jump_destinations: &HashSet<usize>) -> evm::Result<usize> {
 		let jump = jump_u.low_u64() as usize;
 
@@ -1163,11 +794,6 @@ fn set_sign(value: U256, sign: bool) -> U256 {
 	}
 }
 
-#[inline]
-fn add_u256_usize(value: &U256, num: usize) -> (U256, bool) {
-	value.clone().overflowing_add(U256::from(num))
-}
-
 #[inline]
 fn u256_to_address(value: &U256) -> Address {
 	Address::from(H256::from(value))
@@ -1179,82 +805,14 @@ fn address_to_u256(value: Address) -> U256 {
 }
 
 #[test]
-fn test_mem_gas_cost() {
+fn test_find_jump_destinations() {
 	// given
-	let interpreter = Interpreter::default();
-	let schedule = evm::Schedule::default();
-	let current_mem_size = 5;
-	let mem_size = !U256::zero();
+	let interpreter = Interpreter::<U256>::default();
+	let code = "7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff5b01600055".from_hex().unwrap();
 
 	// when
-	let result = interpreter.mem_gas_cost(&schedule, current_mem_size, &mem_size);
+	let valid_jump_destinations = interpreter.find_jump_destinations(&code);
 
 	// then
-	if let Ok(_) = result {
-		assert!(false, "Should fail with OutOfGas");
-	}
-}
-
-#[cfg(test)]
-mod tests {
-	use common::*;
-	use super::*;
-	use evm;
 [... roughly 55 removed lines elided: the old in-module tests (test_find_jump_destinations, test_calculate_mem_cost, test_memory_read_and_write, test_memory_read_and_write_byte), which now live as tests in interpreter/gasometer.rs and interpreter/memory.rs ...]
-}
+	assert!(valid_jump_destinations.contains(&66));
 }
ethcore/src/evm/interpreter/stack.rs (new file, 106 lines):

// [14-line Parity copyright / GPL-3 header, identical to gasometer.rs above]

use std::fmt;
use evm::instructions;

/// Stack trait with VM-friendly API
pub trait Stack<T> {
	/// Returns `Stack[len(Stack) - no_from_top]`
	fn peek(&self, no_from_top: usize) -> &T;
	/// Swaps Stack[len(Stack)] and Stack[len(Stack) - no_from_top]
	fn swap_with_top(&mut self, no_from_top: usize);
	/// Returns true if Stack has at least `no_of_elems` elements
	fn has(&self, no_of_elems: usize) -> bool;
	/// Get element from top and remove it from Stack. Panics if stack is empty.
	fn pop_back(&mut self) -> T;
	/// Get (up to `instructions::MAX_NO_OF_TOPICS`) elements from top and remove them from Stack. Panics if stack is empty.
	fn pop_n(&mut self, no_of_elems: usize) -> &[T];
	/// Add element on top of the Stack
	fn push(&mut self, elem: T);
	/// Get number of elements on Stack
	fn size(&self) -> usize;
	/// Returns all data on stack.
	fn peek_top(&mut self, no_of_elems: usize) -> &[T];
}

pub struct VecStack<S> {
	stack: Vec<S>,
	logs: [S; instructions::MAX_NO_OF_TOPICS]
}

impl<S : Copy> VecStack<S> {
	pub fn with_capacity(capacity: usize, zero: S) -> Self {
		VecStack {
			stack: Vec::with_capacity(capacity),
			logs: [zero; instructions::MAX_NO_OF_TOPICS]
		}
	}
}

impl<S : fmt::Display> Stack<S> for VecStack<S> {
	fn peek(&self, no_from_top: usize) -> &S {
		&self.stack[self.stack.len() - no_from_top - 1]
	}

	fn swap_with_top(&mut self, no_from_top: usize) {
		let len = self.stack.len();
		self.stack.swap(len - no_from_top - 1, len - 1);
	}

	fn has(&self, no_of_elems: usize) -> bool {
		self.stack.len() >= no_of_elems
	}

	fn pop_back(&mut self) -> S {
		let val = self.stack.pop();
		match val {
			Some(x) => {
				evm_debug!({
					println!(" POP: {}", x)
				});
				x
			},
			None => panic!("Tried to pop from empty stack.")
		}
	}

	fn pop_n(&mut self, no_of_elems: usize) -> &[S] {
		assert!(no_of_elems <= instructions::MAX_NO_OF_TOPICS);

		for i in 0..no_of_elems {
			self.logs[i] = self.pop_back();
		}
		&self.logs[0..no_of_elems]
	}

	fn push(&mut self, elem: S) {
		evm_debug!({
			println!(" PUSH: {}", elem)
		});
		self.stack.push(elem);
	}

	fn size(&self) -> usize {
		self.stack.len()
	}

	fn peek_top(&mut self, no_from_top: usize) -> &[S] {
		assert!(self.stack.len() >= no_from_top, "peek_top asked for more items than exist.");
		&self.stack[self.stack.len() - no_from_top .. self.stack.len()]
	}
}
@ -28,8 +28,10 @@ mod jit;
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests;
|
mod tests;
|
||||||
|
#[cfg(all(feature="benches", test))]
|
||||||
|
mod benches;
|
||||||
|
|
||||||
pub use self::evm::{Evm, Error, Finalize, GasLeft, Result};
|
pub use self::evm::{Evm, Error, Finalize, GasLeft, Result, CostType};
|
||||||
pub use self::ext::{Ext, ContractCreateResult, MessageCallResult};
|
pub use self::ext::{Ext, ContractCreateResult, MessageCallResult};
|
||||||
pub use self::factory::{Factory, VMType};
|
pub use self::factory::{Factory, VMType};
|
||||||
pub use self::schedule::Schedule;
|
pub use self::schedule::Schedule;
|
||||||
|
@@ -18,18 +18,18 @@ use common::*;
 use evm::{self, Ext, Schedule, Factory, GasLeft, VMType, ContractCreateResult, MessageCallResult};
 use std::fmt::Debug;

-struct FakeLogEntry {
+pub struct FakeLogEntry {
 topics: Vec<H256>,
 data: Bytes
 }

 #[derive(PartialEq, Eq, Hash, Debug)]
-enum FakeCallType {
+pub enum FakeCallType {
 Call, Create
 }

 #[derive(PartialEq, Eq, Hash, Debug)]
-struct FakeCall {
+pub struct FakeCall {
 call_type: FakeCallType,
 gas: U256,
 sender_address: Option<Address>,

@@ -43,7 +43,7 @@ struct FakeCall {
 ///
 /// Can't do recursive calls.
 #[derive(Default)]
-struct FakeExt {
+pub struct FakeExt {
 sstore_clears: usize,
 depth: usize,
 store: HashMap<H256, H256>,

@@ -67,7 +67,7 @@ fn test_finalize(res: Result<GasLeft, evm::Error>) -> Result<U256, evm::Error> {
 }

 impl FakeExt {
-fn new() -> Self {
+pub fn new() -> Self {
 FakeExt::default()
 }
 }

@@ -181,7 +181,7 @@ fn test_stack_underflow() {
 let mut ext = FakeExt::new();

 let err = {
-let mut vm : Box<evm::Evm> = Box::new(super::interpreter::Interpreter::default());
+let mut vm : Box<evm::Evm> = Box::new(super::interpreter::Interpreter::<usize>::default());
 test_finalize(vm.exec(params, &mut ext)).unwrap_err()
 };

@@ -208,7 +208,7 @@ fn test_add(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -228,7 +228,7 @@ fn test_sha3(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -248,7 +248,7 @@ fn test_address(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -270,7 +270,7 @@ fn test_origin(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -292,7 +292,7 @@ fn test_sender(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -327,7 +327,7 @@ fn test_extcodecopy(factory: super::Factory) {
 ext.codes.insert(sender, sender_code);

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -347,7 +347,7 @@ fn test_log_empty(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -379,7 +379,7 @@ fn test_log_sender(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -404,7 +404,7 @@ fn test_blockhash(factory: super::Factory) {
 ext.blockhashes.insert(U256::zero(), blockhash.clone());

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -426,7 +426,7 @@ fn test_calldataload(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -447,7 +447,7 @@ fn test_author(factory: super::Factory) {
 ext.info.author = author;

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -467,7 +467,7 @@ fn test_timestamp(factory: super::Factory) {
 ext.info.timestamp = timestamp;

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -487,7 +487,7 @@ fn test_number(factory: super::Factory) {
 ext.info.number = number;

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -507,7 +507,7 @@ fn test_difficulty(factory: super::Factory) {
 ext.info.difficulty = difficulty;

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -527,7 +527,7 @@ fn test_gas_limit(factory: super::Factory) {
 ext.info.gas_limit = gas_limit;

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -545,7 +545,7 @@ fn test_mul(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -563,7 +563,7 @@ fn test_sub(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -581,7 +581,7 @@ fn test_div(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -599,7 +599,7 @@ fn test_div_zero(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -617,7 +617,7 @@ fn test_mod(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -636,7 +636,7 @@ fn test_smod(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -655,7 +655,7 @@ fn test_sdiv(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -674,7 +674,7 @@ fn test_exp(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -694,7 +694,7 @@ fn test_comparison(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -715,7 +715,7 @@ fn test_signed_comparison(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -736,7 +736,7 @@ fn test_bitops(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -759,7 +759,7 @@ fn test_addmod_mulmod(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -780,7 +780,7 @@ fn test_byte(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -799,7 +799,7 @@ fn test_signextend(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -819,7 +819,7 @@ fn test_badinstruction_int() {
 let mut ext = FakeExt::new();

 let err = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap_err()
 };

@@ -839,7 +839,7 @@ fn test_pop(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -859,7 +859,7 @@ fn test_extops(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -882,7 +882,7 @@ fn test_jumps(factory: super::Factory) {
 let mut ext = FakeExt::new();

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

@@ -911,7 +911,7 @@ fn test_calls(factory: super::Factory) {
 };

 let gas_left = {
-let mut vm = factory.create();
+let mut vm = factory.create(params.gas);
 test_finalize(vm.exec(params, &mut ext)).unwrap()
 };

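Note on the pattern above: every call site changes from factory.create() to factory.create(params.gas), so the VM now receives the call's gas when it is constructed rather than discovering it at exec time. A minimal, self-contained sketch of the new call shape (toy types only, not the real evm::Factory API) is:

    // Toy stand-ins that only mirror the shape of the change; the real types live in the evm crate.
    struct ActionParams { gas: u64 }
    struct Interpreter { initial_gas: u64 }

    struct Factory;
    impl Factory {
        // Before this commit the signature was `fn create(&self) -> Interpreter`.
        fn create(&self, gas: u64) -> Interpreter {
            // Knowing the gas up front can let the factory pick a gas-counter width;
            // the new `CostType` export and `Interpreter::<usize>` in the tests point that way.
            Interpreter { initial_gas: gas }
        }
    }

    fn main() {
        let params = ActionParams { gas: 100_000 };
        let factory = Factory;
        let vm = factory.create(params.gas); // new call shape used throughout the tests above
        println!("vm starts with {} gas", vm.initial_gas);
    }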
@@ -211,7 +211,7 @@ impl<'a> Executive<'a> {
 let vm_factory = self.vm_factory;
 let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer);
 trace!(target: "executive", "ext.schedule.have_delegate_call: {}", ext.schedule().have_delegate_call);
-return vm_factory.create().exec(params, &mut ext).finalize(ext);
+return vm_factory.create(params.gas).exec(params, &mut ext).finalize(ext);
 }

 // Start in new thread to reset stack

@@ -222,7 +222,7 @@ impl<'a> Executive<'a> {
 let mut ext = self.as_externalities(OriginInfo::from(&params), unconfirmed_substate, output_policy, tracer, vm_tracer);

 scope.spawn(move || {
-vm_factory.create().exec(params, &mut ext).finalize(ext)
+vm_factory.create(params.gas).exec(params, &mut ext).finalize(ext)
 })
 }).join()
 }

@@ -319,7 +319,6 @@ mod tests {
 last_hashes: vec![],
 gas_used: 0.into(),
 gas_limit: 0.into(),
-dao_rescue_block_gas_limit: None,
 }
 }

@@ -92,10 +92,10 @@ impl PartialEq for Header {
 impl Default for Header {
 fn default() -> Self {
 Header {
-parent_hash: ZERO_H256.clone(),
+parent_hash: H256::default(),
 timestamp: 0,
 number: 0,
-author: ZERO_ADDRESS.clone(),
+author: Address::default(),

 transactions_root: SHA3_NULL_RLP,
 uncles_hash: SHA3_EMPTY_LIST_RLP,

@@ -104,10 +104,10 @@ impl Default for Header {
 state_root: SHA3_NULL_RLP,
 receipts_root: SHA3_NULL_RLP,
 log_bloom: ZERO_LOGBLOOM.clone(),
-gas_used: ZERO_U256,
-gas_limit: ZERO_U256,
+gas_used: U256::default(),
+gas_limit: U256::default(),

-difficulty: ZERO_U256,
+difficulty: U256::default(),
 seal: vec![],
 hash: RefCell::new(None),
 bare_hash: RefCell::new(None),
@@ -208,7 +208,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec<String> {
 &mut tracer,
 &mut vm_tracer,
 );
-let mut evm = vm_factory.create();
+let mut evm = vm_factory.create(params.gas);
 let res = evm.exec(params, &mut ex);
 // a return in finalize will not alter callcreates
 let callcreates = ex.callcreates.clone();

@@ -23,3 +23,4 @@ mod state;
 mod chain;
 mod homestead_state;
 mod homestead_chain;
+mod trie;
ethcore/src/json_tests/trie.rs (new file, 69 lines)
@@ -0,0 +1,69 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use ethjson;
+use util::{H256, MemoryDB, TrieMut, TrieSpec, TrieFactory};
+
+fn test_trie(json: &[u8], trie: TrieSpec) -> Vec<String> {
+let tests = ethjson::trie::Test::load(json).unwrap();
+let factory = TrieFactory::new(trie);
+let mut result = vec![];
+
+for (name, test) in tests.into_iter() {
+let mut memdb = MemoryDB::new();
+let mut root = H256::default();
+let mut t = factory.create(&mut memdb, &mut root);
+
+for (key, value) in test.input.data.into_iter() {
+let key: Vec<u8> = key.into();
+let value: Vec<u8> = value.map_or_else(Vec::new, Into::into);
+t.insert(&key, &value);
+}
+
+if *t.root() != test.root.into() {
+result.push(format!("Trie test '{:?}' failed.", name));
+}
+}
+
+for i in &result {
+println!("FAILED: {}", i);
+}
+
+result
+}
+
+mod generic {
+use util::TrieSpec;
+
+fn do_json_test(json: &[u8]) -> Vec<String> {
+super::test_trie(json, TrieSpec::Generic)
+}
+
+declare_test!{TrieTests_trietest, "TrieTests/trietest"}
+declare_test!{TrieTests_trieanyorder, "TrieTests/trieanyorder"}
+}
+
+mod secure {
+use util::TrieSpec;
+
+fn do_json_test(json: &[u8]) -> Vec<String> {
+super::test_trie(json, TrieSpec::Secure)
+}
+
+declare_test!{TrieTests_hex_encoded_secure, "TrieTests/hex_encoded_securetrie_test"}
+declare_test!{TrieTests_trietest_secure, "TrieTests/trietest_secureTrie"}
+declare_test!{TrieTests_trieanyorder_secure, "TrieTests/trieanyorder_secureTrie"}
+}
@@ -31,6 +31,7 @@
 #![cfg_attr(feature="dev", allow(needless_borrow))]
 #![cfg_attr(feature="dev", allow(assign_op_pattern))]

+#![cfg_attr(feature="benches", feature(test))]

 //! Ethcore library
 //!

@@ -1,4 +1,4 @@
-use util::migration::Migration;
+use util::migration::SimpleMigration;

 /// This migration reduces the sizes of keys and moves `ExtrasIndex` byte from back to the front.
 pub struct ToV6;

@@ -17,7 +17,7 @@ impl ToV6 {
 }
 }

-impl Migration for ToV6 {
+impl SimpleMigration for ToV6 {
 fn version(&self) -> u32 {
 6
 }
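Note on the migration hunk above: ToV6 now implements SimpleMigration rather than Migration. The sketch below is a self-contained toy of a versioned "simple migration"; only the version() -> u32 { 6 } shape is taken from the diff, and the migrate_key helper is an invented stand-in, not the real util::migration API.

    // Illustrative sketch only: a toy simple-migration trait with a version number.
    trait SimpleMigration {
        /// Database version this migration produces.
        fn version(&self) -> u32;
        /// Rewrite a single key (hypothetical helper for the sketch).
        fn migrate_key(&self, key: Vec<u8>) -> Vec<u8>;
    }

    struct ToV6;

    impl SimpleMigration for ToV6 {
        fn version(&self) -> u32 {
            6
        }
        fn migrate_key(&self, mut key: Vec<u8>) -> Vec<u8> {
            // e.g. move a trailing index byte to the front, as the ToV6 doc comment describes
            if let Some(last) = key.pop() {
                key.insert(0, last);
            }
            key
        }
    }

    fn main() {
        let m = ToV6;
        assert_eq!(m.version(), 6);
        assert_eq!(m.migrate_key(vec![1, 2, 3]), vec![3, 1, 2]);
        println!("migrated to v{}", m.version());
    }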
@@ -19,7 +19,7 @@ use std::sync::atomic::AtomicBool;
 use std::time::{Instant, Duration};

 use util::*;
-use util::Colour::White;
+use util::using_queue::{UsingQueue, GetAction};
 use account_provider::AccountProvider;
 use views::{BlockView, HeaderView};
 use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockID, CallAnalytics};

@@ -200,17 +200,23 @@ impl Miner {
 let hash = tx.hash();
 match open_block.push_transaction(tx, None) {
 Err(Error::Execution(ExecutionError::BlockGasLimitReached { gas_limit, gas_used, .. })) => {
-trace!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?}", hash);
+debug!(target: "miner", "Skipping adding transaction to block because of gas limit: {:?}", hash);
 // Exit early if gas left is smaller then min_tx_gas
 let min_tx_gas: U256 = 21000.into(); // TODO: figure this out properly.
 if gas_limit - gas_used < min_tx_gas {
 break;
 }
 },
-Err(Error::Transaction(TransactionError::AlreadyImported)) => {} // already have transaction - ignore
+// Invalid nonce error can happen only if previous transaction is skipped because of gas limit.
+// If there is errornous state of transaction queue it will be fixed when next block is imported.
+Err(Error::Execution(ExecutionError::InvalidNonce { .. })) => {
+debug!(target: "miner", "Skipping adding transaction to block because of invalid nonce: {:?}", hash);
+},
+// already have transaction - ignore
+Err(Error::Transaction(TransactionError::AlreadyImported)) => {},
 Err(e) => {
 invalid_transactions.insert(hash);
-trace!(target: "miner",
+debug!(target: "miner",
 "Error adding transaction to block: number={}. transaction_hash={:?}, Error: {:?}",
 block_number, hash, e);
 },

@@ -309,6 +315,19 @@ impl Miner {
 !have_work
 }

+fn add_transactions_to_queue(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, origin: TransactionOrigin, transaction_queue: &mut TransactionQueue) ->
+Vec<Result<TransactionImportResult, Error>> {
+
+let fetch_account = |a: &Address| AccountDetails {
+nonce: chain.latest_nonce(a),
+balance: chain.latest_balance(a),
+};
+
+transactions.into_iter()
+.map(|tx| transaction_queue.add(tx, &fetch_account, origin))
+.collect()
+}
+
 /// Are we allowed to do a non-mandatory reseal?
 fn tx_reseal_allowed(&self) -> bool { Instant::now() > *self.next_allowed_reseal.lock().unwrap() }
 }

@@ -349,7 +368,6 @@ impl MinerService for Miner {
 last_hashes: last_hashes,
 gas_used: U256::zero(),
 gas_limit: U256::max_value(),
-dao_rescue_block_gas_limit: chain.dao_rescue_block_gas_limit(header.parent_hash().clone()),
 };
 // that's just a copy of the state.
 let mut state = block.state().clone();

@@ -472,27 +490,24 @@ impl MinerService for Miner {
 self.gas_range_target.read().unwrap().1
 }

-fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
-Vec<Result<TransactionImportResult, Error>>
-where T: Fn(&Address) -> AccountDetails {
-let results: Vec<Result<TransactionImportResult, Error>> = {
-let mut transaction_queue = self.transaction_queue.lock().unwrap();
-transactions.into_iter()
-.map(|tx| transaction_queue.add(tx, &fetch_account, TransactionOrigin::External))
-.collect()
-};
+fn import_external_transactions(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>) ->
+Vec<Result<TransactionImportResult, Error>> {
+let mut transaction_queue = self.transaction_queue.lock().unwrap();
+let results = self.add_transactions_to_queue(chain, transactions, TransactionOrigin::External,
+&mut transaction_queue);
+
 if !results.is_empty() && self.options.reseal_on_external_tx && self.tx_reseal_allowed() {
 self.update_sealing(chain);
 }
 results
 }

-fn import_own_transaction<T>(
+fn import_own_transaction(
 &self,
 chain: &MiningBlockChainClient,
 transaction: SignedTransaction,
-fetch_account: T
-) -> Result<TransactionImportResult, Error> where T: Fn(&Address) -> AccountDetails {
+) -> Result<TransactionImportResult, Error> {

 let hash = transaction.hash();
 trace!(target: "own_tx", "Importing transaction: {:?}", transaction);

@@ -500,7 +515,7 @@ impl MinerService for Miner {
 let imported = {
 // Be sure to release the lock before we call enable_and_prepare_sealing
 let mut transaction_queue = self.transaction_queue.lock().unwrap();
-let import = transaction_queue.add(transaction, &fetch_account, TransactionOrigin::Local);
+let import = self.add_transactions_to_queue(chain, vec![transaction], TransactionOrigin::Local, &mut transaction_queue).pop().unwrap();

 match import {
 Ok(ref res) => {

@@ -639,7 +654,7 @@ impl MinerService for Miner {
 let n = sealed.header().number();
 let h = sealed.header().hash();
 try!(chain.import_sealed_block(sealed));
-info!(target: "miner", "Mined block imported OK. #{}: {}", paint(White.bold(), format!("{}", n)), paint(White.bold(), h.hex()));
+info!(target: "miner", "Mined block imported OK. #{}: {}", format!("{}", n).apply(Colour::White.bold()), h.hex().apply(Colour::White.bold()));
 Ok(())
 })
 }

@@ -651,7 +666,12 @@ impl MinerService for Miner {
 // Client should send message after commit to db and inserting to chain.
 .expect("Expected in-chain blocks.");
 let block = BlockView::new(&block);
-block.transactions()
+let txs = block.transactions();
+// populate sender
+for tx in &txs {
+let _sender = tx.sender();
+}
+txs
 }

 // 1. We ignore blocks that were `imported` (because it means that they are not in canon-chain, and transactions

@@ -668,14 +688,10 @@ impl MinerService for Miner {
 .par_iter()
 .map(|h| fetch_transactions(chain, h));
 out_of_chain.for_each(|txs| {
-// populate sender
-for tx in &txs {
-let _sender = tx.sender();
-}
-let _ = self.import_transactions(chain, txs, |a| AccountDetails {
-nonce: chain.latest_nonce(a),
-balance: chain.latest_balance(a),
-});
+let mut transaction_queue = self.transaction_queue.lock().unwrap();
+let _ = self.add_transactions_to_queue(
+chain, txs, TransactionOrigin::External, &mut transaction_queue
+);
 });
 }

@@ -33,7 +33,7 @@
 //! use ethcore::miner::{Miner, MinerService};
 //!
 //! fn main() {
-//! let miner: Miner = Miner::with_spec(ethereum::new_frontier(true));
+//! let miner: Miner = Miner::with_spec(ethereum::new_frontier());
 //! // get status
 //! assert_eq!(miner.status().transactions_in_pending_queue, 0);
 //!

@@ -107,14 +107,12 @@ pub trait MinerService : Send + Sync {
 fn set_tx_gas_limit(&self, limit: U256);

 /// Imports transactions to transaction queue.
-fn import_transactions<T>(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
-Vec<Result<TransactionImportResult, Error>>
-where T: Fn(&Address) -> AccountDetails, Self: Sized;
+fn import_external_transactions(&self, chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>) ->
+Vec<Result<TransactionImportResult, Error>>;

 /// Imports own (node owner) transaction to queue.
-fn import_own_transaction<T>(&self, chain: &MiningBlockChainClient, transaction: SignedTransaction, fetch_account: T) ->
-Result<TransactionImportResult, Error>
-where T: Fn(&Address) -> AccountDetails, Self: Sized;
+fn import_own_transaction(&self, chain: &MiningBlockChainClient, transaction: SignedTransaction) ->
+Result<TransactionImportResult, Error>;

 /// Returns hashes of transactions currently in pending
 fn pending_transactions_hashes(&self) -> Vec<H256>;
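Note on the MinerService changes above: import_external_transactions and import_own_transaction lose their fetch_account closure parameter (and its Self: Sized bound); the miner now builds the account lookup itself from chain.latest_nonce / chain.latest_balance inside add_transactions_to_queue. The toy sketch below uses hypothetical stand-in types rather than the real ethcore API to show the practical effect: the import methods become callable through a plain trait object.

    // Self-contained toy, not the real ethcore API.
    struct AccountDetails { nonce: u64, balance: u64 }

    trait Chain {
        fn latest_nonce(&self) -> u64;
        fn latest_balance(&self) -> u64;
    }

    trait MinerService {
        // New shape: no closure parameter; account details are looked up via the chain client.
        fn import_own_transaction(&self, chain: &dyn Chain, tx: Vec<u8>) -> Result<(), String>;
    }

    struct Miner;
    impl MinerService for Miner {
        fn import_own_transaction(&self, chain: &dyn Chain, _tx: Vec<u8>) -> Result<(), String> {
            // Mirrors add_transactions_to_queue: the miner builds the account lookup itself.
            let details = AccountDetails { nonce: chain.latest_nonce(), balance: chain.latest_balance() };
            println!("queueing with nonce {} and balance {}", details.nonce, details.balance);
            Ok(())
        }
    }

    struct DummyChain;
    impl Chain for DummyChain {
        fn latest_nonce(&self) -> u64 { 0 }
        fn latest_balance(&self) -> u64 { 1_000 }
    }

    fn main() {
        // Works through a trait object, which the old generic signature ruled out.
        let miner: &dyn MinerService = &Miner;
        miner.import_own_transaction(&DummyChain, vec![]).unwrap();
    }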
@@ -87,7 +87,7 @@ use std::cmp;
 use std::collections::{HashMap, BTreeSet};
 use util::numbers::{Uint, U256};
 use util::hash::{Address, H256};
-use util::table::*;
+use util::table::Table;
 use transaction::*;
 use error::{Error, TransactionError};
 use client::TransactionImportResult;

@@ -432,10 +432,10 @@ impl TransactionQueue {
 pub fn add<T>(&mut self, tx: SignedTransaction, fetch_account: &T, origin: TransactionOrigin) -> Result<TransactionImportResult, Error>
 where T: Fn(&Address) -> AccountDetails {

-trace!(target: "miner", "Importing: {:?}", tx.hash());
+trace!(target: "txqueue", "Importing: {:?}", tx.hash());

 if tx.gas_price < self.minimal_gas_price && origin != TransactionOrigin::Local {
-trace!(target: "miner",
+trace!(target: "txqueue",
 "Dropping transaction below minimal gas price threshold: {:?} (gp: {} < {})",
 tx.hash(),
 tx.gas_price,

@@ -451,7 +451,7 @@ impl TransactionQueue {
 try!(tx.check_low_s());

 if tx.gas > self.gas_limit || tx.gas > self.tx_gas_limit {
-trace!(target: "miner",
+trace!(target: "txqueue",
 "Dropping transaction above gas limit: {:?} ({} > min({}, {}))",
 tx.hash(),
 tx.gas,

@@ -470,7 +470,7 @@ impl TransactionQueue {

 let cost = vtx.transaction.value + vtx.transaction.gas_price * vtx.transaction.gas;
 if client_account.balance < cost {
-trace!(target: "miner",
+trace!(target: "txqueue",
 "Dropping transaction without sufficient balance: {:?} ({} < {})",
 vtx.hash(),
 client_account.balance,

@@ -558,7 +558,7 @@ impl TransactionQueue {
 if k >= current_nonce {
 self.future.insert(*sender, k, order.update_height(k, current_nonce));
 } else {
-trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
+trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
 // Remove the transaction completely
 self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
 }

@@ -579,7 +579,7 @@ impl TransactionQueue {
 if k >= current_nonce {
 self.future.insert(*sender, k, order.update_height(k, current_nonce));
 } else {
-trace!(target: "miner", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
+trace!(target: "txqueue", "Removing old transaction: {:?} (nonce: {} < {})", order.hash, k, current_nonce);
 self.by_hash.remove(&order.hash).expect("All transactions in `future` are also in `by_hash`");
 }
 }

@@ -667,7 +667,7 @@ impl TransactionQueue {

 if self.by_hash.get(&tx.hash()).is_some() {
 // Transaction is already imported.
-trace!(target: "miner", "Dropping already imported transaction: {:?}", tx.hash());
+trace!(target: "txqueue", "Dropping already imported transaction: {:?}", tx.hash());
 return Err(TransactionError::AlreadyImported);
 }

@@ -684,7 +684,7 @@ impl TransactionQueue {
 // nonce height would result in overflow.
 if nonce < state_nonce {
 // Droping transaction
-trace!(target: "miner", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
+trace!(target: "txqueue", "Dropping old transaction: {:?} (nonce: {} < {})", tx.hash(), nonce, next_nonce);
 return Err(TransactionError::Old);
 } else if nonce > next_nonce {
 // We have a gap - put to future.

@@ -720,7 +720,7 @@ impl TransactionQueue {
 // Trigger error if the transaction we are importing was removed.
 try!(check_if_removed(&address, &nonce, removed));

-trace!(target: "miner", "status: {:?}", self.status());
+trace!(target: "txqueue", "status: {:?}", self.status());
 Ok(TransactionImportResult::Current)
 }

@@ -17,7 +17,6 @@
 //! Creates and registers client and network services.

 use util::*;
-use util::Colour::{Yellow, White};
 use util::panics::*;
 use spec::Spec;
 use error::*;

@@ -72,7 +71,7 @@ impl ClientService {
 try!(net_service.start());
 }

-info!("Configured for {} using {} engine", paint(White.bold(), spec.name.clone()), paint(Yellow.bold(), spec.engine.name().to_owned()));
+info!("Configured for {} using {} engine", spec.name.clone().apply(Colour::White.bold()), spec.engine.name().apply(Colour::Yellow.bold()));
 let client = try!(Client::new(config, spec, db_path, miner, net_service.io().channel()));
 panic_handler.forward_from(client.deref());
 let client_io = Arc::new(ClientIoHandler {

@@ -135,16 +134,14 @@ impl IoHandler<NetSyncMessage> for ClientIoHandler {

 #[cfg_attr(feature="dev", allow(single_match))]
 fn message(&self, io: &IoContext<NetSyncMessage>, net_message: &NetSyncMessage) {
-if let UserMessage(ref message) = *net_message {
-match *message {
-SyncMessage::BlockVerified => {
-self.client.import_verified_blocks(&io.channel());
-},
-SyncMessage::NewTransactions(ref transactions) => {
-self.client.import_queued_transactions(&transactions);
-},
-_ => {}, // ignore other messages
-}
+match *net_message {
+UserMessage(ref message) => match *message {
+SyncMessage::BlockVerified => { self.client.import_verified_blocks(&io.channel()); }
+SyncMessage::NewTransactions(ref transactions) => { self.client.import_queued_transactions(&transactions); }
+_ => {} // ignore other messages
+},
+NetworkIoMessage::NetworkStarted(ref url) => { self.client.network_started(url); }
+_ => {} // ignore other messages
 }
 }
 }

@@ -180,7 +180,6 @@ pub fn generate_dummy_client_with_spec_and_data<F>(get_test_spec: F, block_numbe
 db,
 &last_header,
 last_hashes.clone(),
-None,
 author.clone(),
 (3141562.into(), 31415620.into()),
 vec![]

@@ -727,7 +727,6 @@ pub fn get_ipc_meta_items(attr: &ast::Attribute) -> Option<&[P<ast::MetaItem>]>
 fn client_ident_renamed(cx: &ExtCtxt, item: &ast::Item) -> Option<String> {
 for meta_items in item.attrs().iter().filter_map(get_ipc_meta_items) {
 for meta_item in meta_items {
-let span = meta_item.span;
 match meta_item.node {
 ast::MetaItemKind::NameValue(ref name, ref lit) if name == &"client_ident" => {
 if let Ok(s) = get_str_from_lit(cx, name, lit) {
@@ -16,15 +16,23 @@

 //! Lenient bytes json deserialization for test json files.

+use std::str::FromStr;
+use std::ops::Deref;
 use rustc_serialize::hex::FromHex;
 use serde::{Deserialize, Deserializer, Error};
 use serde::de::Visitor;
-use std::ops::Deref;

 /// Lenient bytes json deserialization for test json files.
-#[derive(Default, Debug, PartialEq, Clone)]
+#[derive(Default, Debug, PartialEq, Eq, Clone, PartialOrd, Ord)]
 pub struct Bytes(Vec<u8>);

+impl Bytes {
+/// Creates bytes struct.
+pub fn new(v: Vec<u8>) -> Self {
+Bytes(v)
+}
+}
+
 impl Into<Vec<u8>> for Bytes {
 fn into(self) -> Vec<u8> {
 self.0

@@ -39,6 +47,25 @@ impl Deref for Bytes {
 }
 }

+impl FromStr for Bytes {
+type Err = String;
+
+fn from_str(value: &str) -> Result<Self, Self::Err> {
+let v = match value.len() {
+0 => vec![],
+2 if value.starts_with("0x") => vec![],
+_ if value.starts_with("0x") && value.len() % 2 == 1 => {
+let v = "0".to_owned() + &value[2..];
+FromHex::from_hex(v.as_ref() as &str).unwrap_or(vec![])
+},
+_ if value.starts_with("0x") => FromHex::from_hex(&value[2..]).unwrap_or(vec![]),
+_ => FromHex::from_hex(value).unwrap_or(vec![]),
+};
+
+Ok(Bytes(v))
+}
+}
+
 impl Deserialize for Bytes {
 fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
 where D: Deserializer {

@@ -52,17 +79,7 @@ impl Visitor for BytesVisitor {
 type Value = Bytes;

 fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: Error {
-let v = match value.len() {
-0 => vec![],
-2 if value.starts_with("0x") => vec![],
-_ if value.starts_with("0x") && value.len() % 2 == 1 => {
-let v = "0".to_owned() + &value[2..];
-FromHex::from_hex(v.as_ref() as &str).unwrap_or(vec![])
-},
-_ if value.starts_with("0x") => FromHex::from_hex(&value[2..]).unwrap_or(vec![]),
-_ => FromHex::from_hex(value).unwrap_or(vec![]),
-};
-Ok(Bytes(v))
+Bytes::from_str(value).map_err(Error::custom)
 }

 fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: Error {
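Note on Bytes::from_str above: it accepts an empty string, a bare "0x", odd-length hex (padded with a leading zero), and silently maps invalid hex to an empty vector. The self-contained sketch below approximates those rules with a hand-rolled, std-only hex decoder in place of rustc_serialize's FromHex, so treat it as an illustration of the behaviour rather than the crate's code.

    // Stand-alone approximation of the lenient hex rules from Bytes::from_str.
    fn from_hex(s: &str) -> Result<Vec<u8>, ()> {
        if s.len() % 2 != 0 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
            return Err(());
        }
        (0..s.len())
            .step_by(2)
            .map(|i| u8::from_str_radix(&s[i..i + 2], 16).map_err(|_| ()))
            .collect()
    }

    fn lenient_bytes(value: &str) -> Vec<u8> {
        match value.len() {
            0 => vec![],                            // "" -> empty
            2 if value.starts_with("0x") => vec![], // "0x" -> empty
            _ if value.starts_with("0x") && value.len() % 2 == 1 => {
                // odd-length hex: pad with a leading zero, e.g. "0x123" -> [0x01, 0x23]
                let padded = format!("0{}", &value[2..]);
                from_hex(&padded).unwrap_or(vec![])
            }
            _ if value.starts_with("0x") => from_hex(&value[2..]).unwrap_or(vec![]),
            _ => from_hex(value).unwrap_or(vec![]), // bare hex without the 0x prefix
        }
    }

    fn main() {
        assert_eq!(lenient_bytes("0x0045"), vec![0x00, 0x45]);
        assert_eq!(lenient_bytes("0x123"), vec![0x01, 0x23]);
        assert_eq!(lenient_bytes("0x"), Vec::<u8>::new());
        assert_eq!(lenient_bytes("zz"), Vec::<u8>::new());
        println!("lenient hex rules hold");
    }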
@@ -24,6 +24,7 @@ pub mod uint;
 pub mod bytes;
 pub mod blockchain;
 pub mod spec;
+pub mod trie;
 pub mod vm;
 pub mod maybe;
 pub mod state;

@@ -53,8 +53,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit" : "0x",
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit" : "0x"
 }
 }
 }"#;

@@ -42,9 +42,6 @@ pub struct EthashParams {
 /// Homestead transition block number.
 #[serde(rename="frontierCompatibilityModeLimit")]
 pub frontier_compatibility_mode_limit: Uint,
-/// DAO rescue soft-fork?
-#[serde(rename="daoRescueSoftFork")]
-pub dao_rescue_soft_fork: bool,
 }

 /// Ethash engine deserialization.

@@ -69,8 +66,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar": "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0x42",
-"daoRescueSoftFork": true
+"frontierCompatibilityModeLimit": "0x42"
 }
 }"#;

@@ -63,8 +63,7 @@ mod tests {
 "durationLimit": "0x0d",
 "blockReward": "0x4563918244F40000",
 "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit" : "0x",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit" : "0x"
 }
 }
 },
|
155
json/src/trie/input.rs
Normal file
155
json/src/trie/input.rs
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
// Copyright 2015, 2016 Ethcore (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Trie test input deserialization.
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use std::str::FromStr;
|
||||||
|
use bytes::Bytes;
|
||||||
|
use serde::{Deserialize, Deserializer, Error};
|
||||||
|
use serde::de::{Visitor, MapVisitor, SeqVisitor};
|
||||||
|
|
||||||
|
/// Trie test input.
|
||||||
|
#[derive(Debug, PartialEq)]
|
||||||
|
pub struct Input {
|
||||||
|
/// Input params.
|
||||||
|
pub data: BTreeMap<Bytes, Option<Bytes>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deserialize for Input {
|
||||||
|
fn deserialize<D>(deserializer: &mut D) -> Result<Self, D::Error>
|
||||||
|
where D: Deserializer
|
||||||
|
{
|
||||||
|
deserializer.deserialize(InputVisitor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct InputVisitor;
|
||||||
|
|
||||||
|
impl Visitor for InputVisitor {
|
||||||
|
type Value = Input;
|
||||||
|
|
||||||
|
fn visit_map<V>(&mut self, mut visitor: V) -> Result<Self::Value, V::Error> where V: MapVisitor {
|
||||||
|
let mut result = BTreeMap::new();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let key_str: Option<String> = try!(visitor.visit_key());
|
||||||
|
let key = match key_str {
|
||||||
|
Some(ref k) if k.starts_with("0x") => try!(Bytes::from_str(k).map_err(Error::custom)),
|
||||||
|
Some(k) => Bytes::new(k.into_bytes()),
|
||||||
|
None => { break; }
|
||||||
|
};
|
||||||
|
|
||||||
|
let val_str: Option<String> = try!(visitor.visit_value());
|
||||||
|
let val = match val_str {
|
||||||
|
Some(ref v) if v.starts_with("0x") => Some(try!(Bytes::from_str(v).map_err(Error::custom))),
|
||||||
|
Some(v) => Some(Bytes::new(v.into_bytes())),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
result.insert(key, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
try!(visitor.end());
|
||||||
|
|
||||||
|
let input = Input {
|
||||||
|
data: result
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(input)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn visit_seq<V>(&mut self, mut visitor: V) -> Result<Self::Value, V::Error> where V: SeqVisitor {
|
||||||
|
let mut result = BTreeMap::new();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
let keyval: Option<Vec<Option<String>>> = try!(visitor.visit());
|
||||||
|
let keyval = match keyval {
|
||||||
|
Some(k) => k,
|
||||||
|
_ => { break; },
|
||||||
|
};
|
||||||
|
|
||||||
|
if keyval.len() != 2 {
|
||||||
|
return Err(Error::custom("Invalid key value pair."));
|
||||||
|
}
|
||||||
|
|
||||||
|
let ref key_str: Option<String> = keyval[0];
|
||||||
|
let ref val_str: Option<String> = keyval[1];
|
||||||
|
|
||||||
|
let key = match *key_str {
|
||||||
|
Some(ref k) if k.starts_with("0x") => try!(Bytes::from_str(k).map_err(Error::custom)),
|
||||||
|
Some(ref k) => Bytes::new(k.clone().into_bytes()),
|
||||||
|
None => { break; }
|
||||||
|
};
|
||||||
|
|
||||||
|
let val = match *val_str {
|
||||||
|
Some(ref v) if v.starts_with("0x") => Some(try!(Bytes::from_str(v).map_err(Error::custom))),
|
||||||
|
Some(ref v) => Some(Bytes::new(v.clone().into_bytes())),
|
||||||
|
None => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
result.insert(key, val);
|
||||||
|
}
|
||||||
|
|
||||||
|
try!(visitor.end());
|
||||||
|
|
||||||
|
let input = Input {
|
||||||
|
data: result
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(input)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
use serde_json;
|
||||||
|
use bytes::Bytes;
|
||||||
|
use super::Input;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn input_deserialization_from_map() {
|
||||||
|
let s = r#"{
|
||||||
|
"0x0045" : "0x0123456789",
|
||||||
|
"be" : "e",
|
||||||
|
"0x0a" : null
|
||||||
|
}"#;
|
||||||
|
|
||||||
|
let input: Input = serde_json::from_str(s).unwrap();
|
||||||
|
let mut map = BTreeMap::new();
|
||||||
|
map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])));
|
||||||
|
map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65])));
|
||||||
|
map.insert(Bytes::new(vec![0x0a]), None);
|
||||||
|
assert_eq!(input.data, map);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn input_deserialization_from_array() {
|
||||||
|
let s = r#"[
|
||||||
|
["0x0045", "0x0123456789"],
|
||||||
|
["be", "e"],
|
||||||
|
["0x0a", null]
|
||||||
|
]"#;
|
||||||
|
|
||||||
|
let input: Input = serde_json::from_str(s).unwrap();
|
||||||
|
let mut map = BTreeMap::new();
|
||||||
|
map.insert(Bytes::new(vec![0, 0x45]), Some(Bytes::new(vec![0x01, 0x23, 0x45, 0x67, 0x89])));
|
||||||
|
map.insert(Bytes::new(vec![0x62, 0x65]), Some(Bytes::new(vec![0x65])));
|
||||||
|
map.insert(Bytes::new(vec![0x0a]), None);
|
||||||
|
assert_eq!(input.data, map);
|
||||||
|
}
|
||||||
|
}
|
@ -14,8 +14,12 @@
|
|||||||
// You should have received a copy of the GNU General Public License
|
// You should have received a copy of the GNU General Public License
|
||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
mod api;
|
//! Trie test deserialization.
|
||||||
mod response;
|
|
||||||
|
|
||||||
pub use self::api::RestApi;
|
mod input;
|
||||||
pub use self::api::App;
|
mod trie;
|
||||||
|
mod test;
|
||||||
|
|
||||||
|
pub use self::input::Input;
|
||||||
|
pub use self::trie::Trie;
|
||||||
|
pub use self::test::Test;
|
43
json/src/trie/test.rs
Normal file
43
json/src/trie/test.rs
Normal file
@ -0,0 +1,43 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! TransactionTest test deserializer.

use std::collections::BTreeMap;
use std::io::Read;
use serde_json;
use serde_json::Error;
use trie::Trie;

/// TransactionTest test deserializer.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Test(BTreeMap<String, Trie>);

impl IntoIterator for Test {
	type Item = <BTreeMap<String, Trie> as IntoIterator>::Item;
	type IntoIter = <BTreeMap<String, Trie> as IntoIterator>::IntoIter;

	fn into_iter(self) -> Self::IntoIter {
		self.0.into_iter()
	}
}

impl Test {
	/// Loads test from json.
	pub fn load<R>(reader: R) -> Result<Self, Error> where R: Read {
		serde_json::from_reader(reader)
	}
}
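The Test wrapper above is just a BTreeMap keyed by test name that can be loaded from any Read and iterated. A rough, self-contained sketch of that same load-and-iterate pattern, using plain serde_json values in place of the crate's Trie type and a hypothetical fixture string (not taken from the repository):

use std::collections::BTreeMap;
use std::io::Read;

use serde_json::{self, Error, Value};

// Analogue of json::trie::Test, with serde_json::Value standing in for Trie.
struct Test(BTreeMap<String, Value>);

impl Test {
    fn load<R>(reader: R) -> Result<Self, Error> where R: Read {
        serde_json::from_reader(reader).map(Test)
    }
}

fn main() -> Result<(), Error> {
    // Hypothetical fixture shaped like the trie test files ("in" pairs plus a "root" hash).
    let json = r#"{ "example": { "in": [["do", "verb"], ["dog", "puppy"]], "root": "0xdeadbeef" } }"#;
    let tests = Test::load(json.as_bytes())?;
    for (name, case) in tests.0 {
        println!("{}: {}", name, case["root"]);
    }
    Ok(())
}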
json/src/trie/trie.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Trie test deserialization.

use hash::H256;
use trie::Input;

/// Trie test deserialization.
#[derive(Debug, Deserialize, PartialEq)]
pub struct Trie {
	/// Trie test input.
	#[serde(rename="in")]
	pub input: Input,
	/// Trie root hash.
	pub root: H256,
}
@@ -32,7 +32,19 @@ Usage:
  parity [options]
  parity ui [options]

-Protocol Options:
+Operating Options:
+  --mode MODE              Set the operating mode. MODE can be one of:
+                           active - Parity continuously syncs the chain.
+                           passive - Parity syncs initially, then sleeps and
+                           wakes regularly to resync.
+                           dark - Parity syncs only when an external interface
+                           is active. [default: active].
+  --mode-timeout SECS      Specify the number of seconds before inactivity
+                           timeout occurs when mode is dark or passive
+                           [default: 300].
+  --mode-alarm SECS        Specify the number of seconds before auto sleep
+                           reawake timeout occurs when mode is passive
+                           [default: 3600].
   --chain CHAIN            Specify the blockchain type. CHAIN may be either a
                            JSON chain specification file or olympic, frontier,
                            homestead, mainnet, morden, or testnet
@@ -45,9 +57,8 @@ Protocol Options:
   --fork POLICY            Specifies the client's fork policy. POLICY must be
                            one of:
                            dogmatic - sticks rigidly to the standard chain.
-                           dao-soft - votes for the DAO-rescue soft-fork.
-                           normal - goes with whatever fork is decided but
-                           votes for none. [default: normal].
+                           none - goes with whatever fork is decided but
+                           votes for none. [default: none].

 Account Options:
   --unlock ACCOUNTS        Unlock ACCOUNTS for the duration of the execution.
@@ -269,6 +280,9 @@ pub struct Args {
 	pub arg_pid_file: String,
 	pub arg_file: Option<String>,
 	pub arg_path: Vec<String>,
+	pub flag_mode: String,
+	pub flag_mode_timeout: u64,
+	pub flag_mode_alarm: u64,
 	pub flag_chain: String,
 	pub flag_db_path: String,
 	pub flag_identity: String,
@@ -28,7 +28,7 @@ use util::*;
 use util::log::Colour::*;
 use ethcore::account_provider::AccountProvider;
 use util::network_settings::NetworkSettings;
-use ethcore::client::{append_path, get_db_path, ClientConfig, DatabaseCompactionProfile, Switch, VMType};
+use ethcore::client::{append_path, get_db_path, Mode, ClientConfig, DatabaseCompactionProfile, Switch, VMType};
 use ethcore::miner::{MinerOptions, PendingSet};
 use ethcore::ethereum;
 use ethcore::spec::Spec;
@@ -49,8 +49,7 @@ pub struct Directories {

 #[derive(Eq, PartialEq, Debug)]
 pub enum Policy {
-	DaoSoft,
-	Normal,
+	None,
 	Dogmatic,
 }

@@ -61,6 +60,15 @@ impl Configuration {
 		}
 	}

+	pub fn mode(&self) -> Mode {
+		match &(self.args.flag_mode[..]) {
+			"active" => Mode::Active,
+			"passive" => Mode::Passive(Duration::from_secs(self.args.flag_mode_timeout), Duration::from_secs(self.args.flag_mode_alarm)),
+			"dark" => Mode::Dark(Duration::from_secs(self.args.flag_mode_timeout)),
+			_ => die!("{}: Invalid address for --mode. Must be one of active, passive or dark.", self.args.flag_mode),
+		}
+	}
+
 	fn net_port(&self) -> u16 {
 		self.args.flag_port
 	}
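For reference, a minimal, stand-alone sketch (not part of the diff) of how the documented flag values map onto the new Mode variants, using the defaults from the usage text above (--mode-timeout 300, --mode-alarm 3600); the enum here is a simplified stand-in for ethcore's Mode:

use std::time::Duration;

// Simplified stand-in mirroring the variants used in Configuration::mode().
enum Mode {
    Active,
    Passive(Duration, Duration), // (inactivity timeout, reawake alarm)
    Dark(Duration),              // inactivity timeout
}

fn mode_from_flags(mode: &str, timeout: u64, alarm: u64) -> Option<Mode> {
    match mode {
        "active" => Some(Mode::Active),
        "passive" => Some(Mode::Passive(Duration::from_secs(timeout), Duration::from_secs(alarm))),
        "dark" => Some(Mode::Dark(Duration::from_secs(timeout))),
        _ => None, // the real code calls die!() on anything else
    }
}

fn main() {
    // --mode passive with the documented defaults:
    if let Some(Mode::Passive(t, a)) = mode_from_flags("passive", 300, 3600) {
        assert_eq!((t, a), (Duration::from_secs(300), Duration::from_secs(3600)));
    }
}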
@@ -126,33 +134,24 @@ impl Configuration {

 	pub fn policy(&self) -> Policy {
 		match self.args.flag_fork.as_str() {
-			"dao-soft" => Policy::DaoSoft,
-			"normal" => Policy::Normal,
+			"none" => Policy::None,
 			"dogmatic" => Policy::Dogmatic,
 			x => die!("{}: Invalid value given for --policy option. Use --help for more info.", x)
 		}
 	}

 	pub fn gas_floor_target(&self) -> U256 {
-		if self.policy() == Policy::DaoSoft {
-			3_141_592.into()
-		} else {
-			let d = &self.args.flag_gas_floor_target;
-			U256::from_dec_str(d).unwrap_or_else(|_| {
-				die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
-			})
-		}
+		let d = &self.args.flag_gas_floor_target;
+		U256::from_dec_str(d).unwrap_or_else(|_| {
+			die!("{}: Invalid target gas floor given. Must be a decimal unsigned 256-bit number.", d)
+		})
 	}

 	pub fn gas_ceil_target(&self) -> U256 {
-		if self.policy() == Policy::DaoSoft {
-			3_141_592.into()
-		} else {
-			let d = &self.args.flag_gas_cap;
-			U256::from_dec_str(d).unwrap_or_else(|_| {
-				die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d)
-			})
-		}
+		let d = &self.args.flag_gas_cap;
+		U256::from_dec_str(d).unwrap_or_else(|_| {
+			die!("{}: Invalid target gas ceiling given. Must be a decimal unsigned 256-bit number.", d)
+		})
 	}

 	pub fn gas_price(&self) -> U256 {
@@ -182,7 +181,7 @@ impl Configuration {
 		let wei_per_usd: f32 = 1.0e18 / usd_per_eth;
 		let gas_per_tx: f32 = 21000.0;
 		let wei_per_gas: f32 = wei_per_usd * usd_per_tx / gas_per_tx;
-		info!("Using a conversion rate of Ξ1 = {} ({} wei/gas)", paint(White.bold(), format!("US${}", usd_per_eth)), paint(Yellow.bold(), format!("{}", wei_per_gas)));
+		info!("Using a conversion rate of Ξ1 = {} ({} wei/gas)", format!("US${}", usd_per_eth).apply(White.bold()), format!("{}", wei_per_gas).apply(Yellow.bold()));
 		U256::from_dec_str(&format!("{:.0}", wei_per_gas)).unwrap()
 	}
 }
@@ -198,7 +197,7 @@ impl Configuration {

 	pub fn spec(&self) -> Spec {
 		match self.chain().as_str() {
-			"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(self.policy() != Policy::Dogmatic),
+			"frontier" | "homestead" | "mainnet" => ethereum::new_frontier(),
 			"morden" | "testnet" => ethereum::new_morden(),
 			"olympic" => ethereum::new_olympic(),
 			f => Spec::load(contents(f).unwrap_or_else(|_| {
@@ -302,6 +301,8 @@ impl Configuration {
 	pub fn client_config(&self, spec: &Spec) -> ClientConfig {
 		let mut client_config = ClientConfig::default();

+		client_config.mode = self.mode();
+
 		match self.args.flag_cache {
 			Some(mb) => {
 				client_config.blockchain.max_cache_size = mb * 1024 * 1024;
@@ -337,7 +338,7 @@ impl Configuration {
 			if let journaldb::Algorithm::Archive = client_config.pruning {
 				client_config.trie_spec = TrieSpec::Fat;
 			} else {
-				die!("Fatdb is not supported. Please rerun with --pruning=archive")
+				die!("Fatdb is not supported. Please re-run with --pruning=archive")
 			}
 		}

@@ -352,7 +353,7 @@ impl Configuration {
 		};

 		if self.args.flag_jitvm {
-			client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity built without jit vm."))
+			client_config.vm_type = VMType::jit().unwrap_or_else(|| die!("Parity is built without the JIT EVM."))
 		}

 		trace!(target: "parity", "Using pruning strategy of {}", client_config.pruning);
@@ -80,9 +80,9 @@ use std::thread::sleep;
 use std::time::Duration;
 use rustc_serialize::hex::FromHex;
 use ctrlc::CtrlC;
-use util::{H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError, paint, Colour, version};
+use util::{H256, ToPretty, NetworkConfiguration, PayloadInfo, Bytes, UtilError, Colour, Applyable, version, journaldb};
 use util::panics::{MayPanic, ForwardPanic, PanicHandler};
-use ethcore::client::{BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError};
+use ethcore::client::{Mode, BlockID, BlockChainClient, ClientConfig, get_db_path, BlockImportError};
 use ethcore::error::{ImportError};
 use ethcore::service::ClientService;
 use ethcore::spec::Spec;
@@ -97,7 +97,7 @@ use rpc::RpcServer;
 use signer::{SignerServer, new_token};
 use dapps::WebappServer;
 use io_handler::ClientIoHandler;
-use configuration::Configuration;
+use configuration::{Policy, Configuration};

 fn main() {
 	let conf = Configuration::parse();
@@ -188,10 +188,21 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 	// Raise fdlimit
 	unsafe { ::fdlimit::raise_fd_limit(); }

-	info!("Starting {}", paint(Colour::White.bold(), format!("{}", version())));
+	info!("Starting {}", format!("{}", version()).apply(Colour::White.bold()));
+	info!("Using state DB journalling strategy {}", match client_config.pruning {
+		journaldb::Algorithm::Archive => "archive",
+		journaldb::Algorithm::EarlyMerge => "light",
+		journaldb::Algorithm::OverlayRecent => "fast",
+		journaldb::Algorithm::RefCounted => "basic",
+	}.apply(Colour::White.bold()));

-	let net_settings = conf.net_settings(&spec);
-	let sync_config = conf.sync_config(&spec);
+	// Display warning about using experimental journaldb types
+	match client_config.pruning {
+		journaldb::Algorithm::EarlyMerge | journaldb::Algorithm::RefCounted => {
+			warn!("Your chosen strategy is {}! You can re-run with --pruning to change.", "unstable".apply(Colour::Red.bold()));
+		}
+		_ => {}
+	}

 	// Display warning about using unlock with signer
 	if conf.signer_enabled() && conf.args.flag_unlock.is_some() {
@@ -199,6 +210,14 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 		warn!("NOTE that Signer will not ask you to confirm transactions from unlocked account.");
 	}

+	// Check fork settings.
+	if conf.policy() != Policy::None {
+		warn!("Value given for --policy, yet no proposed forks exist. Ignoring.");
+	}
+
+	let net_settings = conf.net_settings(&spec);
+	let sync_config = conf.sync_config(&spec);
+
 	// Secret Store
 	let account_service = Arc::new(conf.account_service());

@@ -213,7 +232,12 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)

 	// Build client
 	let mut service = ClientService::start(
-		client_config, spec, net_settings, Path::new(&conf.path()), miner.clone(), !conf.args.flag_no_network
+		client_config,
+		spec,
+		net_settings,
+		Path::new(&conf.path()),
+		miner.clone(),
+		match conf.mode() { Mode::Dark(..) => false, _ => !conf.args.flag_no_network }
 	).unwrap_or_else(|e| die_with_error("Client", e));

 	panic_handler.forward_from(&service);
@@ -282,7 +306,7 @@ fn execute_client(conf: Configuration, spec: Spec, client_config: ClientConfig)
 	});

 	// Register IO handler
 	let io_handler = Arc::new(ClientIoHandler {
 		client: service.client(),
 		info: Informant::new(conf.have_color()),
 		sync: sync.clone(),
@@ -19,8 +19,7 @@ use std::fs::File;
 use std::io::{Read, Write, Error as IoError, ErrorKind};
 use std::path::PathBuf;
 use std::fmt::{Display, Formatter, Error as FmtError};
-use util::migration::{Manager as MigrationManager, Config as MigrationConfig, MigrationIterator};
-use util::kvdb::{Database, DatabaseConfig, CompactionProfile};
+use util::migration::{Manager as MigrationManager, Config as MigrationConfig, Error as MigrationError};
 use ethcore::migrations;

 /// Database is assumed to be at default version, when no version file is found.
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl From<MigrationError> for Error {
|
||||||
|
fn from(err: MigrationError) -> Self {
|
||||||
|
match err {
|
||||||
|
MigrationError::Io(e) => Error::Io(e),
|
||||||
|
_ => Error::MigrationFailed,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Returns the version file path.
|
/// Returns the version file path.
|
||||||
fn version_file_path(path: &PathBuf) -> PathBuf {
|
fn version_file_path(path: &PathBuf) -> PathBuf {
|
||||||
let mut file_path = path.clone();
|
let mut file_path = path.clone();
|
||||||
@ -109,14 +117,6 @@ fn extras_database_path(path: &PathBuf) -> PathBuf {
|
|||||||
extras_path
|
extras_path
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Temporary database path used for migration.
|
|
||||||
fn temp_database_path(path: &PathBuf) -> PathBuf {
|
|
||||||
let mut temp_path = path.clone();
|
|
||||||
temp_path.pop();
|
|
||||||
temp_path.push("temp_migration");
|
|
||||||
temp_path
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Database backup
|
/// Database backup
|
||||||
fn backup_database_path(path: &PathBuf) -> PathBuf {
|
fn backup_database_path(path: &PathBuf) -> PathBuf {
|
||||||
let mut backup_path = path.clone();
|
let mut backup_path = path.clone();
|
||||||
@@ -146,44 +146,27 @@ fn extras_database_migrations() -> Result<MigrationManager, Error> {
 }

 /// Migrates database at given position with given migration rules.
-fn migrate_database(version: u32, path: PathBuf, migrations: MigrationManager) -> Result<(), Error> {
+fn migrate_database(version: u32, db_path: PathBuf, migrations: MigrationManager) -> Result<(), Error> {
 	// check if migration is needed
 	if !migrations.is_needed(version) {
 		return Ok(())
 	}

-	let temp_path = temp_database_path(&path);
-	let backup_path = backup_database_path(&path);
-	// remote the dir if it exists
-	let _ = fs::remove_dir_all(&temp_path);
+	let backup_path = backup_database_path(&db_path);
+	// remove the backup dir if it exists
 	let _ = fs::remove_dir_all(&backup_path);

-	{
-		let db_config = DatabaseConfig {
-			prefix_size: None,
-			max_open_files: 64,
-			cache_size: None,
-			compaction: CompactionProfile::default(),
-		};
-
-		// open old database
-		let old = try!(Database::open(&db_config, path.to_str().unwrap()).map_err(|_| Error::MigrationFailed));
-
-		// create new database
-		let mut temp = try!(Database::open(&db_config, temp_path.to_str().unwrap()).map_err(|_| Error::MigrationFailed));
-
-		// migrate old database to the new one
-		try!(migrations.execute(MigrationIterator::from(old.iter()), version, &mut temp).map_err(|_| Error::MigrationFailed));
-	}
+	// migrate old database to the new one
+	let temp_path = try!(migrations.execute(&db_path, version));

 	// create backup
-	try!(fs::rename(&path, &backup_path));
+	try!(fs::rename(&db_path, &backup_path));

 	// replace the old database with the new one
-	if let Err(err) = fs::rename(&temp_path, &path) {
+	if let Err(err) = fs::rename(&temp_path, &db_path) {
 		// if something went wrong, bring back backup
-		try!(fs::rename(&backup_path, path));
-		return Err(From::from(err));
+		try!(fs::rename(&backup_path, &db_path));
+		return Err(err.into());
 	}

 	// remove backup
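The rewritten migrate_database keeps the same backup-and-swap flow: migrate into a temporary location, rename the old database aside as a backup, move the migrated copy into place, and roll back from the backup if that final rename fails. A minimal, stand-alone sketch of that flow under assumed paths and a stand-in migrate step (none of this is the Parity API):

use std::fs;
use std::io;
use std::path::{Path, PathBuf};

// Stand-in for the real migration: produces a migrated copy at a temporary path.
fn run_migration(db: &Path) -> io::Result<PathBuf> {
    let temp = db.with_extension("migrated");
    fs::create_dir_all(&temp)?;
    Ok(temp)
}

fn swap_in_migrated(db: &Path, backup: &Path) -> io::Result<()> {
    let temp = run_migration(db)?;
    let _ = fs::remove_dir_all(backup);      // drop any stale backup first
    fs::rename(db, backup)?;                 // keep the old database as a backup
    if let Err(err) = fs::rename(&temp, db) {
        fs::rename(backup, db)?;             // roll back if the swap fails
        return Err(err);
    }
    fs::remove_dir_all(backup)               // success: the backup is no longer needed
}

fn main() -> io::Result<()> {
    // Hypothetical locations, for illustration only.
    fs::create_dir_all("./db")?;
    swap_in_migrated(Path::new("./db"), Path::new("./db_backup"))
}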
@@ -166,7 +166,7 @@ pub fn setup_rpc<T: Extendable>(server: T, deps: Arc<Dependencies>, apis: ApiSet
 			server.add_delegate(EthcoreClient::new(&deps.client, &deps.miner, deps.logger.clone(), deps.settings.clone(), queue).to_delegate())
 		},
 		Api::EthcoreSet => {
-			server.add_delegate(EthcoreSetClient::new(&deps.miner, &deps.net_service).to_delegate())
+			server.add_delegate(EthcoreSetClient::new(&deps.client, &deps.miner, &deps.net_service).to_delegate())
 		},
 		Api::Traces => {
 			server.add_delegate(TracesClient::new(&deps.client, &deps.miner).to_delegate())
@@ -14,11 +14,10 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-extern crate ansi_term;
-use self::ansi_term::Colour::White;
 use std::io;
 use std::path::PathBuf;
 use std::sync::Arc;
+use util::{Colour, Applyable};
 use util::panics::{PanicHandler, ForwardPanic};
 use util::path::restrict_permissions_owner;
 use die::*;
@@ -67,7 +66,7 @@ pub fn new_token(path: String) -> io::Result<()> {
 	let mut codes = try!(signer::AuthCodes::from_file(&path));
 	let code = try!(codes.generate_new());
 	try!(codes.to_file(&path));
-	println!("This key code will authorise your System Signer UI: {}", White.bold().paint(code));
+	println!("This key code will authorise your System Signer UI: {}", code.apply(Colour::White.bold()));
 	Ok(())
 }

@@ -16,8 +16,10 @@

 mod poll_manager;
 mod poll_filter;
+mod requests;
 mod signing_queue;

 pub use self::poll_manager::PollManager;
 pub use self::poll_filter::PollFilter;
-pub use self::signing_queue::{ConfirmationsQueue, SigningQueue};
+pub use self::requests::{TransactionRequest, TransactionConfirmation, CallRequest};
+pub use self::signing_queue::{ConfirmationsQueue, SigningQueue, QueueEvent};
rpc/src/v1/helpers/requests.rs (new file, 64 lines)
@@ -0,0 +1,64 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

use util::{Address, U256};

/// Transaction request coming from RPC
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
pub struct TransactionRequest {
	/// Sender
	pub from: Address,
	/// Recipient
	pub to: Option<Address>,
	/// Gas Price
	pub gas_price: Option<U256>,
	/// Gas
	pub gas: Option<U256>,
	/// Value of transaction in wei
	pub value: Option<U256>,
	/// Additional data sent with transaction
	pub data: Option<Vec<u8>>,
	/// Transaction's nonce
	pub nonce: Option<U256>,
}

/// Transaction confirmation waiting in a queue
#[derive(Debug, Clone, Default, Eq, PartialEq, Hash)]
pub struct TransactionConfirmation {
	/// Id of this confirmation
	pub id: U256,
	/// TransactionRequest
	pub transaction: TransactionRequest,
}

/// Call request
#[derive(Debug, Default, PartialEq)]
pub struct CallRequest {
	/// From
	pub from: Option<Address>,
	/// To
	pub to: Option<Address>,
	/// Gas Price
	pub gas_price: Option<U256>,
	/// Gas
	pub gas: Option<U256>,
	/// Value
	pub value: Option<U256>,
	/// Data
	pub data: Option<Vec<u8>>,
	/// Nonce
	pub nonce: Option<U256>,
}
@@ -18,9 +18,9 @@ use std::thread;
 use std::time::{Instant, Duration};
 use std::sync::{mpsc, Mutex, RwLock, Arc};
 use std::collections::HashMap;
-use v1::types::{TransactionRequest, TransactionConfirmation};
-use util::U256;
 use jsonrpc_core;
+use util::U256;
+use v1::helpers::{TransactionRequest, TransactionConfirmation};

 /// Result that can be returned from JSON RPC.
 pub type RpcResult = Result<jsonrpc_core::Value, jsonrpc_core::Error>;
@@ -301,10 +301,9 @@ mod test {
 	use std::time::Duration;
 	use std::thread;
 	use std::sync::{Arc, Mutex};
-	use util::hash::Address;
-	use util::numbers::{U256, H256};
-	use v1::types::TransactionRequest;
-	use super::*;
+	use util::{Address, U256, H256};
+	use v1::helpers::{SigningQueue, ConfirmationsQueue, QueueEvent, TransactionRequest};
+	use v1::types::H256 as NH256;
 	use jsonrpc_core::to_value;

 	fn request() -> TransactionRequest {
@@ -337,10 +336,10 @@ mod test {
 			// Just wait for the other thread to start
 			thread::sleep(Duration::from_millis(100));
 		}
-		queue.request_confirmed(id, to_value(&H256::from(1)));
+		queue.request_confirmed(id, to_value(&NH256::from(H256::from(1))));

 		// then
-		assert_eq!(handle.join().expect("Thread should finish nicely"), to_value(&H256::from(1)));
+		assert_eq!(handle.join().expect("Thread should finish nicely"), to_value(&NH256::from(H256::from(1))));
 	}

 	#[test]
@@ -39,7 +39,8 @@ use ethcore::log_entry::LogEntry;
 use ethcore::filter::Filter as EthcoreFilter;
 use self::ethash::SeedHashCompute;
 use v1::traits::Eth;
-use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, OptionalValue, Index, Filter, Log, Receipt};
+use v1::types::{Block, BlockTransactions, BlockNumber, Bytes, SyncStatus, SyncInfo, Transaction, CallRequest, Index, Filter, Log, Receipt, H64 as RpcH64, H256 as RpcH256, H160 as RpcH160, U256 as RpcU256};
+use v1::helpers::CallRequest as CRequest;
 use v1::impls::{default_gas_price, dispatch_transaction, error_codes};
 use serde;

@ -86,28 +87,28 @@ impl<C, S, M, EM> EthClient<C, S, M, EM> where
|
|||||||
let block_view = BlockView::new(&bytes);
|
let block_view = BlockView::new(&bytes);
|
||||||
let view = block_view.header_view();
|
let view = block_view.header_view();
|
||||||
let block = Block {
|
let block = Block {
|
||||||
hash: OptionalValue::Value(view.sha3()),
|
hash: Some(view.sha3().into()),
|
||||||
parent_hash: view.parent_hash(),
|
parent_hash: view.parent_hash().into(),
|
||||||
uncles_hash: view.uncles_hash(),
|
uncles_hash: view.uncles_hash().into(),
|
||||||
author: view.author(),
|
author: view.author().into(),
|
||||||
miner: view.author(),
|
miner: view.author().into(),
|
||||||
state_root: view.state_root(),
|
state_root: view.state_root().into(),
|
||||||
transactions_root: view.transactions_root(),
|
transactions_root: view.transactions_root().into(),
|
||||||
receipts_root: view.receipts_root(),
|
receipts_root: view.receipts_root().into(),
|
||||||
number: OptionalValue::Value(U256::from(view.number())),
|
number: Some(view.number().into()),
|
||||||
gas_used: view.gas_used(),
|
gas_used: view.gas_used().into(),
|
||||||
gas_limit: view.gas_limit(),
|
gas_limit: view.gas_limit().into(),
|
||||||
logs_bloom: view.log_bloom(),
|
logs_bloom: view.log_bloom().into(),
|
||||||
timestamp: U256::from(view.timestamp()),
|
timestamp: view.timestamp().into(),
|
||||||
difficulty: view.difficulty(),
|
difficulty: view.difficulty().into(),
|
||||||
total_difficulty: total_difficulty,
|
total_difficulty: total_difficulty.into(),
|
||||||
seal_fields: view.seal().into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
|
seal_fields: view.seal().into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
|
||||||
uncles: block_view.uncle_hashes(),
|
uncles: block_view.uncle_hashes().into_iter().map(Into::into).collect(),
|
||||||
transactions: {
|
transactions: {
|
||||||
if include_txs {
|
if include_txs {
|
||||||
BlockTransactions::Full(block_view.localized_transactions().into_iter().map(From::from).collect())
|
BlockTransactions::Full(block_view.localized_transactions().into_iter().map(Into::into).collect())
|
||||||
} else {
|
} else {
|
||||||
BlockTransactions::Hashes(block_view.transaction_hashes())
|
BlockTransactions::Hashes(block_view.transaction_hashes().into_iter().map(Into::into).collect())
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
extra_data: Bytes::new(view.extra_data())
|
extra_data: Bytes::new(view.extra_data())
|
||||||
@ -127,7 +128,6 @@ impl<C, S, M, EM> EthClient<C, S, M, EM> where
|
|||||||
|
|
||||||
fn uncle(&self, id: UncleID) -> Result<Value, Error> {
|
fn uncle(&self, id: UncleID) -> Result<Value, Error> {
|
||||||
let client = take_weak!(self.client);
|
let client = take_weak!(self.client);
|
||||||
|
|
||||||
let uncle: BlockHeader = match client.uncle(id) {
|
let uncle: BlockHeader = match client.uncle(id) {
|
||||||
Some(rlp) => decode(&rlp),
|
Some(rlp) => decode(&rlp),
|
||||||
None => { return Ok(Value::Null); }
|
None => { return Ok(Value::Null); }
|
||||||
@ -138,22 +138,22 @@ impl<C, S, M, EM> EthClient<C, S, M, EM> where
|
|||||||
};
|
};
|
||||||
|
|
||||||
let block = Block {
|
let block = Block {
|
||||||
hash: OptionalValue::Value(uncle.hash()),
|
hash: Some(uncle.hash().into()),
|
||||||
parent_hash: uncle.parent_hash,
|
parent_hash: uncle.parent_hash.into(),
|
||||||
uncles_hash: uncle.uncles_hash,
|
uncles_hash: uncle.uncles_hash.into(),
|
||||||
author: uncle.author,
|
author: uncle.author.into(),
|
||||||
miner: uncle.author,
|
miner: uncle.author.into(),
|
||||||
state_root: uncle.state_root,
|
state_root: uncle.state_root.into(),
|
||||||
transactions_root: uncle.transactions_root,
|
transactions_root: uncle.transactions_root.into(),
|
||||||
number: OptionalValue::Value(U256::from(uncle.number)),
|
number: Some(uncle.number.into()),
|
||||||
gas_used: uncle.gas_used,
|
gas_used: uncle.gas_used.into(),
|
||||||
gas_limit: uncle.gas_limit,
|
gas_limit: uncle.gas_limit.into(),
|
||||||
logs_bloom: uncle.log_bloom,
|
logs_bloom: uncle.log_bloom.into(),
|
||||||
timestamp: U256::from(uncle.timestamp),
|
timestamp: uncle.timestamp.into(),
|
||||||
difficulty: uncle.difficulty,
|
difficulty: uncle.difficulty.into(),
|
||||||
total_difficulty: uncle.difficulty + parent_difficulty,
|
total_difficulty: (uncle.difficulty + parent_difficulty).into(),
|
||||||
receipts_root: uncle.receipts_root,
|
receipts_root: uncle.receipts_root.into(),
|
||||||
extra_data: Bytes::new(uncle.extra_data),
|
extra_data: uncle.extra_data.into(),
|
||||||
seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
|
seal_fields: uncle.seal.into_iter().map(|f| decode(&f)).map(Bytes::new).collect(),
|
||||||
uncles: vec![],
|
uncles: vec![],
|
||||||
transactions: BlockTransactions::Hashes(vec![]),
|
transactions: BlockTransactions::Hashes(vec![]),
|
||||||
@ -161,7 +161,7 @@ impl<C, S, M, EM> EthClient<C, S, M, EM> where
|
|||||||
to_value(&block)
|
to_value(&block)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn sign_call(&self, request: CallRequest) -> Result<SignedTransaction, Error> {
|
fn sign_call(&self, request: CRequest) -> Result<SignedTransaction, Error> {
|
||||||
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
||||||
let from = request.from.unwrap_or(Address::zero());
|
let from = request.from.unwrap_or(Address::zero());
|
||||||
Ok(EthTransaction {
|
Ok(EthTransaction {
|
||||||
@ -186,7 +186,7 @@ pub fn pending_logs<M>(miner: &M, filter: &EthcoreFilter) -> Vec<Log> where M: M
|
|||||||
.filter(|pair| filter.matches(&pair.1))
|
.filter(|pair| filter.matches(&pair.1))
|
||||||
.map(|pair| {
|
.map(|pair| {
|
||||||
let mut log = Log::from(pair.1);
|
let mut log = Log::from(pair.1);
|
||||||
log.transaction_hash = Some(pair.0);
|
log.transaction_hash = Some(pair.0.into());
|
||||||
log
|
log
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
@ -241,6 +241,19 @@ fn no_author_err() -> Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<C, S, M, EM> EthClient<C, S, M, EM> where
|
||||||
|
C: MiningBlockChainClient + 'static,
|
||||||
|
S: SyncProvider + 'static,
|
||||||
|
M: MinerService + 'static,
|
||||||
|
EM: ExternalMinerService + 'static {
|
||||||
|
|
||||||
|
fn active(&self) -> Result<(), Error> {
|
||||||
|
// TODO: only call every 30s at most.
|
||||||
|
take_weak!(self.client).keep_alive();
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
||||||
C: MiningBlockChainClient + 'static,
|
C: MiningBlockChainClient + 'static,
|
||||||
S: SyncProvider + 'static,
|
S: SyncProvider + 'static,
|
||||||
@ -248,6 +261,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
EM: ExternalMinerService + 'static {
|
EM: ExternalMinerService + 'static {
|
||||||
|
|
||||||
fn protocol_version(&self, params: Params) -> Result<Value, Error> {
|
fn protocol_version(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())),
|
Params::None => Ok(Value::String(format!("{}", take_weak!(self.sync).status().protocol_version).to_owned())),
|
||||||
_ => Err(Error::invalid_params())
|
_ => Err(Error::invalid_params())
|
||||||
@ -255,6 +269,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn syncing(&self, params: Params) -> Result<Value, Error> {
|
fn syncing(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => {
|
Params::None => {
|
||||||
let status = take_weak!(self.sync).status();
|
let status = take_weak!(self.sync).status();
|
||||||
@ -262,15 +277,17 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
SyncState::Idle => SyncStatus::None,
|
SyncState::Idle => SyncStatus::None,
|
||||||
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead => {
|
SyncState::Waiting | SyncState::Blocks | SyncState::NewBlocks | SyncState::ChainHead => {
|
||||||
let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number);
|
let current_block = U256::from(take_weak!(self.client).chain_info().best_block_number);
|
||||||
|
let highest_block = U256::from(status.highest_block_number.unwrap_or(status.start_block_number));
|
||||||
|
|
||||||
let info = SyncInfo {
|
if highest_block > current_block + U256::from(6) {
|
||||||
starting_block: U256::from(status.start_block_number),
|
let info = SyncInfo {
|
||||||
current_block: current_block,
|
starting_block: status.start_block_number.into(),
|
||||||
highest_block: U256::from(status.highest_block_number.unwrap_or(status.start_block_number))
|
current_block: current_block.into(),
|
||||||
};
|
highest_block: highest_block.into(),
|
||||||
match info.highest_block > info.current_block + U256::from(6) {
|
};
|
||||||
true => SyncStatus::Info(info),
|
SyncStatus::Info(info)
|
||||||
false => SyncStatus::None,
|
} else {
|
||||||
|
SyncStatus::None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -281,13 +298,15 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn author(&self, params: Params) -> Result<Value, Error> {
|
fn author(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => to_value(&take_weak!(self.miner).author()),
|
Params::None => to_value(&RpcH160::from(take_weak!(self.miner).author())),
|
||||||
_ => Err(Error::invalid_params()),
|
_ => Err(Error::invalid_params()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn is_mining(&self, params: Params) -> Result<Value, Error> {
|
fn is_mining(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => to_value(&self.external_miner.is_mining()),
|
Params::None => to_value(&self.external_miner.is_mining()),
|
||||||
_ => Err(Error::invalid_params())
|
_ => Err(Error::invalid_params())
|
||||||
@ -295,118 +314,148 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn hashrate(&self, params: Params) -> Result<Value, Error> {
|
fn hashrate(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => to_value(&self.external_miner.hashrate()),
|
Params::None => to_value(&RpcU256::from(self.external_miner.hashrate())),
|
||||||
_ => Err(Error::invalid_params())
|
_ => Err(Error::invalid_params())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn gas_price(&self, params: Params) -> Result<Value, Error> {
|
fn gas_price(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => {
|
Params::None => {
|
||||||
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));
|
||||||
to_value(&default_gas_price(&*client, &*miner))
|
to_value(&RpcU256::from(default_gas_price(&*client, &*miner)))
|
||||||
}
|
}
|
||||||
_ => Err(Error::invalid_params())
|
_ => Err(Error::invalid_params())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn accounts(&self, _: Params) -> Result<Value, Error> {
|
fn accounts(&self, _: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
let store = take_weak!(self.accounts);
|
let store = take_weak!(self.accounts);
|
||||||
to_value(&store.accounts())
|
to_value(&store.accounts().into_iter().map(Into::into).collect::<Vec<RpcH160>>())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_number(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
match params {
|
match params {
|
||||||
Params::None => to_value(&U256::from(take_weak!(self.client).chain_info().best_block_number)),
|
Params::None => to_value(&RpcU256::from(take_weak!(self.client).chain_info().best_block_number)),
|
||||||
_ => Err(Error::invalid_params())
|
_ => Err(Error::invalid_params())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn balance(&self, params: Params) -> Result<Value, Error> {
|
fn balance(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
from_params_default_second(params)
|
||||||
.and_then(|(address, block_number,)| match block_number {
|
.and_then(|(address, block_number,)| {
|
||||||
BlockNumber::Pending => to_value(&take_weak!(self.miner).balance(take_weak!(self.client).deref(), &address)),
|
let address: Address = RpcH160::into(address);
|
||||||
id => to_value(&try!(take_weak!(self.client).balance(&address, id.into()).ok_or_else(make_unsupported_err))),
|
match block_number {
|
||||||
})
|
BlockNumber::Pending => to_value(&RpcU256::from(take_weak!(self.miner).balance(take_weak!(self.client).deref(), &address))),
|
||||||
}
|
id => to_value(&RpcU256::from(try!(take_weak!(self.client).balance(&address, id.into()).ok_or_else(make_unsupported_err)))),
|
||||||
|
|
||||||
fn storage_at(&self, params: Params) -> Result<Value, Error> {
|
|
||||||
from_params_default_third::<Address, U256>(params)
|
|
||||||
.and_then(|(address, position, block_number,)| match block_number {
|
|
||||||
BlockNumber::Pending => to_value(&U256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)))),
|
|
||||||
id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) {
|
|
||||||
Some(s) => to_value(&U256::from(s)),
|
|
||||||
None => Err(make_unsupported_err()), // None is only returned on unsupported requests.
|
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn storage_at(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
|
from_params_default_third::<RpcH160, RpcU256>(params)
|
||||||
|
.and_then(|(address, position, block_number,)| {
|
||||||
|
let address: Address = RpcH160::into(address);
|
||||||
|
let position: U256 = RpcU256::into(position);
|
||||||
|
match block_number {
|
||||||
|
BlockNumber::Pending => to_value(&RpcU256::from(take_weak!(self.miner).storage_at(&*take_weak!(self.client), &address, &H256::from(position)))),
|
||||||
|
id => match take_weak!(self.client).storage_at(&address, &H256::from(position), id.into()) {
|
||||||
|
Some(s) => to_value(&RpcU256::from(s)),
|
||||||
|
None => Err(make_unsupported_err()), // None is only returned on unsupported requests.
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
fn transaction_count(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_count(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
from_params_default_second(params)
|
||||||
.and_then(|(address, block_number,)| match block_number {
|
.and_then(|(address, block_number,)| {
|
||||||
BlockNumber::Pending => to_value(&take_weak!(self.miner).nonce(take_weak!(self.client).deref(), &address)),
|
let address: Address = RpcH160::into(address);
|
||||||
id => to_value(&take_weak!(self.client).nonce(&address, id.into())),
|
match block_number {
|
||||||
|
BlockNumber::Pending => to_value(&RpcU256::from(take_weak!(self.miner).nonce(take_weak!(self.client).deref(), &address))),
|
||||||
|
id => to_value(&take_weak!(self.client).nonce(&address, id.into()).map(RpcU256::from)),
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_transaction_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
||||||
from_params::<(H256,)>(params)
|
try!(self.active());
|
||||||
|
from_params::<(RpcH256,)>(params)
|
||||||
.and_then(|(hash,)| // match
|
.and_then(|(hash,)| // match
|
||||||
take_weak!(self.client).block(BlockID::Hash(hash))
|
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
||||||
.map_or(Ok(Value::Null), |bytes| to_value(&U256::from(BlockView::new(&bytes).transactions_count()))))
|
.map_or(Ok(Value::Null), |bytes| to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count()))))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_transaction_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params::<(BlockNumber,)>(params)
|
from_params::<(BlockNumber,)>(params)
|
||||||
.and_then(|(block_number,)| match block_number {
|
.and_then(|(block_number,)| match block_number {
|
||||||
BlockNumber::Pending => to_value(
|
BlockNumber::Pending => to_value(
|
||||||
&U256::from(take_weak!(self.miner).status().transactions_in_pending_block)
|
&RpcU256::from(take_weak!(self.miner).status().transactions_in_pending_block)
|
||||||
),
|
),
|
||||||
_ => take_weak!(self.client).block(block_number.into())
|
_ => take_weak!(self.client).block(block_number.into())
|
||||||
.map_or(Ok(Value::Null), |bytes| to_value(&U256::from(BlockView::new(&bytes).transactions_count())))
|
.map_or(Ok(Value::Null), |bytes| to_value(&RpcU256::from(BlockView::new(&bytes).transactions_count())))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_uncles_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_uncles_count_by_hash(&self, params: Params) -> Result<Value, Error> {
|
||||||
from_params::<(H256,)>(params)
|
try!(self.active());
|
||||||
|
from_params::<(RpcH256,)>(params)
|
||||||
.and_then(|(hash,)|
|
.and_then(|(hash,)|
|
||||||
take_weak!(self.client).block(BlockID::Hash(hash))
|
take_weak!(self.client).block(BlockID::Hash(hash.into()))
|
||||||
.map_or(Ok(Value::Null), |bytes| to_value(&U256::from(BlockView::new(&bytes).uncles_count()))))
|
.map_or(Ok(Value::Null), |bytes| to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count()))))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_uncles_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_uncles_count_by_number(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params::<(BlockNumber,)>(params)
|
from_params::<(BlockNumber,)>(params)
|
||||||
.and_then(|(block_number,)| match block_number {
|
.and_then(|(block_number,)| match block_number {
|
||||||
BlockNumber::Pending => to_value(&U256::from(0)),
|
BlockNumber::Pending => to_value(&RpcU256::from(0)),
|
||||||
_ => take_weak!(self.client).block(block_number.into())
|
_ => take_weak!(self.client).block(block_number.into())
|
||||||
.map_or(Ok(Value::Null), |bytes| to_value(&U256::from(BlockView::new(&bytes).uncles_count())))
|
.map_or(Ok(Value::Null), |bytes| to_value(&RpcU256::from(BlockView::new(&bytes).uncles_count())))
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn code_at(&self, params: Params) -> Result<Value, Error> {
|
fn code_at(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params_default_second(params)
|
from_params_default_second(params)
|
||||||
.and_then(|(address, block_number,)| match block_number {
|
.and_then(|(address, block_number,)| {
|
||||||
BlockNumber::Pending => to_value(&take_weak!(self.miner).code(take_weak!(self.client).deref(), &address).map_or_else(Bytes::default, Bytes::new)),
|
let address: Address = RpcH160::into(address);
|
||||||
BlockNumber::Latest => to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new)),
|
match block_number {
|
||||||
_ => Err(Error::invalid_params()),
|
BlockNumber::Pending => to_value(&take_weak!(self.miner).code(take_weak!(self.client).deref(), &address).map_or_else(Bytes::default, Bytes::new)),
|
||||||
|
BlockNumber::Latest => to_value(&take_weak!(self.client).code(&address).map_or_else(Bytes::default, Bytes::new)),
|
||||||
|
_ => Err(Error::invalid_params()),
|
||||||
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn block_by_hash(&self, params: Params) -> Result<Value, Error> {
|
||||||
from_params::<(H256, bool)>(params)
|
try!(self.active());
|
||||||
.and_then(|(hash, include_txs)| self.block(BlockID::Hash(hash), include_txs))
|
from_params::<(RpcH256, bool)>(params)
|
||||||
|
.and_then(|(hash, include_txs)| self.block(BlockID::Hash(hash.into()), include_txs))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_by_number(&self, params: Params) -> Result<Value, Error> {
|
fn block_by_number(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params::<(BlockNumber, bool)>(params)
|
from_params::<(BlockNumber, bool)>(params)
|
||||||
.and_then(|(number, include_txs)| self.block(number.into(), include_txs))
|
.and_then(|(number, include_txs)| self.block(number.into(), include_txs))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_by_hash(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_by_hash(&self, params: Params) -> Result<Value, Error> {
|
||||||
from_params::<(H256,)>(params)
|
try!(self.active());
|
||||||
|
from_params::<(RpcH256,)>(params)
|
||||||
.and_then(|(hash,)| {
|
.and_then(|(hash,)| {
|
||||||
let miner = take_weak!(self.miner);
|
let miner = take_weak!(self.miner);
|
||||||
|
let hash: H256 = hash.into();
|
||||||
match miner.transaction(&hash) {
|
match miner.transaction(&hash) {
|
||||||
Some(pending_tx) => to_value(&Transaction::from(pending_tx)),
|
Some(pending_tx) => to_value(&Transaction::from(pending_tx)),
|
||||||
None => self.transaction(TransactionID::Hash(hash))
|
None => self.transaction(TransactionID::Hash(hash))
|
||||||
@ -415,19 +464,23 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
|
||||||
from_params::<(H256, Index)>(params)
|
try!(self.active());
|
||||||
.and_then(|(hash, index)| self.transaction(TransactionID::Location(BlockID::Hash(hash), index.value())))
|
from_params::<(RpcH256, Index)>(params)
|
||||||
|
.and_then(|(hash, index)| self.transaction(TransactionID::Location(BlockID::Hash(hash.into()), index.value())))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn transaction_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
|
fn transaction_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
|
||||||
|
try!(self.active());
|
||||||
from_params::<(BlockNumber, Index)>(params)
|
from_params::<(BlockNumber, Index)>(params)
|
||||||
.and_then(|(number, index)| self.transaction(TransactionID::Location(number.into(), index.value())))
|
 .and_then(|(number, index)| self.transaction(TransactionID::Location(number.into(), index.value())))
 }

 fn transaction_receipt(&self, params: Params) -> Result<Value, Error> {
-from_params::<(H256,)>(params)
+try!(self.active());
+from_params::<(RpcH256,)>(params)
 .and_then(|(hash,)| {
 let miner = take_weak!(self.miner);
+let hash: H256 = hash.into();
 match miner.pending_receipts().get(&hash) {
 Some(receipt) if self.allow_pending_receipt_query => to_value(&Receipt::from(receipt.clone())),
 _ => {
@@ -440,16 +493,19 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn uncle_by_block_hash_and_index(&self, params: Params) -> Result<Value, Error> {
-from_params::<(H256, Index)>(params)
-.and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash), position: index.value() }))
+try!(self.active());
+from_params::<(RpcH256, Index)>(params)
+.and_then(|(hash, index)| self.uncle(UncleID { block: BlockID::Hash(hash.into()), position: index.value() }))
 }

 fn uncle_by_block_number_and_index(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 from_params::<(BlockNumber, Index)>(params)
 .and_then(|(number, index)| self.uncle(UncleID { block: number.into(), position: index.value() }))
 }

 fn compilers(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 match params {
 Params::None => to_value(&vec![] as &Vec<String>),
 _ => Err(Error::invalid_params())
@@ -457,6 +513,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn logs(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 from_params::<(Filter,)>(params)
 .and_then(|(filter,)| {
 let include_pending = filter.to_block == Some(BlockNumber::Pending);
@@ -476,6 +533,7 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn work(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 match params {
 Params::None => {
 let client = take_weak!(self.client);
@@ -503,8 +561,9 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 miner.map_sealing_work(client.deref(), |b| {
 let pow_hash = b.hash();
 let target = Ethash::difficulty_to_boundary(b.block().header().difficulty());
-let seed_hash = &self.seed_compute.lock().unwrap().get_seedhash(b.block().header().number());
-to_value(&(pow_hash, H256::from_slice(&seed_hash[..]), target, &U256::from(b.block().header().number())))
+let seed_hash = self.seed_compute.lock().unwrap().get_seedhash(b.block().header().number());
+let block_number = RpcU256::from(b.block().header().number());
+to_value(&(RpcH256::from(pow_hash), RpcH256::from(seed_hash), RpcH256::from(target), block_number))
 }).unwrap_or(Err(Error::internal_error())) // no work found.
 },
 _ => Err(Error::invalid_params())
@@ -512,7 +571,11 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn submit_work(&self, params: Params) -> Result<Value, Error> {
-from_params::<(H64, H256, H256)>(params).and_then(|(nonce, pow_hash, mix_hash)| {
+try!(self.active());
+from_params::<(RpcH64, RpcH256, RpcH256)>(params).and_then(|(nonce, pow_hash, mix_hash)| {
+let nonce: H64 = nonce.into();
+let pow_hash: H256 = pow_hash.into();
+let mix_hash: H256 = mix_hash.into();
 trace!(target: "miner", "submit_work: Decoded: nonce={}, pow_hash={}, mix_hash={}", nonce, pow_hash, mix_hash);
 let miner = take_weak!(self.miner);
 let client = take_weak!(self.client);
@@ -523,27 +586,31 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn submit_hashrate(&self, params: Params) -> Result<Value, Error> {
-from_params::<(U256, H256)>(params).and_then(|(rate, id)| {
-self.external_miner.submit_hashrate(rate, id);
+try!(self.active());
+from_params::<(RpcU256, RpcH256)>(params).and_then(|(rate, id)| {
+self.external_miner.submit_hashrate(rate.into(), id.into());
 to_value(&true)
 })
 }

 fn send_raw_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 from_params::<(Bytes, )>(params)
 .and_then(|(raw_transaction, )| {
 let raw_transaction = raw_transaction.to_vec();
 match UntrustedRlp::new(&raw_transaction).as_val() {
 Ok(signed_transaction) => dispatch_transaction(&*take_weak!(self.client), &*take_weak!(self.miner), signed_transaction),
-Err(_) => to_value(&H256::zero()),
+Err(_) => to_value(&RpcH256::from(H256::from(0))),
 }
 })
 }

 fn call(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 trace!(target: "jsonrpc", "call: {:?}", params);
 from_params_default_second(params)
 .and_then(|(request, block_number,)| {
+let request = CallRequest::into(request);
 let signed = try!(self.sign_call(request));
 let r = match block_number {
 BlockNumber::Pending => take_weak!(self.miner).call(take_weak!(self.client).deref(), &signed, Default::default()),
@@ -555,27 +622,32 @@ impl<C, S, M, EM> Eth for EthClient<C, S, M, EM> where
 }

 fn estimate_gas(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
 from_params_default_second(params)
 .and_then(|(request, block_number,)| {
+let request = CallRequest::into(request);
 let signed = try!(self.sign_call(request));
 let r = match block_number {
 BlockNumber::Pending => take_weak!(self.miner).call(take_weak!(self.client).deref(), &signed, Default::default()),
 BlockNumber::Latest => take_weak!(self.client).call(&signed, Default::default()),
 _ => return Err(Error::invalid_params()),
 };
-to_value(&r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0)))
+to_value(&RpcU256::from(r.map(|res| res.gas_used + res.refunded).unwrap_or(From::from(0))))
 })
 }

 fn compile_lll(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
 rpc_unimplemented!()
 }

 fn compile_serpent(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
 rpc_unimplemented!()
 }

 fn compile_solidity(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
 rpc_unimplemented!()
 }
 }
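
The `try!(self.active());` calls added at the top of every method above route through a small keep-alive guard: `active()` upgrades the weak client handle and calls `keep_alive()` on it, so an otherwise idle client is not torn down while RPC requests are still being served. A minimal, self-contained sketch of the pattern, using simplified stand-ins (`Client`, `RpcError` and this local `take_weak!` are illustrative, not the real Parity items):

    use std::sync::{Arc, Weak};

    // Stand-in for the blockchain client; only the keep-alive hook matters here.
    struct Client;
    impl Client {
        fn keep_alive(&self) { /* would reset the client's inactivity timer */ }
    }

    // Stand-in for jsonrpc_core::Error.
    struct RpcError;

    // Upgrade a Weak handle or bail out with an RPC error, like `take_weak!`.
    macro_rules! take_weak {
        ($weak:expr) => {
            match $weak.upgrade() {
                Some(arc) => arc,
                None => return Err(RpcError),
            }
        };
    }

    struct EthClient {
        client: Weak<Client>,
    }

    impl EthClient {
        // The guard every RPC method calls first.
        fn active(&self) -> Result<(), RpcError> {
            // TODO (as in the diff): only call every 30s at most.
            take_weak!(self.client).keep_alive();
            Ok(())
        }

        fn compilers(&self) -> Result<Vec<String>, RpcError> {
            self.active()?;        // the 2016-era code spells this `try!(self.active());`
            Ok(vec![])             // eth_getCompilers: no compilers are exposed
        }
    }

    fn main() {
        let backend = Arc::new(Client);
        let rpc = EthClient { client: Arc::downgrade(&backend) };
        assert!(rpc.compilers().is_ok());
    }

If the weak handle can no longer be upgraded, the guard turns the dead reference into an RPC error rather than a panic, which is the same behaviour `take_weak!` gives every other method.
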
@@ -25,7 +25,7 @@ use ethcore::miner::MinerService;
use ethcore::filter::Filter as EthcoreFilter;
use ethcore::client::{BlockChainClient, BlockID};
use v1::traits::EthFilter;
-use v1::types::{BlockNumber, Index, Filter, Log};
+use v1::types::{BlockNumber, Index, Filter, Log, H256 as RpcH256, U256 as RpcU256};
use v1::helpers::{PollFilter, PollManager};
use v1::impls::eth::pending_logs;

@@ -52,6 +52,12 @@ impl<C, M> EthFilterClient<C, M> where
polls: Mutex::new(PollManager::new()),
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C, M> EthFilter for EthFilterClient<C, M> where
@@ -59,40 +65,44 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
M: MinerService + 'static {

fn new_filter(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(Filter,)>(params)
.and_then(|(filter,)| {
let mut polls = self.polls.lock().unwrap();
let block_number = take_weak!(self.client).chain_info().best_block_number;
let id = polls.create_poll(PollFilter::Logs(block_number, Default::default(), filter));
-to_value(&U256::from(id))
+to_value(&RpcU256::from(id))
})
}

fn new_block_filter(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
match params {
Params::None => {
let mut polls = self.polls.lock().unwrap();
let id = polls.create_poll(PollFilter::Block(take_weak!(self.client).chain_info().best_block_number));
-to_value(&U256::from(id))
+to_value(&RpcU256::from(id))
},
_ => Err(Error::invalid_params())
}
}

fn new_pending_transaction_filter(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
match params {
Params::None => {
let mut polls = self.polls.lock().unwrap();
let pending_transactions = take_weak!(self.miner).pending_transactions_hashes();
let id = polls.create_poll(PollFilter::PendingTransaction(pending_transactions));

-to_value(&U256::from(id))
+to_value(&RpcU256::from(id))
},
_ => Err(Error::invalid_params())
}
}

fn filter_changes(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
let client = take_weak!(self.client);
from_params::<(Index,)>(params)
.and_then(|(index,)| {
@@ -106,7 +116,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
let hashes = (*block_number..current_number).into_iter()
.map(BlockID::Number)
.filter_map(|id| client.block_hash(id))
-.collect::<Vec<H256>>();
+.map(Into::into)
+.collect::<Vec<RpcH256>>();

*block_number = current_number;

@@ -125,7 +136,8 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
.iter()
.filter(|hash| !previous_hashes_set.contains(hash))
.cloned()
-.collect::<Vec<H256>>()
+.map(Into::into)
+.collect::<Vec<RpcH256>>()
};

// save all hashes of pending transactions
@@ -181,6 +193,7 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
}

fn filter_logs(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(Index,)>(params)
.and_then(|(index,)| {
let mut polls = self.polls.lock().unwrap();
@@ -206,6 +219,7 @@ impl<C, M> EthFilter for EthFilterClient<C, M> where
}

fn uninstall_filter(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(Index,)>(params)
.and_then(|(index,)| {
self.polls.lock().unwrap().remove_poll(&index.value());
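
The filter methods above now hand back the RPC-facing wrappers (`RpcH256`, `RpcU256`) instead of the internal `util` types, and the `.map(Into::into)` step in `filter_changes` relies on a `From` conversion between the two. A rough illustration of why that one-liner is enough, with simplified stand-ins rather than the real `util::H256` / `v1::types::H256`:

    // Internal 256-bit hash (stand-in for util::H256).
    struct H256([u8; 32]);

    // Serializable RPC wrapper (stand-in for v1::types::H256, aliased RpcH256 in the diff).
    struct RpcH256([u8; 32]);

    impl From<H256> for RpcH256 {
        fn from(h: H256) -> Self { RpcH256(h.0) }
    }

    fn main() {
        let internal = vec![H256([0u8; 32]), H256([1u8; 32])];
        // Same shape as `.map(Into::into).collect::<Vec<RpcH256>>()` in filter_changes.
        let rpc: Vec<RpcH256> = internal.into_iter().map(Into::into).collect();
        assert_eq!(rpc.len(), 2);
    }

Keeping the conversion in a `From` impl means the iterator chain never has to name the wrapper explicitly; the turbofish on `collect` is enough to drive the inference.
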
@@ -20,17 +20,17 @@ use std::sync::{Arc, Weak};
use jsonrpc_core::*;
use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
-use util::numbers::*;
+use util::{U256, Address, H256};
use ethcore::account_provider::AccountProvider;
-use v1::helpers::{SigningQueue, ConfirmationsQueue};
+use v1::helpers::{SigningQueue, ConfirmationsQueue, TransactionRequest as TRequest};
use v1::traits::EthSigning;
-use v1::types::{TransactionRequest, Bytes};
+use v1::types::{TransactionRequest, H160 as RpcH160, H256 as RpcH256, H520 as RpcH520};
use v1::impls::{default_gas_price, sign_and_dispatch};

-fn fill_optional_fields<C, M>(request: &mut TransactionRequest, client: &C, miner: &M)
+fn fill_optional_fields<C, M>(request: &mut TRequest, client: &C, miner: &M)
where C: MiningBlockChainClient, M: MinerService {
if request.value.is_none() {
-request.value = Some(U256::zero());
+request.value = Some(U256::from(0));
}
if request.gas.is_none() {
request.gas = Some(miner.sensible_gas_limit());
@@ -39,7 +39,7 @@ fn fill_optional_fields<C, M>(request: &mut TransactionRequest, client: &C, mine
request.gas_price = Some(default_gas_price(client, miner));
}
if request.data.is_none() {
-request.data = Some(Bytes::new(Vec::new()));
+request.data = Some(Vec::new());
}
}

@@ -61,6 +61,12 @@ impl<C, M> EthSigningQueueClient<C, M> where C: MiningBlockChainClient, M: Miner
miner: Arc::downgrade(miner),
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C, M> EthSigning for EthSigningQueueClient<C, M>
@@ -68,14 +74,17 @@ impl<C, M> EthSigning for EthSigningQueueClient<C, M>
{

fn sign(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
warn!("Invoking eth_sign is not yet supported with signer enabled.");
// TODO [ToDr] Implement sign when rest of the signing queue is ready.
rpc_unimplemented!()
}

fn send_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(TransactionRequest, )>(params)
-.and_then(|(mut request, )| {
+.and_then(|(request, )| {
+let mut request: TRequest = request.into();
let accounts = take_weak!(self.accounts);
let (client, miner) = (take_weak!(self.client), take_weak!(self.miner));

@@ -83,7 +92,7 @@ impl<C, M> EthSigning for EthSigningQueueClient<C, M>
let sender = request.from;
return match sign_and_dispatch(&*client, &*miner, request, &*accounts, sender) {
Ok(hash) => to_value(&hash),
-_ => to_value(&H256::zero()),
+_ => to_value(&RpcH256::default()),
}
}

@@ -91,7 +100,7 @@ impl<C, M> EthSigning for EthSigningQueueClient<C, M>
fill_optional_fields(&mut request, &*client, &*miner);
let id = queue.add_request(request);
let result = id.wait_with_timeout();
-result.unwrap_or_else(|| to_value(&H256::new()))
+result.unwrap_or_else(|| to_value(&RpcH256::default()))
})
}
}
@@ -118,6 +127,12 @@ impl<C, M> EthSigningUnsafeClient<C, M> where
accounts: Arc::downgrade(accounts),
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C, M> EthSigning for EthSigningUnsafeClient<C, M> where
@@ -125,18 +140,23 @@ impl<C, M> EthSigning for EthSigningUnsafeClient<C, M> where
M: MinerService + 'static {

fn sign(&self, params: Params) -> Result<Value, Error> {
-from_params::<(Address, H256)>(params).and_then(|(addr, msg)| {
-to_value(&take_weak!(self.accounts).sign(addr, msg).unwrap_or(H520::zero()))
+try!(self.active());
+from_params::<(RpcH160, RpcH256)>(params).and_then(|(address, msg)| {
+let address: Address = address.into();
+let msg: H256 = msg.into();
+to_value(&take_weak!(self.accounts).sign(address, msg).ok().map_or_else(RpcH520::default, Into::into))
})
}

fn send_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(TransactionRequest, )>(params)
.and_then(|(request, )| {
+let request: TRequest = request.into();
let sender = request.from;
match sign_and_dispatch(&*take_weak!(self.client), &*take_weak!(self.miner), request, &*take_weak!(self.accounts), sender) {
Ok(hash) => to_value(&hash),
-_ => to_value(&H256::zero()),
+_ => to_value(&RpcH256::default()),
}
})
}
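
In the signing code above, the request deserialized from the JSON-RPC params (`v1::types::TransactionRequest`) is first converted into the internal helper request (`TRequest`, i.e. `v1::helpers::TransactionRequest`) before optional fields are defaulted and the transaction is dispatched. A condensed sketch of that two-type split, using simplified stand-ins (the field set and the defaults are illustrative only):

    // Wire-format request as deserialized from the RPC params (simplified stand-in).
    struct RpcTransactionRequest {
        value: Option<u64>,
        gas: Option<u64>,
    }

    // Internal helper request the signing path works with (simplified stand-in).
    struct TransactionRequest {
        value: Option<u64>,
        gas: Option<u64>,
    }

    impl From<RpcTransactionRequest> for TransactionRequest {
        fn from(r: RpcTransactionRequest) -> Self {
            TransactionRequest { value: r.value, gas: r.gas }
        }
    }

    // Analogue of fill_optional_fields: default whatever the caller left out.
    fn fill_optional_fields(request: &mut TransactionRequest) {
        if request.value.is_none() { request.value = Some(0); }
        if request.gas.is_none() { request.gas = Some(21_000); } // assumed default, illustration only
    }

    fn main() {
        let wire = RpcTransactionRequest { value: None, gas: Some(50_000) };
        // Mirrors `let mut request: TRequest = request.into();` in the diff.
        let mut request: TransactionRequest = wire.into();
        fill_optional_fields(&mut request);
        assert_eq!(request.value, Some(0));
        assert_eq!(request.gas, Some(50_000));
    }

The wire type stays concerned only with (de)serialization, while defaults, signing and dispatch operate on the helper type.
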
@@ -25,7 +25,7 @@ use ethcore::client::{MiningBlockChainClient};
use jsonrpc_core::*;
use ethcore::miner::MinerService;
use v1::traits::Ethcore;
-use v1::types::{Bytes};
+use v1::types::{Bytes, U256};
use v1::helpers::{SigningQueue, ConfirmationsQueue};
use v1::impls::error_codes;

@@ -52,56 +52,74 @@ impl<C, M> EthcoreClient<C, M> where C: MiningBlockChainClient, M: MinerService
confirmations_queue: queue,
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: MiningBlockChainClient + 'static {

fn transactions_limit(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&take_weak!(self.miner).transactions_limit())
}

fn min_gas_price(&self, _: Params) -> Result<Value, Error> {
-to_value(&take_weak!(self.miner).minimal_gas_price())
+try!(self.active());
+to_value(&U256::from(take_weak!(self.miner).minimal_gas_price()))
}

fn extra_data(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&Bytes::new(take_weak!(self.miner).extra_data()))
}

fn gas_floor_target(&self, _: Params) -> Result<Value, Error> {
-to_value(&take_weak!(self.miner).gas_floor_target())
+try!(self.active());
+to_value(&U256::from(take_weak!(self.miner).gas_floor_target()))
}

fn gas_ceil_target(&self, _: Params) -> Result<Value, Error> {
-to_value(&take_weak!(self.miner).gas_ceil_target())
+try!(self.active());
+to_value(&U256::from(take_weak!(self.miner).gas_ceil_target()))
}

fn dev_logs(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
let logs = self.logger.logs();
to_value(&logs.deref().as_slice())
}

fn dev_logs_levels(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&self.logger.levels())
}

fn net_chain(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&self.settings.chain)
}

fn net_max_peers(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&self.settings.max_peers)
}

fn net_port(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&self.settings.network_port)
}

fn node_name(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
to_value(&self.settings.name)
}

fn rpc_settings(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
let mut map = BTreeMap::new();
map.insert("enabled".to_owned(), Value::Bool(self.settings.rpc_enabled));
map.insert("interface".to_owned(), Value::String(self.settings.rpc_interface.clone()));
@@ -110,6 +128,7 @@ impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: M
}

fn default_extra_data(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
match params {
Params::None => to_value(&Bytes::new(version_data())),
_ => Err(Error::invalid_params()),
@@ -117,11 +136,12 @@ impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: M
}

fn gas_price_statistics(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
match params {
Params::None => match take_weak!(self.client).gas_price_statistics(100, 8) {
Ok(stats) => to_value(&stats
-.iter()
-.map(|x| to_value(&x).expect("x must be U256; qed"))
+.into_iter()
+.map(|x| to_value(&U256::from(x)).expect("x must be U256; qed"))
.collect::<Vec<_>>()),
_ => Err(Error::internal_error()),
},
@@ -130,6 +150,7 @@ impl<C, M> Ethcore for EthcoreClient<C, M> where M: MinerService + 'static, C: M
}

fn unsigned_transactions_count(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
match self.confirmations_queue {
None => Err(Error {
code: ErrorCode::ServerError(error_codes::SIGNER_DISABLED),
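
Several getters above now wrap the miner's internal value in the RPC `U256` before handing it to `to_value`, so the wire encoding is decided by the wrapper rather than by whatever numeric type the miner returns. A std-only sketch of the idea; the 0x-hex quantity encoding shown here is illustrative (the real wrapper derives its serialization), and `u128` stands in for the 256-bit integer:

    // Internal quantity, shown as u128 for brevity.
    type InternalU256 = u128;

    // RPC-facing wrapper (stand-in for v1::types::U256).
    struct RpcU256(InternalU256);

    impl From<InternalU256> for RpcU256 {
        fn from(v: InternalU256) -> Self { RpcU256(v) }
    }

    impl RpcU256 {
        // Illustrative encoding: JSON-RPC quantities are conventionally 0x-prefixed hex.
        fn to_json_quantity(&self) -> String {
            format!("0x{:x}", self.0)
        }
    }

    fn main() {
        let minimal_gas_price: InternalU256 = 20_000_000_000; // value the miner might report
        let rpc = RpcU256::from(minimal_gas_price);           // `U256::from(...)` in the diff
        assert_eq!(rpc.to_json_quantity(), "0x4a817c800");
    }
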
@@ -15,57 +15,74 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

/// Ethcore-specific rpc interface for operations altering the settings.
-use util::{U256, Address};
-use util::network::{NetworkService, NonReservedPeerMode};
use std::sync::{Arc, Weak};
use jsonrpc_core::*;
use ethcore::miner::MinerService;
+use ethcore::client::MiningBlockChainClient;
use ethcore::service::SyncMessage;
+use util::network::{NetworkService, NonReservedPeerMode};
use v1::traits::EthcoreSet;
-use v1::types::Bytes;
+use v1::types::{Bytes, H160, U256};

/// Ethcore-specific rpc interface for operations altering the settings.
-pub struct EthcoreSetClient<M> where
+pub struct EthcoreSetClient<C, M> where
+C: MiningBlockChainClient,
M: MinerService {

+client: Weak<C>,
miner: Weak<M>,
net: Weak<NetworkService<SyncMessage>>,
}

-impl<M> EthcoreSetClient<M> where M: MinerService {
+impl<C, M> EthcoreSetClient<C, M> where
+C: MiningBlockChainClient,
+M: MinerService {
/// Creates new `EthcoreSetClient`.
-pub fn new(miner: &Arc<M>, net: &Arc<NetworkService<SyncMessage>>) -> Self {
+pub fn new(client: &Arc<C>, miner: &Arc<M>, net: &Arc<NetworkService<SyncMessage>>) -> Self {
EthcoreSetClient {
+client: Arc::downgrade(client),
miner: Arc::downgrade(miner),
net: Arc::downgrade(net),
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

-impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
+impl<C, M> EthcoreSet for EthcoreSetClient<C, M> where
+C: MiningBlockChainClient + 'static,
+M: MinerService + 'static {

fn set_min_gas_price(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256,)>(params).and_then(|(gas_price,)| {
-take_weak!(self.miner).set_minimal_gas_price(gas_price);
+take_weak!(self.miner).set_minimal_gas_price(gas_price.into());
to_value(&true)
})
}

fn set_gas_floor_target(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256,)>(params).and_then(|(target,)| {
-take_weak!(self.miner).set_gas_floor_target(target);
+take_weak!(self.miner).set_gas_floor_target(target.into());
to_value(&true)
})
}

fn set_gas_ceil_target(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256,)>(params).and_then(|(target,)| {
-take_weak!(self.miner).set_gas_ceil_target(target);
+take_weak!(self.miner).set_gas_ceil_target(target.into());
to_value(&true)
})
}

fn set_extra_data(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(Bytes,)>(params).and_then(|(extra_data,)| {
take_weak!(self.miner).set_extra_data(extra_data.to_vec());
to_value(&true)
@@ -73,13 +90,15 @@ impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
}

fn set_author(&self, params: Params) -> Result<Value, Error> {
-from_params::<(Address,)>(params).and_then(|(author,)| {
-take_weak!(self.miner).set_author(author);
+try!(self.active());
+from_params::<(H160,)>(params).and_then(|(author,)| {
+take_weak!(self.miner).set_author(author.into());
to_value(&true)
})
}

fn set_transactions_limit(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(usize,)>(params).and_then(|(limit,)| {
take_weak!(self.miner).set_transactions_limit(limit);
to_value(&true)
@@ -87,6 +106,7 @@ impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
}

fn set_tx_gas_limit(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256,)>(params).and_then(|(limit,)| {
take_weak!(self.miner).set_tx_gas_limit(limit.into());
to_value(&true)
@@ -94,6 +114,7 @@ impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
}

fn add_reserved_peer(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(String,)>(params).and_then(|(peer,)| {
match take_weak!(self.net).add_reserved_peer(&peer) {
Ok(()) => to_value(&true),
@@ -103,6 +124,7 @@ impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
}

fn remove_reserved_peer(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(String,)>(params).and_then(|(peer,)| {
match take_weak!(self.net).remove_reserved_peer(&peer) {
Ok(()) => to_value(&true),
@@ -112,11 +134,13 @@ impl<M> EthcoreSet for EthcoreSetClient<M> where M: MinerService + 'static {
}

fn drop_non_reserved_peers(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
take_weak!(self.net).set_non_reserved_mode(NonReservedPeerMode::Deny);
to_value(&true)
}

fn accept_non_reserved_peers(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
take_weak!(self.net).set_non_reserved_mode(NonReservedPeerMode::Accept);
to_value(&true)
}
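
`EthcoreSetClient` picks up a second type parameter so it can hold a weak reference to the blockchain client and run the same keep-alive guard as the other RPC impls. A condensed sketch of the resulting shape, with stand-in traits in place of `MiningBlockChainClient` and `MinerService`:

    use std::sync::{Arc, Weak};

    trait BlockChainClient { fn keep_alive(&self); }
    trait MinerService { fn set_transactions_limit(&self, limit: usize); }

    // The client type parameter the diff threads through EthcoreSetClient.
    struct EthcoreSetClient<C, M> where C: BlockChainClient, M: MinerService {
        client: Weak<C>,
        miner: Weak<M>,
    }

    impl<C, M> EthcoreSetClient<C, M> where C: BlockChainClient, M: MinerService {
        // The constructor now takes the client too, so active() can reach keep_alive().
        fn new(client: &Arc<C>, miner: &Arc<M>) -> Self {
            EthcoreSetClient {
                client: Arc::downgrade(client),
                miner: Arc::downgrade(miner),
            }
        }
    }

    struct TestClient;
    impl BlockChainClient for TestClient { fn keep_alive(&self) {} }

    struct TestMiner;
    impl MinerService for TestMiner { fn set_transactions_limit(&self, _limit: usize) {} }

    fn main() {
        let client = Arc::new(TestClient);
        let miner = Arc::new(TestMiner);
        let _rpc = EthcoreSetClient::new(&client, &miner);
    }

Holding `Weak` rather than `Arc` keeps the RPC layer from extending the lifetime of the client or the miner; the `take_weak!`-style upgrade at call time surfaces a clean error once either has been dropped.
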
@@ -53,9 +53,10 @@ pub use self::ethcore_set::EthcoreSetClient;
pub use self::traces::TracesClient;
pub use self::rpc::RpcClient;

-use v1::types::TransactionRequest;
+use v1::helpers::TransactionRequest;
+use v1::types::H256 as NH256;
use ethcore::error::Error as EthcoreError;
-use ethcore::miner::{AccountDetails, MinerService};
+use ethcore::miner::MinerService;
use ethcore::client::MiningBlockChainClient;
use ethcore::transaction::{Action, SignedTransaction, Transaction};
use ethcore::account_provider::{AccountProvider, Error as AccountError};
@@ -77,14 +78,9 @@ mod error_codes {

fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<Value, Error>
where C: MiningBlockChainClient, M: MinerService {
-let hash = signed_transaction.hash();
+let hash = NH256::from(signed_transaction.hash());

-let import = miner.import_own_transaction(client, signed_transaction, |a: &Address| {
-AccountDetails {
-nonce: client.latest_nonce(&a),
-balance: client.latest_balance(&a),
-}
-});
+let import = miner.import_own_transaction(client, signed_transaction);

import
.map_err(transaction_error)
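
`dispatch_transaction` above no longer builds an account-details closure: `import_own_transaction` now takes only the chain client and the signed transaction, and the miner is expected to look up nonce and balance itself. A rough before/after sketch of the trait change, with heavily simplified stand-in types (this is not the full `MinerService`):

    // Simplified stand-ins; the real types live in ethcore.
    struct Client;
    struct SignedTransaction;
    struct TransactionImportResult;
    struct Error;

    // Before the refactor the method also took an account-details closure:
    //   fn import_own_transaction<T>(&self, chain: &Client, tx: SignedTransaction, fetch_account: T)
    //       -> Result<TransactionImportResult, Error>
    //       where T: Fn(&Address) -> AccountDetails;
    trait MinerService {
        // After: the miner queries nonce/balance from the chain on its own.
        fn import_own_transaction(&self, chain: &Client, tx: SignedTransaction)
            -> Result<TransactionImportResult, Error>;
    }

    struct DummyMiner;

    impl MinerService for DummyMiner {
        fn import_own_transaction(&self, _chain: &Client, _tx: SignedTransaction)
            -> Result<TransactionImportResult, Error> {
            Ok(TransactionImportResult)
        }
    }

    fn main() {
        let miner = DummyMiner;
        assert!(miner.import_own_transaction(&Client, SignedTransaction).is_ok());
    }

The same signature change is what lets the `TestMinerService` helper further down drop its `where T: Fn(&Address) -> AccountDetails` bounds.
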
@@ -18,10 +18,11 @@
use std::sync::{Arc, Weak};
use jsonrpc_core::*;
use v1::traits::Personal;
-use v1::types::TransactionRequest;
+use v1::types::{H160 as RpcH160, H256 as RpcH256, TransactionRequest};
use v1::impls::unlock_sign_and_dispatch;
+use v1::helpers::{TransactionRequest as TRequest};
use ethcore::account_provider::AccountProvider;
-use util::numbers::*;
+use util::Address;
use ethcore::client::MiningBlockChainClient;
use ethcore::miner::MinerService;

@@ -43,27 +44,36 @@ impl<C, M> PersonalClient<C, M> where C: MiningBlockChainClient, M: MinerService
signer_port: signer_port,
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBlockChainClient, M: MinerService {

fn signer_enabled(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
self.signer_port
.map(|v| to_value(&v))
.unwrap_or_else(|| to_value(&false))
}

fn accounts(&self, _: Params) -> Result<Value, Error> {
+try!(self.active());
let store = take_weak!(self.accounts);
-to_value(&store.accounts())
+to_value(&store.accounts().into_iter().map(Into::into).collect::<Vec<RpcH160>>())
}

fn new_account(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(String, )>(params).and_then(
|(pass, )| {
let store = take_weak!(self.accounts);
match store.new_account(&pass) {
-Ok(address) => to_value(&address),
+Ok(address) => to_value(&RpcH160::from(address)),
Err(_) => Err(Error::internal_error())
}
}
@@ -71,8 +81,10 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
}

fn unlock_account(&self, params: Params) -> Result<Value, Error> {
-from_params::<(Address, String, u64)>(params).and_then(
+try!(self.active());
+from_params::<(RpcH160, String, u64)>(params).and_then(
|(account, account_pass, _)|{
+let account: Address = account.into();
let store = take_weak!(self.accounts);
match store.unlock_account_temporarily(account, account_pass) {
Ok(_) => Ok(Value::Bool(true)),
@@ -82,14 +94,16 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
}

fn sign_and_send_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(TransactionRequest, String)>(params)
.and_then(|(request, password)| {
+let request: TRequest = request.into();
let sender = request.from;
let accounts = take_weak!(self.accounts);

match unlock_sign_and_dispatch(&*take_weak!(self.client), &*take_weak!(self.miner), request, &*accounts, sender, password) {
-Ok(hash) => to_value(&hash),
-_ => to_value(&H256::zero()),
+Ok(hash) => Ok(hash),
+_ => to_value(&RpcH256::default()),
}
})
}
@@ -18,14 +18,13 @@

use std::sync::{Arc, Weak};
use jsonrpc_core::*;
-use v1::traits::PersonalSigner;
-use v1::types::TransactionModification;
-use v1::impls::unlock_sign_and_dispatch;
-use v1::helpers::{SigningQueue, ConfirmationsQueue};
use ethcore::account_provider::AccountProvider;
-use util::numbers::*;
use ethcore::client::MiningBlockChainClient;
use ethcore::miner::MinerService;
+use v1::traits::PersonalSigner;
+use v1::types::{TransactionModification, TransactionConfirmation, U256};
+use v1::impls::unlock_sign_and_dispatch;
+use v1::helpers::{SigningQueue, ConfirmationsQueue};

/// Transactions confirmation (personal) rpc implementation.
pub struct SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
@@ -46,18 +45,27 @@ impl<C: 'static, M: 'static> SignerClient<C, M> where C: MiningBlockChainClient,
miner: Arc::downgrade(miner),
}
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {

fn transactions_to_confirm(&self, _params: Params) -> Result<Value, Error> {
+try!(self.active());
let queue = take_weak!(self.queue);
-to_value(&queue.requests())
+to_value(&queue.requests().into_iter().map(From::from).collect::<Vec<TransactionConfirmation>>())
}

fn confirm_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256, TransactionModification, String)>(params).and_then(
|(id, modification, pass)| {
+let id = id.into();
let accounts = take_weak!(self.accounts);
let queue = take_weak!(self.queue);
let client = take_weak!(self.client);
@@ -66,7 +74,7 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
let mut request = confirmation.transaction;
// apply modification
if let Some(gas_price) = modification.gas_price {
-request.gas_price = Some(gas_price);
+request.gas_price = Some(gas_price.into());
}

let sender = request.from;
@@ -87,10 +95,11 @@ impl<C: 'static, M: 'static> PersonalSigner for SignerClient<C, M> where C: Mini
}

fn reject_transaction(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(U256, )>(params).and_then(
|(id, )| {
let queue = take_weak!(self.queue);
-let res = queue.request_rejected(id);
+let res = queue.request_rejected(id.into());
to_value(&res.is_some())
}
)
@@ -19,12 +19,13 @@
use std::sync::{Weak, Arc};
use jsonrpc_core::*;
use std::collections::BTreeMap;
-use util::H256;
+//use util::H256;
use ethcore::client::{BlockChainClient, CallAnalytics, TransactionID, TraceId};
use ethcore::miner::MinerService;
use ethcore::transaction::{Transaction as EthTransaction, SignedTransaction, Action};
use v1::traits::Traces;
-use v1::types::{TraceFilter, LocalizedTrace, Trace, BlockNumber, Index, CallRequest, Bytes, StateDiff, VMTrace};
+use v1::helpers::CallRequest as CRequest;
+use v1::types::{TraceFilter, LocalizedTrace, Trace, BlockNumber, Index, CallRequest, Bytes, StateDiff, VMTrace, H256};

/// Traces api implementation.
pub struct TracesClient<C, M> where C: BlockChainClient, M: MinerService {
@@ -42,7 +43,7 @@ impl<C, M> TracesClient<C, M> where C: BlockChainClient, M: MinerService {
}

// TODO: share with eth.rs
-fn sign_call(&self, request: CallRequest) -> Result<SignedTransaction, Error> {
+fn sign_call(&self, request: CRequest) -> Result<SignedTransaction, Error> {
let client = take_weak!(self.client);
let miner = take_weak!(self.miner);
let from = request.from.unwrap_or(0.into());
@@ -55,10 +56,17 @@ impl<C, M> TracesClient<C, M> where C: BlockChainClient, M: MinerService {
data: request.data.map_or_else(Vec::new, |d| d.to_vec())
}.fake_sign(from))
}

+fn active(&self) -> Result<(), Error> {
+// TODO: only call every 30s at most.
+take_weak!(self.client).keep_alive();
+Ok(())
+}
}

impl<C, M> Traces for TracesClient<C, M> where C: BlockChainClient + 'static, M: MinerService + 'static {
fn filter(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(TraceFilter,)>(params)
.and_then(|(filter, )| {
let client = take_weak!(self.client);
@@ -69,6 +77,7 @@ impl<C, M> Traces for TracesClient<C, M> where C: BlockChainClient + 'static, M:
}

fn block_traces(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(BlockNumber,)>(params)
.and_then(|(block_number,)| {
let client = take_weak!(self.client);
@@ -79,21 +88,23 @@ impl<C, M> Traces for TracesClient<C, M> where C: BlockChainClient + 'static, M:
}

fn transaction_traces(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(H256,)>(params)
.and_then(|(transaction_hash,)| {
let client = take_weak!(self.client);
-let traces = client.transaction_traces(TransactionID::Hash(transaction_hash));
+let traces = client.transaction_traces(TransactionID::Hash(transaction_hash.into()));
let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect());
to_value(&traces)
})
}

fn trace(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
from_params::<(H256, Vec<Index>)>(params)
.and_then(|(transaction_hash, address)| {
let client = take_weak!(self.client);
let id = TraceId {
-transaction: TransactionID::Hash(transaction_hash),
+transaction: TransactionID::Hash(transaction_hash.into()),
address: address.into_iter().map(|i| i.value()).collect()
};
let trace = client.trace(id);
@@ -103,9 +114,11 @@ impl<C, M> Traces for TracesClient<C, M> where C: BlockChainClient + 'static, M:
}

fn call(&self, params: Params) -> Result<Value, Error> {
+try!(self.active());
trace!(target: "jsonrpc", "call: {:?}", params);
from_params(params)
.and_then(|(request, flags)| {
+let request = CallRequest::into(request);
let flags: Vec<String> = flags;
let analytics = CallAnalytics {
transaction_tracing: flags.contains(&("trace".to_owned())),
@@ -18,7 +18,7 @@
use jsonrpc_core::*;
use util::version;
use v1::traits::Web3;
-use v1::types::Bytes;
+use v1::types::{H256, Bytes};
use util::sha3::Hashable;

/// Web3 rpc implementation.
@@ -40,9 +40,9 @@ impl Web3 for Web3Client {
fn sha3(&self, params: Params) -> Result<Value, Error> {
from_params::<(Bytes,)>(params).and_then(
|(data,)| {
-let Bytes(ref v) = data;
-let sha3 = v.sha3();
-to_value(&sha3)
+let Bytes(ref vec) = data;
+let sha3 = vec.sha3();
+to_value(&H256::from(sha3))
}
)
}
@@ -34,6 +34,7 @@ use util::{U256, H256, Uint};
use jsonrpc_core::IoHandler;
use ethjson::blockchain::BlockChain;

+use v1::types::U256 as NU256;
use v1::traits::eth::{Eth, EthSigning};
use v1::impls::{EthClient, EthSigningUnsafeClient};
use v1::tests::helpers::{TestSyncProvider, Config};
@@ -199,8 +200,7 @@ const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
"durationLimit": "0x0d",
"blockReward": "0x4563918244F40000",
"registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
-"frontierCompatibilityModeLimit": "0xffffffffffffffff",
-"daoRescueSoftFork": false
+"frontierCompatibilityModeLimit": "0xffffffffffffffff"
}
}
},
@@ -330,7 +330,7 @@ fn verify_transaction_counts(name: String, chain: BlockChain) {
"jsonrpc": "2.0",
"method": "eth_getBlockTransactionCountByNumber",
"params": [
-"#.to_owned() + &::serde_json::to_string(&U256::from(num)).unwrap() + r#"
+"#.to_owned() + &::serde_json::to_string(&NU256::from(num)).unwrap() + r#"
],
"id": "# + format!("{}", *id).as_ref() + r#"
}"#;
@@ -23,7 +23,7 @@ use ethcore::client::{MiningBlockChainClient, Executed, CallAnalytics};
use ethcore::block::{ClosedBlock, IsBlock};
use ethcore::transaction::SignedTransaction;
use ethcore::receipt::Receipt;
-use ethcore::miner::{MinerService, MinerStatus, AccountDetails, TransactionImportResult};
+use ethcore::miner::{MinerService, MinerStatus, TransactionImportResult};

/// Test miner service.
pub struct TestMinerService {
@@ -130,14 +130,13 @@ impl MinerService for TestMinerService {
}

/// Imports transactions to transaction queue.
-fn import_transactions<T>(&self, _chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>, fetch_account: T) ->
-Vec<Result<TransactionImportResult, Error>>
-where T: Fn(&Address) -> AccountDetails {
+fn import_external_transactions(&self, _chain: &MiningBlockChainClient, transactions: Vec<SignedTransaction>) ->
+Vec<Result<TransactionImportResult, Error>> {
// lets assume that all txs are valid
self.imported_transactions.lock().unwrap().extend_from_slice(&transactions);

for sender in transactions.iter().filter_map(|t| t.sender().ok()) {
-let nonce = self.last_nonce(&sender).unwrap_or(fetch_account(&sender).nonce);
+let nonce = self.last_nonce(&sender).expect("last_nonce must be populated in tests");
self.last_nonces.write().unwrap().insert(sender, nonce + U256::from(1));
}
transactions
@@ -147,9 +146,8 @@ impl MinerService for TestMinerService {
}

/// Imports transactions to transaction queue.
-fn import_own_transaction<T>(&self, chain: &MiningBlockChainClient, transaction: SignedTransaction, _fetch_account: T) ->
-Result<TransactionImportResult, Error>
-where T: Fn(&Address) -> AccountDetails {
+fn import_own_transaction(&self, chain: &MiningBlockChainClient, transaction: SignedTransaction) ->
+Result<TransactionImportResult, Error> {

// keep the pending nonces up to date
if let Ok(ref sender) = transaction.sender() {
|
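With the fetch_account closure removed from the trait, the test miner now derives the next nonce from its own last_nonces map and expects tests to seed it. A simplified sketch of that bookkeeping using std collections (NonceBook and record_import are illustrative stand-ins):

    use std::collections::HashMap;

    // Illustrative sketch: the test miner tracks the next nonce per sender itself.
    struct NonceBook {
        last_nonces: HashMap<String, u64>,
    }

    impl NonceBook {
        fn record_import(&mut self, sender: &str) {
            // Mirrors the helper's expectation that the nonce was seeded up front by the test.
            let nonce = *self.last_nonces.get(sender).expect("last_nonce must be populated in tests");
            self.last_nonces.insert(sender.to_owned(), nonce + 1);
        }
    }

    fn main() {
        let mut book = NonceBook { last_nonces: HashMap::new() };
        book.last_nonces.insert("0xabc".to_owned(), 5);
        book.record_import("0xabc");
        assert_eq!(book.last_nonces["0xabc"], 6);
    }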
@@ -20,6 +20,7 @@ use jsonrpc_core::IoHandler;
 use v1::{EthcoreSet, EthcoreSetClient};
 use ethcore::miner::MinerService;
 use ethcore::service::SyncMessage;
+use ethcore::client::TestBlockChainClient;
 use v1::tests::helpers::TestMinerService;
 use util::numbers::*;
 use util::network::{NetworkConfiguration, NetworkService};
@@ -29,20 +30,25 @@ fn miner_service() -> Arc<TestMinerService> {
 	Arc::new(TestMinerService::default())
 }

+fn client_service() -> Arc<TestBlockChainClient> {
+	Arc::new(TestBlockChainClient::default())
+}
+
 fn network_service() -> Arc<NetworkService<SyncMessage>> {
 	Arc::new(NetworkService::new(NetworkConfiguration::new()).unwrap())
 }

-fn ethcore_set_client(miner: &Arc<TestMinerService>, net: &Arc<NetworkService<SyncMessage>>) -> EthcoreSetClient<TestMinerService> {
-	EthcoreSetClient::new(miner, net)
+fn ethcore_set_client(client: &Arc<TestBlockChainClient>, miner: &Arc<TestMinerService>, net: &Arc<NetworkService<SyncMessage>>) -> EthcoreSetClient<TestBlockChainClient, TestMinerService> {
+	EthcoreSetClient::new(client, miner, net)
 }

 #[test]
 fn rpc_ethcore_set_min_gas_price() {
 	let miner = miner_service();
+	let client = client_service();
 	let network = network_service();
 	let io = IoHandler::new();
-	io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
+	io.add_delegate(ethcore_set_client(&client, &miner, &network).to_delegate());

 	let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setMinGasPrice", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
 	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
@@ -50,12 +56,14 @@ fn rpc_ethcore_set_min_gas_price() {
 	assert_eq!(io.handle_request(request), Some(response.to_owned()));
 	assert_eq!(miner.minimal_gas_price(), U256::from_str("cd1722f3947def4cf144679da39c4c32bdc35681").unwrap());
 }

 #[test]
 fn rpc_ethcore_set_gas_floor_target() {
 	let miner = miner_service();
+	let client = client_service();
 	let network = network_service();
 	let io = IoHandler::new();
-	io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
+	io.add_delegate(ethcore_set_client(&client, &miner, &network).to_delegate());

 	let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setGasFloorTarget", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
 	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
@@ -67,9 +75,10 @@ fn rpc_ethcore_set_gas_floor_target() {
 #[test]
 fn rpc_ethcore_set_extra_data() {
 	let miner = miner_service();
+	let client = client_service();
 	let network = network_service();
 	let io = IoHandler::new();
-	io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
+	io.add_delegate(ethcore_set_client(&client, &miner, &network).to_delegate());

 	let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setExtraData", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
 	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
@@ -81,9 +90,10 @@ fn rpc_ethcore_set_extra_data() {
 #[test]
 fn rpc_ethcore_set_author() {
 	let miner = miner_service();
+	let client = client_service();
 	let network = network_service();
 	let io = IoHandler::new();
-	io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
+	io.add_delegate(ethcore_set_client(&client, &miner, &network).to_delegate());

 	let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setAuthor", "params":["0xcd1722f3947def4cf144679da39c4c32bdc35681"], "id": 1}"#;
 	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
@@ -95,9 +105,10 @@ fn rpc_ethcore_set_author() {
 #[test]
 fn rpc_ethcore_set_transactions_limit() {
 	let miner = miner_service();
+	let client = client_service();
 	let network = network_service();
 	let io = IoHandler::new();
-	io.add_delegate(ethcore_set_client(&miner, &network).to_delegate());
+	io.add_delegate(ethcore_set_client(&client, &miner, &network).to_delegate());

 	let request = r#"{"jsonrpc": "2.0", "method": "ethcore_setTransactionsLimit", "params":[10240240], "id": 1}"#;
 	let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#;
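Every test above now builds the extra TestBlockChainClient and threads it through the three-argument ethcore_set_client helper. The wiring pattern, sketched with placeholder Arc-wrapped services (all names below are stand-ins, not the crate's types):

    use std::sync::Arc;

    // Illustrative placeholder services standing in for the test client, miner and network.
    #[derive(Default)]
    struct Client;
    #[derive(Default)]
    struct Miner;
    #[derive(Default)]
    struct Network;

    // Mirrors the shape of the new three-argument test constructor: every service is shared via Arc.
    struct SetClient {
        client: Arc<Client>,
        miner: Arc<Miner>,
        net: Arc<Network>,
    }

    impl SetClient {
        fn new(client: &Arc<Client>, miner: &Arc<Miner>, net: &Arc<Network>) -> Self {
            SetClient { client: client.clone(), miner: miner.clone(), net: net.clone() }
        }
    }

    fn main() {
        let client = Arc::new(Client::default());
        let miner = Arc::new(Miner::default());
        let net = Arc::new(Network::default());
        let set = SetClient::new(&client, &miner, &net);
        // Both the test and the delegate now hold references to the same client instance.
        assert!(Arc::ptr_eq(&client, &set.client));
        let _ = (&set.miner, &set.net);
    }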
@@ -23,9 +23,7 @@ use ethcore::client::TestBlockChainClient;
 use ethcore::transaction::{Transaction, Action};
 use v1::{SignerClient, PersonalSigner};
 use v1::tests::helpers::TestMinerService;
-use v1::helpers::{SigningQueue, ConfirmationsQueue};
-use v1::types::TransactionRequest;
+use v1::helpers::{SigningQueue, ConfirmationsQueue, TransactionRequest};

 struct PersonalSignerTester {
 	queue: Arc<ConfirmationsQueue>,
@@ -15,8 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use serde::{Serialize, Serializer};
-use util::numbers::*;
-use v1::types::{Bytes, Transaction, OptionalValue};
+use v1::types::{Bytes, Transaction, H160, H256, H2048, U256};

 /// Block Transactions
 #[derive(Debug)]
@@ -41,7 +40,7 @@ impl Serialize for BlockTransactions {
 #[derive(Debug, Serialize)]
 pub struct Block {
 	/// Hash of the block
-	pub hash: OptionalValue<H256>,
+	pub hash: Option<H256>,
 	/// Hash of the parent
 	#[serde(rename="parentHash")]
 	pub parent_hash: H256,
@@ -49,10 +48,10 @@ pub struct Block {
 	#[serde(rename="sha3Uncles")]
 	pub uncles_hash: H256,
 	/// Authors address
-	pub author: Address,
+	pub author: H160,
 	// TODO: get rid of this one
 	/// ?
-	pub miner: Address,
+	pub miner: H160,
 	/// State root hash
 	#[serde(rename="stateRoot")]
 	pub state_root: H256,
@@ -63,7 +62,7 @@ pub struct Block {
 	#[serde(rename="receiptsRoot")]
 	pub receipts_root: H256,
 	/// Block number
-	pub number: OptionalValue<U256>,
+	pub number: Option<U256>,
 	/// Gas Used
 	#[serde(rename="gasUsed")]
 	pub gas_used: U256,
@@ -95,9 +94,8 @@ pub struct Block {
 #[cfg(test)]
 mod tests {
 	use serde_json;
-	use util::numbers::*;
-	use v1::types::{Transaction, Bytes, OptionalValue};
-	use super::*;
+	use v1::types::{Transaction, H160, H256, H2048, Bytes, U256};
+	use super::{Block, BlockTransactions};

 	#[test]
 	fn test_serialize_block_transactions() {
@@ -105,7 +103,7 @@ mod tests {
 		let serialized = serde_json::to_string(&t).unwrap();
 		assert_eq!(serialized, r#"[{"hash":"0x0000000000000000000000000000000000000000000000000000000000000000","nonce":"0x00","blockHash":null,"blockNumber":null,"transactionIndex":null,"from":"0x0000000000000000000000000000000000000000","to":null,"value":"0x00","gasPrice":"0x00","gas":"0x00","input":"0x","creates":null}]"#);

-		let t = BlockTransactions::Hashes(vec![H256::default()]);
+		let t = BlockTransactions::Hashes(vec![H256::default().into()]);
 		let serialized = serde_json::to_string(&t).unwrap();
 		assert_eq!(serialized, r#"["0x0000000000000000000000000000000000000000000000000000000000000000"]"#);
 	}
@@ -113,15 +111,15 @@ mod tests {
 	#[test]
 	fn test_serialize_block() {
 		let block = Block {
-			hash: OptionalValue::Value(H256::default()),
+			hash: Some(H256::default()),
 			parent_hash: H256::default(),
 			uncles_hash: H256::default(),
-			author: Address::default(),
-			miner: Address::default(),
+			author: H160::default(),
+			miner: H160::default(),
 			state_root: H256::default(),
 			transactions_root: H256::default(),
 			receipts_root: H256::default(),
-			number: OptionalValue::Value(U256::default()),
+			number: Some(U256::default()),
 			gas_used: U256::default(),
 			gas_limit: U256::default(),
 			extra_data: Bytes::default(),
@@ -131,7 +129,7 @@ mod tests {
 			total_difficulty: U256::default(),
 			seal_fields: vec![Bytes::default(), Bytes::default()],
 			uncles: vec![],
-			transactions: BlockTransactions::Hashes(vec![])
+			transactions: BlockTransactions::Hashes(vec![].into())
 		};

 		let serialized = serde_json::to_string(&block).unwrap();
@@ -42,6 +42,12 @@ impl From<Vec<u8>> for Bytes {
 	}
 }

+impl Into<Vec<u8>> for Bytes {
+	fn into(self) -> Vec<u8> {
+		self.0
+	}
+}
+
 impl Serialize for Bytes {
 	fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
 	where S: Serializer {
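The added Into<Vec<u8>> impl lets callers unwrap a Bytes value back into its raw vector with .into(). The same newtype pattern in a self-contained sketch (this local Bytes is a stand-in, not the crate's type):

    // Illustrative sketch of the newtype-unwrapping pattern added for Bytes.
    #[derive(Debug, Clone, PartialEq)]
    struct Bytes(Vec<u8>);

    impl From<Vec<u8>> for Bytes {
        fn from(v: Vec<u8>) -> Self {
            Bytes(v)
        }
    }

    // Matches the style of the diff: an explicit Into impl that gives back the raw vector.
    impl Into<Vec<u8>> for Bytes {
        fn into(self) -> Vec<u8> {
            self.0
        }
    }

    fn main() {
        let b: Bytes = vec![0x12, 0x34, 0x56].into();
        let raw: Vec<u8> = b.clone().into();
        assert_eq!(raw, vec![0x12, 0x34, 0x56]);
        println!("{:?}", b);
    }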
@@ -14,17 +14,16 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use util::hash::Address;
-use util::numbers::U256;
-use v1::types::Bytes;
+use v1::helpers::CallRequest as Request;
+use v1::types::{Bytes, H160, U256};

 /// Call request
 #[derive(Debug, Default, PartialEq, Deserialize)]
 pub struct CallRequest {
 	/// From
-	pub from: Option<Address>,
+	pub from: Option<H160>,
 	/// To
-	pub to: Option<Address>,
+	pub to: Option<H160>,
 	/// Gas Price
 	#[serde(rename="gasPrice")]
 	pub gas_price: Option<U256>,
@@ -38,18 +37,30 @@ pub struct CallRequest {
 	pub nonce: Option<U256>,
 }

+impl Into<Request> for CallRequest {
+	fn into(self) -> Request {
+		Request {
+			from: self.from.map(Into::into),
+			to: self.to.map(Into::into),
+			gas_price: self.gas_price.map(Into::into),
+			gas: self.gas.map(Into::into),
+			value: self.value.map(Into::into),
+			data: self.data.map(Into::into),
+			nonce: self.nonce.map(Into::into),
+		}
+	}
+}
+
 #[cfg(test)]
 mod tests {
 	use std::str::FromStr;
 	use rustc_serialize::hex::FromHex;
 	use serde_json;
-	use util::numbers::{U256};
-	use util::hash::Address;
-	use v1::types::Bytes;
-	use super::*;
+	use v1::types::{U256, H160};
+	use super::CallRequest;

 	#[test]
-	fn transaction_request_deserialize() {
+	fn call_request_deserialize() {
 		let s = r#"{
 			"from":"0x0000000000000000000000000000000000000001",
 			"to":"0x0000000000000000000000000000000000000002",
@@ -62,18 +73,18 @@ mod tests {
 		let deserialized: CallRequest = serde_json::from_str(s).unwrap();

 		assert_eq!(deserialized, CallRequest {
-			from: Some(Address::from(1)),
-			to: Some(Address::from(2)),
+			from: Some(H160::from(1)),
+			to: Some(H160::from(2)),
 			gas_price: Some(U256::from(1)),
 			gas: Some(U256::from(2)),
 			value: Some(U256::from(3)),
-			data: Some(Bytes::new(vec![0x12, 0x34, 0x56])),
+			data: Some(vec![0x12, 0x34, 0x56].into()),
 			nonce: Some(U256::from(4)),
 		});
 	}

 	#[test]
-	fn transaction_request_deserialize2() {
+	fn call_request_deserialize2() {
 		let s = r#"{
 			"from": "0xb60e8dd61c5d32be8058bb8eb970870f07233155",
 			"to": "0xd46e8dd67c5d32be8058bb8eb970870f07244567",
@@ -85,23 +96,23 @@ mod tests {
 		let deserialized: CallRequest = serde_json::from_str(s).unwrap();

 		assert_eq!(deserialized, CallRequest {
-			from: Some(Address::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap()),
-			to: Some(Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
+			from: Some(H160::from_str("b60e8dd61c5d32be8058bb8eb970870f07233155").unwrap()),
+			to: Some(H160::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap()),
 			gas_price: Some(U256::from_str("9184e72a000").unwrap()),
 			gas: Some(U256::from_str("76c0").unwrap()),
 			value: Some(U256::from_str("9184e72a").unwrap()),
-			data: Some(Bytes::new("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap())),
+			data: Some("d46e8dd67c5d32be8d46e8dd67c5d32be8058bb8eb970870f072445675058bb8eb970870f072445675".from_hex().unwrap().into()),
 			nonce: None
 		});
 	}

 	#[test]
-	fn transaction_request_deserialize_empty() {
+	fn call_request_deserialize_empty() {
 		let s = r#"{"from":"0x0000000000000000000000000000000000000001"}"#;
 		let deserialized: CallRequest = serde_json::from_str(s).unwrap();

 		assert_eq!(deserialized, CallRequest {
-			from: Some(Address::from(1)),
+			from: Some(H160::from(1)),
 			to: None,
 			gas_price: None,
 			gas: None,
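The new impl converts the deserialized RPC CallRequest into the internal helper request by mapping each optional field through Into. A self-contained sketch of that field-by-field conversion (RpcRequest and InternalRequest are illustrative stand-ins with simplified field types):

    // Illustrative RPC-facing request with "wire" field types.
    struct RpcRequest {
        from: Option<[u8; 20]>,
        value: Option<u64>,
    }

    // Illustrative internal request the RPC layer converts into.
    struct InternalRequest {
        from: Option<Vec<u8>>,
        value: Option<u128>,
    }

    impl Into<InternalRequest> for RpcRequest {
        fn into(self) -> InternalRequest {
            InternalRequest {
                // Convert the inner value only when it is present, field by field.
                from: self.from.map(|a| a.to_vec()),
                value: self.value.map(Into::into),
            }
        }
    }

    fn main() {
        let rpc = RpcRequest { from: Some([0u8; 20]), value: None };
        let internal: InternalRequest = rpc.into();
        assert_eq!(internal.from.map(|v| v.len()), Some(20));
        assert!(internal.value.is_none());
    }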
@@ -17,10 +17,9 @@
 use serde::{Deserialize, Deserializer, Error};
 use serde_json::value;
 use jsonrpc_core::Value;
-use util::numbers::*;
-use v1::types::BlockNumber;
 use ethcore::filter::Filter as EthFilter;
 use ethcore::client::BlockID;
+use v1::types::{BlockNumber, H160, H256};

 /// Variadic value
 #[derive(Debug, PartialEq, Clone)]
@@ -49,7 +48,7 @@ impl<T> Deserialize for VariadicValue<T> where T: Deserialize {
 }

 /// Filter Address
-pub type FilterAddress = VariadicValue<Address>;
+pub type FilterAddress = VariadicValue<H160>;
 /// Topic
 pub type Topic = VariadicValue<H256>;

@@ -76,14 +75,14 @@ impl Into<EthFilter> for Filter {
 			to_block: self.to_block.map_or_else(|| BlockID::Latest, Into::into),
 			address: self.address.and_then(|address| match address {
 				VariadicValue::Null => None,
-				VariadicValue::Single(a) => Some(vec![a]),
-				VariadicValue::Multiple(a) => Some(a)
+				VariadicValue::Single(a) => Some(vec![a.into()]),
+				VariadicValue::Multiple(a) => Some(a.into_iter().map(Into::into).collect())
 			}),
 			topics: {
 				let mut iter = self.topics.map_or_else(Vec::new, |topics| topics.into_iter().take(4).map(|topic| match topic {
 					VariadicValue::Null => None,
-					VariadicValue::Single(t) => Some(vec![t]),
-					VariadicValue::Multiple(t) => Some(t)
+					VariadicValue::Single(t) => Some(vec![t.into()]),
+					VariadicValue::Multiple(t) => Some(t.into_iter().map(Into::into).collect())
 				}).filter_map(|m| m).collect()).into_iter();
 				vec![iter.next(), iter.next(), iter.next(), iter.next()]
 			}
@@ -104,11 +103,11 @@ mod tests {
 		let s = r#"["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", null, ["0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b", "0x0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc"]]"#;
 		let deserialized: Vec<Topic> = serde_json::from_str(s).unwrap();
 		assert_eq!(deserialized, vec![
-			VariadicValue::Single(H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap()),
+			VariadicValue::Single(H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap().into()),
 			VariadicValue::Null,
 			VariadicValue::Multiple(vec![
-				H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap(),
-				H256::from_str("0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc").unwrap()
+				H256::from_str("000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b").unwrap().into(),
+				H256::from_str("0000000000000000000000000aff3454fce5edbc8cca8697c15331677e6ebccc").unwrap().into(),
 			])
 		]);
 	}
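The filter conversion now pushes each address and topic through Into so the RPC-layer hash types become util hashes. A compact sketch of the VariadicValue flattening, generic over any convertible element type (flatten is a made-up helper, not part of the crate):

    // Illustrative sketch of the VariadicValue flattening used by the filter conversion.
    #[derive(Debug, Clone, PartialEq)]
    enum VariadicValue<T> {
        Null,
        Single(T),
        Multiple(Vec<T>),
    }

    // Turn a variadic RPC value into a list of the internal type, converting each element.
    fn flatten<T, U>(value: VariadicValue<T>) -> Option<Vec<U>>
    where
        T: Into<U>,
    {
        match value {
            VariadicValue::Null => None,
            VariadicValue::Single(a) => Some(vec![a.into()]),
            VariadicValue::Multiple(a) => Some(a.into_iter().map(Into::into).collect()),
        }
    }

    fn main() {
        let topics: VariadicValue<u32> = VariadicValue::Multiple(vec![1, 2, 3]);
        let flat: Option<Vec<u64>> = flatten(topics);
        assert_eq!(flat, Some(vec![1u64, 2, 3]));
        assert_eq!(flatten::<u32, u64>(VariadicValue::Null), None);
    }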
rpc/src/v1/types/hash.rs (new file, 145 lines)
@@ -0,0 +1,145 @@
+// Copyright 2015, 2016 Ethcore (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::fmt;
+use std::str::FromStr;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use serde;
+use rustc_serialize::hex::{ToHex, FromHex};
+use util::{H64 as Eth64, H256 as EthH256, H520 as EthH520, H2048 as Eth2048, Address};
+
+macro_rules! impl_hash {
+	($name: ident, $other: ident, $size: expr) => {
+		/// Hash serialization
+		#[derive(Eq)]
+		pub struct $name([u8; $size]);
+
+		impl Default for $name {
+			fn default() -> Self {
+				$name([0; $size])
+			}
+		}
+
+		impl fmt::Debug for $name {
+			fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> {
+				write!(f, "{}", self.0.to_hex())
+			}
+		}
+
+		impl<T> From<T> for $name where $other: From<T> {
+			fn from(o: T) -> Self {
+				$name($other::from(o).0)
+			}
+		}
+
+		impl FromStr for $name {
+			type Err = <$other as FromStr>::Err;
+
+			fn from_str(s: &str) -> Result<Self, Self::Err> {
+				$other::from_str(s).map(|x| $name(x.0))
+			}
+		}
+
+		impl Into<$other> for $name {
+			fn into(self) -> $other {
+				$other(self.0)
+			}
+		}
+
+		impl PartialEq for $name {
+			fn eq(&self, other: &Self) -> bool {
+				let self_ref: &[u8] = &self.0;
+				let other_ref: &[u8] = &other.0;
+				self_ref == other_ref
+			}
+		}
+
+		impl PartialOrd for $name {
+			fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+				let self_ref: &[u8] = &self.0;
+				let other_ref: &[u8] = &other.0;
+				self_ref.partial_cmp(other_ref)
+			}
+		}
+
+		impl Ord for $name {
+			fn cmp(&self, other: &Self) -> Ordering {
+				let self_ref: &[u8] = &self.0;
+				let other_ref: &[u8] = &other.0;
+				self_ref.cmp(other_ref)
+			}
+		}
+
+		impl Hash for $name {
+			fn hash<H>(&self, state: &mut H) where H: Hasher {
+				let self_ref: &[u8] = &self.0;
+				Hash::hash(self_ref, state)
+			}
+		}
+
+		impl Clone for $name {
+			fn clone(&self) -> Self {
+				let mut r = [0; $size];
+				r.copy_from_slice(&self.0);
+				$name(r)
+			}
+		}
+
+		impl serde::Serialize for $name {
+			fn serialize<S>(&self, serializer: &mut S) -> Result<(), S::Error>
+			where S: serde::Serializer {
+				let mut hex = "0x".to_owned();
+				hex.push_str(&self.0.to_hex());
+				serializer.serialize_str(&hex)
+			}
+		}
+
+		impl serde::Deserialize for $name {
+			fn deserialize<D>(deserializer: &mut D) -> Result<$name, D::Error> where D: serde::Deserializer {
+				struct HashVisitor;
+
+				impl serde::de::Visitor for HashVisitor {
+					type Value = $name;
+
+					fn visit_str<E>(&mut self, value: &str) -> Result<Self::Value, E> where E: serde::Error {
+						match value[2..].from_hex() {
+							Ok(ref v) if v.len() == $size => {
+								let mut result = [0u8; $size];
+								result.copy_from_slice(v);
+								Ok($name(result))
+							},
+							Ok(_) => Err(serde::Error::custom("Invalid length.")),
+							_ => Err(serde::Error::custom("Invalid hex value."))
+						}
+					}

+					fn visit_string<E>(&mut self, value: String) -> Result<Self::Value, E> where E: serde::Error {
+						self.visit_str(value.as_ref())
+					}
+				}
+
+				deserializer.deserialize(HashVisitor)
+			}
+		}
+	}
+}
+
+impl_hash!(H64, Eth64, 8);
+impl_hash!(H160, Address, 20);
+impl_hash!(H256, EthH256, 32);
+impl_hash!(H520, EthH520, 65);
+impl_hash!(H2048, Eth2048, 256);
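impl_hash! generates fixed-size wrappers (H64, H160, H256, H520, H2048) that serialize as 0x-prefixed hex and convert to and from the util hash types. A standalone sketch of the hex round-trip those serde impls perform, using a 4-byte stand-in and only the standard library (H32, encode and decode are illustrative, not the macro's actual output):

    // Illustrative 4-byte stand-in for the fixed-size hash wrappers generated by impl_hash!.
    #[derive(Debug, PartialEq)]
    struct H32([u8; 4]);

    // Encode as the RPC wire format: "0x" followed by lowercase hex.
    fn encode(h: &H32) -> String {
        let mut s = String::from("0x");
        for b in h.0.iter() {
            s.push_str(&format!("{:02x}", b));
        }
        s
    }

    // Decode, rejecting wrong lengths and bad hex much like the visitor above does.
    fn decode(s: &str) -> Result<H32, String> {
        let hex = s.strip_prefix("0x").ok_or("missing 0x prefix")?;
        if hex.len() != 8 {
            return Err("Invalid length.".to_owned());
        }
        let mut out = [0u8; 4];
        for (i, chunk) in hex.as_bytes().chunks(2).enumerate() {
            let byte = std::str::from_utf8(chunk)
                .ok()
                .and_then(|c| u8::from_str_radix(c, 16).ok())
                .ok_or("Invalid hex value.")?;
            out[i] = byte;
        }
        Ok(H32(out))
    }

    fn main() {
        let h = H32([0xde, 0xad, 0xbe, 0xef]);
        let s = encode(&h);
        assert_eq!(s, "0xdeadbeef");
        assert_eq!(decode(&s).unwrap(), h);
    }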
Some files were not shown because too many files have changed in this diff.