Merge branch 'master' into Fix-4858

commit 50495c6898

.github/ISSUE_TEMPLATE.md (vendored, new file), 12 lines
@@ -0,0 +1,12 @@
+_Before filing a new issue, please **provide the following information**._
+
+> I'm running:
+>
+> - **Parity version**: 0.0.0
+> - **Operating system**: Windows / MacOS / Linux
+> - **And installed**: via installer / homebrew / binaries / from source
+
+_Your issue description goes here below. Try to include **actual** vs. **expected behavior** and **steps to reproduce** the issue._
+
+---
+

Cargo.lock (generated), 132 lines changed
@@ -125,10 +125,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "bigint"
-version = "3.0.0"
+version = "4.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -195,7 +196,7 @@ name = "bloomable"
 version = "0.1.0"
 dependencies = [
  "ethcore-bigint 0.1.3",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -301,6 +302,7 @@ dependencies = [
  "ethcore-util 1.8.0",
  "ethjson 0.1.0",
  "rlp 0.2.0",
+ "rlp_derive 0.1.0",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -345,7 +347,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "crunchy"
-version = "0.1.3"
+version = "0.1.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -470,14 +472,14 @@ dependencies = [
  "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
 name = "ethash"
 version = "1.8.0"
 dependencies = [
- "crunchy 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -526,6 +528,7 @@ dependencies = [
  "price-info 1.7.0",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
+ "rlp_derive 0.1.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "semver 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -542,9 +545,10 @@ dependencies = [
 name = "ethcore-bigint"
 version = "0.1.3"
 dependencies = [
- "bigint 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bigint 4.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
+ "plain_hasher 0.1.0",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -652,6 +656,7 @@ dependencies = [
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
+ "rlp_derive 0.1.0",
  "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -702,7 +707,7 @@ dependencies = [
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -723,6 +728,7 @@ dependencies = [
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contracts 0.1.0",
  "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -772,7 +778,6 @@ dependencies = [
  "ethcore-devtools 1.8.0",
  "ethcore-logger 1.8.0",
  "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -787,7 +792,7 @@ dependencies = [
  "sha3 0.1.0",
  "target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -800,7 +805,7 @@ dependencies = [
  "ethkey 0.2.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "subtle 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -826,7 +831,7 @@ dependencies = [
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -862,7 +867,7 @@ dependencies = [
  "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -931,6 +936,7 @@ dependencies = [
  "docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore 1.8.0",
  "ethcore-util 1.8.0",
+ "ethjson 0.1.0",
  "evm 0.1.0",
  "panic_hook 0.1.0",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -943,7 +949,7 @@ dependencies = [
 name = "evmjit"
 version = "1.8.0"
 dependencies = [
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1216,7 +1222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "jsonrpc-core"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1228,7 +1234,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-http-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1241,7 +1247,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-ipc-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1254,7 +1260,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-macros"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1264,7 +1270,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-minihttp-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1279,7 +1285,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-pubsub"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1289,7 +1295,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-server-utils"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "globset 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1302,7 +1308,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-tcp-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1316,7 +1322,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-ws-server"
 version = "7.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#4d3ec22c7aba426988a678b489b2791e95283699"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#b5490782884218c5ccf74cd61e54904cb3a3aeed"
 dependencies = [
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1545,7 +1551,7 @@ version = "0.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "ring 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -1580,7 +1586,7 @@ version = "0.1.0"
 dependencies = [
  "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "ethcore-util 1.8.0",
+ "ethcore-bigint 0.1.3",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "native-contract-generator 0.1.0",
 ]
@@ -1609,6 +1615,34 @@ dependencies = [
  "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "node-filter"
+version = "1.8.0"
+dependencies = [
+ "ethcore 1.8.0",
+ "ethcore-io 1.8.0",
+ "ethcore-network 1.8.0",
+ "ethcore-util 1.8.0",
+ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "native-contracts 0.1.0",
+]
+
+[[package]]
+name = "node-health"
+version = "0.1.0"
+dependencies = [
+ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-reactor 0.1.0",
+ "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "serde_derive 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "nodrop"
 version = "0.1.9"
@@ -1786,7 +1820,7 @@ dependencies = [
 
 [[package]]
 name = "parity"
-version = "1.7.0"
+version = "1.8.0"
 dependencies = [
  "ansi_term 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1817,6 +1851,8 @@ dependencies = [
  "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "node-filter 1.8.0",
+ "node-health 0.1.0",
  "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "panic_hook 0.1.0",
@@ -1858,14 +1894,14 @@ dependencies = [
  "ethcore-util 1.8.0",
  "fetch 0.1.0",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
- "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "ntp 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "node-health 0.1.0",
  "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-hash-fetch 1.8.0",
  "parity-reactor 0.1.0",
@@ -1970,6 +2006,7 @@ dependencies = [
  "fetch 0.1.0",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "futures-cpupool 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "itertools 0.5.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
@@ -1979,6 +2016,7 @@ dependencies = [
  "jsonrpc-ws-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "multihash 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "node-health 0.1.0",
  "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-reactor 0.1.0",
  "parity-updater 1.8.0",
@@ -2052,7 +2090,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#06f77d96f1b1a771d643f07b60c802d448b6415c"
+source = "git+https://github.com/paritytech/js-precompiled.git#d809723e58bcb36c0f8d2eca5ca94abbb3690544"
 dependencies = [
  "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2113,7 +2151,7 @@ dependencies = [
  "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "smallvec 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
- "tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -2191,6 +2229,13 @@ name = "pkg-config"
 version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
+[[package]]
+name = "plain_hasher"
+version = "0.1.0"
+dependencies = [
+ "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "podio"
 version = "0.1.5"
@@ -2208,10 +2253,10 @@ dependencies = [
 name = "price-info"
 version = "1.7.0"
 dependencies = [
- "ethcore-util 1.8.0",
  "fetch 0.1.0",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -2304,7 +2349,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
 name = "quote"
-version = "0.3.10"
+version = "0.3.15"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -2395,6 +2440,15 @@ dependencies = [
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "rlp_derive"
+version = "0.1.0"
+dependencies = [
+ "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rlp 0.2.0",
+ "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "rocksdb"
 version = "0.4.5"
@@ -2452,7 +2506,7 @@ dependencies = [
 name = "rpc-cli"
 version = "1.4.0"
 dependencies = [
- "bigint 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bigint 4.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-util 1.8.0",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-rpc 1.8.0",
@@ -2587,7 +2641,7 @@ name = "serde_derive"
 version = "1.0.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive_internals 0.15.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "syn 0.11.11 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2729,7 +2783,7 @@ name = "syn"
 version = "0.11.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)",
+ "quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)",
  "synom 0.11.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "unicode-xid 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2863,7 +2917,7 @@ dependencies = [
 
 [[package]]
 name = "tiny-keccak"
-version = "1.2.1"
+version = "1.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
 [[package]]
@@ -3230,7 +3284,7 @@ dependencies = [
 "checksum backtrace-sys 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3a0d842ea781ce92be2bf78a9b38883948542749640b8378b3b2f03d1fd9f1ff"
 "checksum base-x 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "2f59103b47307f76e03bef1633aec7fa9e29bfb5aa6daf5a334f94233c71f6c1"
 "checksum base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1b9605ba46d61df0410d8ac686b0007add8172eba90e8e909c347856fe794d8c"
-"checksum bigint 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1d0673c930652d3d4d6dcd5c45b5db4fa5f8f33994d7323618c43c083b223e8c"
+"checksum bigint 4.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f56c9f1cd09cdcafcccdab1fd58797d39b7d4d203238b2e3768807590723bdf0"
 "checksum bincode 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e103c8b299b28a9c6990458b7013dc4a8356a9b854c51b9883241f5866fac36e"
 "checksum bit-set 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e6e1e6fb1c9e3d6fcdec57216a74eaa03e41f52a22f13a16438251d8e88b89da"
 "checksum bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d9bf6104718e80d7b26a68fdbacff3481cfc05df670821affc7e9cbc1884400c"
@@ -3255,7 +3309,7 @@ dependencies = [
 "checksum core-foundation 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "20a6d0448d3a99d977ae4a2aa5a98d886a923e863e81ad9ff814645b6feb3bbd"
 "checksum core-foundation-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "05eed248dc504a5391c63794fe4fb64f46f071280afaa1b73308f3c0ce4574c5"
 "checksum crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "0c5ea215664ca264da8a9d9c3be80d2eaf30923c259d03e870388eb927508f97"
-"checksum crunchy 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "e6aa9cb5f2d7bffc4eecfaf924fe450549dc4f0c3a6502298dc24f968b1eabbe"
+"checksum crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "a2f4a431c5c9f662e1200b7c7f02c34e91361150e382089a8f2dec3ba680cbda"
 "checksum crypt32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e34988f7e069e0b2f3bfc064295161e489b2d4e04a2e4248fb94360cdf00b4ec"
 "checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "<none>"
 "checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9"
@@ -3383,7 +3437,7 @@ dependencies = [
 "checksum quasi_macros 0.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "29cec87bc2816766d7e4168302d505dd06b0a825aed41b00633d296e922e02dd"
 "checksum quick-error 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0aad603e8d7fb67da22dbdf1f4b826ce8829e406124109e73cf1b2454b93a71c"
 "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a"
-"checksum quote 0.3.10 (registry+https://github.com/rust-lang/crates.io-index)" = "6732e32663c9c271bfc7c1823486b471f18c47a2dbf87c066897b7b51afc83be"
+"checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
 "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5"
 "checksum rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8c83adcb08e5b922e804fe1918142b422602ef11f2fd670b0b52218cb5984a20"
 "checksum rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "767d91bacddf07d442fe39257bf04fd95897d1c47c545d009f6beb03efd038f8"
@@ -3446,7 +3500,7 @@ dependencies = [
 "checksum thread-id 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4437c97558c70d129e40629a5b385b3fb1ffac301e63941335e4d354081ec14a"
 "checksum thread_local 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "c85048c6260d17cf486ceae3282d9fb6b90be220bf5b28c400f5485ffc29f0c7"
 "checksum time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)" = "3c7ec6d62a20df54e07ab3b78b9a3932972f4b7981de295563686849eb3989af"
-"checksum tiny-keccak 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b50173faa6ee499206f77b189d7ff3bef40f6969f228c9ec22b82080df9aa41"
+"checksum tiny-keccak 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d52d12ad79e4063e0cb0ca5efa202ed7244b6ce4d25f4d3abe410b2a66128292"
 "checksum tokio-core 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "99e958104a67877907c1454386d5482fe8e965a55d60be834a15a44328e7dc76"
 "checksum tokio-io 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "48f55df1341bb92281f229a6030bc2abffde2c7a44c6d6b802b7687dd8be0775"
 "checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "<none>"

Cargo.toml

@@ -1,7 +1,7 @@
 [package]
 description = "Parity Ethereum client"
 name = "parity"
-version = "1.7.0"
+version = "1.8.0"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 build = "build.rs"
@@ -42,7 +42,9 @@ ethcore-light = { path = "ethcore/light" }
 ethcore-logger = { path = "logger" }
 ethcore-stratum = { path = "stratum" }
 ethcore-network = { path = "util/network" }
+node-filter = { path = "ethcore/node_filter" }
 ethkey = { path = "ethkey" }
+node-health = { path = "dapps/node-health" }
 rlp = { path = "util/rlp" }
 rpc-cli = { path = "rpc_cli" }
 parity-hash-fetch = { path = "hash-fetch" }
@@ -110,4 +112,4 @@ lto = false
 panic = "abort"
 
 [workspace]
-members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec"]
+members = ["ethstore/cli", "ethkey/cli", "evmbin", "whisper", "chainspec", "dapps/node-health"]

README.md, 50 lines changed
@@ -1,61 +1,47 @@
-# [Parity](https://parity.io/parity.html)
-### Fast, light, and robust Ethereum implementation
+# [Parity](https://parity.io/parity.html) - fast, light, and robust Ethereum client
 
-### [Download latest release](https://github.com/paritytech/parity/releases)
+[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master)
+[![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity)
+[![GPLv3](https://img.shields.io/badge/license-GPL%20v3-green.svg)](https://www.gnu.org/licenses/gpl-3.0.en.html)
 
-[![build status](https://gitlab.parity.io/parity/parity/badges/master/build.svg)](https://gitlab.parity.io/parity/parity/commits/master) [![Coverage Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url] [![Snap Status](https://build.snapcraft.io/badge/paritytech/parity.svg)](https://build.snapcraft.io/user/paritytech/parity)
+- [Download the latest release here.](https://github.com/paritytech/parity/releases)
 
 ### Join the chat!
 
-Parity [![Join the chat at https://gitter.im/ethcore/parity][gitter-image]][gitter-url] and
-parity.js [![Join the chat at https://gitter.im/ethcore/parity.js](https://badges.gitter.im/ethcore/parity.js.svg)](https://gitter.im/ethcore/parity.js?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-[Internal Documentation][doc-url]
-
-Be sure to check out [our wiki][wiki-url] for more information.
-
-[coveralls-image]: https://coveralls.io/repos/github/paritytech/parity/badge.svg?branch=master
-[coveralls-url]: https://coveralls.io/github/paritytech/parity?branch=master
-[gitter-image]: https://badges.gitter.im/Join%20Chat.svg
-[gitter-url]: https://gitter.im/ethcore/parity?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
-[license-image]: https://img.shields.io/badge/license-GPL%20v3-green.svg
-[license-url]: https://www.gnu.org/licenses/gpl-3.0.en.html
-[doc-url]: https://paritytech.github.io/parity/ethcore/index.html
-[wiki-url]: https://github.com/paritytech/parity/wiki
+Get in touch with us on Gitter:
+[![Gitter: Parity](https://img.shields.io/badge/gitter-parity-4AB495.svg)](https://gitter.im/paritytech/parity)
+[![Gitter: Parity.js](https://img.shields.io/badge/gitter-parity.js-4AB495.svg)](https://gitter.im/paritytech/parity.js)
+[![Gitter: Parity/Miners](https://img.shields.io/badge/gitter-parity/miners-4AB495.svg)](https://gitter.im/paritytech/parity/miners)
+[![Gitter: Parity-PoA](https://img.shields.io/badge/gitter-parity--poa-4AB495.svg)](https://gitter.im/paritytech/parity-poa)
+
+Be sure to check out [our wiki](https://github.com/paritytech/parity/wiki) and the [internal documentation](https://paritytech.github.io/parity/ethcore/index.html) for more information.
 
 ----
 
 ## About Parity
 
-Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and
-cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs.
+Parity's goal is to be the fastest, lightest, and most secure Ethereum client. We are developing Parity using the sophisticated and cutting-edge Rust programming language. Parity is licensed under the GPLv3, and can be used for all your Ethereum needs.
 
-Parity comes with a built-in wallet. To access [Parity Wallet](http://web3.site/) simply go to http://web3.site/ (if you don't have access to the internet, but still want to use the service, you can also use http://127.0.0.1:8180/). It
-includes various functionality allowing you to:
+Parity comes with a built-in wallet. To access [Parity Wallet](http://web3.site/) simply go to http://web3.site/ (if you don't have access to the internet, but still want to use the service, you can also use http://127.0.0.1:8180/). It includes various functionality allowing you to:
 - create and manage your Ethereum accounts;
 - manage your Ether and any Ethereum tokens;
 - create and register your own tokens;
 - and much more.
 
-By default, Parity will also run a JSONRPC server on `127.0.0.1:8545`. This is fully configurable and supports a number
-of RPC APIs.
+By default, Parity will also run a JSONRPC server on `127.0.0.1:8545`. This is fully configurable and supports a number of RPC APIs.
 
-If you run into an issue while using parity, feel free to file one in this repository
-or hop on our [gitter chat room][gitter-url] to ask a question. We are glad to help!
+If you run into an issue while using parity, feel free to file one in this repository or hop on our [gitter chat room](https://gitter.im/paritytech/parity) to ask a question. We are glad to help!
 
 **For security-critical issues**, please refer to the security policy outlined in `SECURITY.MD`.
 
-Parity's current release is 1.6. You can download it at https://github.com/paritytech/parity/releases or follow the instructions
-below to build from source.
+Parity's current release is 1.7. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source.
 
 ----
 
 ## Build dependencies
 
-**Parity requires Rust version 1.18.0 to build**
+**Parity requires Rust version 1.19.0 to build**
 
 We recommend installing Rust through [rustup](https://www.rustup.rs/). If you don't already have rustup, you can install it like this:
 

dapps/Cargo.toml

@@ -9,15 +9,12 @@ authors = ["Parity Technologies <admin@parity.io>"]
 
 [dependencies]
 base32 = "0.3"
-env_logger = "0.4"
 futures = "0.1"
-futures-cpupool = "0.1"
 linked-hash-map = "0.3"
 log = "0.3"
 parity-dapps-glue = "1.7"
 mime = "0.2"
 mime_guess = "1.6.1"
-ntp = "0.2.0"
 rand = "0.3"
 rustc-hex = "1.0"
 serde = "1.0"
@@ -27,19 +24,24 @@ time = "0.1.35"
 unicase = "1.3"
 url = "1.0"
 zip = { version = "0.1", default-features = false }
+itertools = "0.5"
 
 jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
 jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" }
 
-ethcore-devtools = { path = "../devtools" }
 ethcore-util = { path = "../util" }
 fetch = { path = "../util/fetch" }
+node-health = { path = "./node-health" }
 parity-hash-fetch = { path = "../hash-fetch" }
 parity-reactor = { path = "../util/reactor" }
 parity-ui = { path = "./ui" }
 
 clippy = { version = "0.0.103", optional = true}
 
+[dev-dependencies]
+env_logger = "0.4"
+ethcore-devtools = { path = "../devtools" }
+
 [features]
 dev = ["clippy", "ethcore-util/dev"]
 

dapps/node-health/Cargo.toml (new file), 18 lines

@@ -0,0 +1,18 @@
+[package]
+name = "node-health"
+description = "Node's health status"
+version = "0.1.0"
+license = "GPL-3.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+
+[dependencies]
+futures = "0.1"
+futures-cpupool = "0.1"
+log = "0.3"
+ntp = "0.2.0"
+parking_lot = "0.4"
+serde = "1.0"
+serde_derive = "1.0"
+time = "0.1.35"
+
+parity-reactor = { path = "../../util/reactor" }
122
dapps/node-health/src/health.rs
Normal file
@ -0,0 +1,122 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Reporting node's health.

use std::sync::Arc;
use std::time;
use futures::{Future, BoxFuture};
use futures::sync::oneshot;
use types::{HealthInfo, HealthStatus, Health};
use time::{TimeChecker, MAX_DRIFT};
use parity_reactor::Remote;
use parking_lot::Mutex;
use {SyncStatus};

const TIMEOUT_SECS: u64 = 5;
const PROOF: &str = "Only one closure is invoked.";

/// A struct enabling you to query for node's health.
#[derive(Debug, Clone)]
pub struct NodeHealth {
	sync_status: Arc<SyncStatus>,
	time: TimeChecker,
	remote: Remote,
}

impl NodeHealth {
	/// Creates new `NodeHealth`.
	pub fn new(sync_status: Arc<SyncStatus>, time: TimeChecker, remote: Remote) -> Self {
		NodeHealth { sync_status, time, remote, }
	}

	/// Query latest health report.
	pub fn health(&self) -> BoxFuture<Health, ()> {
		trace!(target: "dapps", "Checking node health.");
		// Check timediff
		let sync_status = self.sync_status.clone();
		let time = self.time.time_drift();
		let (tx, rx) = oneshot::channel();
		let tx = Arc::new(Mutex::new(Some(tx)));
		let tx2 = tx.clone();
		self.remote.spawn_with_timeout(
			move || time.then(move |result| {
				let _ = tx.lock().take().expect(PROOF).send(Ok(result));
				Ok(())
			}),
			time::Duration::from_secs(TIMEOUT_SECS),
			move || {
				let _ = tx2.lock().take().expect(PROOF).send(Err(()));
			},
		);

		rx.map_err(|err| {
			warn!(target: "dapps", "Health request cancelled: {:?}", err);
		}).and_then(move |time| {
			// Check peers
			let peers = {
				let (connected, max) = sync_status.peers();
				let (status, message) = match connected {
					0 => {
						(HealthStatus::Bad, "You are not connected to any peers. There is most likely some network issue. Fix connectivity.".into())
					},
					1 => (HealthStatus::NeedsAttention, "You are connected to only one peer. Your node might not be reliable. Check your network connection.".into()),
					_ => (HealthStatus::Ok, "".into()),
				};
				HealthInfo { status, message, details: (connected, max) }
			};

			// Check sync
			let sync = {
				let is_syncing = sync_status.is_major_importing();
				let (status, message) = if is_syncing {
					(HealthStatus::NeedsAttention, "Your node is still syncing, the values you see might be outdated. Wait until it's fully synced.".into())
				} else {
					(HealthStatus::Ok, "".into())
				};
				HealthInfo { status, message, details: is_syncing }
			};

			// Check time
			let time = {
				let (status, message, details) = match time {
					Ok(Ok(diff)) if diff < MAX_DRIFT && diff > -MAX_DRIFT => {
						(HealthStatus::Ok, "".into(), diff)
					},
					Ok(Ok(diff)) => {
						(HealthStatus::Bad, format!(
							"Your clock is not in sync. Detected difference is too big for the protocol to work: {}ms. Synchronize your clock.",
							diff,
						), diff)
					},
					Ok(Err(err)) => {
						(HealthStatus::NeedsAttention, format!(
							"Unable to reach time API: {}. Make sure that your clock is synchronized.",
							err,
						), 0)
					},
					Err(_) => {
						(HealthStatus::NeedsAttention, "Time API request timed out. Make sure that the clock is synchronized.".into(), 0)
					},
				};

				HealthInfo { status, message, details, }
			};

			Ok(Health { peers, sync, time})
		}).boxed()
	}
}
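A minimal usage sketch of the new crate, assuming the API introduced by this change; the NTP addresses and the concrete SyncStatus implementation are placeholders, construction mirrors the test helper further down in this diff.

use std::sync::Arc;

use node_health::{CpuPool, NodeHealth, SyncStatus, TimeChecker};
use parity_reactor::Remote;

// Build a health checker against a couple of (placeholder) NTP servers.
fn node_health(sync: Arc<SyncStatus>, remote: Remote) -> NodeHealth {
	let time = TimeChecker::new(&["0.pool.ntp.org:123", "1.pool.ntp.org:123"], CpuPool::new(1));
	NodeHealth::new(sync, time, remote)
}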
49
dapps/node-health/src/lib.rs
Normal file
@ -0,0 +1,49 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Node Health status reporting.

#![warn(missing_docs)]

extern crate futures;
extern crate futures_cpupool;
extern crate ntp;
extern crate time as time_crate;
extern crate parity_reactor;
extern crate parking_lot;

#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;

mod health;
mod time;
mod types;

pub use futures_cpupool::CpuPool;
pub use health::NodeHealth;
pub use types::{Health, HealthInfo, HealthStatus};
pub use time::{TimeChecker, Error};

/// Indicates sync status
pub trait SyncStatus: ::std::fmt::Debug + Send + Sync {
	/// Returns true if there is a major sync happening.
	fn is_major_importing(&self) -> bool;

	/// Returns number of connected and ideal peers.
	fn peers(&self) -> (usize, usize);
}
@ -33,18 +33,22 @@
 
 use std::io;
 use std::{fmt, mem, time};
-use std::sync::Arc;
 use std::collections::VecDeque;
+use std::sync::atomic::{self, AtomicUsize};
+use std::sync::Arc;
 
 use futures::{self, Future, BoxFuture};
-use futures_cpupool::CpuPool;
+use futures::future::{self, IntoFuture};
+use futures_cpupool::{CpuPool, CpuFuture};
 use ntp;
-use time::{Duration, Timespec};
-use util::RwLock;
+use parking_lot::RwLock;
+use time_crate::{Duration, Timespec};
 
 /// Time checker error.
 #[derive(Debug, Clone, PartialEq)]
 pub enum Error {
+	/// No servers are currently available for a query.
+	NoServersAvailable,
 	/// There was an error when trying to reach the NTP server.
 	Ntp(String),
 	/// IO error when reading NTP response.
@ -56,6 +60,7 @@ impl fmt::Display for Error {
 		use self::Error::*;
 
 		match *self {
+			NoServersAvailable => write!(fmt, "No NTP servers available"),
 			Ntp(ref err) => write!(fmt, "NTP error: {}", err),
 			Io(ref err) => write!(fmt, "Connection Error: {}", err),
 		}
@ -72,58 +77,123 @@ impl From<ntp::errors::Error> for Error {
 
 /// NTP time drift checker.
 pub trait Ntp {
+	/// Returned Future.
+	type Future: IntoFuture<Item=Duration, Error=Error>;
+
 	/// Returns the current time drift.
-	fn drift(&self) -> BoxFuture<Duration, Error>;
+	fn drift(&self) -> Self::Future;
+}
+
+const SERVER_MAX_POLL_INTERVAL_SECS: u64 = 60;
+#[derive(Debug)]
+struct Server {
+	pub address: String,
+	next_call: RwLock<time::Instant>,
+	failures: AtomicUsize,
+}
+
+impl Server {
+	pub fn is_available(&self) -> bool {
+		*self.next_call.read() < time::Instant::now()
+	}
+
+	pub fn report_success(&self) {
+		self.failures.store(0, atomic::Ordering::SeqCst);
+		self.update_next_call(1)
+	}
+
+	pub fn report_failure(&self) {
+		let errors = self.failures.fetch_add(1, atomic::Ordering::SeqCst);
+		self.update_next_call(1 << errors)
+	}
+
+	fn update_next_call(&self, delay: usize) {
+		*self.next_call.write() = time::Instant::now() + time::Duration::from_secs(delay as u64 * SERVER_MAX_POLL_INTERVAL_SECS);
+	}
+}
+
+impl<T: AsRef<str>> From<T> for Server {
+	fn from(t: T) -> Self {
+		Server {
+			address: t.as_ref().to_owned(),
+			next_call: RwLock::new(time::Instant::now()),
+			failures: Default::default(),
+		}
+	}
 }
 
 /// NTP client using the SNTP algorithm for calculating drift.
 #[derive(Clone)]
 pub struct SimpleNtp {
-	address: Arc<String>,
+	addresses: Vec<Arc<Server>>,
 	pool: CpuPool,
 }
 
 impl fmt::Debug for SimpleNtp {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-		write!(f, "Ntp {{ address: {} }}", self.address)
+		f
+			.debug_struct("SimpleNtp")
+			.field("addresses", &self.addresses)
+			.finish()
 	}
 }
 
 impl SimpleNtp {
-	fn new(address: &str, pool: CpuPool) -> SimpleNtp {
+	fn new<T: AsRef<str>>(addresses: &[T], pool: CpuPool) -> SimpleNtp {
 		SimpleNtp {
-			address: Arc::new(address.to_owned()),
+			addresses: addresses.iter().map(Server::from).map(Arc::new).collect(),
 			pool: pool,
 		}
 	}
 }
 
 impl Ntp for SimpleNtp {
-	fn drift(&self) -> BoxFuture<Duration, Error> {
-		let address = self.address.clone();
-		if &*address == "none" {
-			return futures::future::err(Error::Ntp("NTP server is not provided.".into())).boxed();
-		}
-
-		self.pool.spawn_fn(move || {
-			let packet = ntp::request(&*address)?;
-			let dest_time = ::time::now_utc().to_timespec();
-			let orig_time = Timespec::from(packet.orig_time);
-			let recv_time = Timespec::from(packet.recv_time);
-			let transmit_time = Timespec::from(packet.transmit_time);
-
-			let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2;
-
-			Ok(drift)
-		}).boxed()
+	type Future = future::Either<
+		CpuFuture<Duration, Error>,
+		future::FutureResult<Duration, Error>,
+	>;
+
+	fn drift(&self) -> Self::Future {
+		use self::future::Either::{A, B};
+
+		let server = self.addresses.iter().find(|server| server.is_available());
+		server.map(|server| {
+			let server = server.clone();
+			A(self.pool.spawn_fn(move || {
+				debug!(target: "dapps", "Fetching time from {}.", server.address);
+
+				match ntp::request(&server.address) {
+					Ok(packet) => {
+						let dest_time = ::time_crate::now_utc().to_timespec();
+						let orig_time = Timespec::from(packet.orig_time);
+						let recv_time = Timespec::from(packet.recv_time);
+						let transmit_time = Timespec::from(packet.transmit_time);
+
+						let drift = ((recv_time - orig_time) + (transmit_time - dest_time)) / 2;
+
+						server.report_success();
+						Ok(drift)
+					},
+					Err(err) => {
+						server.report_failure();
+						Err(err.into())
+					},
+				}
+			}))
+		}).unwrap_or_else(|| B(future::err(Error::NoServersAvailable)))
 	}
 }
 
 // NOTE In a positive scenario first results will be seen after:
-// MAX_RESULTS * UPDATE_TIMEOUT_OK_SECS seconds.
-const MAX_RESULTS: usize = 7;
-const UPDATE_TIMEOUT_OK_SECS: u64 = 30;
-const UPDATE_TIMEOUT_ERR_SECS: u64 = 2;
+// MAX_RESULTS * UPDATE_TIMEOUT_INCOMPLETE_SECS seconds.
+const MAX_RESULTS: usize = 4;
+const UPDATE_TIMEOUT_OK_SECS: u64 = 6 * 60 * 60;
+const UPDATE_TIMEOUT_WARN_SECS: u64 = 15 * 60;
+const UPDATE_TIMEOUT_ERR_SECS: u64 = 60;
+const UPDATE_TIMEOUT_INCOMPLETE_SECS: u64 = 10;
+
+/// Maximal valid time drift.
+pub const MAX_DRIFT: i64 = 500;
 
 #[derive(Debug, Clone)]
 /// A time checker.
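Server::report_failure above backs off exponentially: each consecutive failure doubles the quiet period before that NTP server is tried again. A small illustrative sketch of the resulting delays, using the constant from the hunk above:

// Illustrative only: mirrors `update_next_call(1 << errors)`, where `errors`
// is the failure count *before* the current failure (fetch_add returns the old value).
const SERVER_MAX_POLL_INTERVAL_SECS: u64 = 60;

fn retry_delay_secs(previous_failures: u32) -> u64 {
	(1u64 << previous_failures) * SERVER_MAX_POLL_INTERVAL_SECS
}

fn main() {
	assert_eq!(retry_delay_secs(0), 60);   // first failure: retry after 1 minute
	assert_eq!(retry_delay_secs(1), 120);  // second failure: 2 minutes
	assert_eq!(retry_delay_secs(3), 480);  // fourth failure: 8 minutes
}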
@ -134,13 +204,13 @@ pub struct TimeChecker<N: Ntp = SimpleNtp> {
 
 impl TimeChecker<SimpleNtp> {
 	/// Creates new time checker given the NTP server address.
-	pub fn new(ntp_address: String, pool: CpuPool) -> Self {
+	pub fn new<T: AsRef<str>>(ntp_addresses: &[T], pool: CpuPool) -> Self {
 		let last_result = Arc::new(RwLock::new(
 			// Assume everything is ok at the very beginning.
 			(time::Instant::now(), vec![Ok(0)].into())
 		));
 
-		let ntp = SimpleNtp::new(&ntp_address, pool);
+		let ntp = SimpleNtp::new(ntp_addresses, pool);
 
 		TimeChecker {
 			ntp,
@ -149,22 +219,34 @@ impl TimeChecker<SimpleNtp> {
 	}
 }
 
-impl<N: Ntp> TimeChecker<N> {
+impl<N: Ntp> TimeChecker<N> where <N::Future as IntoFuture>::Future: Send + 'static {
 	/// Updates the time
 	pub fn update(&self) -> BoxFuture<i64, Error> {
+		trace!(target: "dapps", "Updating time from NTP.");
 		let last_result = self.last_result.clone();
-		self.ntp.drift().then(move |res| {
+		self.ntp.drift().into_future().then(move |res| {
+			let res = res.map(|d| d.num_milliseconds());
+
+			if let Err(Error::NoServersAvailable) = res {
+				debug!(target: "dapps", "No NTP servers available. Selecting an older result.");
+				return select_result(last_result.read().1.iter());
+			}
+
+			// Update the results.
 			let mut results = mem::replace(&mut last_result.write().1, VecDeque::new());
+			let has_all_results = results.len() >= MAX_RESULTS;
 			let valid_till = time::Instant::now() + time::Duration::from_secs(
-				if res.is_ok() && results.len() == MAX_RESULTS {
-					UPDATE_TIMEOUT_OK_SECS
-				} else {
-					UPDATE_TIMEOUT_ERR_SECS
+				match res {
+					Ok(time) if has_all_results && time < MAX_DRIFT => UPDATE_TIMEOUT_OK_SECS,
+					Ok(_) if has_all_results => UPDATE_TIMEOUT_WARN_SECS,
+					Err(_) if has_all_results => UPDATE_TIMEOUT_ERR_SECS,
+					_ => UPDATE_TIMEOUT_INCOMPLETE_SECS,
 				}
 			);
 
+			trace!(target: "dapps", "New time drift received: {:?}", res);
 			// Push the result.
-			results.push_back(res.map(|d| d.num_milliseconds()));
+			results.push_back(res);
 			while results.len() > MAX_RESULTS {
 				results.pop_front();
 			}
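The match above decides how long a freshly computed drift stays valid; a worked reading of the constants, sketched under the assumption that update() is re-run once the previous result expires:

// Illustrative only: the revalidation interval chosen by `update()` above.
fn revalidate_in_secs(drift_ms: Result<i64, ()>, has_all_results: bool) -> u64 {
	const MAX_DRIFT: i64 = 500;
	match drift_ms {
		Ok(time) if has_all_results && time < MAX_DRIFT => 6 * 60 * 60, // healthy: every 6 hours
		Ok(_) if has_all_results => 15 * 60,                            // drift too large: every 15 minutes
		Err(_) if has_all_results => 60,                                // NTP errors: every minute
		_ => 10,                                                        // still filling the window: every 10 seconds
	}
}

fn main() {
	// With MAX_RESULTS = 4 the window fills after roughly 4 * 10 s = 40 s,
	// which is the "positive scenario" the NOTE comment refers to.
	assert_eq!(revalidate_in_secs(Ok(12), false), 10);
	assert_eq!(revalidate_in_secs(Ok(12), true), 21_600);
}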
@ -209,9 +291,9 @@ mod tests {
 	use std::cell::{Cell, RefCell};
 	use std::time::Instant;
 	use time::Duration;
-	use futures::{self, BoxFuture, Future};
+	use futures::{future, Future};
 	use super::{Ntp, TimeChecker, Error};
-	use util::RwLock;
+	use parking_lot::RwLock;
 
 	#[derive(Clone)]
 	struct FakeNtp(RefCell<Vec<Duration>>, Cell<u64>);
@ -224,9 +306,11 @@ mod tests {
 	}
 
 	impl Ntp for FakeNtp {
-		fn drift(&self) -> BoxFuture<Duration, Error> {
+		type Future = future::FutureResult<Duration, Error>;
+
+		fn drift(&self) -> Self::Future {
 			self.1.set(self.1.get() + 1);
-			futures::future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift().")).boxed()
+			future::ok(self.0.borrow_mut().pop().expect("Unexpected call to drift()."))
 		}
 	}
 
57
dapps/node-health/src/types.rs
Normal file
@ -0,0 +1,57 @@
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
// This file is part of Parity.

// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.

// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.

//! Base health types.

/// Health API endpoint status.
#[derive(Debug, PartialEq, Serialize)]
pub enum HealthStatus {
	/// Everything's OK.
	#[serde(rename = "ok")]
	Ok,
	/// Node health need attention
	/// (the issue is not critical, but may need investigation)
	#[serde(rename = "needsAttention")]
	NeedsAttention,
	/// There is something bad detected with the node.
	#[serde(rename = "bad")]
	Bad,
}

/// Represents a single check in node health.
/// Cointains the status of that check and apropriate message and details.
#[derive(Debug, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct HealthInfo<T> {
	/// Check status.
	pub status: HealthStatus,
	/// Human-readable message.
	pub message: String,
	/// Technical details of the check.
	pub details: T,
}

/// Node Health status.
#[derive(Debug, PartialEq, Serialize)]
#[serde(deny_unknown_fields)]
pub struct Health {
	/// Status of peers.
	pub peers: HealthInfo<(usize, usize)>,
	/// Sync status.
	pub sync: HealthInfo<bool>,
	/// Time diff info.
	pub time: HealthInfo<i64>,
}
@ -21,32 +21,28 @@ use hyper::method::Method;
 use hyper::status::StatusCode;
 
 use api::{response, types};
-use api::time::TimeChecker;
 use apps::fetcher::Fetcher;
 use handlers::{self, extract_url};
 use endpoint::{Endpoint, Handler, EndpointPath};
+use node_health::{NodeHealth, HealthStatus, Health};
 use parity_reactor::Remote;
-use {SyncStatus};
 
 #[derive(Clone)]
 pub struct RestApi {
 	fetcher: Arc<Fetcher>,
-	sync_status: Arc<SyncStatus>,
-	time: TimeChecker,
+	health: NodeHealth,
 	remote: Remote,
 }
 
 impl RestApi {
 	pub fn new(
 		fetcher: Arc<Fetcher>,
-		sync_status: Arc<SyncStatus>,
-		time: TimeChecker,
+		health: NodeHealth,
 		remote: Remote,
 	) -> Box<Endpoint> {
 		Box::new(RestApi {
 			fetcher,
-			sync_status,
-			time,
+			health,
 			remote,
 		})
 	}
@ -90,69 +86,23 @@ impl RestApiRouter {
 	}
 
 	fn health(&self, control: Control) -> Box<Handler> {
-		use self::types::{HealthInfo, HealthStatus, Health};
-
-		trace!(target: "dapps", "Checking node health.");
-		// Check timediff
-		let sync_status = self.api.sync_status.clone();
-		let map = move |time| {
-			// Check peers
-			let peers = {
-				let (connected, max) = sync_status.peers();
-				let (status, message) = match connected {
-					0 => {
-						(HealthStatus::Bad, "You are not connected to any peers. There is most likely some network issue. Fix connectivity.".into())
-					},
-					1 => (HealthStatus::NeedsAttention, "You are connected to only one peer. Your node might not be reliable. Check your network connection.".into()),
-					_ => (HealthStatus::Ok, "".into()),
-				};
-				HealthInfo { status, message, details: (connected, max) }
-			};
-
-			// Check sync
-			let sync = {
-				let is_syncing = sync_status.is_major_importing();
-				let (status, message) = if is_syncing {
-					(HealthStatus::NeedsAttention, "Your node is still syncing, the values you see might be outdated. Wait until it's fully synced.".into())
-				} else {
-					(HealthStatus::Ok, "".into())
-				};
-				HealthInfo { status, message, details: is_syncing }
-			};
-
-			// Check time
-			let time = {
-				const MAX_DRIFT: i64 = 500;
-				let (status, message, details) = match time {
-					Ok(Ok(diff)) if diff < MAX_DRIFT && diff > -MAX_DRIFT => {
-						(HealthStatus::Ok, "".into(), diff)
-					},
-					Ok(Ok(diff)) => {
-						(HealthStatus::Bad, format!(
-							"Your clock is not in sync. Detected difference is too big for the protocol to work: {}ms. Synchronize your clock.",
-							diff,
-						), diff)
-					},
-					Ok(Err(err)) => {
-						(HealthStatus::NeedsAttention, format!(
-							"Unable to reach time API: {}. Make sure that your clock is synchronized.",
-							err,
-						), 0)
-					},
-					Err(_) => {
-						(HealthStatus::NeedsAttention, "Time API request timed out. Make sure that the clock is synchronized.".into(), 0)
-					},
-				};
-
-				HealthInfo { status, message, details, }
-			};
-
-			response::as_json(StatusCode::Ok, &Health { peers, sync, time })
-		};
-		let time = self.api.time.time_drift();
+		let map = move |health: Result<Result<Health, ()>, ()>| {
+			let status = match health {
+				Ok(Ok(ref health)) => {
+					if [&health.peers.status, &health.sync.status].iter().any(|x| *x != &HealthStatus::Ok) {
+						StatusCode::PreconditionFailed // HTTP 412
+					} else {
+						StatusCode::Ok // HTTP 200
+					}
+				},
+				_ => StatusCode::ServiceUnavailable, // HTTP 503
+			};
+
+			response::as_json(status, &health)
+		};
+		let health = self.api.health.health();
 		let remote = self.api.remote.clone();
-		Box::new(handlers::AsyncHandler::new(time, map, remote, control))
+		Box::new(handlers::AsyncHandler::new(health, map, remote, control))
 	}
 }
 
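A compact reading of the mapping implemented by the new handler, sketched under the assumption that only the peers and sync checks gate the status code while the time check only shapes the JSON body:

// Illustrative only: HTTP status chosen for the health endpoint response above.
fn health_status_code(check_succeeded: bool, peers_ok: bool, sync_ok: bool) -> u16 {
	match (check_succeeded, peers_ok && sync_ok) {
		(true, true) => 200,   // StatusCode::Ok
		(true, false) => 412,  // StatusCode::PreconditionFailed
		(false, _) => 503,     // StatusCode::ServiceUnavailable
	}
}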
@ -18,8 +18,6 @@
 
 mod api;
 mod response;
-mod time;
 mod types;
 
 pub use self::api::RestApi;
-pub use self::time::TimeChecker;
@ -25,43 +25,3 @@ pub struct ApiError {
 	/// More technical error details.
 	pub detail: String,
 }
-
-/// Health API endpoint status.
-#[derive(Debug, PartialEq, Serialize)]
-pub enum HealthStatus {
-	/// Everything's OK.
-	#[serde(rename = "ok")]
-	Ok,
-	/// Node health need attention
-	/// (the issue is not critical, but may need investigation)
-	#[serde(rename = "needsAttention")]
-	NeedsAttention,
-	/// There is something bad detected with the node.
-	#[serde(rename = "bad")]
-	Bad
-}
-
-/// Represents a single check in node health.
-/// Cointains the status of that check and apropriate message and details.
-#[derive(Debug, PartialEq, Serialize)]
-#[serde(deny_unknown_fields)]
-pub struct HealthInfo<T> {
-	/// Check status.
-	pub status: HealthStatus,
-	/// Human-readable message.
-	pub message: String,
-	/// Technical details of the check.
-	pub details: T,
-}
-
-/// Node Health status.
-#[derive(Debug, PartialEq, Serialize)]
-#[serde(deny_unknown_fields)]
-pub struct Health {
-	/// Status of peers.
-	pub peers: HealthInfo<(usize, usize)>,
-	/// Sync status.
-	pub sync: HealthInfo<bool>,
-	/// Time diff info.
-	pub time: HealthInfo<i64>,
-}
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
struct FakeSync(bool);
|
struct FakeSync(bool);
|
||||||
impl SyncStatus for FakeSync {
|
impl SyncStatus for FakeSync {
|
||||||
fn is_major_importing(&self) -> bool { self.0 }
|
fn is_major_importing(&self) -> bool { self.0 }
|
||||||
|
@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::BTreeMap;
 use std::path::PathBuf;
 use std::sync::Arc;
 
@ -30,8 +29,8 @@ use {WebProxyTokens, ParentFrameSettings};
 
 mod app;
 mod cache;
-mod fs;
 mod ui;
+pub mod fs;
 pub mod fetcher;
 pub mod manifest;
 
@ -64,9 +63,10 @@ pub fn all_endpoints<F: Fetch>(
 	web_proxy_tokens: Arc<WebProxyTokens>,
 	remote: Remote,
 	fetch: F,
-) -> Endpoints {
+) -> (Vec<String>, Endpoints) {
 	// fetch fs dapps at first to avoid overwriting builtins
-	let mut pages = fs::local_endpoints(dapps_path, embeddable.clone());
+	let mut pages = fs::local_endpoints(dapps_path.clone(), embeddable.clone());
+	let local_endpoints: Vec<String> = pages.keys().cloned().collect();
 	for path in extra_dapps {
 		if let Some((id, endpoint)) = fs::local_endpoint(path.clone(), embeddable.clone()) {
 			pages.insert(id, endpoint);
@ -80,10 +80,10 @@ pub fn all_endpoints<F: Fetch>(
 	pages.insert("proxy".into(), ProxyPac::boxed(embeddable.clone(), dapps_domain.to_owned()));
 	pages.insert(WEB_PATH.into(), Web::boxed(embeddable.clone(), web_proxy_tokens.clone(), remote.clone(), fetch.clone()));
 
-	Arc::new(pages)
+	(local_endpoints, pages)
 }
 
-fn insert<T : WebApp + Default + 'static>(pages: &mut BTreeMap<String, Box<Endpoint>>, id: &str, embed_at: Embeddable) {
+fn insert<T : WebApp + Default + 'static>(pages: &mut Endpoints, id: &str, embed_at: Embeddable) {
 	pages.insert(id.to_owned(), Box::new(match embed_at {
 		Embeddable::Yes(address) => PageEndpoint::new_safe_to_embed(T::default(), address),
 		Embeddable::No => PageEndpoint::new(T::default()),
@ -16,7 +16,6 @@
 
 //! URL Endpoint traits
 
-use std::sync::Arc;
 use std::collections::BTreeMap;
 
 use hyper::{self, server, net};
@ -39,7 +38,7 @@ pub struct EndpointInfo {
 	pub icon_url: String,
 }
 
-pub type Endpoints = Arc<BTreeMap<String, Box<Endpoint>>>;
+pub type Endpoints = BTreeMap<String, Box<Endpoint>>;
 pub type Handler = server::Handler<net::HttpStream> + Send;
 
 pub trait Endpoint : Send + Sync {
|
|||||||
pub use self::streaming::StreamingHandler;
|
pub use self::streaming::StreamingHandler;
|
||||||
|
|
||||||
use std::iter;
|
use std::iter;
|
||||||
use util::Itertools;
|
use itertools::Itertools;
|
||||||
|
|
||||||
use url::Url;
|
use url::Url;
|
||||||
use hyper::{server, header, net, uri};
|
use hyper::{server, header, net, uri};
|
||||||
use {apps, address, Embeddable};
|
use {apps, address, Embeddable};
|
||||||
@ -67,10 +66,20 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
 		// Allow fonts from data: and HTTPS.
 		b"font-src 'self' data: https:;".to_vec(),
 		// Allow inline scripts and scripts eval (webpack/jsconsole)
-		b"script-src 'self' 'unsafe-inline' 'unsafe-eval';".to_vec(),
-		// Same restrictions as script-src (fallback) with additional
+		{
+			let script_src = embeddable_on.as_ref()
+				.map(|e| e.extra_script_src.iter()
+					.map(|&(ref host, port)| address(host, port))
+					.join(" ")
+				).unwrap_or_default();
+			format!(
+				"script-src 'self' 'unsafe-inline' 'unsafe-eval' {};",
+				script_src
+			).into_bytes()
+		},
+		// Same restrictions as script-src with additional
 		// blob: that is required for camera access (worker)
-		b"worker-src 'self' 'unsafe-inline' 'unsafe-eval' blob: ;".to_vec(),
+		b"worker-src 'self' 'unsafe-inline' 'unsafe-eval' https: blob:;".to_vec(),
 		// Restrict everything else to the same origin.
 		b"default-src 'self';".to_vec(),
 		// Run in sandbox mode (although it's not fully safe since we allow same-origin and script)
@ -90,7 +99,7 @@ pub fn add_security_headers(headers: &mut header::Headers, embeddable_on: Embedd
 			.into_iter()
 			.chain(embed.extra_embed_on
 				.iter()
-				.map(|&(ref host, port)| format!("{}:{}", host, port))
+				.map(|&(ref host, port)| address(host, port))
 			);
 
 		let ancestors = if embed.host == "127.0.0.1" {
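With the change above, the script-src directive is assembled from extra_script_src instead of being hard-coded. A small standalone sketch of the resulting directive, assuming address() joins host and port with a colon as the replaced format! call did:

// Illustrative only: mirrors the directive construction in the hunk above.
fn script_src_directive(extra: &[(String, u16)]) -> String {
	let hosts = extra.iter()
		.map(|&(ref host, port)| format!("{}:{}", host, port))
		.collect::<Vec<_>>()
		.join(" ");
	format!("script-src 'self' 'unsafe-inline' 'unsafe-eval' {};", hosts)
}

fn main() {
	assert_eq!(
		script_src_directive(&[("127.0.0.1".to_owned(), 8180)]),
		"script-src 'self' 'unsafe-inline' 'unsafe-eval' 127.0.0.1:8180;"
	);
}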
|
103
dapps/src/lib.rs
103
dapps/src/lib.rs
@ -21,10 +21,9 @@
|
|||||||
|
|
||||||
extern crate base32;
|
extern crate base32;
|
||||||
extern crate futures;
|
extern crate futures;
|
||||||
extern crate futures_cpupool;
|
extern crate itertools;
|
||||||
extern crate linked_hash_map;
|
extern crate linked_hash_map;
|
||||||
extern crate mime_guess;
|
extern crate mime_guess;
|
||||||
extern crate ntp;
|
|
||||||
extern crate rand;
|
extern crate rand;
|
||||||
extern crate rustc_hex;
|
extern crate rustc_hex;
|
||||||
extern crate serde;
|
extern crate serde;
|
||||||
@ -39,6 +38,7 @@ extern crate jsonrpc_http_server;
 
 extern crate ethcore_util as util;
 extern crate fetch;
+extern crate node_health;
 extern crate parity_dapps_glue as parity_dapps;
 extern crate parity_hash_fetch as hash_fetch;
 extern crate parity_reactor;
@ -56,7 +56,6 @@ extern crate ethcore_devtools as devtools;
 #[cfg(test)]
 extern crate env_logger;
 
-
 mod endpoint;
 mod apps;
 mod page;
@ -69,26 +68,21 @@ mod web;
 #[cfg(test)]
 mod tests;
 
+use std::collections::HashMap;
+use std::mem;
 use std::path::PathBuf;
 use std::sync::Arc;
-use std::collections::HashMap;
+use util::RwLock;
 
 use jsonrpc_http_server::{self as http, hyper, Origin};
 
 use fetch::Fetch;
-use futures_cpupool::CpuPool;
+use node_health::NodeHealth;
 use parity_reactor::Remote;
 
 pub use hash_fetch::urlhint::ContractClient;
+pub use node_health::SyncStatus;
 
-/// Indicates sync status
-pub trait SyncStatus: Send + Sync {
-	/// Returns true if there is a major sync happening.
-	fn is_major_importing(&self) -> bool;
-
-	/// Returns number of connected and ideal peers.
-	fn peers(&self) -> (usize, usize);
-}
-
 /// Validates Web Proxy tokens
 pub trait WebProxyTokens: Send + Sync {
@ -101,37 +95,59 @@ impl<F> WebProxyTokens for F where F: Fn(String) -> Option<Origin> + Send + Sync
 }
 
 /// Current supported endpoints.
+#[derive(Default, Clone)]
 pub struct Endpoints {
-	endpoints: endpoint::Endpoints,
+	local_endpoints: Arc<RwLock<Vec<String>>>,
+	endpoints: Arc<RwLock<endpoint::Endpoints>>,
+	dapps_path: PathBuf,
+	embeddable: Option<ParentFrameSettings>,
 }
 
 impl Endpoints {
 	/// Returns a current list of app endpoints.
 	pub fn list(&self) -> Vec<apps::App> {
-		self.endpoints.iter().filter_map(|(ref k, ref e)| {
+		self.endpoints.read().iter().filter_map(|(ref k, ref e)| {
 			e.info().map(|ref info| apps::App::from_info(k, info))
 		}).collect()
 	}
+
+	/// Check for any changes in the local dapps folder and update.
+	pub fn refresh_local_dapps(&self) {
+		let new_local = apps::fs::local_endpoints(&self.dapps_path, self.embeddable.clone());
+		let old_local = mem::replace(&mut *self.local_endpoints.write(), new_local.keys().cloned().collect());
+		let (_, to_remove): (_, Vec<_>) = old_local
+			.into_iter()
+			.partition(|k| new_local.contains_key(&k.clone()));
+
+		let mut endpoints = self.endpoints.write();
+		// remove the dead dapps
+		for k in to_remove {
+			endpoints.remove(&k);
+		}
+		// new dapps to be added
+		for (k, v) in new_local {
+			if !endpoints.contains_key(&k) {
+				endpoints.insert(k, v);
+			}
+		}
+	}
 }
 
 /// Dapps server as `jsonrpc-http-server` request middleware.
 pub struct Middleware {
+	endpoints: Endpoints,
 	router: router::Router,
-	endpoints: endpoint::Endpoints,
 }
 
 impl Middleware {
 	/// Get local endpoints handle.
-	pub fn endpoints(&self) -> Endpoints {
-		Endpoints {
-			endpoints: self.endpoints.clone(),
-		}
+	pub fn endpoints(&self) -> &Endpoints {
+		&self.endpoints
 	}
 
 	/// Creates new middleware for UI server.
 	pub fn ui<F: Fetch>(
-		ntp_server: &str,
-		pool: CpuPool,
+		health: NodeHealth,
 		remote: Remote,
 		dapps_domain: &str,
 		registrar: Arc<ContractClient>,
@ -146,11 +162,9 @@ impl Middleware {
 		).embeddable_on(None).allow_dapps(false));
 		let special = {
 			let mut special = special_endpoints(
-				ntp_server,
-				pool,
+				health,
 				content_fetcher.clone(),
 				remote.clone(),
-				sync_status.clone(),
 			);
 			special.insert(router::SpecialEndpoint::Home, Some(apps::ui()));
 			special
@ -164,18 +178,18 @@ impl Middleware {
 		);
 
 		Middleware {
-			router: router,
 			endpoints: Default::default(),
+			router: router,
 		}
 	}
 
 	/// Creates new Dapps server middleware.
 	pub fn dapps<F: Fetch>(
-		ntp_server: &str,
-		pool: CpuPool,
+		health: NodeHealth,
 		remote: Remote,
 		ui_address: Option<(String, u16)>,
 		extra_embed_on: Vec<(String, u16)>,
+		extra_script_src: Vec<(String, u16)>,
 		dapps_path: PathBuf,
 		extra_dapps: Vec<PathBuf>,
 		dapps_domain: &str,
@ -184,15 +198,15 @@ impl Middleware {
 		web_proxy_tokens: Arc<WebProxyTokens>,
 		fetch: F,
 	) -> Self {
-		let embeddable = as_embeddable(ui_address, extra_embed_on, dapps_domain);
+		let embeddable = as_embeddable(ui_address, extra_embed_on, extra_script_src, dapps_domain);
 		let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new(
 			hash_fetch::urlhint::URLHintContract::new(registrar),
 			sync_status.clone(),
 			remote.clone(),
 			fetch.clone(),
 		).embeddable_on(embeddable.clone()).allow_dapps(true));
-		let endpoints = apps::all_endpoints(
-			dapps_path,
+		let (local_endpoints, endpoints) = apps::all_endpoints(
+			dapps_path.clone(),
 			extra_dapps,
 			dapps_domain,
 			embeddable.clone(),
@ -200,14 +214,18 @@ impl Middleware {
 			remote.clone(),
 			fetch.clone(),
 		);
+		let endpoints = Endpoints {
+			endpoints: Arc::new(RwLock::new(endpoints)),
+			dapps_path,
+			local_endpoints: Arc::new(RwLock::new(local_endpoints)),
+			embeddable: embeddable.clone(),
+		};
+
 		let special = {
 			let mut special = special_endpoints(
-				ntp_server,
-				pool,
+				health,
 				content_fetcher.clone(),
 				remote.clone(),
-				sync_status,
 			);
 			special.insert(
 				router::SpecialEndpoint::Home,
@ -225,8 +243,8 @@ impl Middleware {
 		);
 
 		Middleware {
-			router: router,
-			endpoints: endpoints,
+			endpoints,
+			router,
 		}
 	}
 }
@ -238,19 +256,16 @@ impl http::RequestMiddleware for Middleware {
 }
 
 fn special_endpoints(
-	ntp_server: &str,
-	pool: CpuPool,
+	health: NodeHealth,
 	content_fetcher: Arc<apps::fetcher::Fetcher>,
 	remote: Remote,
-	sync_status: Arc<SyncStatus>,
 ) -> HashMap<router::SpecialEndpoint, Option<Box<endpoint::Endpoint>>> {
 	let mut special = HashMap::new();
 	special.insert(router::SpecialEndpoint::Rpc, None);
 	special.insert(router::SpecialEndpoint::Utils, Some(apps::utils()));
 	special.insert(router::SpecialEndpoint::Api, Some(api::RestApi::new(
 		content_fetcher,
-		sync_status,
-		api::TimeChecker::new(ntp_server.into(), pool),
+		health,
 		remote,
 	)));
 	special
@ -263,12 +278,14 @@ fn address(host: &str, port: u16) -> String {
 fn as_embeddable(
 	ui_address: Option<(String, u16)>,
 	extra_embed_on: Vec<(String, u16)>,
+	extra_script_src: Vec<(String, u16)>,
 	dapps_domain: &str,
 ) -> Option<ParentFrameSettings> {
 	ui_address.map(|(host, port)| ParentFrameSettings {
 		host,
 		port,
 		extra_embed_on,
+		extra_script_src,
 		dapps_domain: dapps_domain.to_owned(),
 	})
 }
@ -289,8 +306,10 @@ pub struct ParentFrameSettings {
 	pub host: String,
 	/// Port
 	pub port: u16,
-	/// Additional pages the pages can be embedded on.
+	/// Additional URLs the dapps can be embedded on.
 	pub extra_embed_on: Vec<(String, u16)>,
+	/// Additional URLs the dapp scripts can be loaded from.
+	pub extra_script_src: Vec<(String, u16)>,
 	/// Dapps Domain (web3.site)
 	pub dapps_domain: String,
 }
@ -28,7 +28,8 @@ use jsonrpc_http_server as http;
 
 use apps;
 use apps::fetcher::Fetcher;
-use endpoint::{Endpoint, Endpoints, EndpointPath, Handler};
+use endpoint::{Endpoint, EndpointPath, Handler};
+use Endpoints;
 use handlers;
 use Embeddable;
 
@ -50,26 +51,27 @@ pub struct Router {
 	dapps_domain: String,
 }
 
-impl http::RequestMiddleware for Router {
-	fn on_request(&self, req: &server::Request<HttpStream>, control: &Control) -> http::RequestMiddlewareAction {
+impl Router {
+	fn resolve_request(&self, req: &server::Request<HttpStream>, control: Control, refresh_dapps: bool) -> (bool, Option<Box<Handler>>) {
 		// Choose proper handler depending on path / domain
 		let url = handlers::extract_url(req);
 		let endpoint = extract_endpoint(&url, &self.dapps_domain);
 		let referer = extract_referer_endpoint(req, &self.dapps_domain);
 		let is_utils = endpoint.1 == SpecialEndpoint::Utils;
-		let is_origin_set = req.headers().get::<header::Origin>().is_some();
 		let is_get_request = *req.method() == hyper::Method::Get;
 		let is_head_request = *req.method() == hyper::Method::Head;
+		let has_dapp = |dapp: &str| self.endpoints
+			.as_ref()
+			.map_or(false, |endpoints| endpoints.endpoints.read().contains_key(dapp));
 
 		trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req);
 
-		let control = control.clone();
 		debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint);
-		let handler: Option<Box<Handler>> = match (endpoint.0, endpoint.1, referer) {
+		(is_utils, match (endpoint.0, endpoint.1, referer) {
 			// Handle invalid web requests that we can recover from
 			(ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url)))
 				if referer.app_id == apps::WEB_PATH
-					&& self.endpoints.as_ref().map(|ep| ep.contains_key(apps::WEB_PATH)).unwrap_or(false)
+					&& has_dapp(apps::WEB_PATH)
 					&& !is_web_endpoint(path)
 				=>
 			{
@ -88,11 +90,13 @@ impl http::RequestMiddleware for Router {
 					.map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control))
 			},
 			// Then delegate to dapp
-			(Some(ref path), _, _) if self.endpoints.as_ref().map(|ep| ep.contains_key(&path.app_id)).unwrap_or(false) => {
+			(Some(ref path), _, _) if has_dapp(&path.app_id) => {
 				trace!(target: "dapps", "Resolving to local/builtin dapp.");
 				Some(self.endpoints
 					.as_ref()
 					.expect("endpoints known to be set; qed")
+					.endpoints
+					.read()
 					.get(&path.app_id)
 					.expect("endpoints known to contain key; qed")
 					.to_async_handler(path.clone(), control))
@ -110,13 +114,19 @@ impl http::RequestMiddleware for Router {
 				=>
 			{
 				trace!(target: "dapps", "Resolving to 404.");
-				Some(Box::new(handlers::ContentHandler::error(
-					hyper::StatusCode::NotFound,
-					"404 Not Found",
-					"Requested content was not found.",
-					None,
-					self.embeddable_on.clone(),
-				)))
+				if refresh_dapps {
+					debug!(target: "dapps", "Refreshing dapps and re-trying.");
+					self.endpoints.as_ref().map(|endpoints| endpoints.refresh_local_dapps());
+					return self.resolve_request(req, control, false)
+				} else {
+					Some(Box::new(handlers::ContentHandler::error(
+						hyper::StatusCode::NotFound,
+						"404 Not Found",
+						"Requested content was not found.",
+						None,
+						self.embeddable_on.clone(),
+					)))
+				}
 			},
 			// Any other GET|HEAD requests to home page.
 			_ if (is_get_request || is_head_request) && self.special.contains_key(&SpecialEndpoint::Home) => {
@ -130,8 +140,15 @@ impl http::RequestMiddleware for Router {
 				trace!(target: "dapps", "Resolving to RPC call.");
 				None
 			}
-		};
+		})
+	}
+}
+
+impl http::RequestMiddleware for Router {
+	fn on_request(&self, req: &server::Request<HttpStream>, control: &Control) -> http::RequestMiddlewareAction {
+		let control = control.clone();
+		let is_origin_set = req.headers().get::<header::Origin>().is_some();
+		let (is_utils, handler) = self.resolve_request(req, control, self.endpoints.is_some());
 		match handler {
 			Some(handler) => http::RequestMiddlewareAction::Respond {
 				should_validate_hosts: !is_utils,
@ -39,7 +39,7 @@ fn should_resolve_dapp() {
 
 	// then
 	response.assert_status("HTTP/1.1 404 Not Found");
-	assert_eq!(registrar.calls.lock().len(), 2);
+	assert_eq!(registrar.calls.lock().len(), 4);
 	assert_security_headers_for_embed(&response.headers);
 }
 
@ -26,7 +26,7 @@ use jsonrpc_http_server::{self as http, Host, DomainsValidation};
 use devtools::http_client;
 use hash_fetch::urlhint::ContractClient;
 use fetch::{Fetch, Client as FetchClient};
-use futures_cpupool::CpuPool;
+use node_health::{NodeHealth, TimeChecker, CpuPool};
 use parity_reactor::Remote;
 
 use {Middleware, SyncStatus, WebProxyTokens};
@ -39,6 +39,7 @@ use self::fetch::FakeFetch;
 
 const SIGNER_PORT: u16 = 18180;
 
+#[derive(Debug)]
 struct FakeSync(bool);
 impl SyncStatus for FakeSync {
 	fn is_major_importing(&self) -> bool { self.0 }
@ -254,12 +255,17 @@ impl Server {
 		remote: Remote,
 		fetch: F,
 	) -> Result<Server, http::Error> {
+		let health = NodeHealth::new(
+			sync_status.clone(),
+			TimeChecker::new::<String>(&[], CpuPool::new(1)),
+			remote.clone(),
+		);
 		let middleware = Middleware::dapps(
-			"pool.ntp.org:123",
-			CpuPool::new(4),
+			health,
 			remote,
 			signer_address,
 			vec![],
+			vec![],
 			dapps_path,
 			extra_dapps,
 			DAPPS_DOMAIN.into(),
@ -204,4 +204,3 @@ fn should_serve_utils() {
 	assert_eq!(response.body.contains("function(){"), true);
 	assert_security_headers(&response.headers);
 }
-
@ -241,5 +241,3 @@ impl<F: Fetch> server::Handler<net::HttpStream> for WebHandler<F> {
 		}
 	}
 }
-
-
@ -47,6 +47,7 @@ num_cpus = "1.2"
 price-info = { path = "../price-info" }
 rand = "0.3"
 rlp = { path = "../util/rlp" }
+rlp_derive = { path = "../util/rlp_derive" }
 rust-crypto = "0.2.34"
 rustc-hex = "1.0"
 semver = "0.6"
@ -44,7 +44,7 @@ pub trait Memory {
 }
 
 /// Checks whether offset and size is valid memory range
-fn is_valid_range(off: usize, size: usize) -> bool {
+pub fn is_valid_range(off: usize, size: usize) -> bool {
 	// When size is zero we haven't actually expanded the memory
 	let overflow = off.overflowing_add(size).1;
 	size > 0 && !overflow
@ -168,7 +168,12 @@ impl<Cost: CostType> vm::Vm for Interpreter<Cost> {
 			}
 
 			if do_trace {
-				ext.trace_executed(gasometer.current_gas.as_u256(), stack.peek_top(info.ret), mem_written.map(|(o, s)| (o, &(self.mem[o..(o + s)]))), store_written);
+				ext.trace_executed(
+					gasometer.current_gas.as_u256(),
+					stack.peek_top(info.ret),
+					mem_written.map(|(o, s)| (o, &(self.mem[o..o+s]))),
+					store_written,
+				);
 			}
 
 			// Advance
@ -252,14 +257,20 @@ impl<Cost: CostType> Interpreter<Cost> {
|
|||||||
instruction: Instruction,
|
instruction: Instruction,
|
||||||
stack: &Stack<U256>
|
stack: &Stack<U256>
|
||||||
) -> Option<(usize, usize)> {
|
) -> Option<(usize, usize)> {
|
||||||
match instruction {
|
let read = |pos| stack.peek(pos).low_u64() as usize;
|
||||||
instructions::MSTORE | instructions::MLOAD => Some((stack.peek(0).low_u64() as usize, 32)),
|
let written = match instruction {
|
||||||
instructions::MSTORE8 => Some((stack.peek(0).low_u64() as usize, 1)),
|
instructions::MSTORE | instructions::MLOAD => Some((read(0), 32)),
|
||||||
instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => Some((stack.peek(0).low_u64() as usize, stack.peek(2).low_u64() as usize)),
|
instructions::MSTORE8 => Some((read(0), 1)),
|
||||||
instructions::EXTCODECOPY => Some((stack.peek(1).low_u64() as usize, stack.peek(3).low_u64() as usize)),
|
instructions::CALLDATACOPY | instructions::CODECOPY | instructions::RETURNDATACOPY => Some((read(0), read(2))),
|
||||||
instructions::CALL | instructions::CALLCODE => Some((stack.peek(5).low_u64() as usize, stack.peek(6).low_u64() as usize)),
|
instructions::EXTCODECOPY => Some((read(1), read(3))),
|
||||||
instructions::DELEGATECALL => Some((stack.peek(4).low_u64() as usize, stack.peek(5).low_u64() as usize)),
|
instructions::CALL | instructions::CALLCODE => Some((read(5), read(6))),
|
||||||
|
instructions::DELEGATECALL | instructions::STATICCALL => Some((read(4), read(5))),
|
||||||
_ => None,
|
_ => None,
|
||||||
|
};
|
||||||
|
|
||||||
|
match written {
|
||||||
|
Some((offset, size)) if !memory::is_valid_range(offset, size) => None,
|
||||||
|
written => written,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -862,3 +873,36 @@ fn address_to_u256(value: Address) -> U256 {
|
|||||||
U256::from(&*H256::from(value))
|
U256::from(&*H256::from(value))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::sync::Arc;
|
||||||
|
use rustc_hex::FromHex;
|
||||||
|
use vmtype::VMType;
|
||||||
|
use factory::Factory;
|
||||||
|
use vm::{self, ActionParams, ActionValue};
|
||||||
|
use vm::tests::{FakeExt, test_finalize};
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn should_not_fail_on_tracing_mem() {
|
||||||
|
let code = "7feeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff006000527faaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffaa6020526000620f120660406000601773945304eb96065b2a98b57a48a06ae28d285a71b56101f4f1600055".from_hex().unwrap();
|
||||||
|
|
||||||
|
let mut params = ActionParams::default();
|
||||||
|
params.address = 5.into();
|
||||||
|
params.gas = 300_000.into();
|
||||||
|
params.gas_price = 1.into();
|
||||||
|
params.value = ActionValue::Transfer(100_000.into());
|
||||||
|
params.code = Some(Arc::new(code));
|
||||||
|
let mut ext = FakeExt::new();
|
||||||
|
ext.balances.insert(5.into(), 1_000_000_000.into());
|
||||||
|
ext.tracing = true;
|
||||||
|
|
||||||
|
let gas_left = {
|
||||||
|
let mut vm = Factory::new(VMType::Interpreter, 1).create(params.gas);
|
||||||
|
test_finalize(vm.exec(params, &mut ext)).unwrap()
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(ext.calls.len(), 1);
|
||||||
|
assert_eq!(gas_left, 248_212.into());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -21,6 +21,7 @@ ethcore-devtools = { path = "../../devtools" }
|
|||||||
evm = { path = "../evm" }
|
evm = { path = "../evm" }
|
||||||
vm = { path = "../vm" }
|
vm = { path = "../vm" }
|
||||||
rlp = { path = "../../util/rlp" }
|
rlp = { path = "../../util/rlp" }
|
||||||
|
rlp_derive = { path = "../../util/rlp_derive" }
|
||||||
time = "0.1"
|
time = "0.1"
|
||||||
smallvec = "0.4"
|
smallvec = "0.4"
|
||||||
futures = "0.1"
|
futures = "0.1"
|
||||||
|
@ -100,8 +100,8 @@ pub trait LightChainClient: Send + Sync {
|
|||||||
/// Get an iterator over a block and its ancestry.
|
/// Get an iterator over a block and its ancestry.
|
||||||
fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<Iterator<Item=encoded::Header> + 'a>;
|
fn ancestry_iter<'a>(&'a self, start: BlockId) -> Box<Iterator<Item=encoded::Header> + 'a>;
|
||||||
|
|
||||||
/// Get the signing network ID.
|
/// Get the signing chain ID.
|
||||||
fn signing_network_id(&self) -> Option<u64>;
|
fn signing_chain_id(&self) -> Option<u64>;
|
||||||
|
|
||||||
/// Get environment info for execution at a given block.
|
/// Get environment info for execution at a given block.
|
||||||
/// Fails if that block's header is not stored.
|
/// Fails if that block's header is not stored.
|
||||||
@ -260,9 +260,9 @@ impl Client {
|
|||||||
self.chain.ancestry_iter(start)
|
self.chain.ancestry_iter(start)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the signing network id.
|
/// Get the signing chain id.
|
||||||
pub fn signing_network_id(&self) -> Option<u64> {
|
pub fn signing_chain_id(&self) -> Option<u64> {
|
||||||
self.engine.signing_network_id(&self.latest_env_info())
|
self.engine.signing_chain_id(&self.latest_env_info())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Flush the header queue.
|
/// Flush the header queue.
|
||||||
@ -448,8 +448,8 @@ impl LightChainClient for Client {
|
|||||||
Box::new(Client::ancestry_iter(self, start))
|
Box::new(Client::ancestry_iter(self, start))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn signing_network_id(&self) -> Option<u64> {
|
fn signing_chain_id(&self) -> Option<u64> {
|
||||||
Client::signing_network_id(self)
|
Client::signing_chain_id(self)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
|
fn env_info(&self, id: BlockId) -> Option<EnvInfo> {
|
||||||
|
@ -76,6 +76,8 @@ extern crate futures;
|
|||||||
extern crate itertools;
|
extern crate itertools;
|
||||||
extern crate rand;
|
extern crate rand;
|
||||||
extern crate rlp;
|
extern crate rlp;
|
||||||
|
#[macro_use]
|
||||||
|
extern crate rlp_derive;
|
||||||
extern crate serde;
|
extern crate serde;
|
||||||
extern crate smallvec;
|
extern crate smallvec;
|
||||||
extern crate stats;
|
extern crate stats;
|
||||||
|
@ -650,7 +650,7 @@ pub mod header {
|
|||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
||||||
|
|
||||||
/// Potentially incomplete headers request.
|
/// Potentially incomplete headers request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Start block.
|
/// Start block.
|
||||||
pub start: Field<HashOrNumber>,
|
pub start: Field<HashOrNumber>,
|
||||||
@ -662,27 +662,6 @@ pub mod header {
|
|||||||
pub reverse: bool,
|
pub reverse: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
start: rlp.val_at(0)?,
|
|
||||||
skip: rlp.val_at(1)?,
|
|
||||||
max: rlp.val_at(2)?,
|
|
||||||
reverse: rlp.val_at(3)?
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(4)
|
|
||||||
.append(&self.start)
|
|
||||||
.append(&self.skip)
|
|
||||||
.append(&self.max)
|
|
||||||
.append(&self.reverse);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -784,26 +763,12 @@ pub mod header_proof {
|
|||||||
use util::{Bytes, U256, H256};
|
use util::{Bytes, U256, H256};
|
||||||
|
|
||||||
/// Potentially incomplete header proof request.
|
/// Potentially incomplete header proof request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Block number.
|
/// Block number.
|
||||||
pub num: Field<u64>,
|
pub num: Field<u64>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
num: rlp.val_at(0)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(1).append(&self.num);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -889,30 +854,15 @@ pub mod header_proof {
|
|||||||
/// Request and response for transaction index.
|
/// Request and response for transaction index.
|
||||||
pub mod transaction_index {
|
pub mod transaction_index {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::H256;
|
use util::H256;
|
||||||
|
|
||||||
/// Potentially incomplete transaction index request.
|
/// Potentially incomplete transaction index request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Transaction hash to get index for.
|
/// Transaction hash to get index for.
|
||||||
pub hash: Field<H256>,
|
pub hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
hash: rlp.val_at(0)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(1).append(&self.hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -959,7 +909,7 @@ pub mod transaction_index {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The output of a request for transaction index.
|
/// The output of a request for transaction index.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Response {
|
pub struct Response {
|
||||||
/// Block number.
|
/// Block number.
|
||||||
pub num: u64,
|
pub num: u64,
|
||||||
@ -976,55 +926,21 @@ pub mod transaction_index {
|
|||||||
f(1, Output::Hash(self.hash));
|
f(1, Output::Hash(self.hash));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Response {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Response {
|
|
||||||
num: rlp.val_at(0)?,
|
|
||||||
hash: rlp.val_at(1)?,
|
|
||||||
index: rlp.val_at(2)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Response {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(3)
|
|
||||||
.append(&self.num)
|
|
||||||
.append(&self.hash)
|
|
||||||
.append(&self.index);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Request and response for block receipts
|
/// Request and response for block receipts
|
||||||
pub mod block_receipts {
|
pub mod block_receipts {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use ethcore::receipt::Receipt;
|
use ethcore::receipt::Receipt;
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::H256;
|
use util::H256;
|
||||||
|
|
||||||
/// Potentially incomplete block receipts request.
|
/// Potentially incomplete block receipts request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Block hash to get receipts for.
|
/// Block hash to get receipts for.
|
||||||
pub hash: Field<H256>,
|
pub hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
hash: rlp.val_at(0)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(1).append(&self.hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -1068,7 +984,7 @@ pub mod block_receipts {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The output of a request for block receipts.
|
/// The output of a request for block receipts.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)]
|
||||||
pub struct Response {
|
pub struct Response {
|
||||||
/// The block receipts.
|
/// The block receipts.
|
||||||
pub receipts: Vec<Receipt>
|
pub receipts: Vec<Receipt>
|
||||||
@ -1078,20 +994,6 @@ pub mod block_receipts {
|
|||||||
/// Fill reusable outputs by providing them to the function.
|
/// Fill reusable outputs by providing them to the function.
|
||||||
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
|
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Response {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Response {
|
|
||||||
receipts: rlp.as_list()?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Response {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.append_list(&self.receipts);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Request and response for a block body
|
/// Request and response for a block body
|
||||||
@ -1102,26 +1004,12 @@ pub mod block_body {
|
|||||||
use util::H256;
|
use util::H256;
|
||||||
|
|
||||||
/// Potentially incomplete block body request.
|
/// Potentially incomplete block body request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Block hash to get receipts for.
|
/// Block hash to get receipts for.
|
||||||
pub hash: Field<H256>,
|
pub hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
hash: rlp.val_at(0)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(1).append(&self.hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -1201,11 +1089,10 @@ pub mod block_body {
|
|||||||
/// A request for an account proof.
|
/// A request for an account proof.
|
||||||
pub mod account {
|
pub mod account {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::{Bytes, U256, H256};
|
use util::{Bytes, U256, H256};
|
||||||
|
|
||||||
/// Potentially incomplete request for an account proof.
|
/// Potentially incomplete request for an account proof.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Block hash to request state proof for.
|
/// Block hash to request state proof for.
|
||||||
pub block_hash: Field<H256>,
|
pub block_hash: Field<H256>,
|
||||||
@ -1213,23 +1100,6 @@ pub mod account {
|
|||||||
pub address_hash: Field<H256>,
|
pub address_hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
block_hash: rlp.val_at(0)?,
|
|
||||||
address_hash: rlp.val_at(1)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(2)
|
|
||||||
.append(&self.block_hash)
|
|
||||||
.append(&self.address_hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -1292,7 +1162,7 @@ pub mod account {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The output of a request for an account state proof.
|
/// The output of a request for an account state proof.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Response {
|
pub struct Response {
|
||||||
/// Inclusion/exclusion proof
|
/// Inclusion/exclusion proof
|
||||||
pub proof: Vec<Bytes>,
|
pub proof: Vec<Bytes>,
|
||||||
@ -1313,39 +1183,15 @@ pub mod account {
|
|||||||
f(1, Output::Hash(self.storage_root));
|
f(1, Output::Hash(self.storage_root));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Response {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Response {
|
|
||||||
proof: rlp.list_at(0)?,
|
|
||||||
nonce: rlp.val_at(1)?,
|
|
||||||
balance: rlp.val_at(2)?,
|
|
||||||
code_hash: rlp.val_at(3)?,
|
|
||||||
storage_root: rlp.val_at(4)?
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Response {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(5)
|
|
||||||
.append_list::<Vec<u8>,_>(&self.proof[..])
|
|
||||||
.append(&self.nonce)
|
|
||||||
.append(&self.balance)
|
|
||||||
.append(&self.code_hash)
|
|
||||||
.append(&self.storage_root);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A request for a storage proof.
|
/// A request for a storage proof.
|
||||||
pub mod storage {
|
pub mod storage {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::{Bytes, H256};
|
use util::{Bytes, H256};
|
||||||
|
|
||||||
/// Potentially incomplete request for an storage proof.
|
/// Potentially incomplete request for an storage proof.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// Block hash to request state proof for.
|
/// Block hash to request state proof for.
|
||||||
pub block_hash: Field<H256>,
|
pub block_hash: Field<H256>,
|
||||||
@ -1355,25 +1201,6 @@ pub mod storage {
|
|||||||
pub key_hash: Field<H256>,
|
pub key_hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
block_hash: rlp.val_at(0)?,
|
|
||||||
address_hash: rlp.val_at(1)?,
|
|
||||||
key_hash: rlp.val_at(2)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(3)
|
|
||||||
.append(&self.block_hash)
|
|
||||||
.append(&self.address_hash)
|
|
||||||
.append(&self.key_hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -1450,7 +1277,7 @@ pub mod storage {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The output of a request for an account state proof.
|
/// The output of a request for an account state proof.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Response {
|
pub struct Response {
|
||||||
/// Inclusion/exclusion proof
|
/// Inclusion/exclusion proof
|
||||||
pub proof: Vec<Bytes>,
|
pub proof: Vec<Bytes>,
|
||||||
@ -1464,33 +1291,15 @@ pub mod storage {
|
|||||||
f(0, Output::Hash(self.value));
|
f(0, Output::Hash(self.value));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Response {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Response {
|
|
||||||
proof: rlp.list_at(0)?,
|
|
||||||
value: rlp.val_at(1)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Response {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(2)
|
|
||||||
.append_list::<Vec<u8>,_>(&self.proof[..])
|
|
||||||
.append(&self.value);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A request for contract code.
|
/// A request for contract code.
|
||||||
pub mod contract_code {
|
pub mod contract_code {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::{Bytes, H256};
|
use util::{Bytes, H256};
|
||||||
|
|
||||||
/// Potentially incomplete contract code request.
|
/// Potentially incomplete contract code request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// The block hash to request the state for.
|
/// The block hash to request the state for.
|
||||||
pub block_hash: Field<H256>,
|
pub block_hash: Field<H256>,
|
||||||
@ -1498,23 +1307,6 @@ pub mod contract_code {
|
|||||||
pub code_hash: Field<H256>,
|
pub code_hash: Field<H256>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
block_hash: rlp.val_at(0)?,
|
|
||||||
code_hash: rlp.val_at(1)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(2)
|
|
||||||
.append(&self.block_hash)
|
|
||||||
.append(&self.code_hash);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
@ -1573,7 +1365,7 @@ pub mod contract_code {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// The output of a request for
|
/// The output of a request for
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodableWrapper, RlpDecodableWrapper)]
|
||||||
pub struct Response {
|
pub struct Response {
|
||||||
/// The requested code.
|
/// The requested code.
|
||||||
pub code: Bytes,
|
pub code: Bytes,
|
||||||
@ -1583,21 +1375,6 @@ pub mod contract_code {
|
|||||||
/// Fill reusable outputs by providing them to the function.
|
/// Fill reusable outputs by providing them to the function.
|
||||||
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
|
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Response {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
|
|
||||||
Ok(Response {
|
|
||||||
code: rlp.as_val()?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Response {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.append(&self.code);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// A request for proof of execution.
|
/// A request for proof of execution.
|
||||||
@ -1608,7 +1385,7 @@ pub mod execution {
|
|||||||
use util::{Bytes, Address, U256, H256, DBValue};
|
use util::{Bytes, Address, U256, H256, DBValue};
|
||||||
|
|
||||||
/// Potentially incomplete execution proof request.
|
/// Potentially incomplete execution proof request.
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Incomplete {
|
pub struct Incomplete {
|
||||||
/// The block hash to request the state for.
|
/// The block hash to request the state for.
|
||||||
pub block_hash: Field<H256>,
|
pub block_hash: Field<H256>,
|
||||||
@ -1626,38 +1403,6 @@ pub mod execution {
|
|||||||
pub data: Bytes,
|
pub data: Bytes,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Decodable for Incomplete {
|
|
||||||
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
|
||||||
Ok(Incomplete {
|
|
||||||
block_hash: rlp.val_at(0)?,
|
|
||||||
from: rlp.val_at(1)?,
|
|
||||||
action: rlp.val_at(2)?,
|
|
||||||
gas: rlp.val_at(3)?,
|
|
||||||
gas_price: rlp.val_at(4)?,
|
|
||||||
value: rlp.val_at(5)?,
|
|
||||||
data: rlp.val_at(6)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Encodable for Incomplete {
|
|
||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
|
||||||
s.begin_list(7)
|
|
||||||
.append(&self.block_hash)
|
|
||||||
.append(&self.from);
|
|
||||||
|
|
||||||
match self.action {
|
|
||||||
Action::Create => s.append_empty_data(),
|
|
||||||
Action::Call(ref addr) => s.append(addr),
|
|
||||||
};
|
|
||||||
|
|
||||||
s.append(&self.gas)
|
|
||||||
.append(&self.gas_price)
|
|
||||||
.append(&self.value)
|
|
||||||
.append(&self.data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl super::IncompleteRequest for Incomplete {
|
impl super::IncompleteRequest for Incomplete {
|
||||||
type Complete = Complete;
|
type Complete = Complete;
|
||||||
type Response = Response;
|
type Response = Response;
|
||||||
|
@ -9,7 +9,7 @@ build = "build.rs"
|
|||||||
ethabi = "2.0"
|
ethabi = "2.0"
|
||||||
futures = "0.1"
|
futures = "0.1"
|
||||||
byteorder = "1.0"
|
byteorder = "1.0"
|
||||||
ethcore-util = { path = "../../util" }
|
ethcore-bigint = { path = "../../util/bigint" }
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
native-contract-generator = { path = "generator" }
|
native-contract-generator = { path = "generator" }
|
||||||
|
@ -21,12 +21,14 @@ use std::fs::File;
|
|||||||
use std::io::Write;
|
use std::io::Write;
|
||||||
|
|
||||||
// TODO: just walk the "res" directory and generate whole crate automatically.
|
// TODO: just walk the "res" directory and generate whole crate automatically.
|
||||||
|
const KEY_SERVER_SET_ABI: &'static str = include_str!("res/key_server_set.json");
|
||||||
const REGISTRY_ABI: &'static str = include_str!("res/registrar.json");
|
const REGISTRY_ABI: &'static str = include_str!("res/registrar.json");
|
||||||
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
|
const URLHINT_ABI: &'static str = include_str!("res/urlhint.json");
|
||||||
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
|
const SERVICE_TRANSACTION_ABI: &'static str = include_str!("res/service_transaction.json");
|
||||||
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
|
const SECRETSTORE_ACL_STORAGE_ABI: &'static str = include_str!("res/secretstore_acl_storage.json");
|
||||||
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
|
const VALIDATOR_SET_ABI: &'static str = include_str!("res/validator_set.json");
|
||||||
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
|
const VALIDATOR_REPORT_ABI: &'static str = include_str!("res/validator_report.json");
|
||||||
|
const PEER_SET_ABI: &'static str = include_str!("res/peer_set.json");
|
||||||
|
|
||||||
const TEST_VALIDATOR_SET_ABI: &'static str = include_str!("res/test_validator_set.json");
|
const TEST_VALIDATOR_SET_ABI: &'static str = include_str!("res/test_validator_set.json");
|
||||||
|
|
||||||
@ -45,12 +47,14 @@ fn build_test_contracts() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn main() {
|
fn main() {
|
||||||
|
build_file("KeyServerSet", KEY_SERVER_SET_ABI, "key_server_set.rs");
|
||||||
build_file("Registry", REGISTRY_ABI, "registry.rs");
|
build_file("Registry", REGISTRY_ABI, "registry.rs");
|
||||||
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
|
build_file("Urlhint", URLHINT_ABI, "urlhint.rs");
|
||||||
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
|
build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs");
|
||||||
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
|
build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs");
|
||||||
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
|
build_file("ValidatorSet", VALIDATOR_SET_ABI, "validator_set.rs");
|
||||||
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
|
build_file("ValidatorReport", VALIDATOR_REPORT_ABI, "validator_report.rs");
|
||||||
|
build_file("PeerSet", PEER_SET_ABI, "peer_set.rs");
|
||||||
|
|
||||||
build_test_contracts();
|
build_test_contracts();
|
||||||
}
|
}
|
||||||
|
@ -15,7 +15,7 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
//! Rust code contract generator.
|
//! Rust code contract generator.
|
||||||
//! The code generated will require a dependence on the `ethcore-util`,
|
//! The code generated will require a dependence on the `ethcore-bigint::prelude`,
|
||||||
//! `ethabi`, `byteorder`, and `futures` crates.
|
//! `ethabi`, `byteorder`, and `futures` crates.
|
||||||
//! This currently isn't hygienic, so compilation of generated code may fail
|
//! This currently isn't hygienic, so compilation of generated code may fail
|
||||||
//! due to missing crates or name collisions. This will change when
|
//! due to missing crates or name collisions. This will change when
|
||||||
@ -48,14 +48,14 @@ pub fn generate_module(struct_name: &str, abi: &str) -> Result<String, Error> {
|
|||||||
use byteorder::{{BigEndian, ByteOrder}};
|
use byteorder::{{BigEndian, ByteOrder}};
|
||||||
use futures::{{future, Future, IntoFuture, BoxFuture}};
|
use futures::{{future, Future, IntoFuture, BoxFuture}};
|
||||||
use ethabi::{{Contract, Interface, Token, Event}};
|
use ethabi::{{Contract, Interface, Token, Event}};
|
||||||
use util;
|
use bigint;
|
||||||
|
|
||||||
/// Generated Rust bindings to an Ethereum contract.
|
/// Generated Rust bindings to an Ethereum contract.
|
||||||
#[derive(Clone, Debug)]
|
#[derive(Clone, Debug)]
|
||||||
pub struct {name} {{
|
pub struct {name} {{
|
||||||
contract: Contract,
|
contract: Contract,
|
||||||
/// Address to make calls to.
|
/// Address to make calls to.
|
||||||
pub address: util::Address,
|
pub address: bigint::prelude::H160,
|
||||||
}}
|
}}
|
||||||
|
|
||||||
const ABI: &'static str = r#"{abi_str}"#;
|
const ABI: &'static str = r#"{abi_str}"#;
|
||||||
@ -63,7 +63,7 @@ const ABI: &'static str = r#"{abi_str}"#;
|
|||||||
impl {name} {{
|
impl {name} {{
|
||||||
/// Create a new instance of `{name}` with an address.
|
/// Create a new instance of `{name}` with an address.
|
||||||
/// Calls can be made, given a callback for dispatching calls asynchronously.
|
/// Calls can be made, given a callback for dispatching calls asynchronously.
|
||||||
pub fn new(address: util::Address) -> Self {{
|
pub fn new(address: bigint::prelude::H160) -> Self {{
|
||||||
let contract = Contract::new(Interface::load(ABI.as_bytes())
|
let contract = Contract::new(Interface::load(ABI.as_bytes())
|
||||||
.expect("ABI checked at generation-time; qed"));
|
.expect("ABI checked at generation-time; qed"));
|
||||||
{name} {{
|
{name} {{
|
||||||
@ -108,7 +108,7 @@ fn generate_functions(contract: &Contract) -> Result<String, Error> {
|
|||||||
/// Outputs: {abi_outputs:?}
|
/// Outputs: {abi_outputs:?}
|
||||||
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
|
pub fn {snake_name}<F, U>(&self, call: F, {params}) -> BoxFuture<{output_type}, String>
|
||||||
where
|
where
|
||||||
F: FnOnce(util::Address, Vec<u8>) -> U,
|
F: FnOnce(bigint::prelude::H160, Vec<u8>) -> U,
|
||||||
U: IntoFuture<Item=Vec<u8>, Error=String>,
|
U: IntoFuture<Item=Vec<u8>, Error=String>,
|
||||||
U::Future: Send + 'static
|
U::Future: Send + 'static
|
||||||
{{
|
{{
|
||||||
@ -217,8 +217,8 @@ fn output_params_codegen(outputs: &[ParamType]) -> Result<(String, String), Para
|
|||||||
// create code for an argument type from param type.
|
// create code for an argument type from param type.
|
||||||
fn rust_type(input: ParamType) -> Result<String, ParamType> {
|
fn rust_type(input: ParamType) -> Result<String, ParamType> {
|
||||||
Ok(match input {
|
Ok(match input {
|
||||||
ParamType::Address => "util::Address".into(),
|
ParamType::Address => "bigint::prelude::H160".into(),
|
||||||
ParamType::FixedBytes(len) if len <= 32 => format!("util::H{}", len * 8),
|
ParamType::FixedBytes(len) if len <= 32 => format!("bigint::prelude::H{}", len * 8),
|
||||||
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
|
ParamType::Bytes | ParamType::FixedBytes(_) => "Vec<u8>".into(),
|
||||||
ParamType::Int(width) => match width {
|
ParamType::Int(width) => match width {
|
||||||
8 | 16 | 32 | 64 => format!("i{}", width),
|
8 | 16 | 32 | 64 => format!("i{}", width),
|
||||||
@ -226,7 +226,7 @@ fn rust_type(input: ParamType) -> Result<String, ParamType> {
|
|||||||
},
|
},
|
||||||
ParamType::Uint(width) => match width {
|
ParamType::Uint(width) => match width {
|
||||||
8 | 16 | 32 | 64 => format!("u{}", width),
|
8 | 16 | 32 | 64 => format!("u{}", width),
|
||||||
128 | 160 | 256 => format!("util::U{}", width),
|
128 | 160 | 256 => format!("bigint::prelude::U{}", width),
|
||||||
_ => return Err(ParamType::Uint(width)),
|
_ => return Err(ParamType::Uint(width)),
|
||||||
},
|
},
|
||||||
ParamType::Bool => "bool".into(),
|
ParamType::Bool => "bool".into(),
|
||||||
@ -259,8 +259,8 @@ fn tokenize(name: &str, input: ParamType) -> (bool, String) {
|
|||||||
},
|
},
|
||||||
ParamType::Uint(width) => format!(
|
ParamType::Uint(width) => format!(
|
||||||
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
|
"let mut r = [0; 32]; {}.to_big_endian(&mut r); Token::Uint(r)",
|
||||||
if width <= 64 { format!("util::U256::from({} as u64)", name) }
|
if width <= 64 { format!("bigint::prelude::U256::from({} as u64)", name) }
|
||||||
else { format!("util::U256::from({})", name) }
|
else { format!("bigint::prelude::U256::from({})", name) }
|
||||||
),
|
),
|
||||||
ParamType::Bool => format!("Token::Bool({})", name),
|
ParamType::Bool => format!("Token::Bool({})", name),
|
||||||
ParamType::String => format!("Token::String({})", name),
|
ParamType::String => format!("Token::String({})", name),
|
||||||
@ -281,11 +281,11 @@ fn tokenize(name: &str, input: ParamType) -> (bool, String) {
|
|||||||
// panics on unsupported types.
|
// panics on unsupported types.
|
||||||
fn detokenize(name: &str, output_type: ParamType) -> String {
|
fn detokenize(name: &str, output_type: ParamType) -> String {
|
||||||
match output_type {
|
match output_type {
|
||||||
ParamType::Address => format!("{}.to_address().map(util::H160)", name),
|
ParamType::Address => format!("{}.to_address().map(bigint::prelude::H160)", name),
|
||||||
ParamType::Bytes => format!("{}.to_bytes()", name),
|
ParamType::Bytes => format!("{}.to_bytes()", name),
|
||||||
ParamType::FixedBytes(len) if len <= 32 => {
|
ParamType::FixedBytes(len) if len <= 32 => {
|
||||||
// ensure no panic on slice too small.
|
// ensure no panic on slice too small.
|
||||||
let read_hash = format!("b.resize({}, 0); util::H{}::from_slice(&b[..{}])",
|
let read_hash = format!("b.resize({}, 0); bigint::prelude::H{}::from_slice(&b[..{}])",
|
||||||
len, len * 8, len);
|
len, len * 8, len);
|
||||||
|
|
||||||
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
|
format!("{}.to_fixed_bytes().map(|mut b| {{ {} }})",
|
||||||
@ -302,8 +302,8 @@ fn detokenize(name: &str, output_type: ParamType) -> String {
|
|||||||
}
|
}
|
||||||
ParamType::Uint(width) => {
|
ParamType::Uint(width) => {
|
||||||
let read_uint = match width {
|
let read_uint = match width {
|
||||||
8 | 16 | 32 | 64 => format!("util::U256(u).low_u64() as u{}", width),
|
8 | 16 | 32 | 64 => format!("bigint::prelude::U256(u).low_u64() as u{}", width),
|
||||||
_ => format!("util::U{}::from(&u[..])", width),
|
_ => format!("bigint::prelude::U{}::from(&u[..])", width),
|
||||||
};
|
};
|
||||||
|
|
||||||
format!("{}.to_uint().map(|u| {})", name, read_uint)
|
format!("{}.to_uint().map(|u| {})", name, read_uint)
|
||||||
@ -328,30 +328,30 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn input_types() {
|
fn input_types() {
|
||||||
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
|
assert_eq!(::input_params_codegen(&[]).unwrap().0, "");
|
||||||
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: util::Address, ");
|
assert_eq!(::input_params_codegen(&[ParamType::Address]).unwrap().0, "param_0: bigint::prelude::H160, ");
|
||||||
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
|
assert_eq!(::input_params_codegen(&[ParamType::Address, ParamType::Bytes]).unwrap().0,
|
||||||
"param_0: util::Address, param_1: Vec<u8>, ");
|
"param_0: bigint::prelude::H160, param_1: Vec<u8>, ");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn output_types() {
|
fn output_types() {
|
||||||
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
|
assert_eq!(::output_params_codegen(&[]).unwrap().0, "()");
|
||||||
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(util::Address)");
|
assert_eq!(::output_params_codegen(&[ParamType::Address]).unwrap().0, "(bigint::prelude::H160)");
|
||||||
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
|
assert_eq!(::output_params_codegen(&[ParamType::Address, ParamType::Array(Box::new(ParamType::Bytes))]).unwrap().0,
|
||||||
"(util::Address, Vec<Vec<u8>>)");
|
"(bigint::prelude::H160, Vec<Vec<u8>>)");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn rust_type() {
|
fn rust_type() {
|
||||||
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "util::H256");
|
assert_eq!(::rust_type(ParamType::FixedBytes(32)).unwrap(), "bigint::prelude::H256");
|
||||||
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
|
assert_eq!(::rust_type(ParamType::Array(Box::new(ParamType::FixedBytes(32)))).unwrap(),
|
||||||
"Vec<util::H256>");
|
"Vec<bigint::prelude::H256>");
|
||||||
|
|
||||||
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
|
assert_eq!(::rust_type(ParamType::Uint(64)).unwrap(), "u64");
|
||||||
assert!(::rust_type(ParamType::Uint(63)).is_err());
|
assert!(::rust_type(ParamType::Uint(63)).is_err());
|
||||||
|
|
||||||
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
|
assert_eq!(::rust_type(ParamType::Int(32)).unwrap(), "i32");
|
||||||
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "util::U256");
|
assert_eq!(::rust_type(ParamType::Uint(256)).unwrap(), "bigint::prelude::U256");
|
||||||
}
|
}
|
||||||
|
|
||||||
// codegen tests will need bootstrapping of some kind.
|
// codegen tests will need bootstrapping of some kind.
|
||||||
|
1
ethcore/native_contracts/res/key_server_set.json
Normal file
1
ethcore/native_contracts/res/key_server_set.json
Normal file
@ -0,0 +1 @@
|
|||||||
|
[{"constant":true,"inputs":[{"name":"","type":"uint256"}],"name":"keyServersList","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerPublic","outputs":[{"name":"","type":"bytes"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"getKeyServers","outputs":[{"name":"","type":"address[]"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"keyServer","type":"address"}],"name":"getKeyServerAddress","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServer","type":"address"}],"name":"removeKeyServer","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"keyServerPublic","type":"bytes"},{"name":"keyServerIp","type":"string"}],"name":"addKeyServer","outputs":[],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"name":"keyServer","type":"address"}],"name":"KeyServerRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}]
|
1
ethcore/native_contracts/res/peer_set.json
Normal file
1
ethcore/native_contracts/res/peer_set.json
Normal file
@ -0,0 +1 @@
|
|||||||
|
[{"constant":true,"inputs":[{"name":"sl","type":"bytes32"},{"name":"sh","type":"bytes32"},{"name":"pl","type":"bytes32"},{"name":"ph","type":"bytes32"}],"name":"connectionAllowed","outputs":[{"name":"res","type":"bool"}],"payable":false,"type":"function"},{"inputs":[],"payable":false,"type":"constructor"}]
|
21
ethcore/native_contracts/src/key_server_set.rs
Normal file
21
ethcore/native_contracts/src/key_server_set.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#![allow(unused_mut, unused_variables, unused_imports)]
|
||||||
|
|
||||||
|
//! Secret store Key Server set contract.
|
||||||
|
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/key_server_set.rs"));
|
@ -21,20 +21,24 @@
|
|||||||
extern crate futures;
|
extern crate futures;
|
||||||
extern crate byteorder;
|
extern crate byteorder;
|
||||||
extern crate ethabi;
|
extern crate ethabi;
|
||||||
extern crate ethcore_util as util;
|
extern crate ethcore_bigint as bigint;
|
||||||
|
|
||||||
|
mod key_server_set;
|
||||||
mod registry;
|
mod registry;
|
||||||
mod urlhint;
|
mod urlhint;
|
||||||
mod service_transaction;
|
mod service_transaction;
|
||||||
mod secretstore_acl_storage;
|
mod secretstore_acl_storage;
|
||||||
mod validator_set;
|
mod validator_set;
|
||||||
mod validator_report;
|
mod validator_report;
|
||||||
|
mod peer_set;
|
||||||
|
|
||||||
pub mod test_contracts;
|
pub mod test_contracts;
|
||||||
|
|
||||||
|
pub use self::key_server_set::KeyServerSet;
|
||||||
pub use self::registry::Registry;
|
pub use self::registry::Registry;
|
||||||
pub use self::urlhint::Urlhint;
|
pub use self::urlhint::Urlhint;
|
||||||
pub use self::service_transaction::ServiceTransactionChecker;
|
pub use self::service_transaction::ServiceTransactionChecker;
|
||||||
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
|
pub use self::secretstore_acl_storage::SecretStoreAclStorage;
|
||||||
pub use self::validator_set::ValidatorSet;
|
pub use self::validator_set::ValidatorSet;
|
||||||
pub use self::validator_report::ValidatorReport;
|
pub use self::validator_report::ValidatorReport;
|
||||||
|
pub use self::peer_set::PeerSet;
|
||||||
|
21
ethcore/native_contracts/src/peer_set.rs
Normal file
21
ethcore/native_contracts/src/peer_set.rs
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
#![allow(unused_mut, unused_variables, unused_imports)]
|
||||||
|
|
||||||
|
//! Peer set contract.
|
||||||
|
|
||||||
|
include!(concat!(env!("OUT_DIR"), "/peer_set.rs"));
|
16
ethcore/node_filter/Cargo.toml
Normal file
16
ethcore/node_filter/Cargo.toml
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
[package]
|
||||||
|
description = "Parity smart network connections"
|
||||||
|
homepage = "http://parity.io"
|
||||||
|
license = "GPL-3.0"
|
||||||
|
name = "node-filter"
|
||||||
|
version = "1.8.0"
|
||||||
|
authors = ["Parity Technologies <admin@parity.io>"]
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
ethcore = { path = ".."}
|
||||||
|
ethcore-util = { path = "../../util" }
|
||||||
|
ethcore-io = { path = "../../util/io" }
|
||||||
|
ethcore-network = { path = "../../util/network" }
|
||||||
|
native-contracts = { path = "../native_contracts" }
|
||||||
|
futures = "0.1"
|
||||||
|
log = "0.3"
|
44
ethcore/node_filter/res/node_filter.json
Normal file
44
ethcore/node_filter/res/node_filter.json
Normal file
@ -0,0 +1,44 @@
|
|||||||
|
{
|
||||||
|
"name": "TestNodeFilterContract",
|
||||||
|
"engine": {
|
||||||
|
"authorityRound": {
|
||||||
|
"params": {
|
||||||
|
"stepDuration": 1,
|
||||||
|
"startStep": 2,
|
||||||
|
"validators": {
|
||||||
|
"contract": "0x0000000000000000000000000000000000000005"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"params": {
|
||||||
|
"accountStartNonce": "0x0",
|
||||||
|
"maximumExtraDataSize": "0x20",
|
||||||
|
"minGasLimit": "0x1388",
|
||||||
|
"networkID" : "0x69",
|
||||||
|
"gasLimitBoundDivisor": "0x0400"
|
||||||
|
},
|
||||||
|
"genesis": {
|
||||||
|
"seal": {
|
||||||
|
"generic": "0xc180"
|
||||||
|
},
|
||||||
|
"difficulty": "0x20000",
|
||||||
|
"author": "0x0000000000000000000000000000000000000000",
|
||||||
|
"timestamp": "0x00",
|
||||||
|
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||||
|
"extraData": "0x",
|
||||||
|
"gasLimit": "0x222222"
|
||||||
|
},
|
||||||
|
"accounts": {
|
||||||
|
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
|
||||||
|
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
|
||||||
|
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
|
||||||
|
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
|
||||||
|
"0000000000000000000000000000000000000005": {
|
||||||
|
"balance": "1",
|
||||||
|
"constructor": "6060604052341561000f57600080fd5b5b6012600102600080601160010260001916815260200190815260200160002081600019169055506022600102600080602160010260001916815260200190815260200160002081600019169055506032600102600080603160010260001916815260200190815260200160002081600019169055506042600102600080604160010260001916815260200190815260200160002081600019169055505b5b610155806100bd6000396000f30060606040526000357c0100000000000000000000000000000000000000000000000000000000900463ffffffff168063994d790a1461003e575b600080fd5b341561004957600080fd5b61008a6004808035600019169060200190919080356000191690602001909190803560001916906020019091908035600019169060200190919050506100a4565b604051808215151515815260200191505060405180910390f35b60006001800285600019161480156100c3575060026001028460001916145b156100d15760019050610121565b60006001028360001916141580156100f157506000600102826000191614155b801561011e5750816000191660008085600019166000191681526020019081526020016000205460001916145b90505b9493505050505600a165627a7a723058202082b8d8667fd397925f39785d8e804540beda0524d28af15921375145dfcc250029"
|
||||||
|
},
|
||||||
|
"0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e": { "balance": "1606938044258990275541962092341162602522202993782792835301376" },
|
||||||
|
"0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
|
||||||
|
}
|
||||||
|
}
|
154
ethcore/node_filter/src/lib.rs
Normal file
154
ethcore/node_filter/src/lib.rs
Normal file
@ -0,0 +1,154 @@
|
|||||||
|
// Copyright 2015-2017 Parity Technologies (UK) Ltd.
|
||||||
|
// This file is part of Parity.
|
||||||
|
|
||||||
|
// Parity is free software: you can redistribute it and/or modify
|
||||||
|
// it under the terms of the GNU General Public License as published by
|
||||||
|
// the Free Software Foundation, either version 3 of the License, or
|
||||||
|
// (at your option) any later version.
|
||||||
|
|
||||||
|
// Parity is distributed in the hope that it will be useful,
|
||||||
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
// GNU General Public License for more details.
|
||||||
|
|
||||||
|
// You should have received a copy of the GNU General Public License
|
||||||
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
//! Smart contract based node filter.
|
||||||
|
|
||||||
|
extern crate ethcore;
|
||||||
|
extern crate ethcore_util as util;
|
||||||
|
extern crate ethcore_network as network;
|
||||||
|
extern crate native_contracts;
|
||||||
|
extern crate futures;
|
||||||
|
#[cfg(test)] extern crate ethcore_io as io;
|
||||||
|
#[macro_use] extern crate log;
|
||||||
|
|
||||||
|
use std::sync::Weak;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use native_contracts::PeerSet as Contract;
|
||||||
|
use network::{NodeId, ConnectionFilter, ConnectionDirection};
|
||||||
|
use ethcore::client::{BlockChainClient, BlockId, ChainNotify};
|
||||||
|
use util::{Mutex, Address, H256, Bytes};
|
||||||
|
use futures::Future;
|
||||||
|
|
||||||
|
const MAX_CACHE_SIZE: usize = 4096;
|
||||||
|
|
||||||
|
/// Connection filter that uses a contract to manage permissions.
pub struct NodeFilter {
	contract: Mutex<Option<Contract>>,
	client: Weak<BlockChainClient>,
	contract_address: Address,
	permission_cache: Mutex<HashMap<NodeId, bool>>,
}

impl NodeFilter {
	/// Create a new instance. Accepts a contract address.
	pub fn new(client: Weak<BlockChainClient>, contract_address: Address) -> NodeFilter {
		NodeFilter {
			contract: Mutex::new(None),
			client: client,
			contract_address: contract_address,
			permission_cache: Mutex::new(HashMap::new()),
		}
	}

	/// Clear cached permissions.
	pub fn clear_cache(&self) {
		self.permission_cache.lock().clear();
	}
}

impl ConnectionFilter for NodeFilter {
	fn connection_allowed(&self, own_id: &NodeId, connecting_id: &NodeId, _direction: ConnectionDirection) -> bool {
		let mut cache = self.permission_cache.lock();
		if let Some(res) = cache.get(connecting_id) {
			return *res;
		}

		let mut contract = self.contract.lock();
		if contract.is_none() {
			*contract = Some(Contract::new(self.contract_address));
		}

		let allowed = match (self.client.upgrade(), &*contract) {
			(Some(ref client), &Some(ref contract)) => {
				let own_low = H256::from_slice(&own_id[0..32]);
				let own_high = H256::from_slice(&own_id[32..64]);
				let id_low = H256::from_slice(&connecting_id[0..32]);
				let id_high = H256::from_slice(&connecting_id[32..64]);
				let allowed = contract.connection_allowed(
					|addr, data| futures::done(client.call_contract(BlockId::Latest, addr, data)),
					own_low,
					own_high,
					id_low,
					id_high,
				).wait().unwrap_or_else(|e| {
					debug!("Error calling peer set contract: {:?}", e);
					false
				});

				allowed
			}
			_ => false,
		};

		if cache.len() < MAX_CACHE_SIZE {
			cache.insert(*connecting_id, allowed);
		}
		allowed
	}
}

impl ChainNotify for NodeFilter {
	fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, _duration: u64) {
		if !imported.is_empty() {
			self.clear_cache();
		}
	}
}

#[cfg(test)]
mod test {
	use std::sync::{Arc, Weak};
	use std::str::FromStr;
	use ethcore::spec::Spec;
	use ethcore::client::{BlockChainClient, Client, ClientConfig};
	use ethcore::miner::Miner;
	use util::{Address};
	use network::{ConnectionDirection, ConnectionFilter, NodeId};
	use io::IoChannel;
	use super::NodeFilter;

	/// Contract code: https://gist.github.com/arkpar/467dbcc73cbb85b0997a7a10ffa0695f
	#[test]
	fn node_filter() {
		let contract_addr = Address::from_str("0000000000000000000000000000000000000005").unwrap();
		let data = include_bytes!("../res/node_filter.json");
		let spec = Spec::load(::std::env::temp_dir(), &data[..]).unwrap();
		let client_db = Arc::new(::util::kvdb::in_memory(::ethcore::db::NUM_COLUMNS.unwrap_or(0)));

		let client = Client::new(
			ClientConfig::default(),
			&spec,
			client_db,
			Arc::new(Miner::with_spec(&spec)),
			IoChannel::disconnected(),
		).unwrap();
		let filter = NodeFilter::new(Arc::downgrade(&client) as Weak<BlockChainClient>, contract_addr);
		let self1 = NodeId::from_str("00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000002").unwrap();
		let self2 = NodeId::from_str("00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003").unwrap();
		let node1 = NodeId::from_str("00000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000012").unwrap();
		let node2 = NodeId::from_str("00000000000000000000000000000000000000000000000000000000000000210000000000000000000000000000000000000000000000000000000000000022").unwrap();
		let nodex = NodeId::from_str("77000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").unwrap();

		assert!(filter.connection_allowed(&self1, &node1, ConnectionDirection::Inbound));
		assert!(filter.connection_allowed(&self1, &nodex, ConnectionDirection::Inbound));
		filter.clear_cache();
		assert!(filter.connection_allowed(&self2, &node1, ConnectionDirection::Inbound));
		assert!(filter.connection_allowed(&self2, &node2, ConnectionDirection::Inbound));
		assert!(!filter.connection_allowed(&self2, &nodex, ConnectionDirection::Inbound));
	}
}
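For reference, the contract call above packs each 512-bit enode ID into two 256-bit words (`*_low` from bytes 0..32, `*_high` from bytes 32..64). A standalone sketch of that split, using plain byte arrays instead of the `NodeId`/`H256` types assumed above:

// Illustrative only: mirrors the H256::from_slice calls in connection_allowed.
fn split_node_id(id: &[u8; 64]) -> ([u8; 32], [u8; 32]) {
	let mut low = [0u8; 32];
	let mut high = [0u8; 32];
	low.copy_from_slice(&id[0..32]);
	high.copy_from_slice(&id[32..64]);
	(low, high)
}

fn main() {
	let mut id = [0u8; 64];
	id[31] = 0x01;
	id[63] = 0x02;
	let (low, high) = split_node_id(&id);
	assert_eq!(low[31], 0x01);
	assert_eq!(high[31], 0x02);
}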
@@ -1 +1 @@
-Subproject commit 04c9d84c5fe5c3ad707be58664c7e72b97cc9996
+Subproject commit 519b0b967cffd7d1236ef21698b1e6e415a048e9
@@ -519,6 +519,11 @@ impl AccountProvider {
 		}
 	}
 
+	/// Returns account public key.
+	pub fn account_public(&self, address: Address, password: &str) -> Result<Public, Error> {
+		self.sstore.public(&self.sstore.account_ref(&address)?, password)
+	}
+
 	/// Returns each account along with name and meta.
 	pub fn set_account_name(&self, address: Address, name: String) -> Result<(), Error> {
 		self.sstore.set_name(&self.sstore.account_ref(&address)?, name)?;
@@ -697,6 +702,13 @@ impl AccountProvider {
 		Ok(self.sstore.decrypt(&account, &password, shared_mac, message)?)
 	}
 
+	/// Agree on shared key.
+	pub fn agree(&self, address: Address, password: Option<String>, other_public: &Public) -> Result<Secret, SignError> {
+		let account = self.sstore.account_ref(&address)?;
+		let password = password.map(Ok).unwrap_or_else(|| self.password(&account))?;
+		Ok(self.sstore.agree(&account, &password, other_public)?)
+	}
+
 	/// Returns the underlying `SecretStore` reference if one exists.
 	pub fn list_geth_accounts(&self, testnet: bool) -> Vec<Address> {
 		self.sstore.list_geth_accounts(testnet).into_iter().map(|a| Address::from(a).into()).collect()
@@ -19,6 +19,7 @@
 use std::collections::{HashMap, HashSet};
 use std::sync::Arc;
 use std::mem;
+use itertools::Itertools;
 use bloomchain as bc;
 use util::*;
 use rlp::*;
@@ -25,11 +25,9 @@ use engines::epoch::{Transition as EpochTransition};
 use header::BlockNumber;
 use receipt::Receipt;
 
-use rlp::*;
-use util::*;
+use util::{HeapSizeOf, H256, H264, U256};
 use util::kvdb::PREFIX_LEN as DB_PREFIX_LEN;
 
-
 /// Represents index of extra data in database
 #[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)]
 pub enum ExtrasIndex {
@@ -184,7 +182,7 @@ impl Key<EpochTransitions> for u64 {
 }
 
 /// Familial details concerning a block
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, RlpEncodable, RlpDecodable)]
 pub struct BlockDetails {
 	/// Block number
 	pub number: BlockNumber,
@@ -202,30 +200,8 @@ impl HeapSizeOf for BlockDetails {
 	}
 }
 
-impl Decodable for BlockDetails {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		let details = BlockDetails {
-			number: rlp.val_at(0)?,
-			total_difficulty: rlp.val_at(1)?,
-			parent: rlp.val_at(2)?,
-			children: rlp.list_at(3)?,
-		};
-		Ok(details)
-	}
-}
-
-impl Encodable for BlockDetails {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.begin_list(4);
-		s.append(&self.number);
-		s.append(&self.total_difficulty);
-		s.append(&self.parent);
-		s.append_list(&self.children);
-	}
-}
-
 /// Represents address of certain transaction within block
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Clone, RlpEncodable, RlpDecodable)]
 pub struct TransactionAddress {
 	/// Block hash
 	pub block_hash: H256,
@@ -237,27 +213,8 @@ impl HeapSizeOf for TransactionAddress {
 	fn heap_size_of_children(&self) -> usize { 0 }
 }
 
-impl Decodable for TransactionAddress {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		let tx_address = TransactionAddress {
-			block_hash: rlp.val_at(0)?,
-			index: rlp.val_at(1)?,
-		};
-
-		Ok(tx_address)
-	}
-}
-
-impl Encodable for TransactionAddress {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.begin_list(2);
-		s.append(&self.block_hash);
-		s.append(&self.index);
-	}
-}
-
 /// Contains all block receipts.
-#[derive(Clone)]
+#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct BlockReceipts {
 	pub receipts: Vec<Receipt>,
 }
@@ -270,20 +227,6 @@ impl BlockReceipts {
 	}
 }
 
-impl Decodable for BlockReceipts {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		Ok(BlockReceipts {
-			receipts: rlp.as_list()?,
-		})
-	}
-}
-
-impl Encodable for BlockReceipts {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.append_list(&self.receipts);
-	}
-}
-
 impl HeapSizeOf for BlockReceipts {
 	fn heap_size_of_children(&self) -> usize {
 		self.receipts.heap_size_of_children()
@@ -291,27 +234,12 @@ impl HeapSizeOf for BlockReceipts {
 	}
 }
 
 /// Candidate transitions to an epoch with specific number.
-#[derive(Clone)]
+#[derive(Clone, RlpEncodable, RlpDecodable)]
 pub struct EpochTransitions {
 	pub number: u64,
 	pub candidates: Vec<EpochTransition>,
 }
 
-impl Encodable for EpochTransitions {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.begin_list(2).append(&self.number).append_list(&self.candidates);
-	}
-}
-
-impl Decodable for EpochTransitions {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		Ok(EpochTransitions {
-			number: rlp.val_at(0)?,
-			candidates: rlp.list_at(1)?,
-		})
-	}
-}
-
 #[cfg(test)]
 mod tests {
 	use rlp::*;
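The manual `Encodable`/`Decodable` implementations removed above are replaced by derives from the new `rlp_derive` crate (added to Cargo.lock in this merge). A hedged sketch of the intended usage, assuming the in-tree proc-macro crate and the 0.2.x `rlp` helpers; field order defines the RLP list layout, and the `*Wrapper` variants forward to a single inner field, as for `BlockReceipts` and `Bloom`:

// Sketch under stated assumptions; not taken verbatim from the repository.
#[macro_use]
extern crate rlp_derive;
extern crate rlp;

// Encodes as a two-item RLP list, like the removed hand-written impls.
#[derive(Debug, PartialEq, RlpEncodable, RlpDecodable)]
struct Example {
	number: u64,
	parent: Vec<u8>,
}

// Newtype wrapper: encodes as the inner value itself, not a one-item list.
#[derive(Debug, PartialEq, RlpEncodableWrapper, RlpDecodableWrapper)]
struct Wrapper(Vec<u8>);

fn main() {
	let e = Example { number: 7, parent: vec![1, 2, 3] };
	let bytes = rlp::encode(&e);
	let back: Example = rlp::decode(&bytes); // this rlp version panics on malformed input
	assert_eq!(e, back);
	let _ = Wrapper(vec![0xaa]);
}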
@@ -15,12 +15,11 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use bloomchain as bc;
-use rlp::*;
 use util::HeapSizeOf;
 use basic_types::LogBloom;
 
 /// Helper structure representing bloom of the trace.
-#[derive(Debug, Clone)]
+#[derive(Debug, Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct Bloom(LogBloom);
 
 impl From<LogBloom> for Bloom {
@@ -43,18 +42,6 @@ impl Into<bc::Bloom> for Bloom {
 	}
 }
 
-impl Decodable for Bloom {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		LogBloom::decode(rlp).map(Bloom)
-	}
-}
-
-impl Encodable for Bloom {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		Encodable::rlp_append(&self.0, s)
-	}
-}
-
 impl HeapSizeOf for Bloom {
 	fn heap_size_of_children(&self) -> usize {
 		0
@@ -20,9 +20,10 @@ use std::sync::{Arc, Weak};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::{Instant};
 use time::precise_time_ns;
+use itertools::Itertools;
 
 // util
-use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, MutexGuard, Hashable};
+use util::{Bytes, PerfTimer, Mutex, RwLock, MutexGuard, Hashable};
 use util::{journaldb, DBValue, TrieFactory, Trie};
 use util::{U256, H256, Address, H2048};
 use util::trie::TrieSpec;
@@ -747,7 +748,7 @@ impl Client {
 			self.factories.clone(),
 		).expect("state known to be available for just-imported block; qed");
 
-		let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false };
+		let options = TransactOptions::with_no_tracing().dont_check_nonce();
 		let res = Executive::new(&mut state, &env_info, &*self.engine)
 			.transact(&transaction, options);
 
@@ -906,7 +907,7 @@ impl Client {
 	pub fn state_at(&self, id: BlockId) -> Option<State<StateDB>> {
 		// fast path for latest state.
 		match id.clone() {
-			BlockId::Pending => return self.miner.pending_state().or_else(|| Some(self.state())),
+			BlockId::Pending => return self.miner.pending_state(self.chain.read().best_block_number()).or_else(|| Some(self.state())),
 			BlockId::Latest => return Some(self.state()),
 			_ => {},
 		}
@@ -1055,19 +1056,20 @@ impl Client {
 		self.history
 	}
 
-	fn block_hash(chain: &BlockChain, id: BlockId) -> Option<H256> {
+	fn block_hash(chain: &BlockChain, miner: &Miner, id: BlockId) -> Option<H256> {
 		match id {
 			BlockId::Hash(hash) => Some(hash),
 			BlockId::Number(number) => chain.block_hash(number),
 			BlockId::Earliest => chain.block_hash(0),
-			BlockId::Latest | BlockId::Pending => Some(chain.best_block_hash()),
+			BlockId::Latest => Some(chain.best_block_hash()),
+			BlockId::Pending => miner.pending_block_header(chain.best_block_number()).map(|header| header.hash())
 		}
 	}
 
 	fn transaction_address(&self, id: TransactionId) -> Option<TransactionAddress> {
 		match id {
 			TransactionId::Hash(ref hash) => self.chain.read().transaction_address(hash),
-			TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), id).map(|hash| TransactionAddress {
+			TransactionId::Location(id, index) => Self::block_hash(&self.chain.read(), &self.miner, id).map(|hash| TransactionAddress {
 				block_hash: hash,
 				index: index,
 			})
@@ -1110,6 +1112,41 @@ impl Client {
 			data: data,
 		}.fake_sign(from)
 	}
 
+	fn do_virtual_call(&self, env_info: &EnvInfo, state: &mut State<StateDB>, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError> {
+		fn call<E, V, T>(
+			state: &mut State<StateDB>,
+			env_info: &EnvInfo,
+			engine: &E,
+			state_diff: bool,
+			transaction: &SignedTransaction,
+			options: TransactOptions<T, V>,
+		) -> Result<Executed, CallError> where
+			E: Engine + ?Sized,
+			T: trace::Tracer,
+			V: trace::VMTracer,
+		{
+			let options = options.dont_check_nonce();
+			let original_state = if state_diff { Some(state.clone()) } else { None };
+
+			let mut ret = Executive::new(state, env_info, engine).transact_virtual(transaction, options)?;
+
+			if let Some(original) = original_state {
+				ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?);
+			}
+			Ok(ret)
+		}
+
+		let state_diff = analytics.state_diffing;
+		let engine = &*self.engine;
+
+		match (analytics.transaction_tracing, analytics.vm_tracing) {
+			(true, true) => call(state, env_info, engine, state_diff, t, TransactOptions::with_tracing_and_vm_tracing()),
+			(true, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_tracing()),
+			(false, true) => call(state, env_info, engine, state_diff, t, TransactOptions::with_vm_tracing()),
+			(false, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_no_tracing()),
+		}
+	}
 }
 
 impl snapshot::DatabaseRestore for Client {
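`do_virtual_call` selects one of four statically-typed `TransactOptions` values from the runtime `(transaction_tracing, vm_tracing)` flags and hands it to a generic inner function, so each tracer combination gets its own monomorphized path. A self-contained toy model of that dispatch pattern (the tracer types below are stand-ins, not the ethcore ones):

// Toy model: a generic worker is monomorphized per tracer type, and a
// runtime flag decides which instantiation runs, as in the match above.
trait Tracer {
	fn trace(&mut self, msg: &str);
}

struct NoopTracer;
impl Tracer for NoopTracer {
	fn trace(&mut self, _msg: &str) {}
}

struct PrintTracer;
impl Tracer for PrintTracer {
	fn trace(&mut self, msg: &str) { println!("trace: {}", msg); }
}

fn execute<T: Tracer>(mut tracer: T, input: u64) -> u64 {
	tracer.trace("executing");
	input + 1
}

fn execute_with_flags(tracing: bool, input: u64) -> u64 {
	match tracing {
		true => execute(PrintTracer, input),
		false => execute(NoopTracer, input),
	}
}

fn main() {
	assert_eq!(execute_with_flags(false, 41), 42);
	assert_eq!(execute_with_flags(true, 41), 42);
}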
@@ -1134,23 +1171,31 @@ impl snapshot::DatabaseRestore for Client {
 }
 
 impl BlockChainClient for Client {
-	fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError> {
+	fn call(&self, transaction: &SignedTransaction, analytics: CallAnalytics, block: BlockId) -> Result<Executed, CallError> {
 		let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
 		env_info.gas_limit = U256::max_value();
 
 		// that's just a copy of the state.
 		let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
-		let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
 
-		let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
-		let mut ret = Executive::new(&mut state, &env_info, &*self.engine).transact_virtual(t, options)?;
+		self.do_virtual_call(&env_info, &mut state, transaction, analytics)
+	}
 
-		// TODO gav move this into Executive.
-		if let Some(original) = original_state {
-			ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?);
+	fn call_many(&self, transactions: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result<Vec<Executed>, CallError> {
+		let mut env_info = self.env_info(block).ok_or(CallError::StatePruned)?;
+		env_info.gas_limit = U256::max_value();
+
+		// that's just a copy of the state.
+		let mut state = self.state_at(block).ok_or(CallError::StatePruned)?;
+		let mut results = Vec::with_capacity(transactions.len());
+
+		for &(ref t, analytics) in transactions {
+			let ret = self.do_virtual_call(&env_info, &mut state, t, analytics)?;
+			env_info.gas_used = ret.cumulative_gas_used;
+			results.push(ret);
 		}
 
-		Ok(ret)
+		Ok(results)
 	}
 
 	fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result<U256, CallError> {
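`call_many` differs from repeated `call` in that the simulated transactions are dependent: the cumulative gas of each one is written back into the shared `EnvInfo` before the next runs, and all of them execute against the same copied state. A toy, self-contained model of that accumulation (the real `EnvInfo`/`Executed` types carry far more state):

// Toy model: each "call" consumes gas; the running total is fed forward,
// mirroring `env_info.gas_used = ret.cumulative_gas_used` above.
struct EnvInfo { gas_used: u64 }
struct Executed { cumulative_gas_used: u64 }

fn do_call(env: &EnvInfo, gas_for_tx: u64) -> Executed {
	Executed { cumulative_gas_used: env.gas_used + gas_for_tx }
}

fn call_many(txs: &[u64]) -> Vec<Executed> {
	let mut env = EnvInfo { gas_used: 0 };
	let mut results = Vec::with_capacity(txs.len());
	for &gas in txs {
		let ret = do_call(&env, gas);
		env.gas_used = ret.cumulative_gas_used;
		results.push(ret);
	}
	results
}

fn main() {
	let res = call_many(&[21_000, 50_000]);
	assert_eq!(res[1].cumulative_gas_used, 71_000);
}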
@@ -1165,7 +1210,7 @@ impl BlockChainClient for Client {
 		// that's just a copy of the state.
 		let original_state = self.state_at(block).ok_or(CallError::StatePruned)?;
 		let sender = t.sender();
-		let options = TransactOptions { tracing: true, vm_tracing: false, check_nonce: false };
+		let options = || TransactOptions::with_tracing();
 
 		let cond = |gas| {
 			let mut tx = t.as_unsigned().clone();
@@ -1174,7 +1219,7 @@ impl BlockChainClient for Client {
 
 			let mut state = original_state.clone();
 			Ok(Executive::new(&mut state, &env_info, &*self.engine)
-				.transact_virtual(&tx, options.clone())
+				.transact_virtual(&tx, options())
 				.map(|r| r.exception.is_none())
 				.unwrap_or(false))
 		};
@@ -1230,22 +1275,17 @@ impl BlockChainClient for Client {
 			return Err(CallError::TransactionNotFound);
 		}
 
-		let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
 		const PROOF: &'static str = "Transactions fetched from blockchain; blockchain transactions are valid; qed";
 		let rest = txs.split_off(address.index);
 		for t in txs {
 			let t = SignedTransaction::new(t).expect(PROOF);
-			let x = Executive::new(&mut state, &env_info, &*self.engine).transact(&t, Default::default())?;
+			let x = Executive::new(&mut state, &env_info, &*self.engine).transact(&t, TransactOptions::with_no_tracing())?;
 			env_info.gas_used = env_info.gas_used + x.gas_used;
 		}
 		let first = rest.into_iter().next().expect("We split off < `address.index`; Length is checked earlier; qed");
 		let t = SignedTransaction::new(first).expect(PROOF);
-		let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
-		let mut ret = Executive::new(&mut state, &env_info, &*self.engine).transact(&t, options)?;
-		if let Some(original) = original_state {
-			ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?)
-		}
-		Ok(ret)
+
+		self.do_virtual_call(&env_info, &mut state, &t, analytics)
 	}
 
 	fn mode(&self) -> IpcMode {
@@ -1303,7 +1343,16 @@ impl BlockChainClient for Client {
 
 	fn block_header(&self, id: BlockId) -> Option<::encoded::Header> {
 		let chain = self.chain.read();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block_header_data(&hash))
+
+		if let BlockId::Pending = id {
+			if let Some(block) = self.miner.pending_block(chain.best_block_number()) {
+				return Some(encoded::Header::new(block.header.rlp(Seal::Without)));
+			}
+			// fall back to latest
+			return self.block_header(BlockId::Latest);
+		}
+
+		Self::block_hash(&chain, &self.miner, id).and_then(|hash| chain.block_header_data(&hash))
 	}
 
 	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
@@ -1311,30 +1360,48 @@ impl BlockChainClient for Client {
 			BlockId::Number(number) => Some(number),
 			BlockId::Hash(ref hash) => self.chain.read().block_number(hash),
 			BlockId::Earliest => Some(0),
-			BlockId::Latest | BlockId::Pending => Some(self.chain.read().best_block_number()),
+			BlockId::Latest => Some(self.chain.read().best_block_number()),
+			BlockId::Pending => Some(self.chain.read().best_block_number() + 1),
 		}
 	}
 
 	fn block_body(&self, id: BlockId) -> Option<encoded::Body> {
 		let chain = self.chain.read();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block_body(&hash))
+
+		if let BlockId::Pending = id {
+			if let Some(block) = self.miner.pending_block(chain.best_block_number()) {
+				return Some(encoded::Body::new(BlockChain::block_to_body(&block.rlp_bytes(Seal::Without))));
+			}
+			// fall back to latest
+			return self.block_body(BlockId::Latest);
+		}
+
+		Self::block_hash(&chain, &self.miner, id).and_then(|hash| chain.block_body(&hash))
 	}
 
 	fn block(&self, id: BlockId) -> Option<encoded::Block> {
+		let chain = self.chain.read();
+
 		if let BlockId::Pending = id {
-			if let Some(block) = self.miner.pending_block() {
+			if let Some(block) = self.miner.pending_block(chain.best_block_number()) {
 				return Some(encoded::Block::new(block.rlp_bytes(Seal::Without)));
 			}
+			// fall back to latest
+			return self.block(BlockId::Latest);
 		}
-		let chain = self.chain.read();
-		Self::block_hash(&chain, id).and_then(|hash| {
+
+		Self::block_hash(&chain, &self.miner, id).and_then(|hash| {
 			chain.block(&hash)
 		})
 	}
 
 	fn block_status(&self, id: BlockId) -> BlockStatus {
+		if let BlockId::Pending = id {
+			return BlockStatus::Pending;
+		}
+
 		let chain = self.chain.read();
-		match Self::block_hash(&chain, id) {
+		match Self::block_hash(&chain, &self.miner, id) {
 			Some(ref hash) if chain.is_known(hash) => BlockStatus::InChain,
 			Some(hash) => self.block_queue.status(&hash).into(),
 			None => BlockStatus::Unknown
@@ -1342,13 +1409,18 @@ impl BlockChainClient for Client {
 	}
 
 	fn block_total_difficulty(&self, id: BlockId) -> Option<U256> {
-		if let BlockId::Pending = id {
-			if let Some(block) = self.miner.pending_block() {
-				return Some(*block.header.difficulty() + self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed"));
-			}
-		}
 		let chain = self.chain.read();
-		Self::block_hash(&chain, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
+		if let BlockId::Pending = id {
+			let latest_difficulty = self.block_total_difficulty(BlockId::Latest).expect("blocks in chain have details; qed");
+			let pending_difficulty = self.miner.pending_block_header(chain.best_block_number()).map(|header| *header.difficulty());
+			if let Some(difficulty) = pending_difficulty {
+				return Some(difficulty + latest_difficulty);
+			}
+			// fall back to latest
+			return Some(latest_difficulty);
+		}
+
+		Self::block_hash(&chain, &self.miner, id).and_then(|hash| chain.block_details(&hash)).map(|d| d.total_difficulty)
 	}
 
 	fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
@@ -1361,7 +1433,7 @@ impl BlockChainClient for Client {
 
 	fn block_hash(&self, id: BlockId) -> Option<H256> {
 		let chain = self.chain.read();
-		Self::block_hash(&chain, id)
+		Self::block_hash(&chain, &self.miner, id)
 	}
 
 	fn code(&self, address: &Address, id: BlockId) -> Option<Option<Bytes>> {
|
|||||||
if self.chain.read().is_known(&unverified.hash()) {
|
if self.chain.read().is_known(&unverified.hash()) {
|
||||||
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
|
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
|
||||||
}
|
}
|
||||||
if self.block_status(BlockId::Hash(unverified.parent_hash())) == BlockStatus::Unknown {
|
let status = self.block_status(BlockId::Hash(unverified.parent_hash()));
|
||||||
|
if status == BlockStatus::Unknown || status == BlockStatus::Pending {
|
||||||
return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
return Err(BlockImportError::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1540,7 +1613,8 @@ impl BlockChainClient for Client {
|
|||||||
if self.chain.read().is_known(&header.hash()) {
|
if self.chain.read().is_known(&header.hash()) {
|
||||||
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
|
return Err(BlockImportError::Import(ImportError::AlreadyInChain));
|
||||||
}
|
}
|
||||||
if self.block_status(BlockId::Hash(header.parent_hash())) == BlockStatus::Unknown {
|
let status = self.block_status(BlockId::Hash(header.parent_hash()));
|
||||||
|
if status == BlockStatus::Unknown || status == BlockStatus::Pending {
|
||||||
return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
|
return Err(BlockImportError::Block(BlockError::UnknownParent(header.parent_hash())));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1662,8 +1736,8 @@ impl BlockChainClient for Client {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn signing_network_id(&self) -> Option<u64> {
|
fn signing_chain_id(&self) -> Option<u64> {
|
||||||
self.engine.signing_network_id(&self.latest_env_info())
|
self.engine.signing_chain_id(&self.latest_env_info())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn block_extra_info(&self, id: BlockId) -> Option<BTreeMap<String, String>> {
|
fn block_extra_info(&self, id: BlockId) -> Option<BTreeMap<String, String>> {
|
||||||
@ -1686,7 +1760,7 @@ impl BlockChainClient for Client {
|
|||||||
fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> {
|
fn call_contract(&self, block_id: BlockId, address: Address, data: Bytes) -> Result<Bytes, String> {
|
||||||
let transaction = self.contract_call_tx(block_id, address, data);
|
let transaction = self.contract_call_tx(block_id, address, data);
|
||||||
|
|
||||||
self.call(&transaction, block_id, Default::default())
|
self.call(&transaction, Default::default(), block_id)
|
||||||
.map_err(|e| format!("{:?}", e))
|
.map_err(|e| format!("{:?}", e))
|
||||||
.map(|executed| {
|
.map(|executed| {
|
||||||
executed.output
|
executed.output
|
||||||
@ -1702,9 +1776,9 @@ impl BlockChainClient for Client {
|
|||||||
value: U256::zero(),
|
value: U256::zero(),
|
||||||
data: data,
|
data: data,
|
||||||
};
|
};
|
||||||
let network_id = self.engine.signing_network_id(&self.latest_env_info());
|
let chain_id = self.engine.signing_chain_id(&self.latest_env_info());
|
||||||
let signature = self.engine.sign(transaction.hash(network_id))?;
|
let signature = self.engine.sign(transaction.hash(chain_id))?;
|
||||||
let signed = SignedTransaction::new(transaction.with_signature(signature, network_id))?;
|
let signed = SignedTransaction::new(transaction.with_signature(signature, chain_id))?;
|
||||||
self.miner.import_own_transaction(self, signed.into())
|
self.miner.import_own_transaction(self, signed.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1893,7 +1967,7 @@ impl ProvingBlockChainClient for Client {
|
|||||||
let backend = state::backend::Proving::new(jdb.as_hashdb_mut());
|
let backend = state::backend::Proving::new(jdb.as_hashdb_mut());
|
||||||
|
|
||||||
let mut state = state.replace_backend(backend);
|
let mut state = state.replace_backend(backend);
|
||||||
let options = TransactOptions { tracing: false, vm_tracing: false, check_nonce: false };
|
let options = TransactOptions::with_no_tracing().dont_check_nonce();
|
||||||
let res = Executive::new(&mut state, &env_info, &*self.engine).transact(&transaction, options);
|
let res = Executive::new(&mut state, &env_info, &*self.engine).transact(&transaction, options);
|
||||||
|
|
||||||
match res {
|
match res {
|
||||||
|
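The `network_id` to `chain_id` renames above follow EIP-155 replay protection, where the chain ID is folded into the signature's `v` value that `transaction.hash(chain_id)` and `with_signature(..., chain_id)` operate on. A small standalone check of that arithmetic (the helper name here is illustrative):

// EIP-155: v = recovery_id + chain_id * 2 + 35, with recovery_id being 0 or 1.
fn eip155_v(recovery_id: u64, chain_id: u64) -> u64 {
	recovery_id + chain_id * 2 + 35
}

fn main() {
	// Foundation mainnet chain_id = 1, so signed transactions carry v = 37 or 38.
	assert_eq!(eip155_v(0, 1), 37);
	assert_eq!(eip155_v(1, 1), 38);
}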
@@ -18,9 +18,9 @@
 
 use std::fmt;
 use std::sync::Arc;
-use util::{self, U256, journaldb, trie};
+use util::{self, U256, H256, journaldb, trie};
 use util::kvdb::{self, KeyValueDB};
-use {state, state_db, client, executive, trace, db, spec};
+use {state, state_db, client, executive, trace, transaction, db, spec, pod_state};
 use factory::Factories;
 use evm::{self, VMType};
 use vm::{self, ActionParams};
@@ -33,9 +33,17 @@ pub enum EvmTestError {
 	/// EVM error.
 	Evm(vm::Error),
 	/// Initialization error.
-	Initialization(::error::Error),
+	ClientError(::error::Error),
 	/// Low-level database error.
 	Database(String),
+	/// Post-condition failure,
+	PostCondition(String),
+}
+
+impl<E: Into<::error::Error>> From<E> for EvmTestError {
+	fn from(err: E) -> Self {
+		EvmTestError::ClientError(err.into())
+	}
 }
 
 impl fmt::Display for EvmTestError {
|
|||||||
match *self {
|
match *self {
|
||||||
Trie(ref err) => write!(fmt, "Trie: {}", err),
|
Trie(ref err) => write!(fmt, "Trie: {}", err),
|
||||||
Evm(ref err) => write!(fmt, "EVM: {}", err),
|
Evm(ref err) => write!(fmt, "EVM: {}", err),
|
||||||
Initialization(ref err) => write!(fmt, "Initialization: {}", err),
|
ClientError(ref err) => write!(fmt, "{}", err),
|
||||||
Database(ref err) => write!(fmt, "DB: {}", err),
|
Database(ref err) => write!(fmt, "DB: {}", err),
|
||||||
|
PostCondition(ref err) => write!(fmt, "{}", err),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Simplified, single-block EVM test client.
|
use ethereum;
|
||||||
pub struct EvmTestClient {
|
use ethjson::state::test::ForkSpec;
|
||||||
state_db: state_db::StateDB,
|
|
||||||
factories: Factories,
|
lazy_static! {
|
||||||
spec: spec::Spec,
|
pub static ref FRONTIER: spec::Spec = ethereum::new_frontier_test();
|
||||||
|
pub static ref HOMESTEAD: spec::Spec = ethereum::new_homestead_test();
|
||||||
|
pub static ref EIP150: spec::Spec = ethereum::new_eip150_test();
|
||||||
|
pub static ref EIP161: spec::Spec = ethereum::new_eip161_test();
|
||||||
|
pub static ref _METROPOLIS: spec::Spec = ethereum::new_metropolis_test();
|
||||||
}
|
}
|
||||||
|
|
||||||
impl EvmTestClient {
|
/// Simplified, single-block EVM test client.
|
||||||
/// Creates new EVM test client with in-memory DB initialized with genesis of given Spec.
|
pub struct EvmTestClient<'a> {
|
||||||
pub fn new(spec: spec::Spec) -> Result<Self, EvmTestError> {
|
state: state::State<state_db::StateDB>,
|
||||||
let factories = Factories {
|
spec: &'a spec::Spec,
|
||||||
vm: evm::Factory::new(VMType::Interpreter, 5 * 1024),
|
}
|
||||||
trie: trie::TrieFactory::new(trie::TrieSpec::Secure),
|
|
||||||
accountdb: Default::default(),
|
impl<'a> EvmTestClient<'a> {
|
||||||
};
|
/// Converts a json spec definition into spec.
|
||||||
let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
pub fn spec_from_json(spec: &ForkSpec) -> Option<&'static spec::Spec> {
|
||||||
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
match *spec {
|
||||||
let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
ForkSpec::Frontier => Some(&*FRONTIER),
|
||||||
state_db = spec.ensure_db_good(state_db, &factories).map_err(EvmTestError::Initialization)?;
|
ForkSpec::Homestead => Some(&*HOMESTEAD),
|
||||||
// Write DB
|
ForkSpec::EIP150 => Some(&*EIP150),
|
||||||
{
|
ForkSpec::EIP158 => Some(&*EIP161),
|
||||||
let mut batch = kvdb::DBTransaction::new();
|
ForkSpec::Metropolis | ForkSpec::Byzantium | ForkSpec::Constantinople => None,
|
||||||
state_db.journal_under(&mut batch, 0, &spec.genesis_header().hash()).map_err(|e| EvmTestError::Initialization(e.into()))?;
|
|
||||||
db.write(batch).map_err(EvmTestError::Database)?;
|
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates new EVM test client with in-memory DB initialized with genesis of given Spec.
|
||||||
|
pub fn new(spec: &'a spec::Spec) -> Result<Self, EvmTestError> {
|
||||||
|
let factories = Self::factories();
|
||||||
|
let state = Self::state_from_spec(spec, &factories)?;
|
||||||
|
|
||||||
Ok(EvmTestClient {
|
Ok(EvmTestClient {
|
||||||
state_db,
|
state,
|
||||||
factories,
|
|
||||||
spec,
|
spec,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Call given contract.
|
/// Creates new EVM test client with in-memory DB initialized with given PodState.
|
||||||
|
pub fn from_pod_state(spec: &'a spec::Spec, pod_state: pod_state::PodState) -> Result<Self, EvmTestError> {
|
||||||
|
let factories = Self::factories();
|
||||||
|
let state = Self::state_from_pod(spec, &factories, pod_state)?;
|
||||||
|
|
||||||
|
Ok(EvmTestClient {
|
||||||
|
state,
|
||||||
|
spec,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn factories() -> Factories {
|
||||||
|
Factories {
|
||||||
|
vm: evm::Factory::new(VMType::Interpreter, 5 * 1024),
|
||||||
|
trie: trie::TrieFactory::new(trie::TrieSpec::Secure),
|
||||||
|
accountdb: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn state_from_spec(spec: &'a spec::Spec, factories: &Factories) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
||||||
|
let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
||||||
|
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
||||||
|
let mut state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
||||||
|
state_db = spec.ensure_db_good(state_db, factories)?;
|
||||||
|
|
||||||
|
let genesis = spec.genesis_header();
|
||||||
|
// Write DB
|
||||||
|
{
|
||||||
|
let mut batch = kvdb::DBTransaction::new();
|
||||||
|
state_db.journal_under(&mut batch, 0, &genesis.hash())?;
|
||||||
|
db.write(batch).map_err(EvmTestError::Database)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
state::State::from_existing(
|
||||||
|
state_db,
|
||||||
|
*genesis.state_root(),
|
||||||
|
spec.engine.account_start_nonce(0),
|
||||||
|
factories.clone()
|
||||||
|
).map_err(EvmTestError::Trie)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn state_from_pod(spec: &'a spec::Spec, factories: &Factories, pod_state: pod_state::PodState) -> Result<state::State<state_db::StateDB>, EvmTestError> {
|
||||||
|
let db = Arc::new(kvdb::in_memory(db::NUM_COLUMNS.expect("We use column-based DB; qed")));
|
||||||
|
let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, db::COL_STATE);
|
||||||
|
let state_db = state_db::StateDB::new(journal_db, 5 * 1024 * 1024);
|
||||||
|
let mut state = state::State::new(
|
||||||
|
state_db,
|
||||||
|
spec.engine.account_start_nonce(0),
|
||||||
|
factories.clone(),
|
||||||
|
);
|
||||||
|
state.populate_from(pod_state);
|
||||||
|
state.commit()?;
|
||||||
|
Ok(state)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute the VM given ActionParams and tracer.
|
||||||
|
/// Returns amount of gas left and the output.
|
||||||
pub fn call<T: trace::VMTracer>(&mut self, params: ActionParams, vm_tracer: &mut T)
|
pub fn call<T: trace::VMTracer>(&mut self, params: ActionParams, vm_tracer: &mut T)
|
||||||
-> Result<(U256, Vec<u8>), EvmTestError>
|
-> Result<(U256, Vec<u8>), EvmTestError>
|
||||||
{
|
{
|
||||||
let genesis = self.spec.genesis_header();
|
let genesis = self.spec.genesis_header();
|
||||||
let mut state = state::State::from_existing(self.state_db.boxed_clone(), *genesis.state_root(), self.spec.engine.account_start_nonce(0), self.factories.clone())
|
|
||||||
.map_err(EvmTestError::Trie)?;
|
|
||||||
let info = client::EnvInfo {
|
let info = client::EnvInfo {
|
||||||
number: genesis.number(),
|
number: genesis.number(),
|
||||||
author: *genesis.author(),
|
author: *genesis.author(),
|
||||||
@ -103,7 +173,7 @@ impl EvmTestClient {
|
|||||||
let mut substate = state::Substate::new();
|
let mut substate = state::Substate::new();
|
||||||
let mut tracer = trace::NoopTracer;
|
let mut tracer = trace::NoopTracer;
|
||||||
let mut output = vec![];
|
let mut output = vec![];
|
||||||
let mut executive = executive::Executive::new(&mut state, &info, &*self.spec.engine);
|
let mut executive = executive::Executive::new(&mut self.state, &info, &*self.spec.engine);
|
||||||
let (gas_left, _) = executive.call(
|
let (gas_left, _) = executive.call(
|
||||||
params,
|
params,
|
||||||
&mut substate,
|
&mut substate,
|
||||||
@ -114,4 +184,59 @@ impl EvmTestClient {
|
|||||||
|
|
||||||
Ok((gas_left, output))
|
Ok((gas_left, output))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Executes a SignedTransaction within context of the provided state and `EnvInfo`.
|
||||||
|
/// Returns the state root, gas left and the output.
|
||||||
|
pub fn transact<T: trace::VMTracer>(
|
||||||
|
&mut self,
|
||||||
|
env_info: &client::EnvInfo,
|
||||||
|
transaction: transaction::SignedTransaction,
|
||||||
|
vm_tracer: T,
|
||||||
|
) -> TransactResult {
|
||||||
|
let initial_gas = transaction.gas;
|
||||||
|
// Verify transaction
|
||||||
|
let is_ok = transaction.verify_basic(true, None, env_info.number >= self.spec.engine.params().eip86_transition);
|
||||||
|
if let Err(error) = is_ok {
|
||||||
|
return TransactResult::Err {
|
||||||
|
state_root: *self.state.root(),
|
||||||
|
error,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply transaction
|
||||||
|
let tracer = trace::NoopTracer;
|
||||||
|
let result = self.state.apply_with_tracing(&env_info, &*self.spec.engine, &transaction, tracer, vm_tracer);
|
||||||
|
|
||||||
|
match result {
|
||||||
|
Ok(result) => TransactResult::Ok {
|
||||||
|
state_root: *self.state.root(),
|
||||||
|
gas_left: initial_gas - result.receipt.gas_used,
|
||||||
|
output: result.output
|
||||||
|
},
|
||||||
|
Err(error) => TransactResult::Err {
|
||||||
|
state_root: *self.state.root(),
|
||||||
|
error,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A result of applying transaction to the state.
|
||||||
|
pub enum TransactResult {
|
||||||
|
/// Successful execution
|
||||||
|
Ok {
|
||||||
|
/// State root
|
||||||
|
state_root: H256,
|
||||||
|
/// Amount of gas left
|
||||||
|
gas_left: U256,
|
||||||
|
/// Output
|
||||||
|
output: Vec<u8>,
|
||||||
|
},
|
||||||
|
/// Transaction failed to run
|
||||||
|
Err {
|
||||||
|
/// State root
|
||||||
|
state_root: H256,
|
||||||
|
/// Execution error
|
||||||
|
error: ::error::Error,
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
@@ -27,7 +27,7 @@ mod client;
 pub use self::client::*;
 pub use self::config::{Mode, ClientConfig, DatabaseCompactionProfile, BlockChainConfig, VMType};
 pub use self::error::Error;
-pub use self::evm_test_client::{EvmTestClient, EvmTestError};
+pub use self::evm_test_client::{EvmTestClient, EvmTestError, TransactResult};
 pub use self::test_client::{TestBlockChainClient, EachBlockWith};
 pub use self::chain_notify::ChainNotify;
 pub use self::traits::{BlockChainClient, MiningBlockChainClient, EngineClient};
@@ -20,6 +20,7 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrder};
 use std::sync::Arc;
 use std::collections::{HashMap, BTreeMap};
 use std::mem;
+use itertools::Itertools;
 use rustc_hex::FromHex;
 use util::*;
 use rlp::*;
@@ -401,10 +402,18 @@ impl MiningBlockChainClient for TestBlockChainClient {
 }
 
 impl BlockChainClient for TestBlockChainClient {
-	fn call(&self, _t: &SignedTransaction, _block: BlockId, _analytics: CallAnalytics) -> Result<Executed, CallError> {
+	fn call(&self, _t: &SignedTransaction, _analytics: CallAnalytics, _block: BlockId) -> Result<Executed, CallError> {
 		self.execution_result.read().clone().unwrap()
 	}
 
+	fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result<Vec<Executed>, CallError> {
+		let mut res = Vec::with_capacity(txs.len());
+		for &(ref tx, analytics) in txs {
+			res.push(self.call(tx, analytics, block)?);
+		}
+		Ok(res)
+	}
+
 	fn estimate_gas(&self, _t: &SignedTransaction, _block: BlockId) -> Result<U256, CallError> {
 		Ok(21000.into())
 	}
@@ -423,7 +432,7 @@ impl BlockChainClient for TestBlockChainClient {
 
 	fn nonce(&self, address: &Address, id: BlockId) -> Option<U256> {
 		match id {
-			BlockId::Latest => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params().account_start_nonce)),
+			BlockId::Latest | BlockId::Pending => Some(self.nonces.read().get(address).cloned().unwrap_or(self.spec.params().account_start_nonce)),
 			_ => None,
 		}
 	}
@@ -438,16 +447,15 @@ impl BlockChainClient for TestBlockChainClient {
 
 	fn code(&self, address: &Address, id: BlockId) -> Option<Option<Bytes>> {
 		match id {
-			BlockId::Latest => Some(self.code.read().get(address).cloned()),
+			BlockId::Latest | BlockId::Pending => Some(self.code.read().get(address).cloned()),
 			_ => None,
 		}
 	}
 
 	fn balance(&self, address: &Address, id: BlockId) -> Option<U256> {
-		if let BlockId::Latest = id {
-			Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero))
-		} else {
-			None
+		match id {
+			BlockId::Latest | BlockId::Pending => Some(self.balances.read().get(address).cloned().unwrap_or_else(U256::zero)),
+			_ => None,
 		}
 	}
 
@@ -456,10 +464,9 @@ impl BlockChainClient for TestBlockChainClient {
 	}
 
 	fn storage_at(&self, address: &Address, position: &H256, id: BlockId) -> Option<H256> {
-		if let BlockId::Latest = id {
-			Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new))
-		} else {
-			None
+		match id {
+			BlockId::Latest | BlockId::Pending => Some(self.storage.read().get(&(address.clone(), position.clone())).cloned().unwrap_or_else(H256::new)),
+			_ => None,
 		}
 	}
 
@@ -548,7 +555,8 @@ impl BlockChainClient for TestBlockChainClient {
 		match id {
 			BlockId::Number(number) if (number as usize) < self.blocks.read().len() => BlockStatus::InChain,
 			BlockId::Hash(ref hash) if self.blocks.read().get(hash).is_some() => BlockStatus::InChain,
-			BlockId::Latest | BlockId::Pending | BlockId::Earliest => BlockStatus::InChain,
+			BlockId::Latest | BlockId::Earliest => BlockStatus::InChain,
+			BlockId::Pending => BlockStatus::Pending,
 			_ => BlockStatus::Unknown,
 		}
 	}
@@ -726,7 +734,7 @@ impl BlockChainClient for TestBlockChainClient {
 		self.miner.ready_transactions(info.best_block_number, info.best_block_timestamp)
 	}
 
-	fn signing_network_id(&self) -> Option<u64> { None }
+	fn signing_chain_id(&self) -> Option<u64> { None }
 
 	fn mode(&self) -> Mode { Mode::Active }
 
@@ -757,9 +765,9 @@ impl BlockChainClient for TestBlockChainClient {
 			value: U256::default(),
 			data: data,
 		};
-		let network_id = Some(self.spec.params().network_id);
-		let sig = self.spec.engine.sign(transaction.hash(network_id)).unwrap();
-		let signed = SignedTransaction::new(transaction.with_signature(sig, network_id)).unwrap();
+		let chain_id = Some(self.spec.chain_id());
+		let sig = self.spec.engine.sign(transaction.hash(chain_id)).unwrap();
+		let signed = SignedTransaction::new(transaction.with_signature(sig, chain_id)).unwrap();
 		self.miner.import_own_transaction(self, signed.into())
 	}
@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::collections::BTreeMap;
+use itertools::Itertools;
 
 use block::{OpenBlock, SealedBlock, ClosedBlock};
 use blockchain::TreeRoute;
@@ -33,7 +34,7 @@ use trace::LocalizedTrace;
 use transaction::{LocalizedTransaction, PendingTransaction, SignedTransaction};
 use verification::queue::QueueInfo as BlockQueueInfo;
 
-use util::{U256, Address, H256, H2048, Bytes, Itertools};
+use util::{U256, Address, H256, H2048, Bytes};
 use util::hashdb::DBValue;
 
 use types::ids::*;
@@ -182,7 +183,11 @@ pub trait BlockChainClient : Sync + Send {
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;
 
 	/// Makes a non-persistent transaction call.
-	fn call(&self, t: &SignedTransaction, block: BlockId, analytics: CallAnalytics) -> Result<Executed, CallError>;
+	fn call(&self, tx: &SignedTransaction, analytics: CallAnalytics, block: BlockId) -> Result<Executed, CallError>;
+
+	/// Makes multiple non-persistent but dependent transaction calls.
+	/// Returns a vector of successes or a failure if any of the transactions fails.
+	fn call_many(&self, txs: &[(SignedTransaction, CallAnalytics)], block: BlockId) -> Result<Vec<Executed>, CallError>;
 
 	/// Estimates how much gas will be necessary for a call.
 	fn estimate_gas(&self, t: &SignedTransaction, block: BlockId) -> Result<U256, CallError>;
@@ -235,8 +240,8 @@ pub trait BlockChainClient : Sync + Send {
 		corpus.into()
 	}
 
-	/// Get the preferred network ID to sign on
-	fn signing_network_id(&self) -> Option<u64>;
+	/// Get the preferred chain ID to sign on
+	fn signing_chain_id(&self) -> Option<u64>;
 
 	/// Get the mode.
 	fn mode(&self) -> Mode;
@ -679,6 +679,7 @@ impl Engine for AuthorityRound {
|
|||||||
|
|
||||||
// apply immediate transitions.
|
// apply immediate transitions.
|
||||||
if let Some(change) = self.validators.is_epoch_end(first, chain_head) {
|
if let Some(change) = self.validators.is_epoch_end(first, chain_head) {
|
||||||
|
let change = combine_proofs(chain_head.number(), &change, &[]);
|
||||||
return Some(change)
|
return Some(change)
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -792,9 +793,9 @@ impl Engine for AuthorityRound {
|
|||||||
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
|
fn verify_transaction_basic(&self, t: &UnverifiedTransaction, header: &Header) -> Result<(), Error> {
|
||||||
t.check_low_s()?;
|
t.check_low_s()?;
|
||||||
|
|
||||||
if let Some(n) = t.network_id() {
|
if let Some(n) = t.chain_id() {
|
||||||
if header.number() >= self.params().eip155_transition && n != self.params().chain_id {
|
if header.number() >= self.params().eip155_transition && n != self.params().chain_id {
|
||||||
return Err(TransactionError::InvalidNetworkId.into());
|
return Err(TransactionError::InvalidChainId.into());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -16,14 +16,12 @@
|
|||||||
|
|
||||||
//! Epoch verifiers and transitions.
|
//! Epoch verifiers and transitions.
|
||||||
|
|
||||||
|
use util::H256;
|
||||||
use error::Error;
|
use error::Error;
|
||||||
use header::Header;
|
use header::Header;
|
||||||
|
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
|
||||||
use util::H256;
|
|
||||||
|
|
||||||
/// A full epoch transition.
|
/// A full epoch transition.
|
||||||
#[derive(Debug, Clone)]
|
#[derive(Debug, Clone, RlpEncodable, RlpDecodable)]
|
||||||
pub struct Transition {
|
pub struct Transition {
|
||||||
/// Block hash at which the transition occurred.
|
/// Block hash at which the transition occurred.
|
||||||
pub block_hash: H256,
|
pub block_hash: H256,
|
||||||
@ -33,46 +31,14 @@ pub struct Transition {
 	pub proof: Vec<u8>,
 }

-impl Encodable for Transition {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.begin_list(3)
-			.append(&self.block_hash)
-			.append(&self.block_number)
-			.append(&self.proof);
-	}
-}
-
-impl Decodable for Transition {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		Ok(Transition {
-			block_hash: rlp.val_at(0)?,
-			block_number: rlp.val_at(1)?,
-			proof: rlp.val_at(2)?,
-		})
-	}
-}
-
 /// An epoch transition pending a finality proof.
 /// Not all transitions need one.
+#[derive(RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct PendingTransition {
 	/// "transition/epoch" proof from the engine.
 	pub proof: Vec<u8>,
 }

-impl Encodable for PendingTransition {
-	fn rlp_append(&self, s: &mut RlpStream) {
-		s.append(&self.proof);
-	}
-}
-
-impl Decodable for PendingTransition {
-	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-		Ok(PendingTransition {
-			proof: rlp.as_val()?,
-		})
-	}
-}
-
 /// Verifier for all blocks within an epoch with self-contained state.
 ///
 /// See docs on `Engine` relating to proving functions for more details.
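The derive attributes above are intended to keep the same wire format as the removed hand-written impls. A minimal round-trip sketch (illustrative only; placeholder values, assuming the `rlp` crate re-exports used elsewhere in this tree):

	#[test]
	fn transition_rlp_round_trip() {
		// Encode a transition and decode it back; the derived impls keep the
		// three-field list layout of the deleted manual code.
		let t = Transition { block_hash: H256::default(), block_number: 1, proof: vec![1, 2, 3] };
		let bytes = ::rlp::encode(&t).into_vec();
		let decoded: Transition = ::rlp::decode(&bytes);
		assert_eq!(decoded.block_number, 1);
		assert_eq!(decoded.proof, vec![1, 2, 3]);
	}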
@ -19,7 +19,7 @@ use util::Address;
 use builtin::Builtin;
 use engines::{Engine, Seal};
 use spec::CommonParams;
-use block::ExecutedBlock;
+use block::{ExecutedBlock, IsBlock};

 /// An engine which does not provide any consensus mechanism, just seals blocks internally.
 pub struct InstantSeal {
@ -56,8 +56,8 @@ impl Engine for InstantSeal {

 	fn seals_internally(&self) -> Option<bool> { Some(true) }

-	fn generate_seal(&self, _block: &ExecutedBlock) -> Seal {
-		Seal::Regular(Vec::new())
+	fn generate_seal(&self, block: &ExecutedBlock) -> Seal {
+		if block.transactions().is_empty() { Seal::None } else { Seal::Regular(Vec::new()) }
 	}
 }

@ -263,7 +263,7 @@ pub trait Engine : Sync + Send {
 	// TODO: Add flags for which bits of the transaction to check.
 	// TODO: consider including State in the params.
 	fn verify_transaction_basic(&self, t: &UnverifiedTransaction, _header: &Header) -> Result<(), Error> {
-		t.verify_basic(true, Some(self.params().network_id), true)?;
+		t.verify_basic(true, Some(self.params().chain_id), true)?;
 		Ok(())
 	}

@ -273,7 +273,7 @@ pub trait Engine : Sync + Send {
 	}

 	/// The network ID that transactions should be signed with.
-	fn signing_network_id(&self, _env_info: &EnvInfo) -> Option<u64> {
+	fn signing_chain_id(&self, _env_info: &EnvInfo) -> Option<u64> {
 		Some(self.params().chain_id)
 	}

@ -67,7 +67,7 @@ impl Engine for NullEngine {
 	}

 	fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
-		Some(Box::new(::snapshot::PowSnapshot(10000)))
+		Some(Box::new(::snapshot::PowSnapshot::new(10000, 10000)))
 	}

 	fn on_close_block(&self, block: &mut ExecutedBlock) -> Result<(), Error> {
@ -632,6 +632,7 @@ impl Engine for Tendermint {
 		let first = chain_head.number() == 0;

 		if let Some(change) = self.validators.is_epoch_end(first, chain_head) {
+			let change = combine_proofs(chain_head.number(), &change, &[]);
 			return Some(change)
 		} else if let Some(pending) = transition_store(chain_head.hash()) {
 			let signal_number = chain_head.number();
@ -1039,7 +1040,7 @@ mod tests {
 		client.miner().import_own_transaction(client.as_ref(), transaction.into()).unwrap();

 		// Propose
-		let proposal = Some(client.miner().pending_block().unwrap().header.bare_hash());
+		let proposal = Some(client.miner().pending_block(0).unwrap().header.bare_hash());
 		// Propose timeout
 		engine.step();

@ -452,7 +452,7 @@ mod tests {
 		let s0: Secret = "1".sha3().into();
 		let v0 = tap.insert_account(s0.clone(), "").unwrap();
 		let v1 = tap.insert_account("0".sha3().into(), "").unwrap();
-		let network_id = Spec::new_validator_safe_contract().network_id();
+		let chain_id = Spec::new_validator_safe_contract().chain_id();
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap));
 		client.engine().register_client(Arc::downgrade(&client));
 		let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap();
@ -466,7 +466,7 @@ mod tests {
 			action: Action::Call(validator_contract),
 			value: 0.into(),
 			data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
-		}.sign(&s0, Some(network_id));
+		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
 		client.update_sealing();
 		assert_eq!(client.chain_info().best_block_number, 1);
@ -478,7 +478,7 @@ mod tests {
 			action: Action::Call(validator_contract),
 			value: 0.into(),
 			data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
-		}.sign(&s0, Some(network_id));
+		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
 		client.update_sealing();
 		// The transaction is not yet included so still unable to seal.
@ -497,7 +497,7 @@ mod tests {
 			action: Action::Call(Address::default()),
 			value: 0.into(),
 			data: Vec::new(),
-		}.sign(&s0, Some(network_id));
+		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
 		client.update_sealing();
 		// Able to seal again.
@ -78,8 +78,8 @@ pub enum TransactionError {
 	RecipientBanned,
 	/// Contract creation code is banned.
 	CodeBanned,
-	/// Invalid network ID given.
-	InvalidNetworkId,
+	/// Invalid chain ID given.
+	InvalidChainId,
 }

 impl fmt::Display for TransactionError {
@ -103,7 +103,7 @@ impl fmt::Display for TransactionError {
 			SenderBanned => "Sender is temporarily banned.".into(),
 			RecipientBanned => "Recipient is temporarily banned.".into(),
 			CodeBanned => "Contract code is temporarily banned.".into(),
-			InvalidNetworkId => "Transaction of this network ID is not allowed on this chain.".into(),
+			InvalidChainId => "Transaction of this chain ID is not allowed on this chain.".into(),
 		};

 		f.write_fmt(format_args!("Transaction error ({})", msg))
@ -40,7 +40,10 @@ pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]);

 /// Number of blocks in an ethash snapshot.
 // make dependent on difficulty increment divisor?
-const SNAPSHOT_BLOCKS: u64 = 30000;
+const SNAPSHOT_BLOCKS: u64 = 5000;
+/// Maximum number of blocks allowed in an ethash snapshot.
+const MAX_SNAPSHOT_BLOCKS: u64 = 30000;

 /// Ethash params.
 #[derive(Debug, PartialEq)]
@ -185,7 +188,14 @@ impl Engine for Arc<Ethash> {

 	/// Additional engine-specific information for the user/developer concerning `header`.
 	fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
-		map!["nonce".to_owned() => format!("0x{}", header.nonce().hex()), "mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())]
+		if header.seal().len() == self.seal_fields() {
+			map![
+				"nonce".to_owned() => format!("0x{}", header.nonce().hex()),
+				"mixHash".to_owned() => format!("0x{}", header.mix_hash().hex())
+			]
+		} else {
+			BTreeMap::default()
+		}
 	}

 	fn schedule(&self, block_number: BlockNumber) -> Schedule {
@ -207,7 +217,7 @@ impl Engine for Arc<Ethash> {
 		}
 	}

-	fn signing_network_id(&self, env_info: &EnvInfo) -> Option<u64> {
+	fn signing_chain_id(&self, env_info: &EnvInfo) -> Option<u64> {
 		if env_info.number >= self.params().eip155_transition {
 			Some(self.params().chain_id)
 		} else {
@ -420,8 +430,8 @@ impl Engine for Arc<Ethash> {
 		}

 		let check_low_s = header.number() >= self.ethash_params.homestead_transition;
-		let network_id = if header.number() >= self.params().eip155_transition { Some(self.params().chain_id) } else { None };
-		t.verify_basic(check_low_s, network_id, false)?;
+		let chain_id = if header.number() >= self.params().eip155_transition { Some(self.params().chain_id) } else { None };
+		t.verify_basic(check_low_s, chain_id, false)?;
 		Ok(())
 	}

@ -430,7 +440,7 @@ impl Engine for Arc<Ethash> {
 	}

 	fn snapshot_components(&self) -> Option<Box<::snapshot::SnapshotComponents>> {
-		Some(Box::new(::snapshot::PowSnapshot(SNAPSHOT_BLOCKS)))
+		Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS)))
 	}
 }

@ -26,7 +26,7 @@ use evm::{CallType, Factory, Finalize, FinalizationResult};
 use vm::{self, Ext, CreateContractAddress, ReturnData, CleanDustMode, ActionParams, ActionValue};
 use wasm;
 use externalities::*;
-use trace::{FlatTrace, Tracer, NoopTracer, ExecutiveTracer, VMTrace, VMTracer, ExecutiveVMTracer, NoopVMTracer};
+use trace::{self, FlatTrace, VMTrace, Tracer, VMTracer};
 use transaction::{Action, SignedTransaction};
 use crossbeam;
 pub use executed::{Executed, ExecutionResult};
@ -66,16 +66,77 @@ pub fn contract_address(address_scheme: CreateContractAddress, sender: &Address,
 }

 /// Transaction execution options.
-#[derive(Default, Copy, Clone, PartialEq)]
-pub struct TransactOptions {
+#[derive(Copy, Clone, PartialEq)]
+pub struct TransactOptions<T, V> {
 	/// Enable call tracing.
-	pub tracing: bool,
+	pub tracer: T,
 	/// Enable VM tracing.
-	pub vm_tracing: bool,
+	pub vm_tracer: V,
 	/// Check transaction nonce before execution.
 	pub check_nonce: bool,
 }

+impl<T, V> TransactOptions<T, V> {
+	/// Create new `TransactOptions` with given tracer and VM tracer.
+	pub fn new(tracer: T, vm_tracer: V) -> Self {
+		TransactOptions {
+			tracer,
+			vm_tracer,
+			check_nonce: true,
+		}
+	}
+
+	/// Disables the nonce check
+	pub fn dont_check_nonce(mut self) -> Self {
+		self.check_nonce = false;
+		self
+	}
+}
+
+impl TransactOptions<trace::ExecutiveTracer, trace::ExecutiveVMTracer> {
+	/// Creates new `TransactOptions` with default tracing and VM tracing.
+	pub fn with_tracing_and_vm_tracing() -> Self {
+		TransactOptions {
+			tracer: trace::ExecutiveTracer::default(),
+			vm_tracer: trace::ExecutiveVMTracer::toplevel(),
+			check_nonce: true,
+		}
+	}
+}
+
+impl TransactOptions<trace::ExecutiveTracer, trace::NoopVMTracer> {
+	/// Creates new `TransactOptions` with default tracing and no VM tracing.
+	pub fn with_tracing() -> Self {
+		TransactOptions {
+			tracer: trace::ExecutiveTracer::default(),
+			vm_tracer: trace::NoopVMTracer,
+			check_nonce: true,
+		}
+	}
+}
+
+impl TransactOptions<trace::NoopTracer, trace::ExecutiveVMTracer> {
+	/// Creates new `TransactOptions` with no tracing and default VM tracing.
+	pub fn with_vm_tracing() -> Self {
+		TransactOptions {
+			tracer: trace::NoopTracer,
+			vm_tracer: trace::ExecutiveVMTracer::toplevel(),
+			check_nonce: true,
+		}
+	}
+}
+
+impl TransactOptions<trace::NoopTracer, trace::NoopVMTracer> {
+	/// Creates new `TransactOptions` without any tracing.
+	pub fn with_no_tracing() -> Self {
+		TransactOptions {
+			tracer: trace::NoopTracer,
+			vm_tracer: trace::NoopVMTracer,
+			check_nonce: true,
+		}
+	}
+}
+
 pub fn executor<E>(engine: &E, vm_factory: &Factory, params: &ActionParams)
 	-> Box<vm::Vm> where E: Engine + ?Sized
 {
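Callers now choose the tracer combination through the typed constructors instead of toggling booleans at run time. A short usage sketch (illustrative only; the bindings are hypothetical):

	// No tracing and no nonce check, e.g. for an eth_call-style execution.
	let opts = TransactOptions::with_no_tracing().dont_check_nonce();
	// Full tracing, the counterpart of the old { tracing: true, vm_tracing: true }.
	let traced = TransactOptions::with_tracing_and_vm_tracing();
	// Any custom pair of tracers can still be supplied explicitly.
	let custom = TransactOptions::new(trace::NoopTracer, trace::ExecutiveVMTracer::toplevel());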
@ -137,27 +198,21 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
 	}

 	/// This function should be used to execute transaction.
-	pub fn transact(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result<Executed, ExecutionError> {
-		let check = options.check_nonce;
-		match options.tracing {
-			true => match options.vm_tracing {
-				true => self.transact_with_tracer(t, check, ExecutiveTracer::default(), ExecutiveVMTracer::toplevel()),
-				false => self.transact_with_tracer(t, check, ExecutiveTracer::default(), NoopVMTracer),
-			},
-			false => match options.vm_tracing {
-				true => self.transact_with_tracer(t, check, NoopTracer, ExecutiveVMTracer::toplevel()),
-				false => self.transact_with_tracer(t, check, NoopTracer, NoopVMTracer),
-			},
-		}
+	pub fn transact<T, V>(&'a mut self, t: &SignedTransaction, options: TransactOptions<T, V>)
+		-> Result<Executed, ExecutionError> where T: Tracer, V: VMTracer,
+	{
+		self.transact_with_tracer(t, options.check_nonce, options.tracer, options.vm_tracer)
 	}

 	/// Execute a transaction in a "virtual" context.
 	/// This will ensure the caller has enough balance to execute the desired transaction.
 	/// Used for extra-block executions for things like consensus contracts and RPCs
-	pub fn transact_virtual(&'a mut self, t: &SignedTransaction, options: TransactOptions) -> Result<Executed, ExecutionError> {
+	pub fn transact_virtual<T, V>(&'a mut self, t: &SignedTransaction, options: TransactOptions<T, V>)
+		-> Result<Executed, ExecutionError> where T: Tracer, V: VMTracer,
+	{
 		let sender = t.sender();
 		let balance = self.state.balance(&sender)?;
-		let needed_balance = t.value + t.gas * t.gas_price;
+		let needed_balance = t.value.saturating_add(t.gas.saturating_mul(t.gas_price));
 		if balance < needed_balance {
 			// give the sender a sufficient balance
 			self.state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)?;
@ -167,7 +222,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
 	}

 	/// Execute transaction/call with tracing enabled
-	pub fn transact_with_tracer<T, V>(
+	fn transact_with_tracer<T, V>(
 		&'a mut self,
 		t: &SignedTransaction,
 		check_nonce: bool,
@ -261,7 +316,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
 		};

 		// finalize here!
-		Ok(self.finalize(t, substate, result, output, tracer.traces(), vm_tracer.drain())?)
+		Ok(self.finalize(t, substate, result, output, tracer.drain(), vm_tracer.drain())?)
 	}

 	fn exec_vm<T, V>(
@ -399,7 +454,7 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {

 		trace!(target: "executive", "res={:?}", res);

-		let traces = subtracer.traces();
+		let traces = subtracer.drain();
 		match res {
 			Ok(ref res) => tracer.trace_call(
 				trace_info,
@ -484,9 +539,9 @@ impl<'a, B: 'a + StateBackend, E: Engine + ?Sized> Executive<'a, B, E> {
 				gas - res.gas_left,
 				trace_output,
 				created,
-				subtracer.traces()
+				subtracer.drain()
 			),
-			Err(ref e) => tracer.trace_failed_create(trace_info, subtracer.traces(), e.into())
+			Err(ref e) => tracer.trace_failed_create(trace_info, subtracer.drain(), e.into())
 		};

 		self.enact_result(&res, substate, unconfirmed_substate);
@ -794,7 +849,7 @@ mod tests {
 			}),
 		}];

-		assert_eq!(tracer.traces(), expected_trace);
+		assert_eq!(tracer.drain(), expected_trace);

 		let expected_vm_trace = VMTrace {
 			parent_step: 0,
@ -887,7 +942,7 @@ mod tests {
 			}),
 		}];

-		assert_eq!(tracer.traces(), expected_trace);
+		assert_eq!(tracer.drain(), expected_trace);

 		let expected_vm_trace = VMTrace {
 			parent_step: 0,
@ -1138,7 +1193,7 @@ mod tests {

 		let executed = {
 			let mut ex = Executive::new(&mut state, &info, &engine);
-			let opts = TransactOptions { check_nonce: true, tracing: false, vm_tracing: false };
+			let opts = TransactOptions::with_no_tracing();
 			ex.transact(&t, opts).unwrap()
 		};

@ -1175,7 +1230,7 @@ mod tests {

 		let res = {
 			let mut ex = Executive::new(&mut state, &info, &engine);
-			let opts = TransactOptions { check_nonce: true, tracing: false, vm_tracing: false };
+			let opts = TransactOptions::with_no_tracing();
 			ex.transact(&t, opts)
 		};

@ -1208,7 +1263,7 @@ mod tests {

 		let res = {
 			let mut ex = Executive::new(&mut state, &info, &engine);
-			let opts = TransactOptions { check_nonce: true, tracing: false, vm_tracing: false };
+			let opts = TransactOptions::with_no_tracing();
 			ex.transact(&t, opts)
 		};

@ -1241,7 +1296,7 @@ mod tests {

 		let res = {
 			let mut ex = Executive::new(&mut state, &info, &engine);
-			let opts = TransactOptions { check_nonce: true, tracing: false, vm_tracing: false };
+			let opts = TransactOptions::with_no_tracing();
 			ex.transact(&t, opts)
 		};

@ -15,23 +15,13 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use super::test_common::*;
-use tests::helpers::*;
 use pod_state::PodState;
-use ethereum;
-use spec::Spec;
+use trace;
+use client::{EvmTestClient, EvmTestError, TransactResult};
 use ethjson;
-use ethjson::state::test::ForkSpec;
 use transaction::SignedTransaction;
 use vm::EnvInfo;

-lazy_static! {
-	pub static ref FRONTIER: Spec = ethereum::new_frontier_test();
-	pub static ref HOMESTEAD: Spec = ethereum::new_homestead_test();
-	pub static ref EIP150: Spec = ethereum::new_eip150_test();
-	pub static ref EIP161: Spec = ethereum::new_eip161_test();
-	pub static ref _METROPOLIS: Spec = ethereum::new_metropolis_test();
-}
-
 pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
 	::ethcore_logger::init_log();
 	let tests = ethjson::state::test::Test::load(json_data).unwrap();
@ -43,35 +33,49 @@ pub fn json_chain_test(json_data: &[u8]) -> Vec<String> {
 		let env: EnvInfo = test.env.into();
 		let pre: PodState = test.pre_state.into();

-		for (spec, states) in test.post_states {
+		for (spec_name, states) in test.post_states {
 			let total = states.len();
-			let engine = match spec {
-				ForkSpec::Frontier => &FRONTIER.engine,
-				ForkSpec::Homestead => &HOMESTEAD.engine,
-				ForkSpec::EIP150 => &EIP150.engine,
-				ForkSpec::EIP158 => &EIP161.engine,
-				ForkSpec::Metropolis => continue,
+			let spec = match EvmTestClient::spec_from_json(&spec_name) {
+				Some(spec) => spec,
+				None => {
+					println!(" - {} | {:?} Ignoring tests because of missing spec", name, spec_name);
+					continue;
+				}
 			};

 			for (i, state) in states.into_iter().enumerate() {
-				let info = format!(" - {} | {:?} ({}/{}) ...", name, spec, i + 1, total);
+				let info = format!(" - {} | {:?} ({}/{}) ...", name, spec_name, i + 1, total);

 				let post_root: H256 = state.hash.into();
 				let transaction: SignedTransaction = multitransaction.select(&state.indexes).into();
-				let mut state = get_temp_state();
-				state.populate_from(pre.clone());
-				if transaction.verify_basic(true, None, env.number >= engine.params().eip86_transition).is_ok() {
-					state.commit().expect(&format!("State test {} failed due to internal error.", name));
-					let _res = state.apply(&env, &**engine, &transaction, false);
-				} else {
-					let _rest = state.commit();
-				}
-				if state.root() != &post_root {
-					println!("{} !!! State mismatch (got: {}, expect: {}", info, state.root(), post_root);
-					flushln!("{} fail", info);
-					failed.push(name.clone());
-				} else {
-					flushln!("{} ok", info);
+				let result = || -> Result<_, EvmTestError> {
+					Ok(EvmTestClient::from_pod_state(spec, pre.clone())?
+						.transact(&env, transaction, trace::NoopVMTracer))
+				};
+				match result() {
+					Err(err) => {
+						println!("{} !!! Unexpected internal error: {:?}", info, err);
+						flushln!("{} fail", info);
+						failed.push(name.clone());
+					},
+					Ok(TransactResult::Ok { state_root, .. }) if state_root != post_root => {
+						println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root);
+						flushln!("{} fail", info);
+						failed.push(name.clone());
+					},
+					Ok(TransactResult::Err { state_root, ref error }) if state_root != post_root => {
+						println!("{} !!! State mismatch (got: {}, expect: {}", info, state_root, post_root);
+						println!("{} !!! Execution error: {:?}", info, error);
+						flushln!("{} fail", info);
+						failed.push(name.clone());
+					},
+					Ok(TransactResult::Err { error, .. }) => {
+						flushln!("{} ok ({:?})", info, error);
+					},
+					Ok(_) => {
+						flushln!("{} ok", info);
+					},
 				}
 			}
 		}
@ -36,25 +36,25 @@ fn do_json_test(json_data: &[u8]) -> Vec<String> {
 			Some(x) if x < 3_000_000 => &homestead_schedule,
 			Some(_) => &metropolis_schedule
 		};
-		let allow_network_id_of_one = number.map_or(false, |n| n >= 2_675_000);
+		let allow_chain_id_of_one = number.map_or(false, |n| n >= 2_675_000);
 		let allow_unsigned = number.map_or(false, |n| n >= 3_000_000);

 		let rlp: Vec<u8> = test.rlp.into();
 		let res = UntrustedRlp::new(&rlp)
 			.as_val()
 			.map_err(From::from)
-			.and_then(|t: UnverifiedTransaction| t.validate(schedule, schedule.have_delegate_call, allow_network_id_of_one, allow_unsigned));
+			.and_then(|t: UnverifiedTransaction| t.validate(schedule, schedule.have_delegate_call, allow_chain_id_of_one, allow_unsigned));

 		fail_unless(test.transaction.is_none() == res.is_err(), "Validity different");
 		if let (Some(tx), Some(sender)) = (test.transaction, test.sender) {
 			let t = res.unwrap();
 			fail_unless(SignedTransaction::new(t.clone()).unwrap().sender() == sender.into(), "sender mismatch");
-			let is_acceptable_network_id = match t.network_id() {
+			let is_acceptable_chain_id = match t.chain_id() {
 				None => true,
-				Some(1) if allow_network_id_of_one => true,
+				Some(1) if allow_chain_id_of_one => true,
 				_ => false,
 			};
-			fail_unless(is_acceptable_network_id, "Network ID unacceptable");
+			fail_unless(is_acceptable_chain_id, "Network ID unacceptable");
 			let data: Vec<u8> = tx.data.into();
 			fail_unless(t.data == data, "data mismatch");
 			fail_unless(t.gas_price == tx.gas_price.into(), "gas_price mismatch");
@ -101,6 +101,9 @@ extern crate num;
 extern crate price_info;
 extern crate rand;
 extern crate rlp;
+
+#[macro_use]
+extern crate rlp_derive;
 extern crate rustc_hex;
 extern crate semver;
 extern crate stats;
@ -21,8 +21,8 @@ use std::sync::Arc;
 use util::*;
 use using_queue::{UsingQueue, GetAction};
 use account_provider::{AccountProvider, SignError as AccountError};
-use state::{State, CleanupMode};
-use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockId, CallAnalytics, TransactionId};
+use state::State;
+use client::{MiningBlockChainClient, BlockId, TransactionId};
 use client::TransactionImportResult;
 use executive::contract_address;
 use block::{ClosedBlock, IsBlock, Block};
@ -39,7 +39,7 @@ use miner::local_transactions::{Status as LocalTransactionStatus};
 use miner::service_transaction_checker::ServiceTransactionChecker;
 use price_info::{Client as PriceInfoClient, PriceInfo};
 use price_info::fetch::Client as FetchClient;
-use header::BlockNumber;
+use header::{Header, BlockNumber};

 /// Different possible definitions for pending transaction set.
 #[derive(Debug, PartialEq)]
@ -331,13 +331,28 @@ impl Miner {
 	}

 	/// Get `Some` `clone()` of the current pending block's state or `None` if we're not sealing.
-	pub fn pending_state(&self) -> Option<State<::state_db::StateDB>> {
-		self.sealing_work.lock().queue.peek_last_ref().map(|b| b.block().fields().state.clone())
+	pub fn pending_state(&self, latest_block_number: BlockNumber) -> Option<State<::state_db::StateDB>> {
+		self.map_pending_block(|b| b.state().clone(), latest_block_number)
 	}

 	/// Get `Some` `clone()` of the current pending block or `None` if we're not sealing.
-	pub fn pending_block(&self) -> Option<Block> {
-		self.sealing_work.lock().queue.peek_last_ref().map(|b| b.to_base())
+	pub fn pending_block(&self, latest_block_number: BlockNumber) -> Option<Block> {
+		self.map_pending_block(|b| b.to_base(), latest_block_number)
+	}
+
+	/// Get `Some` `clone()` of the current pending block header or `None` if we're not sealing.
+	pub fn pending_block_header(&self, latest_block_number: BlockNumber) -> Option<Header> {
+		self.map_pending_block(|b| b.header().clone(), latest_block_number)
+	}
+
+	fn map_pending_block<F, T>(&self, f: F, latest_block_number: BlockNumber) -> Option<T> where
+		F: FnOnce(&ClosedBlock) -> T,
+	{
+		self.from_pending_block(
+			latest_block_number,
+			|| None,
+			|block| Some(f(block)),
+		)
 	}

 	#[cfg_attr(feature="dev", allow(match_same_arms))]
@ -679,7 +694,7 @@ impl Miner {
 	#[cfg_attr(feature="dev", allow(wrong_self_convention))]
 	#[cfg_attr(feature="dev", allow(redundant_closure))]
 	fn from_pending_block<H, F, G>(&self, latest_block_number: BlockNumber, from_chain: F, map_block: G) -> H
-		where F: Fn() -> H, G: Fn(&ClosedBlock) -> H {
+		where F: Fn() -> H, G: FnOnce(&ClosedBlock) -> H {
 		let sealing_work = self.sealing_work.lock();
 		sealing_work.queue.peek_last_ref().map_or_else(
 			|| from_chain(),
@ -717,84 +732,6 @@ impl MinerService for Miner {
 		}
 	}

-	fn call(&self, client: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError> {
-		let sealing_work = self.sealing_work.lock();
-		match sealing_work.queue.peek_last_ref() {
-			Some(work) => {
-				let block = work.block();
-
-				// TODO: merge this code with client.rs's fn call somwhow.
-				let header = block.header();
-				let last_hashes = Arc::new(client.last_hashes());
-				let env_info = EnvInfo {
-					number: header.number(),
-					author: *header.author(),
-					timestamp: header.timestamp(),
-					difficulty: *header.difficulty(),
-					last_hashes: last_hashes,
-					gas_used: U256::zero(),
-					gas_limit: U256::max_value(),
-				};
-				// that's just a copy of the state.
-				let mut state = block.state().clone();
-				let original_state = if analytics.state_diffing { Some(state.clone()) } else { None };
-
-				let sender = t.sender();
-				let balance = state.balance(&sender).map_err(ExecutionError::from)?;
-				let needed_balance = t.value + t.gas * t.gas_price;
-				if balance < needed_balance {
-					// give the sender a sufficient balance
-					state.add_balance(&sender, &(needed_balance - balance), CleanupMode::NoEmpty)
-						.map_err(ExecutionError::from)?;
-				}
-				let options = TransactOptions { tracing: analytics.transaction_tracing, vm_tracing: analytics.vm_tracing, check_nonce: false };
-				let mut ret = Executive::new(&mut state, &env_info, &*self.engine).transact(t, options)?;
-
-				// TODO gav move this into Executive.
-				if let Some(original) = original_state {
-					ret.state_diff = Some(state.diff_from(original).map_err(ExecutionError::from)?);
-				}
-
-				Ok(ret)
-			},
-			None => client.call(t, BlockId::Latest, analytics)
-		}
-	}
-
-	// TODO: The `chain.latest_x` actually aren't infallible, they just panic on corruption.
-	// TODO: return trie::Result<T> here, or other.
-	fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<U256> {
-		self.from_pending_block(
-			chain.chain_info().best_block_number,
-			|| Some(chain.latest_balance(address)),
-			|b| b.block().fields().state.balance(address).ok(),
-		)
-	}
-
-	fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option<H256> {
-		self.from_pending_block(
-			chain.chain_info().best_block_number,
-			|| Some(chain.latest_storage_at(address, position)),
-			|b| b.block().fields().state.storage_at(address, position).ok(),
-		)
-	}
-
-	fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<U256> {
-		self.from_pending_block(
-			chain.chain_info().best_block_number,
-			|| Some(chain.latest_nonce(address)),
-			|b| b.block().fields().state.nonce(address).ok(),
-		)
-	}
-
-	fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Option<Bytes>> {
-		self.from_pending_block(
-			chain.chain_info().best_block_number,
-			|| Some(chain.latest_code(address)),
-			|b| b.block().fields().state.code(address).ok().map(|c| c.map(|c| (&*c).clone()))
-		)
-	}
-
 	fn set_author(&self, author: Address) {
 		if self.engine.seals_internally().is_some() {
 			let mut sealing_work = self.sealing_work.lock();
@ -1369,10 +1306,10 @@ mod tests {
 	}

 	fn transaction() -> SignedTransaction {
-		transaction_with_network_id(2)
+		transaction_with_chain_id(2)
 	}

-	fn transaction_with_network_id(id: u64) -> SignedTransaction {
+	fn transaction_with_chain_id(chain_id: u64) -> SignedTransaction {
 		let keypair = Random.generate().unwrap();
 		Transaction {
 			action: Action::Create,
@ -1381,7 +1318,7 @@ mod tests {
 			gas: U256::from(100_000),
 			gas_price: U256::zero(),
 			nonce: U256::zero(),
-		}.sign(keypair.secret(), Some(id))
+		}.sign(keypair.secret(), Some(chain_id))
 	}

 	#[test]
@ -1462,18 +1399,18 @@ mod tests {

 		let client = generate_dummy_client(2);

-		assert_eq!(miner.import_external_transactions(&*client, vec![transaction_with_network_id(spec.network_id()).into()]).pop().unwrap().unwrap(), TransactionImportResult::Current);
+		assert_eq!(miner.import_external_transactions(&*client, vec![transaction_with_chain_id(spec.chain_id()).into()]).pop().unwrap().unwrap(), TransactionImportResult::Current);

 		miner.update_sealing(&*client);
 		client.flush_queue();
-		assert!(miner.pending_block().is_none());
+		assert!(miner.pending_block(0).is_none());
 		assert_eq!(client.chain_info().best_block_number, 3 as BlockNumber);

-		assert_eq!(miner.import_own_transaction(&*client, PendingTransaction::new(transaction_with_network_id(spec.network_id()).into(), None)).unwrap(), TransactionImportResult::Current);
+		assert_eq!(miner.import_own_transaction(&*client, PendingTransaction::new(transaction_with_chain_id(spec.chain_id()).into(), None)).unwrap(), TransactionImportResult::Current);

 		miner.update_sealing(&*client);
 		client.flush_queue();
-		assert!(miner.pending_block().is_none());
+		assert!(miner.pending_block(0).is_none());
 		assert_eq!(client.chain_info().best_block_number, 4 as BlockNumber);
 	}

@ -62,12 +62,12 @@ pub use self::stratum::{Stratum, Error as StratumError, Options as StratumOption

 use std::collections::BTreeMap;
 use util::{H256, U256, Address, Bytes};
-use client::{MiningBlockChainClient, Executed, CallAnalytics};
+use client::{MiningBlockChainClient};
 use block::ClosedBlock;
 use header::BlockNumber;
 use receipt::{RichReceipt, Receipt};
-use error::{Error, CallError};
-use transaction::{UnverifiedTransaction, PendingTransaction, SignedTransaction};
+use error::{Error};
+use transaction::{UnverifiedTransaction, PendingTransaction};

 /// Miner client API
 pub trait MinerService : Send + Sync {
@ -185,21 +185,6 @@ pub trait MinerService : Send + Sync {

 	/// Suggested gas limit.
 	fn sensible_gas_limit(&self) -> U256 { 21000.into() }
-
-	/// Latest account balance in pending state.
-	fn balance(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<U256>;
-
-	/// Call into contract code using pending state.
-	fn call(&self, chain: &MiningBlockChainClient, t: &SignedTransaction, analytics: CallAnalytics) -> Result<Executed, CallError>;
-
-	/// Get storage value in pending state.
-	fn storage_at(&self, chain: &MiningBlockChainClient, address: &Address, position: &H256) -> Option<H256>;
-
-	/// Get account nonce in pending state.
-	fn nonce(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<U256>;
-
-	/// Get contract code in pending state.
-	fn code(&self, chain: &MiningBlockChainClient, address: &Address) -> Option<Option<Bytes>>;
 }

 /// Mining status
@ -32,7 +32,6 @@ use util::Mutex;
 use miner::{self, Miner, MinerService};
 use client::Client;
 use block::IsBlock;
-use std::str::FromStr;
 use rlp::encode;

 /// Configures stratum server options.
@ -60,7 +59,7 @@ impl SubmitPayload {
 			return Err(PayloadError::ArgumentsAmountUnexpected(payload.len()));
 		}

-		let nonce = match H64::from_str(clean_0x(&payload[0])) {
+		let nonce = match clean_0x(&payload[0]).parse::<H64>() {
 			Ok(nonce) => nonce,
 			Err(e) => {
 				warn!(target: "stratum", "submit_work ({}): invalid nonce ({:?})", &payload[0], e);
@ -68,7 +67,7 @@ impl SubmitPayload {
 			}
 		};

-		let pow_hash = match H256::from_str(clean_0x(&payload[1])) {
+		let pow_hash = match clean_0x(&payload[1]).parse::<H256>() {
 			Ok(pow_hash) => pow_hash,
 			Err(e) => {
 				warn!(target: "stratum", "submit_work ({}): invalid hash ({:?})", &payload[1], e);
@ -76,7 +75,7 @@ impl SubmitPayload {
 			}
 		};

-		let mix_hash = match H256::from_str(clean_0x(&payload[2])) {
+		let mix_hash = match clean_0x(&payload[2]).parse::<H256>() {
 			Ok(mix_hash) => mix_hash,
 			Err(e) => {
 				warn!(target: "stratum", "submit_work ({}): invalid mix-hash ({:?})", &payload[2], e);
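With `FromStr` no longer imported directly, the hex fields go through `str::parse` instead; both routes use the same `FromStr` impl on the hash types. A small illustrative sketch (hypothetical input value):

	// Same behaviour as H64::from_str(...), just written via .parse().
	let nonce: H64 = clean_0x("0x0000000000000001").parse().expect("8-byte hex nonce");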
@ -133,7 +132,7 @@ impl JobDispatcher for StratumJobDispatcher {

 	fn submit(&self, payload: Vec<String>) -> Result<(), StratumServiceError> {
 		let payload = SubmitPayload::from_args(payload).map_err(|e|
-			StratumServiceError::Dispatch(format!("{}", e))
+			StratumServiceError::Dispatch(e.to_string())
 		)?;

 		trace!(
@ -144,14 +143,16 @@ impl JobDispatcher for StratumJobDispatcher {
 			payload.mix_hash,
 		);

-		self.with_core_void(|client, miner| {
+		self.with_core_result(|client, miner| {
 			let seal = vec![encode(&payload.mix_hash).into_vec(), encode(&payload.nonce).into_vec()];
-			if let Err(e) = miner.submit_seal(&*client, payload.pow_hash, seal) {
-				warn!(target: "stratum", "submit_seal error: {:?}", e);
-			};
-		});
-
-		Ok(())
+			match miner.submit_seal(&*client, payload.pow_hash, seal) {
+				Ok(_) => Ok(()),
+				Err(e) => {
+					warn!(target: "stratum", "submit_seal error: {:?}", e);
+					Err(StratumServiceError::Dispatch(e.to_string()))
+				}
+			}
+		})
 	}
 }

@ -181,8 +182,11 @@ impl StratumJobDispatcher {
 		self.client.upgrade().and_then(|client| self.miner.upgrade().and_then(|miner| (f)(client, miner)))
 	}

-	fn with_core_void<F>(&self, f: F) where F: Fn(Arc<Client>, Arc<Miner>) {
-		self.client.upgrade().map(|client| self.miner.upgrade().map(|miner| (f)(client, miner)));
+	fn with_core_result<F>(&self, f: F) -> Result<(), StratumServiceError> where F: Fn(Arc<Client>, Arc<Miner>) -> Result<(), StratumServiceError> {
+		match (self.client.upgrade(), self.miner.upgrade()) {
+			(Some(client), Some(miner)) => f(client, miner),
+			_ => Ok(()),
+		}
 	}
 }

@ -230,7 +234,7 @@ impl Stratum {
 		let dispatcher = Arc::new(StratumJobDispatcher::new(miner, client));

 		let stratum_svc = StratumService::start(
-			&SocketAddr::new(IpAddr::from_str(&options.listen_addr)?, options.port),
+			&SocketAddr::new(options.listen_addr.parse::<IpAddr>()?, options.port),
 			dispatcher.clone(),
 			options.secret.clone(),
 		)?;
@ -506,8 +506,6 @@ pub struct AccountDetails {
 	pub balance: U256,
 }

-/// Transactions with `gas > (gas_limit + gas_limit * Factor(in percents))` are not imported to the queue.
-const GAS_LIMIT_HYSTERESIS: usize = 200; // (100/GAS_LIMIT_HYSTERESIS) %
 /// Transaction with the same (sender, nonce) can be replaced only if
 /// `new_gas_price > old_gas_price + old_gas_price >> SHIFT`
 const GAS_PRICE_BUMP_SHIFT: usize = 3; // 2 = 25%, 3 = 12.5%, 4 = 6.25%
@ -570,8 +568,8 @@ pub struct TransactionQueue {
 	minimal_gas_price: U256,
 	/// The maximum amount of gas any individual transaction may use.
 	tx_gas_limit: U256,
-	/// Current gas limit (block gas limit * factor). Transactions above the limit will not be accepted (default to !0)
-	total_gas_limit: U256,
+	/// Current gas limit (block gas limit). Transactions above the limit will not be accepted (default to !0)
+	block_gas_limit: U256,
 	/// Maximal time transaction may occupy the queue.
 	/// When we reach `max_time_in_queue / 2^3` we re-validate
 	/// account balance.
@ -631,7 +629,7 @@ impl TransactionQueue {
 		TransactionQueue {
 			strategy,
 			minimal_gas_price: U256::zero(),
-			total_gas_limit: !U256::zero(),
+			block_gas_limit: !U256::zero(),
 			tx_gas_limit,
 			max_time_in_queue: DEFAULT_QUEUING_PERIOD,
 			current,
@ -674,16 +672,10 @@ impl TransactionQueue {
 		self.current.gas_price_entry_limit()
 	}

-	/// Sets new gas limit. Transactions with gas slightly (`GAS_LIMIT_HYSTERESIS`) above the limit won't be imported.
+	/// Sets new gas limit. Transactions with gas over the limit will not be accepted.
 	/// Any transaction already imported to the queue is not affected.
 	pub fn set_gas_limit(&mut self, gas_limit: U256) {
-		let extra = gas_limit / U256::from(GAS_LIMIT_HYSTERESIS);
-
-		let total_gas_limit = match gas_limit.overflowing_add(extra) {
-			(_, true) => !U256::zero(),
-			(val, false) => val,
-		};
-		self.total_gas_limit = total_gas_limit;
+		self.block_gas_limit = gas_limit;
 	}

 	/// Sets new total gas limit.
@ -819,13 +811,13 @@ impl TransactionQueue {
 			}));
 		}

-		let gas_limit = cmp::min(self.tx_gas_limit, self.total_gas_limit);
+		let gas_limit = cmp::min(self.tx_gas_limit, self.block_gas_limit);
 		if tx.gas > gas_limit {
 			trace!(target: "txqueue",
 				"Dropping transaction above gas limit: {:?} ({} > min({}, {}))",
 				tx.hash(),
 				tx.gas,
-				self.total_gas_limit,
+				self.block_gas_limit,
 				self.tx_gas_limit
 			);
 			return Err(Error::Transaction(TransactionError::GasLimitExceeded {
@ -1922,13 +1914,13 @@ pub mod test {
 		// given
 		let mut txq = TransactionQueue::default();
 		txq.set_gas_limit(U256::zero());
-		assert_eq!(txq.total_gas_limit, U256::zero());
+		assert_eq!(txq.block_gas_limit, U256::zero());

 		// when
 		txq.set_gas_limit(!U256::zero());

 		// then
-		assert_eq!(txq.total_gas_limit, !U256::zero());
+		assert_eq!(txq.block_gas_limit, !U256::zero());
 	}

 	#[test]
@ -1945,7 +1937,7 @@ pub mod test {

 		// then
 		assert_eq!(unwrap_tx_err(res), TransactionError::GasLimitExceeded {
-			limit: U256::from(50_250), // Should be 100.5% of set_gas_limit
+			limit: U256::from(50_000),
 			got: gas,
 		});
 		let stats = txq.status();
@ -16,6 +16,7 @@

 use std::fmt;
 use std::collections::BTreeMap;
+use itertools::Itertools;
 use util::*;
 use state::Account;
 use ethjson;
@ -18,6 +18,7 @@

 use std::fmt;
 use std::collections::BTreeMap;
+use itertools::Itertools;
 use util::*;
 use pod_account::{self, PodAccount};
 use types::state_diff::StateDiff;
@ -37,11 +37,24 @@ use rand::OsRng;
 /// Snapshot creation and restoration for PoW chains.
 /// This includes blocks from the head of the chain as a
 /// loose assurance that the chain is valid.
-///
-/// The field is the number of blocks from the head of the chain
-/// to include in the snapshot.
 #[derive(Clone, Copy, PartialEq)]
-pub struct PowSnapshot(pub u64);
+pub struct PowSnapshot {
+	/// Number of blocks from the head of the chain
+	/// to include in the snapshot.
+	pub blocks: u64,
+	/// Number of blocks to allow in the snapshot when restoring.
+	pub max_restore_blocks: u64,
+}
+
+impl PowSnapshot {
+	/// Create a new instance.
+	pub fn new(blocks: u64, max_restore_blocks: u64) -> PowSnapshot {
+		PowSnapshot {
+			blocks: blocks,
+			max_restore_blocks: max_restore_blocks,
+		}
+	}
+}

 impl SnapshotComponents for PowSnapshot {
 	fn chunk_all(
@ -57,7 +70,7 @@ impl SnapshotComponents for PowSnapshot {
 			current_hash: block_at,
 			writer: chunk_sink,
 			preferred_size: preferred_size,
-		}.chunk_all(self.0)
+		}.chunk_all(self.blocks)
 	}

 	fn rebuilder(
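Snapshot taking and snapshot restoring are now bounded separately. A minimal construction sketch (values taken from the ethash constants above; shown only for illustration):

	// 5000 blocks are written into a new snapshot, while up to 30000 are
	// still accepted when restoring one produced by an older client.
	let pow = PowSnapshot::new(5000, 30000);
	assert_eq!(pow.blocks, 5000);
	assert_eq!(pow.max_restore_blocks, 30000);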
@ -66,7 +79,7 @@ impl SnapshotComponents for PowSnapshot {
 		db: Arc<KeyValueDB>,
 		manifest: &ManifestData,
 	) -> Result<Box<Rebuilder>, ::error::Error> {
-		PowRebuilder::new(chain, db, manifest, self.0).map(|r| Box::new(r) as Box<_>)
+		PowRebuilder::new(chain, db, manifest, self.max_restore_blocks).map(|r| Box::new(r) as Box<_>)
 	}

 	fn min_supported_version(&self) -> u64 { ::snapshot::MIN_SUPPORTED_STATE_CHUNK_VERSION }
@ -218,7 +231,7 @@ impl Rebuilder for PowRebuilder {
|
|||||||
trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);
|
trace!(target: "snapshot", "restoring block chunk with {} blocks.", item_count - 3);
|
||||||
|
|
||||||
if self.fed_blocks + num_blocks > self.snapshot_blocks {
|
if self.fed_blocks + num_blocks > self.snapshot_blocks {
|
||||||
return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks).into())
|
return Err(Error::TooManyBlocks(self.snapshot_blocks, self.fed_blocks + num_blocks).into())
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo: assert here that these values are consistent with chunks being in order.
|
// todo: assert here that these values are consistent with chunks being in order.
|
||||||
|
@@ -27,7 +27,7 @@ use std::path::{Path, PathBuf};

 use util::Bytes;
 use util::hash::H256;
-use rlp::{self, Encodable, RlpStream, UntrustedRlp};
+use rlp::{RlpStream, UntrustedRlp};

 use super::ManifestData;
@@ -49,24 +49,9 @@ pub trait SnapshotWriter {
 }

 // (hash, len, offset)
+#[derive(RlpEncodable, RlpDecodable)]
 struct ChunkInfo(H256, u64, u64);

-impl Encodable for ChunkInfo {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(3);
-        s.append(&self.0).append(&self.1).append(&self.2);
-    }
-}
-
-impl rlp::Decodable for ChunkInfo {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, rlp::DecoderError> {
-        let hash = rlp.val_at(0)?;
-        let len = rlp.val_at(1)?;
-        let off = rlp.val_at(2)?;
-        Ok(ChunkInfo(hash, len, off))
-    }
-}
-
 /// A packed snapshot writer. This writes snapshots to a single concatenated file.
 ///
 /// The file format is very simple and consists of three parts:
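Throughout this commit, hand-written `Encodable`/`Decodable` impls are replaced by the new `rlp_derive` macros (`RlpEncodable`, `RlpDecodable` and the `*Wrapper` variants). The sketch below is only a rough picture of what the derive has to generate for a tuple struct like `ChunkInfo`, reconstructed from the manual impl that the hunk deletes; the `u64` fields stand in for the real `H256` hash so the example stays small.

```rust
// Rough equivalent of what #[derive(RlpEncodable, RlpDecodable)] produces for
// a tuple struct, based on the manual impl removed above.
extern crate rlp;
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};

struct ChunkInfo(u64, u64, u64); // real struct holds (H256, u64, u64)

impl Encodable for ChunkInfo {
    fn rlp_append(&self, s: &mut RlpStream) {
        // one list with one item per field, in declaration order
        s.begin_list(3);
        s.append(&self.0).append(&self.1).append(&self.2);
    }
}

impl Decodable for ChunkInfo {
    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
        Ok(ChunkInfo(rlp.val_at(0)?, rlp.val_at(1)?, rlp.val_at(2)?))
    }
}

fn main() {
    let encoded = rlp::encode(&ChunkInfo(1, 2, 3)).to_vec();
    let decoded = ChunkInfo::decode(&UntrustedRlp::new(&encoded)).expect("round trip");
    assert_eq!((decoded.0, decoded.1, decoded.2), (1, 2, 3));
}
```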
@@ -130,7 +130,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
             action: Action::Call(Address::new()),
             value: 1.into(),
             data: Vec::new(),
-        }.sign(&*RICH_SECRET, client.signing_network_id());
+        }.sign(&*RICH_SECRET, client.signing_chain_id());

         *nonce = *nonce + 1.into();
         vec![transaction]
@@ -176,7 +176,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
             action: Action::Call(addr),
             value: 0.into(),
             data: data,
-        }.sign(&*RICH_SECRET, client.signing_network_id());
+        }.sign(&*RICH_SECRET, client.signing_chain_id());

         pending.push(transaction);

@@ -30,7 +30,7 @@ use util::kvdb::{self, KeyValueDB, DBTransaction};
 use std::sync::Arc;
 use std::sync::atomic::AtomicBool;

-const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot(30000);
+const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: 30000, max_restore_blocks: 30000 };

 fn chunk_and_restore(amount: u64) {
     let mut canon_chain = ChainGenerator::default();
@@ -100,6 +100,8 @@ pub struct CommonParams {
     pub block_reward: U256,
     /// Registrar contract address.
     pub registrar: Address,
+    /// Node permission managing contract address.
+    pub node_permission_contract: Option<Address>,
 }

 impl CommonParams {
@@ -171,6 +173,7 @@ impl From<ethjson::spec::Params> for CommonParams {
             gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
             block_reward: p.block_reward.map_or_else(U256::zero, Into::into),
             registrar: p.registrar.map_or_else(Address::new, Into::into),
+            node_permission_contract: p.node_permission_contract.map(Into::into),
         }
     }
 }
@@ -380,6 +383,9 @@ impl Spec {
     /// Get the configured Network ID.
     pub fn network_id(&self) -> u64 { self.params().network_id }

+    /// Get the chain ID used for signing.
+    pub fn chain_id(&self) -> u64 { self.params().chain_id }
+
     /// Get the configured subprotocol name.
     pub fn subprotocol_name(&self) -> String { self.params().subprotocol_name.clone() }

@@ -31,7 +31,7 @@ use vm::EnvInfo;
 use error::Error;
 use executive::{Executive, TransactOptions};
 use factory::Factories;
-use trace::FlatTrace;
+use trace::{self, FlatTrace, VMTrace};
 use pod_account::*;
 use pod_state::{self, PodState};
 use types::basic_account::BasicAccount;
@@ -59,8 +59,12 @@ pub use self::substate::Substate;
 pub struct ApplyOutcome {
     /// The receipt for the applied transaction.
     pub receipt: Receipt,
-    /// The trace for the applied transaction, if None if tracing is disabled.
+    /// The output of the applied transaction.
+    pub output: Bytes,
+    /// The trace for the applied transaction, empty if tracing was not produced.
     pub trace: Vec<FlatTrace>,
+    /// The VM trace for the applied transaction, None if tracing was not produced.
+    pub vm_trace: Option<VMTrace>
 }

 /// Result type for the execution ("application") of a transaction.
@@ -205,7 +209,7 @@ pub fn check_proof(
         Err(_) => return ProvedExecution::BadProof,
     };

-    match state.execute(env_info, engine, transaction, false, true) {
+    match state.execute(env_info, engine, transaction, TransactOptions::with_no_tracing(), true) {
         Ok(executed) => ProvedExecution::Complete(executed),
         Err(ExecutionError::Internal(_)) => ProvedExecution::BadProof,
         Err(e) => ProvedExecution::Failed(e),
@@ -290,7 +294,7 @@ const SEC_TRIE_DB_UNWRAP_STR: &'static str = "A state can only be created with v

 impl<B: Backend> State<B> {
     /// Creates new state with empty state root
-    #[cfg(test)]
+    /// Used for tests.
     pub fn new(mut db: B, account_start_nonce: U256, factories: Factories) -> State<B> {
         let mut root = H256::new();
         {
@@ -623,29 +627,57 @@ impl<B: Backend> State<B> {
     /// Execute a given transaction, producing a receipt and an optional trace.
     /// This will change the state accordingly.
     pub fn apply(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool) -> ApplyResult {
-        // let old = self.to_pod();
+        if tracing {
+            let options = TransactOptions::with_tracing();
+            self.apply_with_tracing(env_info, engine, t, options.tracer, options.vm_tracer)
+        } else {
+            let options = TransactOptions::with_no_tracing();
+            self.apply_with_tracing(env_info, engine, t, options.tracer, options.vm_tracer)
+        }
+    }

+    /// Execute a given transaction with given tracer and VM tracer producing a receipt and an optional trace.
+    /// This will change the state accordingly.
+    pub fn apply_with_tracing<V, T>(
+        &mut self,
+        env_info: &EnvInfo,
+        engine: &Engine,
+        t: &SignedTransaction,
+        tracer: T,
+        vm_tracer: V,
+    ) -> ApplyResult where
+        T: trace::Tracer,
+        V: trace::VMTracer,
+    {
+        let options = TransactOptions::new(tracer, vm_tracer);
+        let e = self.execute(env_info, engine, t, options, false)?;

-        let e = self.execute(env_info, engine, t, tracing, false)?;
-        // trace!("Applied transaction. Diff:\n{}\n", state_diff::diff_pod(&old, &self.to_pod()));
         let state_root = if env_info.number < engine.params().eip98_transition || env_info.number < engine.params().validate_receipts_transition {
             self.commit()?;
             Some(self.root().clone())
         } else {
             None
         };

+        let output = e.output;
         let receipt = Receipt::new(state_root, e.cumulative_gas_used, e.logs);
         trace!(target: "state", "Transaction receipt: {:?}", receipt);
-        Ok(ApplyOutcome{receipt: receipt, trace: e.trace})
+
+        Ok(ApplyOutcome {
+            receipt,
+            output,
+            trace: e.trace,
+            vm_trace: e.vm_trace,
+        })
     }

     // Execute a given transaction without committing changes.
     //
     // `virt` signals that we are executing outside of a block set and restrictions like
     // gas limits and gas costs should be lifted.
-    fn execute(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, tracing: bool, virt: bool)
-        -> Result<Executed, ExecutionError>
+    fn execute<T, V>(&mut self, env_info: &EnvInfo, engine: &Engine, t: &SignedTransaction, options: TransactOptions<T, V>, virt: bool)
+        -> Result<Executed, ExecutionError> where T: trace::Tracer, V: trace::VMTracer,
     {
-        let options = TransactOptions { tracing: tracing, vm_tracing: false, check_nonce: true };
         let mut e = Executive::new(self, env_info, engine);

         match virt {
@@ -730,9 +762,8 @@ impl<B: Backend> State<B> {
         Ok(())
     }

-    #[cfg(test)]
-    #[cfg(feature = "json-tests")]
     /// Populate the state from `accounts`.
+    /// Used for tests.
     pub fn populate_from(&mut self, accounts: PodState) {
         assert!(self.checkpoints.borrow().is_empty());
         for (add, acc) in accounts.drain().into_iter() {
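The `state.rs` hunk above replaces a boolean `tracing` flag with a `TransactOptions<T, V>` that carries concrete tracer types, so the choice between tracing and not tracing is made by static dispatch rather than a runtime branch inside the executor. A self-contained sketch of that design; the trait and type names below are invented for the illustration and only mimic the shape of `trace::Tracer` / `trace::VMTracer`:

```rust
// Sketch of selecting a tracer by generic parameter instead of a bool flag.
// `MiniTracer`, `Noop` and `Recording` are made up for this example.
trait MiniTracer {
    fn record(&mut self, step: &str);
    fn drain(self) -> Vec<String>;
}

struct Noop;
impl MiniTracer for Noop {
    fn record(&mut self, _step: &str) {}
    fn drain(self) -> Vec<String> { vec![] } // no allocations when tracing is off
}

struct Recording(Vec<String>);
impl MiniTracer for Recording {
    fn record(&mut self, step: &str) { self.0.push(step.to_owned()); }
    fn drain(self) -> Vec<String> { self.0 }
}

// Generic over the tracer, like `execute<T, V>` in the hunk above:
fn execute<T: MiniTracer>(mut tracer: T) -> Vec<String> {
    tracer.record("CALL");
    tracer.record("RETURN");
    tracer.drain()
}

fn main() {
    assert!(execute(Noop).is_empty());
    assert_eq!(execute(Recording(Vec::new())).len(), 2);
}
```

The consuming `drain` here also previews the `traces()` → `drain()` rename that appears further down in the trace module.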
@@ -19,7 +19,7 @@ use std::sync::Arc;
 use io::IoChannel;
 use client::{BlockChainClient, MiningBlockChainClient, Client, ClientConfig, BlockId};
 use state::{self, State, CleanupMode};
-use executive::Executive;
+use executive::{Executive, TransactOptions};
 use ethereum;
 use block::IsBlock;
 use tests::helpers::*;
@@ -361,7 +361,7 @@ fn transaction_proof() {

     let mut state = State::from_existing(backend, root, 0.into(), factories.clone()).unwrap();
     Executive::new(&mut state, &client.latest_env_info(), &*test_spec.engine)
-        .transact(&transaction, Default::default()).unwrap();
+        .transact(&transaction, TransactOptions::with_no_tracing().dont_check_nonce()).unwrap();

     assert_eq!(state.balance(&Address::default()).unwrap(), 5.into());
     assert_eq!(state.balance(&address).unwrap(), 95.into());
@@ -211,7 +211,7 @@ pub fn generate_dummy_client_with_spec_accounts_and_data<F>(get_test_spec: F, ac
             action: Action::Create,
             data: vec![],
             value: U256::zero(),
-        }.sign(kp.secret(), Some(test_spec.network_id())), None).unwrap();
+        }.sign(kp.secret(), Some(test_spec.chain_id())), None).unwrap();
         n += 1;
     }

@@ -1,10 +1,9 @@
 use bloomchain::Bloom;
 use bloomchain::group::{BloomGroup, GroupPosition};
-use rlp::*;
 use basic_types::LogBloom;

 /// Helper structure representing bloom of the trace.
-#[derive(Clone)]
+#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct BlockTracesBloom(LogBloom);

 impl From<LogBloom> for BlockTracesBloom {
@@ -28,7 +27,7 @@ impl Into<Bloom> for BlockTracesBloom {
 }

 /// Represents group of X consecutive blooms.
-#[derive(Clone)]
+#[derive(Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct BlockTracesBloomGroup {
     blooms: Vec<BlockTracesBloom>,
 }
@@ -59,34 +58,6 @@ impl Into<BloomGroup> for BlockTracesBloomGroup {
     }
 }

-impl Decodable for BlockTracesBloom {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        LogBloom::decode(rlp).map(BlockTracesBloom)
-    }
-}
-
-impl Encodable for BlockTracesBloom {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        Encodable::rlp_append(&self.0, s)
-    }
-}
-
-impl Decodable for BlockTracesBloomGroup {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let blooms = rlp.as_list()?;
-        let group = BlockTracesBloomGroup {
-            blooms: blooms
-        };
-        Ok(group)
-    }
-}
-
-impl Encodable for BlockTracesBloomGroup {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.append_list(&self.blooms);
-    }
-}
-
 /// Represents `BloomGroup` position in database.
 #[derive(PartialEq, Eq, Hash, Clone, Debug)]
 pub struct TraceGroupPosition {
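Two derive flavours appear in this commit: `RlpEncodable`/`RlpDecodable` for ordinary structs and `RlpEncodableWrapper`/`RlpDecodableWrapper` for newtypes. Judging from the manual impls being deleted (for example `BlockTracesBloom` above), the wrapper variants encode the single inner field transparently instead of adding an extra list around it. A hedged sketch of that distinction, using only the `rlp` calls visible in the removed code; `Wrapper` and `Plain` are made-up names:

```rust
// Sketch contrasting the two encodings; uses the `rlp` crate API that
// appears in the removed impls above.
extern crate rlp;
use rlp::{Encodable, RlpStream};

struct Wrapper(u64);      // like BlockTracesBloom(LogBloom): encoded transparently
struct Plain { a: u64 }   // like an RlpEncodable struct: encoded as a list

impl Encodable for Wrapper {
    fn rlp_append(&self, s: &mut RlpStream) {
        // what the removed wrapper impl did: forward to the inner value, no list
        Encodable::rlp_append(&self.0, s)
    }
}

impl Encodable for Plain {
    fn rlp_append(&self, s: &mut RlpStream) {
        s.begin_list(1);
        s.append(&self.a);
    }
}

fn main() {
    let w = rlp::encode(&Wrapper(7)).to_vec();
    let p = rlp::encode(&Plain { a: 7 }).to_vec();
    // The wrapper payload is just the inner item; the plain struct adds a list header.
    assert!(p.len() > w.len());
    println!("wrapper: {:?}, plain: {:?}", w, p);
}
```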
@@ -174,7 +174,7 @@ impl Tracer for ExecutiveTracer {
         ExecutiveTracer::default()
     }

-    fn traces(self) -> Vec<FlatTrace> {
+    fn drain(self) -> Vec<FlatTrace> {
         self.traces
     }
 }
@@ -88,7 +88,7 @@ pub trait Tracer: Send {
     fn subtracer(&self) -> Self where Self: Sized;

     /// Consumes self and returns all traces.
-    fn traces(self) -> Vec<FlatTrace>;
+    fn drain(self) -> Vec<FlatTrace>;
 }

 /// Used by executive to build VM traces.
@@ -65,7 +65,7 @@ impl Tracer for NoopTracer {
         NoopTracer
     }

-    fn traces(self) -> Vec<FlatTrace> {
+    fn drain(self) -> Vec<FlatTrace> {
         vec![]
     }
 }
@@ -77,7 +77,7 @@ impl Decodable for FlatTrace {
 }

 /// Represents all traces produced by a single transaction.
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, PartialEq, Clone, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct FlatTransactionTraces(Vec<FlatTrace>);

 impl From<Vec<FlatTrace>> for FlatTransactionTraces {
@@ -99,18 +99,6 @@ impl FlatTransactionTraces {
     }
 }

-impl Encodable for FlatTransactionTraces {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.append_list(&self.0);
-    }
-}
-
-impl Decodable for FlatTransactionTraces {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(FlatTransactionTraces(rlp.as_list()?))
-    }
-}
-
 impl Into<Vec<FlatTrace>> for FlatTransactionTraces {
     fn into(self) -> Vec<FlatTrace> {
         self.0
@@ -118,7 +106,7 @@ impl Into<Vec<FlatTrace>> for FlatTransactionTraces {
 }

 /// Represents all traces produced by transactions in a single block.
-#[derive(Debug, PartialEq, Clone, Default)]
+#[derive(Debug, PartialEq, Clone, Default, RlpEncodableWrapper, RlpDecodableWrapper)]
 pub struct FlatBlockTraces(Vec<FlatTransactionTraces>);

 impl HeapSizeOf for FlatBlockTraces {
@@ -140,18 +128,6 @@ impl FlatBlockTraces {
     }
 }

-impl Encodable for FlatBlockTraces {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.append_list(&self.0);
-    }
-}
-
-impl Decodable for FlatBlockTraces {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(FlatBlockTraces(rlp.as_list()?))
-    }
-}
-
 impl Into<Vec<FlatTransactionTraces>> for FlatBlockTraces {
     fn into(self) -> Vec<FlatTransactionTraces> {
         self.0
@@ -27,7 +27,7 @@ use evm::CallType;
 use super::error::Error;

 /// `Call` result.
-#[derive(Debug, Clone, PartialEq, Default)]
+#[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 pub struct CallResult {
     /// Gas used by call.
@@ -36,27 +36,8 @@ pub struct CallResult {
     pub output: Bytes,
 }

-impl Encodable for CallResult {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(2);
-        s.append(&self.gas_used);
-        s.append(&self.output);
-    }
-}
-
-impl Decodable for CallResult {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = CallResult {
-            gas_used: rlp.val_at(0)?,
-            output: rlp.val_at(1)?,
-        };
-
-        Ok(res)
-    }
-}
-
 /// `Create` result.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 pub struct CreateResult {
     /// Gas used by create.
@@ -67,27 +48,6 @@ pub struct CreateResult {
     pub address: Address,
 }

-impl Encodable for CreateResult {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(3);
-        s.append(&self.gas_used);
-        s.append(&self.code);
-        s.append(&self.address);
-    }
-}
-
-impl Decodable for CreateResult {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = CreateResult {
-            gas_used: rlp.val_at(0)?,
-            code: rlp.val_at(1)?,
-            address: rlp.val_at(2)?,
-        };
-
-        Ok(res)
-    }
-}
-
 impl CreateResult {
     /// Returns bloom.
     pub fn bloom(&self) -> LogBloom {
@@ -96,7 +56,7 @@ impl CreateResult {
 }

 /// Description of a _call_ action, either a `CALL` operation or a message transction.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 pub struct Call {
     /// The sending account.
@@ -126,33 +86,6 @@ impl From<ActionParams> for Call {
     }
 }

-impl Encodable for Call {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(6);
-        s.append(&self.from);
-        s.append(&self.to);
-        s.append(&self.value);
-        s.append(&self.gas);
-        s.append(&self.input);
-        s.append(&self.call_type);
-    }
-}
-
-impl Decodable for Call {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = Call {
-            from: rlp.val_at(0)?,
-            to: rlp.val_at(1)?,
-            value: rlp.val_at(2)?,
-            gas: rlp.val_at(3)?,
-            input: rlp.val_at(4)?,
-            call_type: rlp.val_at(5)?,
-        };
-
-        Ok(res)
-    }
-}
-
 impl Call {
     /// Returns call action bloom.
     /// The bloom contains from and to addresses.
@@ -163,7 +96,7 @@ impl Call {
 }

 /// Description of a _create_ action, either a `CREATE` operation or a create transction.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 pub struct Create {
     /// The address of the creator.
@@ -187,29 +120,6 @@ impl From<ActionParams> for Create {
     }
 }

-impl Encodable for Create {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(4);
-        s.append(&self.from);
-        s.append(&self.value);
-        s.append(&self.gas);
-        s.append(&self.init);
-    }
-}
-
-impl Decodable for Create {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = Create {
-            from: rlp.val_at(0)?,
-            value: rlp.val_at(1)?,
-            gas: rlp.val_at(2)?,
-            init: rlp.val_at(3)?,
-        };
-
-        Ok(res)
-    }
-}
-
 impl Create {
     /// Returns bloom create action bloom.
     /// The bloom contains only from address.
@@ -290,7 +200,7 @@ impl Decodable for Reward {


 /// Suicide action.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 pub struct Suicide {
     /// Suicided address.
@@ -331,7 +241,7 @@ impl Decodable for Suicide {
 }


-/// Description of an action that we trace.
+/// Description of an action that we trace; will be either a call or a create.
 #[derive(Debug, Clone, PartialEq)]
 #[cfg_attr(feature = "ipc", binary)]
 pub enum Action {
@@ -474,7 +384,7 @@ impl Res {
     }
 }

-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 /// A diff of some chunk of memory.
 pub struct MemoryDiff {
@@ -484,24 +394,7 @@ pub struct MemoryDiff {
     pub data: Bytes,
 }

-impl Encodable for MemoryDiff {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(2);
-        s.append(&self.offset);
-        s.append(&self.data);
-    }
-}
-
-impl Decodable for MemoryDiff {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(MemoryDiff {
-            offset: rlp.val_at(0)?,
-            data: rlp.val_at(1)?,
-        })
-    }
-}
-
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 /// A diff of some storage value.
 pub struct StorageDiff {
@@ -511,24 +404,7 @@ pub struct StorageDiff {
     pub value: U256,
 }

-impl Encodable for StorageDiff {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(2);
-        s.append(&self.location);
-        s.append(&self.value);
-    }
-}
-
-impl Decodable for StorageDiff {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(StorageDiff {
-            location: rlp.val_at(0)?,
-            value: rlp.val_at(1)?,
-        })
-    }
-}
-
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 /// A record of an executed VM operation.
 pub struct VMExecutedOperation {
@@ -542,28 +418,7 @@ pub struct VMExecutedOperation {
     pub store_diff: Option<StorageDiff>,
 }

-impl Encodable for VMExecutedOperation {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(4);
-        s.append(&self.gas_used);
-        s.append_list(&self.stack_push);
-        s.append(&self.mem_diff);
-        s.append(&self.store_diff);
-    }
-}
-
-impl Decodable for VMExecutedOperation {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(VMExecutedOperation {
-            gas_used: rlp.val_at(0)?,
-            stack_push: rlp.list_at(1)?,
-            mem_diff: rlp.val_at(2)?,
-            store_diff: rlp.val_at(3)?,
-        })
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
+#[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 /// A record of the execution of a single VM operation.
 pub struct VMOperation {
@@ -577,30 +432,7 @@ pub struct VMOperation {
     pub executed: Option<VMExecutedOperation>,
 }

-impl Encodable for VMOperation {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(4);
-        s.append(&self.pc);
-        s.append(&self.instruction);
-        s.append(&self.gas_cost);
-        s.append(&self.executed);
-    }
-}
-
-impl Decodable for VMOperation {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = VMOperation {
-            pc: rlp.val_at(0)?,
-            instruction: rlp.val_at(1)?,
-            gas_cost: rlp.val_at(2)?,
-            executed: rlp.val_at(3)?,
-        };
-
-        Ok(res)
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Default)]
+#[derive(Debug, Clone, PartialEq, Default, RlpEncodable, RlpDecodable)]
 #[cfg_attr(feature = "ipc", binary)]
 /// A record of a full VM trace for a CALL/CREATE.
 pub struct VMTrace {
@@ -614,26 +446,3 @@ pub struct VMTrace {
     /// Thre is a 1:1 correspondance between these and a CALL/CREATE/CALLCODE/DELEGATECALL instruction.
     pub subs: Vec<VMTrace>,
 }
-
-impl Encodable for VMTrace {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(4);
-        s.append(&self.parent_step);
-        s.append(&self.code);
-        s.append_list(&self.operations);
-        s.append_list(&self.subs);
-    }
-}
-
-impl Decodable for VMTrace {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let res = VMTrace {
-            parent_step: rlp.val_at(0)?,
-            code: rlp.val_at(1)?,
-            operations: rlp.list_at(2)?,
-            subs: rlp.list_at(3)?,
-        };
-
-        Ok(res)
-    }
-}
@@ -56,6 +56,15 @@ impl Decodable for Action {
     }
 }

+impl Encodable for Action {
+    fn rlp_append(&self, s: &mut RlpStream) {
+        match *self {
+            Action::Create => s.append_internal(&""),
+            Action::Call(ref addr) => s.append_internal(addr),
+        };
+    }
+}
+
 /// Transaction activation condition.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum Condition {
@@ -85,18 +94,15 @@ pub struct Transaction {

 impl Transaction {
     /// Append object with a without signature into RLP stream
-    pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, network_id: Option<u64>) {
-        s.begin_list(if network_id.is_none() { 6 } else { 9 });
+    pub fn rlp_append_unsigned_transaction(&self, s: &mut RlpStream, chain_id: Option<u64>) {
+        s.begin_list(if chain_id.is_none() { 6 } else { 9 });
         s.append(&self.nonce);
         s.append(&self.gas_price);
         s.append(&self.gas);
-        match self.action {
-            Action::Create => s.append_empty_data(),
-            Action::Call(ref to) => s.append(to)
-        };
+        s.append(&self.action);
         s.append(&self.value);
         s.append(&self.data);
-        if let Some(n) = network_id {
+        if let Some(n) = chain_id {
             s.append(&n);
             s.append(&0u8);
             s.append(&0u8);
@@ -157,27 +163,27 @@ impl From<ethjson::transaction::Transaction> for UnverifiedTransaction {

 impl Transaction {
     /// The message hash of the transaction.
-    pub fn hash(&self, network_id: Option<u64>) -> H256 {
+    pub fn hash(&self, chain_id: Option<u64>) -> H256 {
         let mut stream = RlpStream::new();
-        self.rlp_append_unsigned_transaction(&mut stream, network_id);
+        self.rlp_append_unsigned_transaction(&mut stream, chain_id);
         stream.as_raw().sha3()
     }

     /// Signs the transaction as coming from `sender`.
-    pub fn sign(self, secret: &Secret, network_id: Option<u64>) -> SignedTransaction {
-        let sig = ::ethkey::sign(secret, &self.hash(network_id))
+    pub fn sign(self, secret: &Secret, chain_id: Option<u64>) -> SignedTransaction {
+        let sig = ::ethkey::sign(secret, &self.hash(chain_id))
             .expect("data is valid and context has signing capabilities; qed");
-        SignedTransaction::new(self.with_signature(sig, network_id))
+        SignedTransaction::new(self.with_signature(sig, chain_id))
             .expect("secret is valid so it's recoverable")
     }

     /// Signs the transaction with signature.
-    pub fn with_signature(self, sig: Signature, network_id: Option<u64>) -> UnverifiedTransaction {
+    pub fn with_signature(self, sig: Signature, chain_id: Option<u64>) -> UnverifiedTransaction {
         UnverifiedTransaction {
             unsigned: self,
             r: sig.r().into(),
             s: sig.s().into(),
-            v: sig.v() as u64 + if let Some(n) = network_id { 35 + n * 2 } else { 27 },
+            v: sig.v() as u64 + if let Some(n) = chain_id { 35 + n * 2 } else { 27 },
             hash: 0.into(),
         }.compute_hash()
     }
@@ -210,13 +216,13 @@ impl Transaction {
     }

     /// Add EIP-86 compatible empty signature.
-    pub fn null_sign(self, network_id: u64) -> SignedTransaction {
+    pub fn null_sign(self, chain_id: u64) -> SignedTransaction {
         SignedTransaction {
             transaction: UnverifiedTransaction {
                 unsigned: self,
                 r: U256::zero(),
                 s: U256::zero(),
-                v: network_id,
+                v: chain_id,
                 hash: 0.into(),
             }.compute_hash(),
             sender: UNSIGNED_SENDER,
@@ -244,7 +250,7 @@ pub struct UnverifiedTransaction {
     /// Plain Transaction.
     unsigned: Transaction,
     /// The V field of the signature; the LS bit described which half of the curve our point falls
-    /// in. The MS bits describe which network this transaction is for. If 27/28, its for all networks.
+    /// in. The MS bits describe which chain this transaction is for. If 27/28, its for all chains.
     v: u64,
     /// The R field of the signature; helps describe the point on the curve.
     r: U256,
@@ -308,10 +314,7 @@ impl UnverifiedTransaction {
         s.append(&self.nonce);
         s.append(&self.gas_price);
         s.append(&self.gas);
-        match self.action {
-            Action::Create => s.append_empty_data(),
-            Action::Call(ref to) => s.append(to)
-        };
+        s.append(&self.action);
         s.append(&self.value);
         s.append(&self.data);
         s.append(&self.v);
@@ -330,8 +333,8 @@ impl UnverifiedTransaction {
     /// The `v` value that appears in the RLP.
     pub fn original_v(&self) -> u64 { self.v }

-    /// The network ID, or `None` if this is a global transaction.
-    pub fn network_id(&self) -> Option<u64> {
+    /// The chain ID, or `None` if this is a global transaction.
+    pub fn chain_id(&self) -> Option<u64> {
         match self.v {
             v if self.is_unsigned() => Some(v),
             v if v > 36 => Some((v - 35) / 2),
@@ -360,15 +363,15 @@ impl UnverifiedTransaction {

     /// Recovers the public key of the sender.
     pub fn recover_public(&self) -> Result<Public, Error> {
-        Ok(recover(&self.signature(), &self.unsigned.hash(self.network_id()))?)
+        Ok(recover(&self.signature(), &self.unsigned.hash(self.chain_id()))?)
     }

     /// Do basic validation, checking for valid signature and minimum gas,
     // TODO: consider use in block validation.
     #[cfg(test)]
     #[cfg(feature = "json-tests")]
-    pub fn validate(self, schedule: &Schedule, require_low: bool, allow_network_id_of_one: bool, allow_empty_signature: bool) -> Result<UnverifiedTransaction, Error> {
-        let chain_id = if allow_network_id_of_one { Some(1) } else { None };
+    pub fn validate(self, schedule: &Schedule, require_low: bool, allow_chain_id_of_one: bool, allow_empty_signature: bool) -> Result<UnverifiedTransaction, Error> {
+        let chain_id = if allow_chain_id_of_one { Some(1) } else { None };
         self.verify_basic(require_low, chain_id, allow_empty_signature)?;
         if !allow_empty_signature || !self.is_unsigned() {
             self.recover_public()?;
@@ -388,10 +391,10 @@ impl UnverifiedTransaction {
         if allow_empty_signature && self.is_unsigned() && !(self.gas_price.is_zero() && self.value.is_zero() && self.nonce.is_zero()) {
             return Err(EthkeyError::InvalidSignature.into())
         }
-        match (self.network_id(), chain_id) {
+        match (self.chain_id(), chain_id) {
             (None, _) => {},
             (Some(n), Some(m)) if n == m => {},
-            _ => return Err(TransactionError::InvalidNetworkId.into()),
+            _ => return Err(TransactionError::InvalidChainId.into()),
         };
         Ok(())
     }
|
|||||||
} else { panic!(); }
|
} else { panic!(); }
|
||||||
assert_eq!(t.value, U256::from(0x0au64));
|
assert_eq!(t.value, U256::from(0x0au64));
|
||||||
assert_eq!(public_to_address(&t.recover_public().unwrap()), "0f65fe9276bc9a24ae7083ae28e2660ef72df99e".into());
|
assert_eq!(public_to_address(&t.recover_public().unwrap()), "0f65fe9276bc9a24ae7083ae28e2660ef72df99e".into());
|
||||||
assert_eq!(t.network_id(), None);
|
assert_eq!(t.chain_id(), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -572,7 +575,7 @@ mod tests {
|
|||||||
data: b"Hello!".to_vec()
|
data: b"Hello!".to_vec()
|
||||||
}.sign(&key.secret(), None);
|
}.sign(&key.secret(), None);
|
||||||
assert_eq!(Address::from(key.public().sha3()), t.sender());
|
assert_eq!(Address::from(key.public().sha3()), t.sender());
|
||||||
assert_eq!(t.network_id(), None);
|
assert_eq!(t.chain_id(), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -586,15 +589,15 @@ mod tests {
|
|||||||
data: b"Hello!".to_vec()
|
data: b"Hello!".to_vec()
|
||||||
}.fake_sign(Address::from(0x69));
|
}.fake_sign(Address::from(0x69));
|
||||||
assert_eq!(Address::from(0x69), t.sender());
|
assert_eq!(Address::from(0x69), t.sender());
|
||||||
assert_eq!(t.network_id(), None);
|
assert_eq!(t.chain_id(), None);
|
||||||
|
|
||||||
let t = t.clone();
|
let t = t.clone();
|
||||||
assert_eq!(Address::from(0x69), t.sender());
|
assert_eq!(Address::from(0x69), t.sender());
|
||||||
assert_eq!(t.network_id(), None);
|
assert_eq!(t.chain_id(), None);
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn should_recover_from_network_specific_signing() {
|
fn should_recover_from_chain_specific_signing() {
|
||||||
use ethkey::{Random, Generator};
|
use ethkey::{Random, Generator};
|
||||||
let key = Random.generate().unwrap();
|
let key = Random.generate().unwrap();
|
||||||
let t = Transaction {
|
let t = Transaction {
|
||||||
@ -606,7 +609,7 @@ mod tests {
|
|||||||
data: b"Hello!".to_vec()
|
data: b"Hello!".to_vec()
|
||||||
}.sign(&key.secret(), Some(69));
|
}.sign(&key.secret(), Some(69));
|
||||||
assert_eq!(Address::from(key.public().sha3()), t.sender());
|
assert_eq!(Address::from(key.public().sha3()), t.sender());
|
||||||
assert_eq!(t.network_id(), Some(69));
|
assert_eq!(t.chain_id(), Some(69));
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -617,7 +620,7 @@ mod tests {
|
|||||||
let signed = decode(&FromHex::from_hex(tx_data).unwrap());
|
let signed = decode(&FromHex::from_hex(tx_data).unwrap());
|
||||||
let signed = SignedTransaction::new(signed).unwrap();
|
let signed = SignedTransaction::new(signed).unwrap();
|
||||||
assert_eq!(signed.sender(), address.into());
|
assert_eq!(signed.sender(), address.into());
|
||||||
flushln!("networkid: {:?}", signed.network_id());
|
flushln!("chainid: {:?}", signed.chain_id());
|
||||||
};
|
};
|
||||||
|
|
||||||
test_vector("f864808504a817c800825208943535353535353535353535353535353535353535808025a0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116da0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d", "0xf0f6f18bca1b28cd68e4357452947e021241e9ce");
|
test_vector("f864808504a817c800825208943535353535353535353535353535353535353535808025a0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116da0044852b2a670ade5407e78fb2863c51de9fcb96542a07186fe3aeda6bb8a116d", "0xf0f6f18bca1b28cd68e4357452947e021241e9ce");
|
||||||
|
@ -6,6 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
rlp = { path = "../../util/rlp" }
|
rlp = { path = "../../util/rlp" }
|
||||||
|
rlp_derive = { path = "../../util/rlp_derive" }
|
||||||
ethcore-util = { path = "../../util" }
|
ethcore-util = { path = "../../util" }
|
||||||
ethjson = { path = "../../json" }
|
ethjson = { path = "../../json" }
|
||||||
bloomable = { path = "../../util/bloomable" }
|
bloomable = { path = "../../util/bloomable" }
|
||||||
|
@@ -16,11 +16,10 @@

 //! Basic account type -- the decoded RLP from the state trie.

-use rlp::*;
 use util::{U256, H256};

 /// Basic account type.
-#[derive(Debug, Clone, PartialEq, Eq)]
+#[derive(Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
 pub struct BasicAccount {
     /// Nonce of the account.
     pub nonce: U256,
@@ -31,24 +30,3 @@ pub struct BasicAccount {
     /// Code hash of the account.
     pub code_hash: H256,
 }
-
-impl Encodable for BasicAccount {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(4)
-            .append(&self.nonce)
-            .append(&self.balance)
-            .append(&self.storage_root)
-            .append(&self.code_hash);
-    }
-}
-
-impl Decodable for BasicAccount {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        Ok(BasicAccount {
-            nonce: rlp.val_at(0)?,
-            balance: rlp.val_at(1)?,
-            storage_root: rlp.val_at(2)?,
-            code_hash: rlp.val_at(3)?,
-        })
-    }
-}
@@ -23,6 +23,8 @@ pub enum BlockStatus {
     Queued,
     /// Known as bad.
     Bad,
+    /// Pending block.
+    Pending,
     /// Unknown.
     Unknown,
 }
@@ -19,6 +19,8 @@
 extern crate ethcore_util as util;
 extern crate ethjson;
 extern crate rlp;
+#[macro_use]
+extern crate rlp_derive;
 extern crate bloomable;

 #[cfg(test)]
@@ -19,7 +19,6 @@
 use std::ops::Deref;
 use util::{H256, Address, Bytes, HeapSizeOf, Hashable};
 use bloomable::Bloomable;
-use rlp::*;

 use {BlockNumber};
 use ethjson;
@@ -27,7 +26,7 @@ use ethjson;
 pub type LogBloom = ::util::H2048;

 /// A record of execution for a `LOG` operation.
-#[derive(Default, Debug, Clone, PartialEq, Eq)]
+#[derive(Default, Debug, Clone, PartialEq, Eq, RlpEncodable, RlpDecodable)]
 pub struct LogEntry {
     /// The address of the contract executing at the point of the `LOG` operation.
     pub address: Address,
@@ -37,26 +36,6 @@ pub struct LogEntry {
     pub data: Bytes,
 }

-impl Encodable for LogEntry {
-    fn rlp_append(&self, s: &mut RlpStream) {
-        s.begin_list(3);
-        s.append(&self.address);
-        s.append_list(&self.topics);
-        s.append(&self.data);
-    }
-}
-
-impl Decodable for LogEntry {
-    fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-        let entry = LogEntry {
-            address: rlp.val_at(0)?,
-            topics: rlp.list_at(1)?,
-            data: rlp.val_at(2)?,
-        };
-        Ok(entry)
-    }
-}
-
 impl HeapSizeOf for LogEntry {
     fn heap_size_of_children(&self) -> usize {
         self.topics.heap_size_of_children() + self.data.heap_size_of_children()
@@ -61,6 +61,7 @@ pub struct FakeExt {
     pub info: EnvInfo,
     pub schedule: Schedule,
     pub balances: HashMap<Address, U256>,
+    pub tracing: bool,
 }

 // similar to the normal `finalize` function, but ignoring NeedsReturn.
@@ -184,4 +185,8 @@ impl Ext for FakeExt {
     fn inc_sstore_clears(&mut self) {
         self.sstore_clears += 1;
     }
+
+    fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool {
+        self.tracing
+    }
 }
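`trace_next_instruction` lets the externalities decide, per instruction, whether the interpreter should bother collecting a VM trace; the test double above simply forwards a `tracing` flag. A self-contained sketch of that hook follows; the trait is a local stand-in invented for the example, only the method shape comes from the hunk:

```rust
// Stand-in trait to illustrate the per-instruction tracing hook added above;
// `MiniExt` / `FakeExt` here are local to this sketch, not the real vm::Ext.
trait MiniExt {
    /// Returns true if the interpreter should record a trace entry
    /// for the instruction about to be executed.
    fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool {
        false // tracing off by default
    }
}

struct FakeExt { tracing: bool }

impl MiniExt for FakeExt {
    fn trace_next_instruction(&mut self, _pc: usize, _instruction: u8) -> bool {
        self.tracing
    }
}

fn run_one<E: MiniExt>(ext: &mut E, pc: usize, instruction: u8) {
    if ext.trace_next_instruction(pc, instruction) {
        println!("trace: pc={} op={:#04x}", pc, instruction);
    }
    // ... execute the instruction ...
}

fn main() {
    run_one(&mut FakeExt { tracing: true }, 0, 0x60);  // prints a trace line
    run_one(&mut FakeExt { tracing: false }, 1, 0x01); // silent
}
```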
@@ -17,8 +17,9 @@
 //! Wasm env module bindings

 use parity_wasm::elements::ValueType::*;
-use parity_wasm::interpreter::UserFunctionDescriptor;
+use parity_wasm::interpreter::{self, UserFunctionDescriptor};
 use parity_wasm::interpreter::UserFunctionDescriptor::*;
+use super::runtime::Runtime;

 pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
     Static(
@@ -81,63 +82,29 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
         &[I32],
         None,
     ),
-    Static(
-        "_abort",
-        &[],
-        None,
-    ),
-    Static(
-        "abortOnCannotGrowMemory",
-        &[I32; 0],
-        Some(I32)
-    ),
-
-    /*
-        THIS IS EXPERIMENTAL RUST-ONLY RUNTIME EXTERNS, THEY ARE SUBJECT TO CHANGE
-
-        AVOID YOUR WASM CONTAINS ANY OF THESE OTHERWISE
-        EITHER FACE THE NEED OF HARDFORK
-        OR YOU CAN STUCK ON SPECIFIC RUST VERSION FOR WASM COMPILATION
-    */
-
-    Static(
-        "_rust_begin_unwind",
-        &[I32; 4],
-        None,
-    ),
     Static(
         "_emscripten_memcpy_big",
         &[I32; 3],
         Some(I32),
     ),
+    // TODO: Get rid of it also somehow?
-    Static(
-        "___syscall6",
-        &[I32; 2],
-        Some(I32),
-    ),
-    Static(
-        "___syscall140",
-        &[I32; 2],
-        Some(I32)
-    ),
-    Static(
-        "___syscall146",
-        &[I32; 2],
-        Some(I32)
-    ),
-    Static(
-        "___syscall54",
-        &[I32; 2],
-        Some(I32)
-    ),
     Static(
         "_llvm_trap",
         &[I32; 0],
         None
     ),

     Static(
-        "___setErrNo",
-        &[I32; 1],
-        None
+        "_llvm_bswap_i64",
+        &[I32; 2],
+        Some(I32)
     ),
 ];

+pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserFunctions<'a> {
+    interpreter::UserFunctions {
+        executor: runtime,
+        functions: ::std::borrow::Cow::from(SIGNATURES),
+    }
+}
@@ -32,8 +32,6 @@ mod result;
 mod tests;
 mod env;
 
-use std::sync::Arc;
-
 const DEFAULT_STACK_SPACE: u32 = 5 * 1024 * 1024;
 
 use parity_wasm::{interpreter, elements};
@@ -89,6 +87,7 @@ impl vm::Vm for WasmInterpreter {
 			DEFAULT_STACK_SPACE,
 			params.gas.low_u64(),
 			RuntimeContext::new(params.address, params.sender),
+			&self.program,
 		);
 
 		let mut cursor = ::std::io::Cursor::new(&*code);
@@ -112,16 +111,8 @@ impl vm::Vm for WasmInterpreter {
 		)?;
 
 		{
-			let execution_params = interpreter::ExecutionParams::with_external(
-				"env".into(),
-				Arc::new(
-					interpreter::env_native_module(env_instance, native_bindings(&mut runtime))
-						.map_err(|err| {
-							// todo: prefer explicit panic here also?
-							vm::Error::Wasm(format!("Error instantiating native bindings: {:?}", err))
-						})?
-				)
-			).add_argument(interpreter::RuntimeValue::I32(d_ptr.as_raw() as i32));
+			let execution_params = runtime.execution_params()
+				.add_argument(interpreter::RuntimeValue::I32(d_ptr.as_raw() as i32));
 
 			let module_instance = self.program.add_module("contract", contract_module, Some(&execution_params.externals))
 				.map_err(|err| {
@@ -158,13 +149,6 @@ impl vm::Vm for WasmInterpreter {
 		}
 	}
 
-fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserFunctions<'a> {
-	interpreter::UserFunctions {
-		executor: runtime,
-		functions: ::std::borrow::Cow::from(env::SIGNATURES),
-	}
-}
-
 impl From<runtime::Error> for vm::Error {
 	fn from(err: runtime::Error) -> vm::Error {
 		vm::Error::Wasm(format!("WASM runtime-error: {:?}", err))
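The From<runtime::Error> for vm::Error impl kept at the end of this hunk is what lets the ? operator convert interpreter-level failures into the VM's error type inside exec. A minimal, self-contained illustration of that conversion pattern follows; the enums are stand-ins for the real runtime::Error and vm::Error.

    // Illustrative only: mirrors how a wasm runtime error converts into the VM error type.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum RuntimeError { BadUtf8, GasLimit }

    #[derive(Debug)]
    enum VmError { Wasm(String) }

    impl From<RuntimeError> for VmError {
        fn from(err: RuntimeError) -> VmError {
            VmError::Wasm(format!("WASM runtime-error: {:?}", err))
        }
    }

    fn interpret() -> Result<(), RuntimeError> {
        Err(RuntimeError::GasLimit)
    }

    fn exec() -> Result<(), VmError> {
        interpret()?; // `?` applies the From impl automatically
        Ok(())
    }

    fn main() {
        println!("{:?}", exec()); // Err(Wasm("WASM runtime-error: GasLimit"))
    }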
@@ -72,24 +72,26 @@ impl RuntimeContext {
 }
 
 /// Runtime enviroment data for wasm contract execution
-pub struct Runtime<'a> {
+pub struct Runtime<'a, 'b> {
 	gas_counter: u64,
 	gas_limit: u64,
 	dynamic_top: u32,
 	ext: &'a mut vm::Ext,
 	memory: Arc<interpreter::MemoryInstance>,
 	context: RuntimeContext,
+	instance: &'b interpreter::ProgramInstance,
 }
 
-impl<'a> Runtime<'a> {
+impl<'a, 'b> Runtime<'a, 'b> {
 	/// New runtime for wasm contract with specified params
-	pub fn with_params<'b>(
-		ext: &'b mut vm::Ext,
+	pub fn with_params<'c, 'd>(
+		ext: &'c mut vm::Ext,
 		memory: Arc<interpreter::MemoryInstance>,
 		stack_space: u32,
 		gas_limit: u64,
 		context: RuntimeContext,
-	) -> Runtime<'b> {
+		program_instance: &'d interpreter::ProgramInstance,
+	) -> Runtime<'c, 'd> {
 		Runtime {
 			gas_counter: 0,
 			gas_limit: gas_limit,
@@ -97,6 +99,7 @@ impl<'a> Runtime<'a> {
 			memory: memory,
 			ext: ext,
 			context: context,
+			instance: program_instance,
 		}
 	}
 
@@ -449,9 +452,58 @@ impl<'a> Runtime<'a> {
 
 		Ok(Some(0i32.into()))
 	}
+
+	fn bswap_32(x: u32) -> u32 {
+		x >> 24 | x >> 8 & 0xff00 | x << 8 & 0xff0000 | x << 24
+	}
+
+	fn bitswap_i64(&mut self, context: interpreter::CallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	{
+		let x1 = context.value_stack.pop_as::<i32>()?;
+		let x2 = context.value_stack.pop_as::<i32>()?;
+
+		let result = ((Runtime::bswap_32(x2 as u32) as u64) << 32
+			| Runtime::bswap_32(x1 as u32) as u64) as i64;
+
+		self.return_i64(result)
+	}
+
+	fn return_i64(&mut self, val: i64) -> Result<Option<interpreter::RuntimeValue>, interpreter::Error> {
+		let uval = val as u64;
+		let hi = (uval >> 32) as i32;
+		let lo = (uval << 32 >> 32) as i32;
+
+		let target = self.instance.module("contract")
+			.ok_or(interpreter::Error::Trap("Error locating main execution entry".to_owned()))?;
+		target.execute_export(
+			"setTempRet0",
+			self.execution_params().add_argument(
+				interpreter::RuntimeValue::I32(hi).into()
+			),
+		)?;
+		Ok(Some(
+			(lo).into()
+		))
+	}
+
+	pub fn execution_params(&mut self) -> interpreter::ExecutionParams {
+		use super::env;
+
+		let env_instance = self.instance.module("env")
+			.expect("Env module always exists; qed");
+
+		interpreter::ExecutionParams::with_external(
+			"env".into(),
+			Arc::new(
+				interpreter::env_native_module(env_instance, env::native_bindings(self))
+					.expect("Env module always exists; qed")
+			)
+		)
+	}
 }
 
-impl<'a> interpreter::UserFunctionExecutor for Runtime<'a> {
+impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> {
 	fn execute(&mut self, name: &str, context: interpreter::CallerContext)
 		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
 	{
@@ -494,6 +546,9 @@ impl<'a> interpreter::UserFunctionExecutor for Runtime<'a> {
 			"_emscripten_memcpy_big" => {
 				self.mem_copy(context)
 			},
+			"_llvm_bswap_i64" => {
+				self.bitswap_i64(context)
+			},
 			_ => {
 				trace!(target: "wasm", "Trapped due to unhandled function: '{}'", name);
 				self.user_trap(context)
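The new _llvm_bswap_i64 import byte-swaps a 64-bit value that arrives from the wasm side as two i32 halves; the low 32 bits of the result are returned directly, while the high 32 bits are handed back through the module's exported setTempRet0, the usual emscripten convention for i64 results. The byte-swap arithmetic can be checked in isolation with the stand-alone sketch below; `lo` and `hi` are this sketch's own names, not an assertion about the x1/x2 pop order above.

    // Stand-alone check of the byte-swap arithmetic used in bswap_32/bitswap_i64 (illustrative).
    fn bswap_32(x: u32) -> u32 {
        x >> 24 | x >> 8 & 0xff00 | x << 8 & 0xff0000 | x << 24
    }

    fn bswap_64(lo: u32, hi: u32) -> u64 {
        // Byte-swapping a 64-bit value also swaps which word is "high": the old
        // low word, byte-swapped, becomes the new high word.
        (bswap_32(lo) as u64) << 32 | bswap_32(hi) as u64
    }

    fn main() {
        assert_eq!(bswap_32(0x11223344), 0x44332211);

        let x: u64 = 0x1122334455667788;
        let (lo, hi) = (x as u32, (x >> 32) as u32);
        assert_eq!(bswap_64(lo, hi), x.swap_bytes());
        // The runtime then returns the low 32 bits directly and passes the high
        // 32 bits to the contract's exported setTempRet0, as described above.
    }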
@@ -57,7 +57,7 @@ fn empty() {
 		test_finalize(interpreter.exec(params, &mut ext)).unwrap()
 	};
 
-	assert_eq!(gas_left, U256::from(99_996));
+	assert_eq!(gas_left, U256::from(99_992));
 }
 
 // This test checks if the contract deserializes payload header properly.
@@ -85,7 +85,7 @@ fn logger() {
 	};
 
 	println!("ext.store: {:?}", ext.store);
-	assert_eq!(gas_left, U256::from(99590));
+	assert_eq!(gas_left, U256::from(99327));
 	let address_val: H256 = address.into();
 	assert_eq!(
 		ext.store.get(&"0100000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"),
@@ -136,7 +136,7 @@ fn identity() {
 		}
 	};
 
-	assert_eq!(gas_left, U256::from(99_687));
+	assert_eq!(gas_left, U256::from(99_672));
 
 	assert_eq!(
 		Address::from_slice(&result),
@@ -170,7 +170,7 @@ fn dispersion() {
 		}
 	};
 
-	assert_eq!(gas_left, U256::from(99_423));
+	assert_eq!(gas_left, U256::from(99_270));
 
 	assert_eq!(
 		result,
@@ -199,7 +199,7 @@ fn suicide_not() {
 		}
 	};
 
-	assert_eq!(gas_left, U256::from(99_656));
+	assert_eq!(gas_left, U256::from(99_578));
 
 	assert_eq!(
 		result,
@@ -233,7 +233,7 @@ fn suicide() {
 		}
 	};
 
-	assert_eq!(gas_left, U256::from(99_740));
+	assert_eq!(gas_left, U256::from(99_621));
 	assert!(ext.suicides.contains(&refund));
 }
 
@@ -264,7 +264,7 @@ fn create() {
 	assert!(ext.calls.contains(
 		&FakeCall {
 			call_type: FakeCallType::Create,
-			gas: U256::from(99_767),
+			gas: U256::from(99_674),
 			sender_address: None,
 			receive_address: None,
 			value: Some(1_000_000_000.into()),
@@ -272,7 +272,7 @@ fn create() {
 			code_address: None,
 		}
 	));
-	assert_eq!(gas_left, U256::from(99_759));
+	assert_eq!(gas_left, U256::from(99_596));
 }
 
 
@@ -306,7 +306,7 @@ fn call_code() {
 	assert!(ext.calls.contains(
 		&FakeCall {
 			call_type: FakeCallType::Call,
-			gas: U256::from(99_061),
+			gas: U256::from(99_069),
 			sender_address: Some(sender),
 			receive_address: Some(receiver),
 			value: None,
@@ -314,7 +314,7 @@ fn call_code() {
 			code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()),
 		}
 	));
-	assert_eq!(gas_left, U256::from(94196));
+	assert_eq!(gas_left, U256::from(94144));
 
 	// siphash result
 	let res = LittleEndian::read_u32(&result[..]);
@@ -351,7 +351,7 @@ fn call_static() {
 	assert!(ext.calls.contains(
 		&FakeCall {
 			call_type: FakeCallType::Call,
-			gas: U256::from(99_061),
+			gas: U256::from(99_069),
 			sender_address: Some(sender),
 			receive_address: Some(receiver),
 			value: None,
@@ -359,7 +359,7 @@ fn call_static() {
 			code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()),
 		}
 	));
-	assert_eq!(gas_left, U256::from(94196));
+	assert_eq!(gas_left, U256::from(94144));
 
 	// siphash result
 	let res = LittleEndian::read_u32(&result[..]);
@@ -378,13 +378,158 @@ fn realloc() {
 	let mut ext = FakeExt::new();
 
 	let (gas_left, result) = {
 		let mut interpreter = wasm_interpreter();
 		let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors");
 		match result {
 			GasLeft::Known(_) => { panic!("Realloc should return payload"); },
 			GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
 		}
 	};
-	assert_eq!(gas_left, U256::from(98326));
+	assert_eq!(gas_left, U256::from(99432));
 	assert_eq!(result, vec![0u8; 2]);
 }
+
+// Tests that contract's ability to read from a storage
+// Test prepopulates address into storage, than executes a contract which read that address from storage and write this address into result
+#[test]
+fn storage_read() {
+	let code = load_sample!("storage_read.wasm");
+	let address: Address = "0f572e5295c57f15886f9b263e2f6d2d6c7b5ec6".parse().unwrap();
+
+	let mut params = ActionParams::default();
+	params.gas = U256::from(100_000);
+	params.code = Some(Arc::new(code));
+	let mut ext = FakeExt::new();
+	ext.store.insert("0100000000000000000000000000000000000000000000000000000000000000".into(), address.into());
+
+	let (gas_left, result) = {
+		let mut interpreter = wasm_interpreter();
+		let result = interpreter.exec(params, &mut ext).expect("Interpreter to execute without any errors");
+		match result {
+			GasLeft::Known(_) => { panic!("storage_read should return payload"); },
+			GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
+		}
+	};
+
+	assert_eq!(gas_left, U256::from(99682));
+	assert_eq!(Address::from(&result[12..32]), address);
+}
+
+macro_rules! reqrep_test {
+	($name: expr, $input: expr) => {
+		{
+			::ethcore_logger::init_log();
+			let code = load_sample!($name);
+
+			let mut params = ActionParams::default();
+			params.gas = U256::from(100_000);
+			params.code = Some(Arc::new(code));
+			params.data = Some($input);
+
+			let (gas_left, result) = {
+				let mut interpreter = wasm_interpreter();
+				let result = interpreter.exec(params, &mut FakeExt::new()).expect("Interpreter to execute without any errors");
+				match result {
+					GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); },
+					GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
+				}
+			};
+
+			(gas_left, result)
+		}
+	}
+}
+
+// math_* tests check the ability of wasm contract to perform big integer operations
+// - addition
+// - multiplication
+// - substraction
+// - division
+
+// addition
+#[test]
+fn math_add() {
+
+	let (gas_left, result) = reqrep_test!(
+		"math.wasm",
+		{
+			let mut args = [0u8; 65];
+			let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap();
+			let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap();
+			arg_a.to_big_endian(&mut args[1..33]);
+			arg_b.to_big_endian(&mut args[33..65]);
+			args.to_vec()
+		}
+	);
+
+	assert_eq!(gas_left, U256::from(98087));
+	assert_eq!(
+		U256::from_dec_str("1888888888888888888888888888887").unwrap(),
+		(&result[..]).into()
+	);
+}
+
+// multiplication
+#[test]
+fn math_mul() {
+	let (gas_left, result) = reqrep_test!(
+		"math.wasm",
+		{
+			let mut args = [1u8; 65];
+			let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap();
+			let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap();
+			arg_a.to_big_endian(&mut args[1..33]);
+			arg_b.to_big_endian(&mut args[33..65]);
+			args.to_vec()
+		}
+	);
+
+	assert_eq!(gas_left, U256::from(97236));
+	assert_eq!(
+		U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(),
+		(&result[..]).into()
+	);
+}
+
+// substraction
+#[test]
+fn math_sub() {
+	let (gas_left, result) = reqrep_test!(
+		"math.wasm",
+		{
+			let mut args = [2u8; 65];
+			let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap();
+			let arg_b = U256::from_dec_str("888888888888888888888888888888").unwrap();
+			arg_a.to_big_endian(&mut args[1..33]);
+			arg_b.to_big_endian(&mut args[33..65]);
+			args.to_vec()
+		}
+	);
+
+	assert_eq!(gas_left, U256::from(98131));
+	assert_eq!(
+		U256::from_dec_str("111111111111111111111111111111").unwrap(),
+		(&result[..]).into()
+	);
+}
+
+#[test]
+fn math_div() {
+	let (gas_left, result) = reqrep_test!(
+		"math.wasm",
+		{
+			let mut args = [3u8; 65];
+			let arg_a = U256::from_dec_str("999999999999999999999999999999").unwrap();
+			let arg_b = U256::from_dec_str("888888888888888888888888").unwrap();
+			arg_a.to_big_endian(&mut args[1..33]);
+			arg_b.to_big_endian(&mut args[33..65]);
+			args.to_vec()
+		}
+	);
+
+	assert_eq!(gas_left, U256::from(91420));
+	assert_eq!(
+		U256::from_dec_str("1125000").unwrap(),
+		(&result[..]).into()
+	);
+}
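The math_* tests above all send the contract a 65-byte request: judging from the [0u8; 65] / [1u8; 65] / [2u8; 65] / [3u8; 65] initializers, byte 0 selects the operation (add, mul, sub, div) and bytes 1..33 and 33..65 carry the two operands as 32-byte big-endian integers written with U256::to_big_endian. A small stand-alone sketch of that packing with plain u64 operands follows; it is illustrative only, the real tests use full 256-bit values.

    // Illustrative packing of a math.wasm request: [op | a: 32-byte BE | b: 32-byte BE].
    fn pack_request(op: u8, a: u64, b: u64) -> Vec<u8> {
        let mut args = vec![0u8; 65];
        args[0] = op;
        for i in 0..8 {
            // a big-endian u64 occupies the last 8 bytes of each 32-byte operand slot
            args[25 + i] = (a >> (56 - 8 * i)) as u8;
            args[57 + i] = (b >> (56 - 8 * i)) as u8;
        }
        args
    }

    fn main() {
        let req = pack_request(0, 1, 2); // op 0 selects addition in these tests
        assert_eq!(req.len(), 65);
        assert_eq!(req[0], 0);
        assert_eq!(req[32], 1); // least significant byte of operand a
        assert_eq!(req[64], 2); // least significant byte of operand b
    }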
@@ -5,7 +5,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 
 [dependencies]
 rust-crypto = "0.2.36"
-tiny-keccak = "1.2"
+tiny-keccak = "1.3"
 eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
 ethkey = { path = "../ethkey" }
 ethcore-bigint = { path = "../util/bigint" }
@@ -6,7 +6,7 @@ authors = ["Parity Technologies <admin@parity.io>"]
 [dependencies]
 rand = "0.3.14"
 lazy_static = "0.2"
-tiny-keccak = "1.2"
+tiny-keccak = "1.3"
 eth-secp256k1 = { git = "https://github.com/paritytech/rust-secp256k1" }
 rustc-hex = "1.0"
 ethcore-bigint = { path = "../util/bigint" }
@@ -13,7 +13,7 @@ serde_json = "1.0"
 serde_derive = "1.0"
 rustc-hex = "1.0"
 rust-crypto = "0.2.36"
-tiny-keccak = "1.0"
+tiny-keccak = "1.3"
 time = "0.1.34"
 itertools = "0.5"
 parking_lot = "0.4"
@@ -14,7 +14,8 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use ethkey::{KeyPair, sign, Address, Signature, Message, Public};
+use ethkey::{KeyPair, sign, Address, Signature, Message, Public, Secret};
+use crypto::ecdh::agree;
 use {json, Error, crypto};
 use account::Version;
 use super::crypto::Crypto;
@@ -135,6 +136,12 @@ impl SafeAccount {
 		crypto::ecies::decrypt(&secret, shared_mac, message).map_err(From::from)
 	}
 
+	/// Agree on shared key.
+	pub fn agree(&self, password: &str, other: &Public) -> Result<Secret, Error> {
+		let secret = self.crypto.secret(password)?;
+		agree(&secret, other).map_err(From::from)
+	}
+
 	/// Derive public key.
 	pub fn public(&self, password: &str) -> Result<Public, Error> {
 		let secret = self.crypto.secret(password)?;
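SafeAccount::agree, added above, decrypts the account's secret key with the password and runs an ECDH agreement against the peer's public key, so both sides derive the same shared secret without transmitting it. The property being relied on can be illustrated with a toy Diffie-Hellman over machine integers; this is not cryptographically secure and is not the secp256k1 code path used by crypto::ecdh::agree.

    // Toy Diffie-Hellman demonstrating the "agree" property (NOT cryptographically secure).
    const P: u64 = 2_147_483_647; // a small Mersenne prime, fine for a toy example
    const G: u64 = 5;

    fn modpow(mut base: u64, mut exp: u64, modulus: u64) -> u64 {
        let mut acc = 1u64;
        base %= modulus;
        while exp > 0 {
            if exp & 1 == 1 { acc = acc * base % modulus; }
            base = base * base % modulus;
            exp >>= 1;
        }
        acc
    }

    fn main() {
        let (alice_secret, bob_secret) = (123_456_789u64, 987_654_321u64);
        let alice_public = modpow(G, alice_secret, P);
        let bob_public = modpow(G, bob_secret, P);
        // Each side combines its own secret with the other's public key...
        let shared_a = modpow(bob_public, alice_secret, P);
        let shared_b = modpow(alice_public, bob_secret, P);
        // ...and both arrive at the same shared secret, which is what
        // SafeAccount::agree / EthStore::agree provide over secp256k1.
        assert_eq!(shared_a, shared_b);
    }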
@@ -97,6 +97,10 @@ impl SimpleSecretStore for EthStore {
 		self.store.sign_derived(account_ref, password, derivation, message)
 	}
 
+	fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result<Secret, Error> {
+		self.store.agree(account, password, other)
+	}
+
 	fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
 		let account = self.get(account)?;
 		account.decrypt(password, shared_mac, message)
@@ -495,18 +499,26 @@ impl SimpleSecretStore for EthMultiStore {
 
 	fn sign(&self, account: &StoreAccountRef, password: &str, message: &Message) -> Result<Signature, Error> {
 		let accounts = self.get_matching(account, password)?;
-		for account in accounts {
-			return account.sign(password, message);
+		match accounts.first() {
+			Some(ref account) => account.sign(password, message),
+			None => Err(Error::InvalidPassword),
 		}
-		Err(Error::InvalidPassword)
 	}
 
 	fn decrypt(&self, account: &StoreAccountRef, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
 		let accounts = self.get_matching(account, password)?;
-		for account in accounts {
-			return account.decrypt(password, shared_mac, message);
+		match accounts.first() {
+			Some(ref account) => account.decrypt(password, shared_mac, message),
+			None => Err(Error::InvalidPassword),
+		}
+	}
+
+	fn agree(&self, account: &StoreAccountRef, password: &str, other: &Public) -> Result<Secret, Error> {
+		let accounts = self.get_matching(account, password)?;
+		match accounts.first() {
+			Some(ref account) => account.agree(password, other),
+			None => Err(Error::InvalidPassword),
 		}
-		Err(Error::InvalidPassword)
 	}
 
 	fn create_vault(&self, name: &str, password: &str) -> Result<(), Error> {
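The EthMultiStore changes above replace a for-loop that returned on the first matching account, with a fallthrough Err, by a single match on accounts.first(); this keeps each method a single expression and makes the no-match path explicit. The shape in isolation, with stand-in types rather than the ethstore ones:

    // Illustrative only: the control-flow shape used for sign/decrypt/agree above.
    #[derive(Debug, PartialEq)]
    enum Error { InvalidPassword }

    fn sign_with_first(accounts: &[&str], message: &str) -> Result<String, Error> {
        match accounts.first() {
            Some(account) => Ok(format!("{} signed by {}", message, account)),
            None => Err(Error::InvalidPassword),
        }
    }

    fn main() {
        assert!(sign_with_first(&["primary"], "msg").is_ok());
        assert_eq!(sign_with_first(&[], "msg"), Err(Error::InvalidPassword));
    }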
Some files were not shown because too many files have changed in this diff.