Merge branch 'master' into ui-2
commit 4a184dbbe6

103  Cargo.lock (generated)

@@ -2,12 +2,12 @@
 name = "wasm"
 version = "0.1.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-bigint 0.1.3",
  "ethcore-logger 1.8.0",
  "ethcore-util 1.8.0",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "vm 0.1.0",
  "wasm-utils 0.1.0 (git+https://github.com/paritytech/wasm-utils)",
 ]
@@ -129,7 +129,7 @@ name = "bigint"
 version = "4.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -141,7 +141,7 @@ name = "bincode"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.1.32 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.9 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -210,7 +210,7 @@ name = "bn"
 version = "0.4.4"
 source = "git+https://github.com/paritytech/bn#b97e95a45f4484a41a515338c4f0e093bf6675e0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -222,7 +222,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
 name = "byteorder"
-version = "1.0.0"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"

 [[package]]
@@ -230,7 +230,7 @@ name = "bytes"
 version = "0.4.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "iovec 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -295,6 +295,15 @@ dependencies = [
  "unicode-normalization 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

+[[package]]
+name = "coco"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "common-types"
 version = "0.1.0"
@@ -395,14 +404,6 @@ dependencies = [
  "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

-[[package]]
-name = "deque"
-version = "0.3.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "difference"
 version = "1.0.0"
@@ -507,7 +508,7 @@ dependencies = [
  "bloomable 0.1.0",
  "bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bn 0.4.4 (git+https://github.com/paritytech/bn)",
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
  "common-types 0.1.0",
  "crossbeam 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -544,6 +545,7 @@ dependencies = [
  "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "price-info 1.7.0",
  "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
  "rlp_derive 0.1.0",
  "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -742,7 +744,7 @@ dependencies = [
 name = "ethcore-secretstore"
 version = "1.0.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore 1.8.0",
  "ethcore-bigint 0.1.3",
@@ -855,7 +857,7 @@ dependencies = [
 name = "ethkey"
 version = "0.2.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "eth-secp256k1 0.5.6 (git+https://github.com/paritytech/rust-secp256k1)",
  "ethcore-bigint 0.1.3",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -948,7 +950,7 @@ name = "evm"
 version = "0.1.0"
 dependencies = [
  "bit-set 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "common-types 0.1.0",
  "ethcore-bigint 0.1.3",
  "ethcore-logger 1.8.0",
@@ -959,7 +961,6 @@ dependencies = [
  "heapsize 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.2.0",
  "rustc-hex 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1049,7 +1050,7 @@ name = "gcc"
 version = "0.3.51"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1421,7 +1422,7 @@ name = "libflate"
 version = "0.1.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -1634,7 +1635,7 @@ dependencies = [
 name = "native-contracts"
 version = "0.1.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-bigint 0.1.3",
  "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2062,6 +2063,7 @@ dependencies = [
  "ethcore-ipc 1.8.0",
  "ethcore-light 1.8.0",
  "ethcore-logger 1.8.0",
+ "ethcore-network 1.8.0",
  "ethcore-util 1.8.0",
  "ethcrypto 0.1.0",
  "ethjson 0.1.0",
@@ -2159,7 +2161,7 @@ dependencies = [
 [[package]]
 name = "parity-ui-precompiled"
 version = "1.4.0"
-source = "git+https://github.com/paritytech/js-precompiled.git#75e4afa0b77396aa8feefb49276672c3fe885a88"
+source = "git+https://github.com/paritytech/js-precompiled.git#d2b6b36b4ea27ed5c8b6b1b3cb02dc08983cda18"
 dependencies = [
  "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2187,10 +2189,10 @@ dependencies = [

 [[package]]
 name = "parity-wasm"
-version = "0.12.1"
+version = "0.14.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -2200,7 +2202,7 @@ name = "parity-whisper"
 version = "0.1.0"
 dependencies = [
  "bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-bigint 0.1.3",
  "ethcore-network 1.8.0",
  "ethcrypto 0.1.0",
@@ -2433,18 +2435,27 @@ dependencies = [

 [[package]]
 name = "rayon"
-version = "0.7.0"
+version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
+name = "rayon"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
 name = "rayon-core"
-version = "1.0.0"
+version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2496,7 +2507,7 @@ dependencies = [
  "gcc 0.3.51 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "libc 0.2.21 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "untrusted 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

@@ -2504,7 +2515,7 @@ dependencies = [
 name = "rlp"
 version = "0.2.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "elastic-array 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethcore-bigint 0.1.3",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2647,6 +2658,11 @@ name = "scoped-tls"
 version = "0.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"

+[[package]]
+name = "scopeguard"
+version = "0.3.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
 [[package]]
 name = "secur32-sys"
 version = "0.2.0"
@@ -3257,7 +3273,7 @@ dependencies = [
 name = "vm"
 version = "0.1.0"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "common-types 0.1.0",
  "ethcore-bigint 0.1.3",
  "ethcore-util 1.8.0",
@@ -3277,13 +3293,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 [[package]]
 name = "wasm-utils"
 version = "0.1.0"
-source = "git+https://github.com/paritytech/wasm-utils#9462bcc0680f0ec2c876abdf75bae981dd4344a5"
+source = "git+https://github.com/paritytech/wasm-utils#95f9f04d1036c39de5af1c811c6e5dc488fb73d9"
 dependencies = [
  "clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -3301,7 +3318,7 @@ name = "ws"
 version = "0.7.1"
 source = "git+https://github.com/tomusdrw/ws-rs#f8306a798b7541d64624299a83a2c934f173beed"
 dependencies = [
- "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "httparse 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3380,13 +3397,14 @@ dependencies = [
 "checksum bloomchain 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f421095d2a76fc24cd3fb3f912b90df06be7689912b1bdb423caefae59c258d"
 "checksum bn 0.4.4 (git+https://github.com/paritytech/bn)" = "<none>"
 "checksum byteorder 0.5.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0fc10e8cc6b2580fda3f36eb6dc5316657f812a3df879a44a66fc9f0fdbc4855"
-"checksum byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c40977b0ee6b9885c9013cd41d9feffdd22deb3bb4dc3a71d901cc7a77de18c8"
+"checksum byteorder 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ff81738b726f5d099632ceaffe7fb65b90212e8dce59d518729e7e8634032d3d"
 "checksum bytes 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "8b24f16593f445422331a5eed46b72f7f171f910fead4f2ea8f17e727e9c5c14"
 "checksum cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "de1e760d7b6535af4241fca8bd8adf68e2e7edacc6b29f5d399050c5e48cf88c"
 "checksum cid 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "34aa7da06f10541fbca6850719cdaa8fa03060a5d2fb33840f149cf8133a00c7"
 "checksum clap 2.24.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6b8f69e518f967224e628896b54e41ff6acfb4dcfefc5076325c36525dac900f"
 "checksum clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "5b4fabf979ddf6419a313c1c0ada4a5b95cfd2049c56e8418d622d27b4b6ff32"
 "checksum clippy_lints 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)" = "ce96ec05bfe018a0d5d43da115e54850ea2217981ff0f2e462780ab9d594651a"
+"checksum coco 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "c06169f5beb7e31c7c67ebf5540b8b472d23e3eade3b2ec7d1f5b504a85f91bd"
 "checksum conv 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "78ff10625fd0ac447827aa30ea8b861fead473bb60aeb73af6c1c58caf0d1299"
 "checksum cookie 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d53b80dde876f47f03cda35303e368a79b91c70b0d65ecba5fd5280944a08591"
 "checksum core-foundation 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "20a6d0448d3a99d977ae4a2aa5a98d886a923e863e81ad9ff814645b6feb3bbd"
@@ -3398,7 +3416,6 @@ dependencies = [
 "checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9"
 "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf"
 "checksum dbghelp-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "97590ba53bcb8ac28279161ca943a924d1fd4a8fb3fa63302591647c4fc5b850"
-"checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf"
 "checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8"
 "checksum docopt 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3b5b93718f8b3e5544fcc914c43de828ca6c6ace23e0332c6080a2977b49787a"
 "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f"
@@ -3499,7 +3516,7 @@ dependencies = [
 "checksum parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e1d06f6ee0fda786df3784a96ee3f0629f529b91cbfb7d142f6410e6bcd1ce2c"
 "checksum parity-tokio-ipc 0.1.5 (git+https://github.com/nikvolf/parity-tokio-ipc)" = "<none>"
 "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/paritytech/js-precompiled.git)" = "<none>"
-"checksum parity-wasm 0.12.1 (registry+https://github.com/rust-lang/crates.io-index)" = "51104c8b8da5cd0ebe0ab765dfab37bc1927b4a01a3d870b0fe09d9ee65e35ea"
+"checksum parity-wasm 0.14.3 (registry+https://github.com/rust-lang/crates.io-index)" = "466c01423614bbf89a37b0fc081e1ed3523dfd9064497308ad3f9c7c9f0092bb"
 "checksum parity-wordlist 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "52142d717754f7ff7ef0fc8da1bdce4f302dd576fb9bf8b727d6a5fdef33348d"
 "checksum parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "aebb68eebde2c99f89592d925288600fde220177e46b5c9a91ca218d245aeedf"
 "checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068"
@@ -3523,8 +3540,9 @@ dependencies = [
 "checksum quine-mc_cluskey 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a6683b0e23d80813b1a535841f0048c1537d3f86d63c999e8373b39a9b0eb74a"
 "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
 "checksum rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)" = "2791d88c6defac799c3f20d74f094ca33b9332612d9aef9078519c82e4fe04a5"
-"checksum rayon 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8c83adcb08e5b922e804fe1918142b422602ef11f2fd670b0b52218cb5984a20"
-"checksum rayon-core 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "767d91bacddf07d442fe39257bf04fd95897d1c47c545d009f6beb03efd038f8"
+"checksum rayon 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a77c51c07654ddd93f6cb543c7a849863b03abc7e82591afda6dc8ad4ac3ac4a"
+"checksum rayon 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "b614fe08b6665cb9a231d07ac1364b0ef3cb3698f1239ee0c4c3a88a524f54c8"
+"checksum rayon-core 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7febc28567082c345f10cddc3612c6ea020fc3297a1977d472cf9fdb73e6e493"
 "checksum regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4278c17d0f6d62dfef0ab00028feb45bd7d2102843f80763474eeb1be8a10c01"
 "checksum regex-syntax 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "841591b1e05609a643e3b4d0045fce04f701daba7151ddcd3ad47b080693d5a9"
 "checksum regex-syntax 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "2f9191b1f57603095f105d317e375d19b1c9c5c3185ea9633a99a6dcbed04457"
@@ -3543,6 +3561,7 @@ dependencies = [
 "checksum rustc_version 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "1e114e275f7c9b5d50bb52b28f9aac1921209f02aa6077c8b255e21eefaf8ffa"
 "checksum schannel 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "4e45ac5e9e4698c1c138d2972bedcd90b81fe1efeba805449d2bdd54512de5f9"
 "checksum scoped-tls 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f417c22df063e9450888a7561788e9bd46d3bb3c1466435b4eccb903807f147d"
+"checksum scopeguard 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "c79eb2c3ac4bc2507cda80e7f3ac5b88bd8eae4c0914d5663e6a8933994be918"
 "checksum secur32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3f412dfa83308d893101dd59c10d6fda8283465976c28c287c5c855bf8d216bc"
 "checksum security-framework 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "42ddf098d78d0b64564b23ee6345d07573e7d10e52ad86875d89ddf5f8378a02"
 "checksum security-framework-sys 0.1.14 (registry+https://github.com/rust-lang/crates.io-index)" = "5bacdada57ea62022500c457c8571c17dfb5e6240b7c8eac5916ffa8c7138a55"

@@ -49,6 +49,7 @@ num = "0.1"
 num_cpus = "1.2"
 parking_lot = "0.4"
 price-info = { path = "../price-info" }
+rayon = "0.8"
 rand = "0.3"
 rlp = { path = "../util/rlp" }
 rlp_derive = { path = "../util/rlp_derive" }

@@ -16,11 +16,10 @@ lazy_static = "0.2"
 log = "0.3"
 rlp = { path = "../../util/rlp" }
 vm = { path = "../vm" }
-parity-wasm = "0.12"
-parking_lot = "0.4"
 ethcore-logger = { path = "../../logger" }
 wasm-utils = { git = "https://github.com/paritytech/wasm-utils" }
 hash = { path = "../../util/hash" }
+parking_lot = "0.4"

 [dev-dependencies]
 rustc-hex = "1.0"

@@ -23,7 +23,6 @@ extern crate ethcore_util as util;
 extern crate ethcore_bigint as bigint;
 extern crate ethjson;
 extern crate rlp;
-extern crate parity_wasm;
 extern crate parking_lot;
 extern crate wasm_utils;
 extern crate ethcore_logger;

74  ethcore/light/src/client/fetch.rs (new file)

@@ -0,0 +1,74 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+//! Trait for fetching chain data.
+
+use std::sync::Arc;
+
+use ethcore::encoded;
+use ethcore::engines::{Engine, StateDependentProof};
+use ethcore::header::Header;
+use ethcore::receipt::Receipt;
+use futures::future::IntoFuture;
+use bigint::hash::H256;
+
+/// Provides full chain data.
+pub trait ChainDataFetcher: Send + Sync + 'static {
+	/// Error type when data unavailable.
+	type Error: ::std::fmt::Debug;
+
+	/// Future for fetching block body.
+	type Body: IntoFuture<Item=encoded::Block, Error=Self::Error>;
+	/// Future for fetching block receipts.
+	type Receipts: IntoFuture<Item=Vec<Receipt>, Error=Self::Error>;
+	/// Future for fetching epoch transition
+	type Transition: IntoFuture<Item=Vec<u8>, Error=Self::Error>;
+
+	/// Fetch a block body.
+	fn block_body(&self, header: &Header) -> Self::Body;
+
+	/// Fetch block receipts.
+	fn block_receipts(&self, header: &Header) -> Self::Receipts;
+
+	/// Fetch epoch transition proof at given header.
+	fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>) -> Self::Transition;
+}
+
+/// Fetcher implementation which cannot fetch anything.
+pub struct Unavailable;
+
+/// Create a fetcher which has all data unavailable.
+pub fn unavailable() -> Unavailable { Unavailable }
+
+impl ChainDataFetcher for Unavailable {
+	type Error = &'static str;
+
+	type Body = Result<encoded::Block, &'static str>;
+	type Receipts = Result<Vec<Receipt>, &'static str>;
+	type Transition = Result<Vec<u8>, &'static str>;
+
+	fn block_body(&self, _header: &Header) -> Self::Body {
+		Err("fetching block bodies unavailable")
+	}
+
+	fn block_receipts(&self, _header: &Header) -> Self::Receipts {
+		Err("fetching block receipts unavailable")
+	}
+
+	fn epoch_transition(&self, _h: H256, _e: Arc<Engine>, _check: Arc<StateDependentProof>) -> Self::Transition {
+		Err("fetching epoch transition proofs unavailable")
+	}
+}
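
The `ChainDataFetcher` trait added above only requires that each associated type implement `IntoFuture`, and `Result<T, E>` already does, so a synchronous fetcher needs no executor. Below is a minimal sketch, not part of this commit, of a custom implementation mirroring `Unavailable`; the `NoNetwork` and `NotConnected` names are invented for illustration, and the trait is assumed to be in scope alongside the same imports used by fetch.rs.

// Illustration only: a fetcher that reports all chain data as missing,
// but with its own error type instead of a &'static str.
use std::sync::Arc;

use ethcore::encoded;
use ethcore::engines::{Engine, StateDependentProof};
use ethcore::header::Header;
use ethcore::receipt::Receipt;
use bigint::hash::H256;

struct NoNetwork;

#[derive(Debug)]
struct NotConnected;

impl ChainDataFetcher for NoNetwork {
	type Error = NotConnected;

	// Result<T, E> implements IntoFuture<Item=T, Error=E>, so plain Results
	// satisfy the associated-type bounds without any asynchronous machinery.
	type Body = Result<encoded::Block, NotConnected>;
	type Receipts = Result<Vec<Receipt>, NotConnected>;
	type Transition = Result<Vec<u8>, NotConnected>;

	fn block_body(&self, _header: &Header) -> Self::Body {
		Err(NotConnected)
	}

	fn block_receipts(&self, _header: &Header) -> Self::Receipts {
		Err(NotConnected)
	}

	fn epoch_transition(&self, _hash: H256, _engine: Arc<Engine>, _checker: Arc<StateDependentProof>) -> Self::Transition {
		Err(NotConnected)
	}
}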

@@ -18,11 +18,12 @@
 //!
 //! Unlike a full node's `BlockChain` this doesn't store much in the database.
 //! It stores candidates for the last 2048-4096 blocks as well as CHT roots for
-//! historical blocks all the way to the genesis.
+//! historical blocks all the way to the genesis. If the engine makes use
+//! of epoch transitions, those are stored as well.
 //!
 //! This is separate from the `BlockChain` for two reasons:
 //! - It stores only headers (and a pruned subset of them)
-//! - To allow for flexibility in the database layout once that's incorporated.
+//! - To allow for flexibility in the database layout..

 use std::collections::BTreeMap;
 use std::sync::Arc;
@@ -30,15 +31,20 @@ use std::sync::Arc;
 use cht;

 use ethcore::block_status::BlockStatus;
-use ethcore::error::BlockError;
+use ethcore::error::{BlockImportError, BlockError};
 use ethcore::encoded;
 use ethcore::header::Header;
 use ethcore::ids::BlockId;
+use ethcore::spec::Spec;
+use ethcore::engines::epoch::{
+	Transition as EpochTransition,
+	PendingTransition as PendingEpochTransition
+};

 use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp};
 use heapsize::HeapSizeOf;
 use bigint::prelude::U256;
-use bigint::hash::H256;
+use bigint::hash::{H256, H256FastMap, H264};
 use util::kvdb::{DBTransaction, KeyValueDB};

 use cache::Cache;
@@ -54,6 +60,9 @@ const HISTORY: u64 = 2048;
 /// The best block key. Maps to an RLP list: [best_era, last_era]
 const CURRENT_KEY: &'static [u8] = &*b"best_and_latest";

+/// Key storing the last canonical epoch transition.
+const LAST_CANONICAL_TRANSITION: &'static [u8] = &*b"canonical_transition";
+
 /// Information about a block.
 #[derive(Debug, Clone)]
 pub struct BlockDescriptor {
@@ -101,7 +110,6 @@ impl Encodable for Entry {

 impl Decodable for Entry {
 	fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
-
 		let mut candidates = SmallVec::<[Candidate; 3]>::new();

 		for item in rlp.iter() {
@@ -131,6 +139,42 @@ fn era_key(number: u64) -> String {
 	format!("candidates_{}", number)
 }

+fn pending_transition_key(block_hash: H256) -> H264 {
+	const LEADING: u8 = 1;
+
+	let mut key = H264::default();
+
+	key[0] = LEADING;
+	key.0[1..].copy_from_slice(&block_hash.0[..]);
+
+	key
+}
+
+fn transition_key(block_hash: H256) -> H264 {
+	const LEADING: u8 = 2;
+
+	let mut key = H264::default();
+
+	key[0] = LEADING;
+	key.0[1..].copy_from_slice(&block_hash.0[..]);
+
+	key
+}
+
+// encode last canonical transition entry: header and proof.
+fn encode_canonical_transition(header: &Header, proof: &[u8]) -> Vec<u8> {
+	let mut stream = RlpStream::new_list(2);
+	stream.append(header).append(&proof);
+	stream.out()
+}
+
+// decode last canonical transition entry.
+fn decode_canonical_transition(t: &[u8]) -> Result<(Header, &[u8]), DecoderError> {
+	let rlp = UntrustedRlp::new(t);
+
+	Ok((rlp.val_at(0)?, rlp.at(1)?.data()?))
+}
+
 /// Pending changes from `insert` to be applied after the database write has finished.
 pub struct PendingChanges {
 	best_block: Option<BlockDescriptor>, // new best block.
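
The two key helpers above tag each stored proof with a leading byte (1 for pending transitions, 2 for finalized ones) followed by the 32-byte block hash, which is why they return the 33-byte `H264` rather than `H256`; the last canonical transition itself is stored as an RLP list of the header and the raw proof bytes. A small round-trip sketch of that encoding, not part of the commit, assuming the helpers above are in scope and that `Header` implements `Default` as elsewhere in ethcore:

// Sketch only: encode and decode the last-canonical-transition entry format.
let header = Header::default();        // assumed available; any stored header works
let proof: Vec<u8> = vec![1, 2, 3];    // placeholder proof bytes

let encoded = encode_canonical_transition(&header, &proof);
let (decoded_header, decoded_proof) = decode_canonical_transition(&encoded)
	.expect("entry was just encoded; decoding cannot fail");

assert_eq!(decoded_header.number(), header.number());
assert_eq!(decoded_proof, &proof[..]);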

@@ -141,6 +185,7 @@ pub struct HeaderChain {
 	genesis_header: encoded::Header, // special-case the genesis.
 	candidates: RwLock<BTreeMap<u64, Entry>>,
 	best_block: RwLock<BlockDescriptor>,
+	live_epoch_proofs: RwLock<H256FastMap<EpochTransition>>,
 	db: Arc<KeyValueDB>,
 	col: Option<u32>,
 	cache: Arc<Mutex<Cache>>,
@@ -148,8 +193,16 @@ pub struct HeaderChain {

 impl HeaderChain {
 	/// Create a new header chain given this genesis block and database to read from.
-	pub fn new(db: Arc<KeyValueDB>, col: Option<u32>, genesis: &[u8], cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
-		use ethcore::views::HeaderView;
+	pub fn new(
+		db: Arc<KeyValueDB>,
+		col: Option<u32>,
+		spec: &Spec,
+		cache: Arc<Mutex<Cache>>,
+	) -> Result<Self, String> {
+		let mut live_epoch_proofs = ::std::collections::HashMap::default();
+
+		let genesis = ::rlp::encode(&spec.genesis_header()).into_vec();
+		let decoded_header = spec.genesis_header();

 		let chain = if let Some(current) = db.get(col, CURRENT_KEY)? {
 			let (best_number, highest_number) = {
@@ -160,12 +213,24 @@ impl HeaderChain {
 			let mut cur_number = highest_number;
 			let mut candidates = BTreeMap::new();

-			// load all era entries and referenced headers within them.
+			// load all era entries, referenced headers within them,
+			// and live epoch proofs.
 			while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? {
 				let entry: Entry = ::rlp::decode(&entry);
 				trace!(target: "chain", "loaded header chain entry for era {} with {} candidates",
 					cur_number, entry.candidates.len());

+				for c in &entry.candidates {
+					let key = transition_key(c.hash);
+
+					if let Some(proof) = db.get(col, &*key)? {
+						live_epoch_proofs.insert(c.hash, EpochTransition {
+							block_hash: c.hash,
+							block_number: cur_number,
+							proof: proof.into_vec(),
+						});
+					}
+				}
 				candidates.insert(cur_number, entry);

 				cur_number -= 1;
@@ -187,29 +252,42 @@ impl HeaderChain {
 			};

 			HeaderChain {
-				genesis_header: encoded::Header::new(genesis.to_owned()),
+				genesis_header: encoded::Header::new(genesis),
 				best_block: RwLock::new(best_block),
 				candidates: RwLock::new(candidates),
+				live_epoch_proofs: RwLock::new(live_epoch_proofs),
 				db: db,
 				col: col,
 				cache: cache,
 			}
 		} else {
-			let g_view = HeaderView::new(genesis);
 			HeaderChain {
-				genesis_header: encoded::Header::new(genesis.to_owned()),
+				genesis_header: encoded::Header::new(genesis),
 				best_block: RwLock::new(BlockDescriptor {
-					hash: g_view.hash(),
+					hash: decoded_header.hash(),
 					number: 0,
-					total_difficulty: g_view.difficulty(),
+					total_difficulty: *decoded_header.difficulty(),
 				}),
 				candidates: RwLock::new(BTreeMap::new()),
+				live_epoch_proofs: RwLock::new(live_epoch_proofs),
 				db: db,
 				col: col,
 				cache: cache,
 			}
 		};

+		// instantiate genesis epoch data if it doesn't exist.
+		if let None = chain.db.get(col, LAST_CANONICAL_TRANSITION)? {
+			let genesis_data = spec.genesis_epoch_data()?;
+
+			{
+				let mut batch = chain.db.transaction();
+				let data = encode_canonical_transition(&decoded_header, &genesis_data);
+				batch.put_vec(col, LAST_CANONICAL_TRANSITION, data);
+				chain.db.write(batch)?;
+			}
+		}
+
 		Ok(chain)
 	}

@@ -218,10 +296,24 @@ impl HeaderChain {
 	/// This blindly trusts that the data given to it is sensible.
 	/// Returns a set of pending changes to be applied with `apply_pending`
 	/// before the next call to insert and after the transaction has been written.
-	pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result<PendingChanges, BlockError> {
+	///
+	/// If the block is an epoch transition, provide the transition along with
+	/// the header.
+	pub fn insert(
+		&self,
+		transaction: &mut DBTransaction,
+		header: Header,
+		transition_proof: Option<Vec<u8>>,
+	) -> Result<PendingChanges, BlockImportError> {
 		let hash = header.hash();
 		let number = header.number();
 		let parent_hash = *header.parent_hash();
+		let transition = transition_proof.map(|proof| EpochTransition {
+			block_hash: hash,
+			block_number: number,
+			proof: proof,
+		});
+
 		let mut pending = PendingChanges {
 			best_block: None,
 		};
@@ -237,7 +329,8 @@ impl HeaderChain {
 			candidates.get(&(number - 1))
 				.and_then(|entry| entry.candidates.iter().find(|c| c.hash == parent_hash))
 				.map(|c| c.total_difficulty)
-				.ok_or_else(|| BlockError::UnknownParent(parent_hash))?
+				.ok_or_else(|| BlockError::UnknownParent(parent_hash))
+				.map_err(BlockImportError::Block)?
 		};

 		let total_difficulty = parent_td + *header.difficulty();
@@ -262,8 +355,13 @@ impl HeaderChain {
 			transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era))
 		}

-		let raw = ::rlp::encode(&header);
-		transaction.put(self.col, &hash[..], &*raw);
+		if let Some(transition) = transition {
+			transaction.put(self.col, &*transition_key(hash), &transition.proof);
+			self.live_epoch_proofs.write().insert(hash, transition);
+		}
+
+		let raw = header.encoded().into_inner();
+		transaction.put_vec(self.col, &hash[..], raw);

 		let (best_num, is_new_best) = {
 			let cur_best = self.best_block.read();
@@ -316,8 +414,10 @@ impl HeaderChain {
 			let cht_num = cht::block_to_cht_number(earliest_era)
 				.expect("fails only for number == 0; genesis never imported; qed");

+			let mut last_canonical_transition = None;
 			let cht_root = {
 				let mut i = earliest_era;
+				let mut live_epoch_proofs = self.live_epoch_proofs.write();

 				// iterable function which removes the candidates as it goes
 				// along. this will only be called until the CHT is complete.
@@ -328,7 +428,25 @@ impl HeaderChain {

 					i += 1;

+					// prune old blocks and epoch proofs.
 					for ancient in &era_entry.candidates {
+						let maybe_transition = live_epoch_proofs.remove(&ancient.hash);
+						if let Some(epoch_transition) = maybe_transition {
+							transaction.delete(self.col, &*transition_key(ancient.hash));
+
+							if ancient.hash == era_entry.canonical_hash {
+								last_canonical_transition = match self.db.get(self.col, &ancient.hash) {
+									Err(e) => {
+										warn!(target: "chain", "Error reading from DB: {}\n
+											", e);
+										None
+									}
+									Ok(None) => panic!("stored candidates always have corresponding headers; qed"),
+									Ok(Some(header)) => Some((epoch_transition, ::rlp::decode(&header))),
+								};
+							}
+						}
+
 						transaction.delete(self.col, &ancient.hash);
 					}

@@ -342,6 +460,12 @@ impl HeaderChain {
 				// write the CHT root to the database.
 				debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root);
 				transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root));
+
+				// update the last canonical transition proof
+				if let Some((epoch_transition, header)) = last_canonical_transition {
+					let x = encode_canonical_transition(&header, &epoch_transition.proof);
+					transaction.put_vec(self.col, LAST_CANONICAL_TRANSITION, x);
+				}
 			}
 		}

@@ -367,7 +491,7 @@ impl HeaderChain {
 	/// will be returned.
 	pub fn block_hash(&self, id: BlockId) -> Option<H256> {
 		match id {
-			BlockId::Earliest => Some(self.genesis_hash()),
+			BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_hash()),
 			BlockId::Hash(hash) => Some(hash),
 			BlockId::Number(num) => {
 				if self.best_block.read().number < num { return None }
@ -518,6 +642,56 @@ impl HeaderChain {
|
|||||||
false => BlockStatus::Unknown,
|
false => BlockStatus::Unknown,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Insert a pending transition.
|
||||||
|
pub fn insert_pending_transition(&self, batch: &mut DBTransaction, hash: H256, t: PendingEpochTransition) {
|
||||||
|
let key = pending_transition_key(hash);
|
||||||
|
batch.put(self.col, &*key, &*::rlp::encode(&t));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get pending transition for a specific block hash.
|
||||||
|
pub fn pending_transition(&self, hash: H256) -> Option<PendingEpochTransition> {
|
||||||
|
let key = pending_transition_key(hash);
|
||||||
|
match self.db.get(self.col, &*key) {
|
||||||
|
Ok(val) => val.map(|x| ::rlp::decode(&x)),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(target: "chain", "Error reading from database: {}", e);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get the transition to the epoch the given parent hash is part of
|
||||||
|
/// or transitions to.
|
||||||
|
/// This will give the epoch that any children of this parent belong to.
|
||||||
|
///
|
||||||
|
/// The header corresponding the the parent hash must be stored already.
|
||||||
|
pub fn epoch_transition_for(&self, parent_hash: H256) -> Option<(Header, Vec<u8>)> {
|
||||||
|
// slow path: loop back block by block
|
||||||
|
let live_proofs = self.live_epoch_proofs.read();
|
||||||
|
|
||||||
|
for hdr in self.ancestry_iter(BlockId::Hash(parent_hash)) {
|
||||||
|
if let Some(transition) = live_proofs.get(&hdr.hash()).cloned() {
|
||||||
|
return Some((hdr.decode(), transition.proof))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// any blocks left must be descendants of the last canonical transition block.
|
||||||
|
match self.db.get(self.col, LAST_CANONICAL_TRANSITION) {
|
||||||
|
Ok(x) => {
|
||||||
|
let x = x.expect("last canonical transition always instantiated; qed");
|
||||||
|
|
||||||
|
let (hdr, proof) = decode_canonical_transition(&x)
|
||||||
|
.expect("last canonical transition always encoded correctly; qed");
|
||||||
|
|
||||||
|
Some((hdr, proof.to_vec()))
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Error reading from DB: {}", e);
|
||||||
|
None
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl HeapSizeOf for HeaderChain {
|
impl HeapSizeOf for HeaderChain {
|
||||||
@ -570,7 +744,7 @@ mod tests {
|
|||||||
|
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
|
||||||
|
|
||||||
let mut parent_hash = genesis_header.hash();
|
let mut parent_hash = genesis_header.hash();
|
||||||
let mut rolling_timestamp = genesis_header.timestamp();
|
let mut rolling_timestamp = genesis_header.timestamp();
|
||||||
@ -583,7 +757,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -603,7 +777,7 @@ mod tests {
|
|||||||
let db = make_db();
|
let db = make_db();
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
|
||||||
|
|
||||||
let mut parent_hash = genesis_header.hash();
|
let mut parent_hash = genesis_header.hash();
|
||||||
let mut rolling_timestamp = genesis_header.timestamp();
|
let mut rolling_timestamp = genesis_header.timestamp();
|
||||||
@ -616,7 +790,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -635,7 +809,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -659,7 +833,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -682,12 +856,10 @@ mod tests {
|
|||||||
#[test]
|
#[test]
|
||||||
fn earliest_is_latest() {
|
fn earliest_is_latest() {
|
||||||
let spec = Spec::new_test();
|
let spec = Spec::new_test();
|
||||||
let genesis_header = spec.genesis_header();
|
|
||||||
let db = make_db();
|
let db = make_db();
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
|
||||||
|
|
||||||
|
|
||||||
assert!(chain.block_header(BlockId::Earliest).is_some());
|
assert!(chain.block_header(BlockId::Earliest).is_some());
|
||||||
assert!(chain.block_header(BlockId::Latest).is_some());
|
assert!(chain.block_header(BlockId::Latest).is_some());
|
||||||
@ -702,7 +874,7 @@ mod tests {
|
|||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
{
|
{
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
|
||||||
let mut parent_hash = genesis_header.hash();
|
let mut parent_hash = genesis_header.hash();
|
||||||
let mut rolling_timestamp = genesis_header.timestamp();
|
let mut rolling_timestamp = genesis_header.timestamp();
|
||||||
for i in 1..10000 {
|
for i in 1..10000 {
|
||||||
@ -714,7 +886,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -722,7 +894,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
|
||||||
assert!(chain.block_header(BlockId::Number(10)).is_none());
|
assert!(chain.block_header(BlockId::Number(10)).is_none());
|
||||||
assert!(chain.block_header(BlockId::Number(9000)).is_some());
|
assert!(chain.block_header(BlockId::Number(9000)).is_some());
|
||||||
assert!(chain.cht_root(2).is_some());
|
assert!(chain.cht_root(2).is_some());
|
||||||
@ -738,7 +910,7 @@ mod tests {
|
|||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
{
|
{
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
|
||||||
let mut parent_hash = genesis_header.hash();
|
let mut parent_hash = genesis_header.hash();
|
||||||
let mut rolling_timestamp = genesis_header.timestamp();
|
let mut rolling_timestamp = genesis_header.timestamp();
|
||||||
|
|
||||||
@ -752,7 +924,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -769,7 +941,7 @@ mod tests {
|
|||||||
parent_hash = header.hash();
|
parent_hash = header.hash();
|
||||||
|
|
||||||
let mut tx = db.transaction();
|
let mut tx = db.transaction();
|
||||||
let pending = chain.insert(&mut tx, header).unwrap();
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
db.write(tx).unwrap();
|
db.write(tx).unwrap();
|
||||||
chain.apply_pending(pending);
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
@ -780,7 +952,7 @@ mod tests {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// after restoration, non-canonical eras should still be loaded.
|
// after restoration, non-canonical eras should still be loaded.
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
|
||||||
assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
|
assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10);
|
||||||
assert!(chain.candidates.read().get(&100).is_some())
|
assert!(chain.candidates.read().get(&100).is_some())
|
||||||
}
|
}
|
||||||
@ -792,10 +964,76 @@ mod tests {
|
|||||||
let db = make_db();
|
let db = make_db();
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header), cache.clone()).unwrap();
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache.clone()).unwrap();
|
||||||
|
|
||||||
assert!(chain.block_header(BlockId::Earliest).is_some());
|
assert!(chain.block_header(BlockId::Earliest).is_some());
|
||||||
assert!(chain.block_header(BlockId::Number(0)).is_some());
|
assert!(chain.block_header(BlockId::Number(0)).is_some());
|
||||||
assert!(chain.block_header(BlockId::Hash(genesis_header.hash())).is_some());
|
assert!(chain.block_header(BlockId::Hash(genesis_header.hash())).is_some());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn epoch_transitions_available_after_cht() {
|
||||||
|
let spec = Spec::new_test();
|
||||||
|
let genesis_header = spec.genesis_header();
|
||||||
|
let db = make_db();
|
||||||
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
|
|
||||||
|
let chain = HeaderChain::new(db.clone(), None, &spec, cache).unwrap();
|
||||||
|
|
||||||
|
let mut parent_hash = genesis_header.hash();
|
||||||
|
let mut rolling_timestamp = genesis_header.timestamp();
|
||||||
|
for i in 1..6 {
|
||||||
|
let mut header = Header::new();
|
||||||
|
header.set_parent_hash(parent_hash);
|
||||||
|
header.set_number(i);
|
||||||
|
header.set_timestamp(rolling_timestamp);
|
||||||
|
header.set_difficulty(*genesis_header.difficulty() * i.into());
|
||||||
|
parent_hash = header.hash();
|
||||||
|
|
||||||
|
let mut tx = db.transaction();
|
||||||
|
let epoch_proof = if i == 3 {
|
||||||
|
Some(vec![1, 2, 3, 4])
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let pending = chain.insert(&mut tx, header, epoch_proof).unwrap();
|
||||||
|
db.write(tx).unwrap();
|
||||||
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
|
rolling_timestamp += 10;
|
||||||
|
}
|
||||||
|
|
||||||
|
// these 3 should end up falling back to the genesis epoch proof in DB
|
||||||
|
for i in 0..3 {
|
||||||
|
let hash = chain.block_hash(BlockId::Number(i)).unwrap();
|
||||||
|
assert_eq!(chain.epoch_transition_for(hash).unwrap().1, Vec::<u8>::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
// these are live.
|
||||||
|
for i in 3..6 {
|
||||||
|
let hash = chain.block_hash(BlockId::Number(i)).unwrap();
|
||||||
|
assert_eq!(chain.epoch_transition_for(hash).unwrap().1, vec![1, 2, 3, 4]);
|
||||||
|
}
|
||||||
|
|
||||||
|
for i in 6..10000 {
|
||||||
|
let mut header = Header::new();
|
||||||
|
header.set_parent_hash(parent_hash);
|
||||||
|
header.set_number(i);
|
||||||
|
header.set_timestamp(rolling_timestamp);
|
||||||
|
header.set_difficulty(*genesis_header.difficulty() * i.into());
|
||||||
|
parent_hash = header.hash();
|
||||||
|
|
||||||
|
let mut tx = db.transaction();
|
||||||
|
let pending = chain.insert(&mut tx, header, None).unwrap();
|
||||||
|
db.write(tx).unwrap();
|
||||||
|
chain.apply_pending(pending);
|
||||||
|
|
||||||
|
rolling_timestamp += 10;
|
||||||
|
}
|
||||||
|
|
||||||
|
// no live blocks have associated epoch proofs -- make sure we aren't leaking memory.
|
||||||
|
assert!(chain.live_epoch_proofs.read().is_empty());
|
||||||
|
assert_eq!(chain.epoch_transition_for(parent_hash).unwrap().1, vec![1, 2, 3, 4]);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
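The `epoch_transition_for` lookup added above is two-tiered: proofs attached to recent, un-pruned blocks live in an in-memory map, and anything older falls back to the single "last canonical transition" record persisted during CHT pruning. A minimal, self-contained sketch of that fallback shape, using plain std types rather than the actual HeaderChain internals (all names here are illustrative, not the real API):

use std::collections::HashMap;

// Simplified stand-ins for the real header hash and proof types.
type Hash = u64;
type Proof = Vec<u8>;

struct TransitionIndex {
    // proofs attached to blocks that have not been pruned into a CHT yet
    live: HashMap<Hash, Proof>,
    // the most recent transition on the canonical chain, persisted when pruning
    last_canonical: Option<Proof>,
}

impl TransitionIndex {
    // Walk an ancestry (newest first); the first live proof wins,
    // otherwise every remaining ancestor is covered by the canonical record.
    fn transition_for<I: IntoIterator<Item = Hash>>(&self, ancestry: I) -> Option<Proof> {
        for hash in ancestry {
            if let Some(proof) = self.live.get(&hash) {
                return Some(proof.clone());
            }
        }
        self.last_canonical.clone()
    }
}

fn main() {
    let mut index = TransitionIndex { live: HashMap::new(), last_canonical: Some(vec![]) };
    index.live.insert(3, vec![1, 2, 3, 4]);

    // block 5's ancestry passes through block 3, so it sees the live proof
    assert_eq!(index.transition_for([5, 4, 3]), Some(vec![1, 2, 3, 4]));
    // a fully pruned ancestry falls back to the canonical record
    assert_eq!(index.transition_for([2, 1, 0]), Some(vec![]));
    println!("ok");
}

This mirrors why the new test expects pre-transition blocks to resolve to the (empty) genesis proof while blocks 3..6 resolve to the live proof bytes.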
@@ -20,10 +20,10 @@ use std::sync::{Weak, Arc};

use ethcore::block_status::BlockStatus;
use ethcore::client::{ClientReport, EnvInfo};
-use ethcore::engines::Engine;
+use ethcore::engines::{epoch, Engine, EpochChange, EpochTransition, Proof, Unsure};
use ethcore::error::BlockImportError;
use ethcore::ids::BlockId;
-use ethcore::header::Header;
+use ethcore::header::{BlockNumber, Header};
use ethcore::verification::queue::{self, HeaderQueue};
use ethcore::blockchain_info::BlockChainInfo;
use ethcore::spec::Spec;
@@ -33,9 +33,11 @@ use io::IoChannel;
use parking_lot::{Mutex, RwLock};
use bigint::prelude::U256;
use bigint::hash::H256;
+use futures::{IntoFuture, Future};

use util::kvdb::{KeyValueDB, CompactionProfile};

+use self::fetch::ChainDataFetcher;
use self::header_chain::{AncestryIter, HeaderChain};

use cache::Cache;
@@ -45,6 +47,8 @@ pub use self::service::Service;
mod header_chain;
mod service;

+pub mod fetch;
+
/// Configuration for the light client.
#[derive(Debug, Clone)]
pub struct Config {
@@ -80,6 +84,9 @@ impl Default for Config {

/// Trait for interacting with the header chain abstractly.
pub trait LightChainClient: Send + Sync {
+	/// Adds a new `LightChainNotify` listener.
+	fn add_listener(&self, listener: Weak<LightChainNotify>);
+
	/// Get chain info.
	fn chain_info(&self) -> BlockChainInfo;

@@ -128,7 +135,7 @@ pub trait LightChainClient: Send + Sync {
	fn cht_root(&self, i: usize) -> Option<H256>;

	/// Get the EIP-86 transition block number.
-	fn eip86_transition(&self) -> u64;
+	fn eip86_transition(&self) -> BlockNumber;

	/// Get a report of import activity since the last call.
	fn report(&self) -> ClientReport;
@@ -156,7 +163,7 @@ impl<T: LightChainClient> AsLightClient for T {
}

/// Light client implementation.
-pub struct Client {
+pub struct Client<T> {
	queue: HeaderQueue,
	engine: Arc<Engine>,
	chain: HeaderChain,
@@ -164,22 +171,30 @@ pub struct Client {
	import_lock: Mutex<()>,
	db: Arc<KeyValueDB>,
	listeners: RwLock<Vec<Weak<LightChainNotify>>>,
+	fetcher: T,
	verify_full: bool,
}

-impl Client {
+impl<T: ChainDataFetcher> Client<T> {
	/// Create a new `Client`.
-	pub fn new(config: Config, db: Arc<KeyValueDB>, chain_col: Option<u32>, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Result<Self, String> {
-		let gh = ::rlp::encode(&spec.genesis_header());
+	pub fn new(
+		config: Config,
+		db: Arc<KeyValueDB>,
+		chain_col: Option<u32>,
+		spec: &Spec,
+		fetcher: T,
+		io_channel: IoChannel<ClientIoMessage>,
+		cache: Arc<Mutex<Cache>>
+	) -> Result<Self, String> {
		Ok(Client {
			queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, config.check_seal),
			engine: spec.engine.clone(),
-			chain: HeaderChain::new(db.clone(), chain_col, &gh, cache)?,
+			chain: HeaderChain::new(db.clone(), chain_col, &spec, cache)?,
			report: RwLock::new(ClientReport::default()),
			import_lock: Mutex::new(()),
			db: db,
			listeners: RwLock::new(vec![]),
+			fetcher: fetcher,
			verify_full: config.verify_full,
		})
	}
@@ -191,10 +206,24 @@ impl Client {

	/// Create a new `Client` backed purely in-memory.
	/// This will ignore all database options in the configuration.
-	pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel<ClientIoMessage>, cache: Arc<Mutex<Cache>>) -> Self {
+	pub fn in_memory(
+		config: Config,
+		spec: &Spec,
+		fetcher: T,
+		io_channel: IoChannel<ClientIoMessage>,
+		cache: Arc<Mutex<Cache>>
+	) -> Self {
		let db = ::util::kvdb::in_memory(0);

-		Client::new(config, Arc::new(db), None, spec, io_channel, cache).expect("New DB creation infallible; qed")
+		Client::new(
+			config,
+			Arc::new(db),
+			None,
+			spec,
+			fetcher,
+			io_channel,
+			cache
+		).expect("New DB creation infallible; qed")
	}

	/// Import a header to the queue for additional verification.
@@ -293,19 +322,33 @@ impl Client {
				continue
			}

-			// TODO: `epoch_end_signal`, `is_epoch_end`.
-			// proofs we get from the network would be _complete_, whereas we need
-			// _incomplete_ signals
+			let write_proof_result = match self.check_epoch_signal(&verified_header) {
+				Ok(Some(proof)) => self.write_pending_proof(&verified_header, proof),
+				Ok(None) => Ok(()),
+				Err(e) =>
+					panic!("Unable to fetch epoch transition proof: {:?}", e),
+			};
+
+			if let Err(e) = write_proof_result {
+				warn!(target: "client", "Error writing pending transition proof to DB: {:?} \
+					The node may not be able to synchronize further.", e);
+			}
+
+			let epoch_proof = self.engine.is_epoch_end(
+				&verified_header,
+				&|h| self.chain.block_header(BlockId::Hash(h)).map(|hdr| hdr.decode()),
+				&|h| self.chain.pending_transition(h),
+			);

			let mut tx = self.db.transaction();
-			let pending = match self.chain.insert(&mut tx, verified_header) {
+			let pending = match self.chain.insert(&mut tx, verified_header, epoch_proof) {
				Ok(pending) => {
					good.push(hash);
					self.report.write().blocks_imported += 1;
					pending
				}
				Err(e) => {
-					debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e);
+					debug!(target: "client", "Error importing header {:?}: {:?}", (num, hash), e);
					bad.push(hash);
					continue;
				}
@@ -421,9 +464,76 @@ impl Client {

		true
	}

+	fn check_epoch_signal(&self, verified_header: &Header) -> Result<Option<Proof>, T::Error> {
+		let (mut block, mut receipts) = (None, None);
+
+		// First, check without providing auxiliary data.
+		match self.engine.signals_epoch_end(verified_header, None, None) {
+			EpochChange::No => return Ok(None),
+			EpochChange::Yes(proof) => return Ok(Some(proof)),
+			EpochChange::Unsure(unsure) => {
+				let (b, r) = match unsure {
+					Unsure::NeedsBody =>
+						(Some(self.fetcher.block_body(verified_header)), None),
+					Unsure::NeedsReceipts =>
+						(None, Some(self.fetcher.block_receipts(verified_header))),
+					Unsure::NeedsBoth => (
+						Some(self.fetcher.block_body(verified_header)),
+						Some(self.fetcher.block_receipts(verified_header)),
+					),
+				};
+
+				if let Some(b) = b {
+					block = Some(b.into_future().wait()?.into_inner());
+				}
+
+				if let Some(r) = r {
+					receipts = Some(r.into_future().wait()?);
+				}
+			}
+		}
+
+		let block = block.as_ref().map(|x| &x[..]);
+		let receipts = receipts.as_ref().map(|x| &x[..]);
+
+		// Check again now that required data has been fetched.
+		match self.engine.signals_epoch_end(verified_header, block, receipts) {
+			EpochChange::No => return Ok(None),
+			EpochChange::Yes(proof) => return Ok(Some(proof)),
+			EpochChange::Unsure(_) =>
+				panic!("Detected faulty engine implementation: requests additional \
+					data to check epoch end signal when everything necessary provided"),
+		}
+	}
+
+	// attempts to fetch the epoch proof from the network until successful.
+	fn write_pending_proof(&self, header: &Header, proof: Proof) -> Result<(), T::Error> {
+		let proof = match proof {
+			Proof::Known(known) => known,
+			Proof::WithState(state_dependent) => {
+				self.fetcher.epoch_transition(
+					header.hash(),
+					self.engine.clone(),
+					state_dependent
+				).into_future().wait()?
+			}
+		};
+
+		let mut batch = self.db.transaction();
+		self.chain.insert_pending_transition(&mut batch, header.hash(), epoch::PendingTransition {
+			proof: proof,
+		});
+		self.db.write_buffered(batch);
+		Ok(())
+	}
}

-impl LightChainClient for Client {
+impl<T: ChainDataFetcher> LightChainClient for Client<T> {
+	fn add_listener(&self, listener: Weak<LightChainNotify>) {
+		Client::add_listener(self, listener)
+	}
+
	fn chain_info(&self) -> BlockChainInfo { Client::chain_info(self) }

	fn queue_header(&self, header: Header) -> Result<H256, BlockImportError> {
@@ -482,7 +592,7 @@ impl LightChainClient for Client {
		Client::cht_root(self, i)
	}

-	fn eip86_transition(&self) -> u64 {
+	fn eip86_transition(&self) -> BlockNumber {
		self.engine().params().eip86_transition
	}

@@ -490,3 +600,29 @@ impl LightChainClient for Client {
		Client::report(self)
	}
}

+impl<T: ChainDataFetcher> ::ethcore::client::EngineClient for Client<T> {
+	fn update_sealing(&self) { }
+	fn submit_seal(&self, _block_hash: H256, _seal: Vec<Vec<u8>>) { }
+	fn broadcast_consensus_message(&self, _message: Vec<u8>) { }
+
+	fn epoch_transition_for(&self, parent_hash: H256) -> Option<EpochTransition> {
+		self.chain.epoch_transition_for(parent_hash).map(|(hdr, proof)| EpochTransition {
+			block_hash: hdr.hash(),
+			block_number: hdr.number(),
+			proof: proof,
+		})
+	}
+
+	fn chain_info(&self) -> BlockChainInfo {
+		Client::chain_info(self)
+	}
+
+	fn as_full_client(&self) -> Option<&::ethcore::client::BlockChainClient> {
+		None
+	}
+
+	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
+		self.block_header(id).map(|hdr| hdr.number())
+	}
+}
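The new import path asks the engine for an epoch-end signal twice: once with headers only, and again after fetching whatever auxiliary data the engine reported it was missing. A compact, self-contained sketch of that control flow, with simplified enums standing in for the real `EpochChange`/`Unsure` types and placeholder functions instead of the actual engine and ChainDataFetcher calls:

// Simplified stand-ins; the real types carry headers, bodies and receipts.
enum Needed { Body, Receipts, Both }
enum Decision { No, Yes(Vec<u8>), Unsure(Needed) }

// First pass: the "engine" decides with header data only.
fn first_pass(header_ok: bool) -> Decision {
    if header_ok { Decision::Unsure(Needed::Receipts) } else { Decision::No }
}

// Second pass: the "engine" decides again once the fetched data is supplied.
fn second_pass(_body: Option<&[u8]>, receipts: Option<&[u8]>) -> Decision {
    match receipts {
        Some(r) if !r.is_empty() => Decision::Yes(r.to_vec()),
        _ => Decision::No,
    }
}

fn check_signal(header_ok: bool) -> Option<Vec<u8>> {
    let (mut body, mut receipts) = (None, None);
    match first_pass(header_ok) {
        Decision::No => return None,
        Decision::Yes(proof) => return Some(proof),
        Decision::Unsure(needed) => {
            // fetch only what was asked for (placeholders for network fetches)
            match needed {
                Needed::Body => body = Some(vec![0u8]),
                Needed::Receipts => receipts = Some(vec![9u8]),
                Needed::Both => { body = Some(vec![0u8]); receipts = Some(vec![9u8]); }
            }
        }
    }
    match second_pass(body.as_deref(), receipts.as_deref()) {
        Decision::Yes(proof) => Some(proof),
        _ => None,
    }
}

fn main() {
    assert_eq!(check_signal(true), Some(vec![9]));
    assert_eq!(check_signal(false), None);
}

The real code additionally panics if the engine is still "unsure" after being given everything it asked for, which is what the `EpochChange::Unsure(_)` arm in the second check guards against.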
@@ -30,7 +30,7 @@ use util::kvdb::{Database, DatabaseConfig};
use cache::Cache;
use parking_lot::Mutex;

-use super::{Client, Config as ClientConfig};
+use super::{ChainDataFetcher, Client, Config as ClientConfig};

/// Errors on service initialization.
#[derive(Debug)]
@@ -51,14 +51,14 @@ impl fmt::Display for Error {
}

/// Light client service.
-pub struct Service {
-	client: Arc<Client>,
+pub struct Service<T> {
+	client: Arc<Client<T>>,
	io_service: IoService<ClientIoMessage>,
}

-impl Service {
+impl<T: ChainDataFetcher> Service<T> {
	/// Start the service: initialize I/O workers and client itself.
-	pub fn start(config: ClientConfig, spec: &Spec, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {
+	pub fn start(config: ClientConfig, spec: &Spec, fetcher: T, path: &Path, cache: Arc<Mutex<Cache>>) -> Result<Self, Error> {

		// initialize database.
		let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS);
@@ -81,10 +81,14 @@ impl Service {
			db,
			db::COL_LIGHT_CHAIN,
			spec,
+			fetcher,
			io_service.channel(),
			cache,
		).map_err(Error::Database)?);

		io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?;
+		spec.engine.register_client(Arc::downgrade(&client) as _);

		Ok(Service {
			client: client,
			io_service: io_service,
@@ -97,14 +101,14 @@ impl Service {
	}

	/// Get a handle to the client.
-	pub fn client(&self) -> &Arc<Client> {
+	pub fn client(&self) -> &Arc<Client<T>> {
		&self.client
	}
}

-struct ImportBlocks(Arc<Client>);
+struct ImportBlocks<T>(Arc<Client<T>>);

-impl IoHandler<ClientIoMessage> for ImportBlocks {
+impl<T: ChainDataFetcher> IoHandler<ClientIoMessage> for ImportBlocks<T> {
	fn message(&self, _io: &IoContext<ClientIoMessage>, message: &ClientIoMessage) {
		if let ClientIoMessage::BlockVerified = *message {
			self.0.import_verified();
@@ -120,6 +124,7 @@ mod tests {

	use std::sync::Arc;
	use cache::Cache;
+	use client::fetch;
	use time::Duration;
	use parking_lot::Mutex;

@@ -129,6 +134,6 @@ mod tests {
		let temp_path = RandomTempPath::new();
		let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));

-		Service::start(Default::default(), &spec, temp_path.as_path(), cache).unwrap();
+		Service::start(Default::default(), &spec, fetch::unavailable(), temp_path.as_path(), cache).unwrap();
	}
}
@@ -62,6 +62,7 @@ fn hardcoded_serve_time(kind: Kind) -> u64 {
		Kind::Storage => 2_000_000,
		Kind::Code => 1_500_000,
		Kind::Execution => 250, // per gas.
+		Kind::Signal => 500_000,
	}
}

@@ -104,9 +104,8 @@ mod packet {
	// relay transactions to peers.
	pub const SEND_TRANSACTIONS: u8 = 0x06;

-	// request and respond with epoch transition proof
-	pub const REQUEST_EPOCH_PROOF: u8 = 0x07;
-	pub const EPOCH_PROOF: u8 = 0x08;
+	// two packets were previously meant to be reserved for epoch proofs.
+	// these have since been moved to requests.
}

// timeouts for different kinds of requests. all values are in milliseconds.
@@ -124,6 +123,7 @@ mod timeout {
	pub const CONTRACT_CODE: i64 = 100;
	pub const HEADER_PROOF: i64 = 100;
	pub const TRANSACTION_PROOF: i64 = 1000; // per gas?
+	pub const EPOCH_SIGNAL: i64 = 200;
}

/// A request id.
@@ -584,12 +584,6 @@ impl LightProtocol {

			packet::SEND_TRANSACTIONS => self.relay_transactions(peer, io, rlp),

-			packet::REQUEST_EPOCH_PROOF | packet::EPOCH_PROOF => {
-				// ignore these for now, but leave them specified.
-				debug!(target: "pip", "Ignoring request/response for epoch proof");
-				Ok(())
-			}
-
			other => {
				Err(Error::UnrecognizedPacket(other))
			}
@@ -952,6 +946,7 @@ impl LightProtocol {
				CompleteRequest::Storage(req) => self.provider.storage_proof(req).map(Response::Storage),
				CompleteRequest::Code(req) => self.provider.contract_code(req).map(Response::Code),
				CompleteRequest::Execution(req) => self.provider.transaction_proof(req).map(Response::Execution),
+				CompleteRequest::Signal(req) => self.provider.epoch_signal(req).map(Response::Signal),
			}
		});

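The new `CompleteRequest::Signal` arm follows the same pattern as every other request kind: a complete request is handed to the provider, a `Some` answer is wrapped in the matching response variant, and `None` means the provider cannot serve it. A stripped-down, self-contained model of that dispatch (the trait and types here are illustrative, not the actual `Provider` interface):

// Illustrative request/response pair for the epoch-signal kind.
struct SignalRequest { block_hash: [u8; 32] }
enum Response { Signal(Vec<u8>) }

// A provider either serves the proof bytes for the requested block or declines.
trait Provider {
    fn epoch_signal(&self, req: SignalRequest) -> Option<Vec<u8>>;
}

struct StaticProvider;
impl Provider for StaticProvider {
    fn epoch_signal(&self, req: SignalRequest) -> Option<Vec<u8>> {
        // pretend only the all-ones hash has a signal stored
        if req.block_hash == [1u8; 32] { Some(vec![1, 2, 3, 4]) } else { None }
    }
}

// Dispatch mirrors the protocol code: Option<bytes> -> Option<Response::Signal>.
fn respond<P: Provider>(provider: &P, req: SignalRequest) -> Option<Response> {
    provider.epoch_signal(req).map(Response::Signal)
}

fn main() {
    let served = respond(&StaticProvider, SignalRequest { block_hash: [1u8; 32] });
    assert!(matches!(served, Some(Response::Signal(ref s)) if s == &vec![1, 2, 3, 4]));

    let missing = respond(&StaticProvider, SignalRequest { block_hash: [0u8; 32] });
    assert!(missing.is_none());
}

Routing epoch proofs through the generic request machinery is also why the dedicated REQUEST_EPOCH_PROOF / EPOCH_PROOF packets above could be dropped.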
@@ -91,6 +91,7 @@ pub struct CostTable {
	code: U256,
	header_proof: U256,
	transaction_proof: U256, // cost per gas.
+	epoch_signal: U256,
}

impl Default for CostTable {
@@ -107,6 +108,7 @@ impl Default for CostTable {
			code: 20000.into(),
			header_proof: 15000.into(),
			transaction_proof: 2.into(),
+			epoch_signal: 10000.into(),
		}
	}
}
@@ -121,7 +123,7 @@ impl Encodable for CostTable {
			s.append(cost);
		}

-		s.begin_list(10).append(&self.base);
+		s.begin_list(11).append(&self.base);
		append_cost(s, &self.headers, request::Kind::Headers);
		append_cost(s, &self.transaction_index, request::Kind::TransactionIndex);
		append_cost(s, &self.body, request::Kind::Body);
@@ -131,6 +133,7 @@ impl Encodable for CostTable {
		append_cost(s, &self.code, request::Kind::Code);
		append_cost(s, &self.header_proof, request::Kind::HeaderProof);
		append_cost(s, &self.transaction_proof, request::Kind::Execution);
+		append_cost(s, &self.epoch_signal, request::Kind::Signal);
	}
}

@@ -147,6 +150,7 @@ impl Decodable for CostTable {
		let mut code = None;
		let mut header_proof = None;
		let mut transaction_proof = None;
+		let mut epoch_signal = None;

		for cost_list in rlp.iter().skip(1) {
			let cost = cost_list.val_at(1)?;
@@ -160,6 +164,7 @@ impl Decodable for CostTable {
				request::Kind::Code => code = Some(cost),
				request::Kind::HeaderProof => header_proof = Some(cost),
				request::Kind::Execution => transaction_proof = Some(cost),
+				request::Kind::Signal => epoch_signal = Some(cost),
			}
		}

@@ -176,6 +181,7 @@ impl Decodable for CostTable {
			code: unwrap_cost(code)?,
			header_proof: unwrap_cost(header_proof)?,
			transaction_proof: unwrap_cost(transaction_proof)?,
+			epoch_signal: unwrap_cost(epoch_signal)?,
		})
	}
}
@@ -238,6 +244,7 @@ impl FlowParams {
			code: cost_for_kind(Kind::Code),
			header_proof: cost_for_kind(Kind::HeaderProof),
			transaction_proof: cost_for_kind(Kind::Execution),
+			epoch_signal: cost_for_kind(Kind::Signal),
		};

		FlowParams {
@@ -263,7 +270,8 @@ impl FlowParams {
			storage: free_cost.clone(),
			code: free_cost.clone(),
			header_proof: free_cost.clone(),
-			transaction_proof: free_cost,
+			transaction_proof: free_cost.clone(),
+			epoch_signal: free_cost,
		}
	}
}
@@ -293,6 +301,7 @@ impl FlowParams {
			Request::Storage(_) => self.costs.storage,
			Request::Code(_) => self.costs.code,
			Request::Execution(ref req) => self.costs.transaction_proof * req.gas,
+			Request::Signal(_) => self.costs.epoch_signal,
		}
	}

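With the epoch-signal entry the cost table now encodes as a list of eleven items: the base cost followed by one (kind, cost) pair per request kind, and decoding must find every kind or reject the table. A small self-contained model of that bookkeeping, using plain integers instead of U256/RLP (names here are illustrative):

use std::collections::BTreeMap;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
enum Kind {
    Headers, TransactionIndex, Body, Receipts, Account,
    Storage, Code, HeaderProof, Execution, Signal,
}

const ALL_KINDS: [Kind; 10] = [
    Kind::Headers, Kind::TransactionIndex, Kind::Body, Kind::Receipts, Kind::Account,
    Kind::Storage, Kind::Code, Kind::HeaderProof, Kind::Execution, Kind::Signal,
];

// "Encoded" form: the base cost followed by one entry per kind -- 1 + 10 = 11 items,
// which is why the list length is bumped from 10 to 11 above.
fn encode(base: u64, costs: &BTreeMap<Kind, u64>) -> Vec<(Option<Kind>, u64)> {
    let mut out = vec![(None, base)];
    out.extend(ALL_KINDS.iter().map(|&k| (Some(k), costs[&k])));
    out
}

// Decoding mirrors the real table: every kind must be present, or decoding fails.
fn decode(items: &[(Option<Kind>, u64)]) -> Option<(u64, BTreeMap<Kind, u64>)> {
    let (&(_, base), rest) = items.split_first()?;
    let costs: BTreeMap<Kind, u64> = rest.iter().filter_map(|&(k, c)| k.map(|k| (k, c))).collect();
    if costs.len() == ALL_KINDS.len() { Some((base, costs)) } else { None }
}

fn main() {
    let costs: BTreeMap<Kind, u64> = ALL_KINDS.iter().map(|&k| (k, 1000)).collect();
    let encoded = encode(100_000, &costs);
    assert_eq!(encoded.len(), 11);
    assert!(decode(&encoded).is_some());
    // dropping the Signal entry makes the table undecodable, like unwrap_cost failing
    assert!(decode(&encoded[..10]).is_none());
}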
@@ -139,6 +139,7 @@ fn compute_timeout(reqs: &Requests) -> Duration {
			Request::Storage(_) => timeout::PROOF,
			Request::Code(_) => timeout::CONTRACT_CODE,
			Request::Execution(_) => timeout::TRANSACTION_PROOF,
+			Request::Signal(_) => timeout::EPOCH_SIGNAL,
		}
	}))
}
@@ -158,6 +158,12 @@ impl Provider for TestProvider {
		None
	}

+	fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
+		Some(request::SignalResponse {
+			signal: vec![1, 2, 3, 4],
+		})
+	}
+
	fn ready_transactions(&self) -> Vec<PendingTransaction> {
		self.0.client.ready_transactions()
	}
@@ -523,6 +529,50 @@ fn get_contract_code() {
	proto.handle_packet(&expected, &1, packet::REQUEST, &request_body);
}

+#[test]
+fn epoch_signal() {
+	let capabilities = capabilities();
+
+	let (provider, proto) = setup(capabilities.clone());
+	let flow_params = proto.flow_params.read().clone();
+
+	let cur_status = status(provider.client.chain_info());
+
+	{
+		let packet_body = write_handshake(&cur_status, &capabilities, &proto);
+		proto.on_connect(&1, &Expect::Send(1, packet::STATUS, packet_body.clone()));
+		proto.handle_packet(&Expect::Nothing, &1, packet::STATUS, &packet_body);
+	}
+
+	let req_id = 112;
+	let request = Request::Signal(request::IncompleteSignalRequest {
+		block_hash: H256([1; 32]).into(),
+	});
+
+	let requests = encode_single(request.clone());
+	let request_body = make_packet(req_id, &requests);
+
+	let response = {
+		let response = vec![Response::Signal(SignalResponse {
+			signal: vec![1, 2, 3, 4],
+		})];
+
+		let limit = *flow_params.limit();
+		let cost = flow_params.compute_cost_multi(requests.requests());
+
+		println!("limit = {}, cost = {}", limit, cost);
+		let new_creds = limit - cost;
+
+		let mut response_stream = RlpStream::new_list(3);
+		response_stream.append(&req_id).append(&new_creds).append_list(&response);
+
+		response_stream.out()
+	};
+
+	let expected = Expect::Respond(packet::RESPONSE, response);
+	proto.handle_packet(&expected, &1, packet::REQUEST, &request_body);
+}
+
#[test]
fn proof_of_execution() {
	let capabilities = capabilities();
@@ -195,6 +195,8 @@ fn guess_capabilities(requests: &[CheckedRequest]) -> Capabilities {
			caps.serve_headers = true,
		CheckedRequest::HeaderByHash(_, _) =>
			caps.serve_headers = true,
+		CheckedRequest::Signal(_, _) =>
+			caps.serve_headers = true,
		CheckedRequest::Body(ref req, _) => if let Ok(ref hdr) = req.0.as_ref() {
			update_since(&mut caps.serve_chain_since, hdr.number());
		},
@@ -20,7 +20,7 @@ use std::sync::Arc;

use ethcore::basic_account::BasicAccount;
use ethcore::encoded;
-use ethcore::engines::Engine;
+use ethcore::engines::{Engine, StateDependentProof};
use ethcore::receipt::Receipt;
use ethcore::state::{self, ProvedExecution};
use ethcore::transaction::SignedTransaction;
@@ -56,6 +56,8 @@ pub enum Request {
	Code(Code),
	/// A request for proof of execution.
	Execution(TransactionProof),
+	/// A request for epoch change signal.
+	Signal(Signal),
}

/// A request argument.
@@ -136,6 +138,7 @@ impl_single!(Body, Body, encoded::Block);
impl_single!(Account, Account, Option<BasicAccount>);
impl_single!(Code, Code, Bytes);
impl_single!(Execution, TransactionProof, super::ExecutionResult);
+impl_single!(Signal, Signal, Vec<u8>);

macro_rules! impl_args {
	() => {
@@ -244,6 +247,7 @@ pub enum CheckedRequest {
	Account(Account, net_request::IncompleteAccountRequest),
	Code(Code, net_request::IncompleteCodeRequest),
	Execution(TransactionProof, net_request::IncompleteExecutionRequest),
+	Signal(Signal, net_request::IncompleteSignalRequest)
}

impl From<Request> for CheckedRequest {
@@ -302,6 +306,12 @@ impl From<Request> for CheckedRequest {
				};
				CheckedRequest::Execution(req, net_req)
			}
+			Request::Signal(req) => {
+				let net_req = net_request::IncompleteSignalRequest {
+					block_hash: req.hash.into(),
+				};
+				CheckedRequest::Signal(req, net_req)
+			}
		}
	}
}
@@ -319,6 +329,7 @@ impl CheckedRequest {
			CheckedRequest::Account(_, req) => NetRequest::Account(req),
			CheckedRequest::Code(_, req) => NetRequest::Code(req),
			CheckedRequest::Execution(_, req) => NetRequest::Execution(req),
+			CheckedRequest::Signal(_, req) => NetRequest::Signal(req),
		}
	}

@@ -446,6 +457,7 @@ macro_rules! match_me {
			CheckedRequest::Account($check, $req) => $e,
			CheckedRequest::Code($check, $req) => $e,
			CheckedRequest::Execution($check, $req) => $e,
+			CheckedRequest::Signal($check, $req) => $e,
		}
	}
}
@@ -473,6 +485,7 @@ impl IncompleteRequest for CheckedRequest {
			CheckedRequest::Account(_, ref req) => req.check_outputs(f),
			CheckedRequest::Code(_, ref req) => req.check_outputs(f),
			CheckedRequest::Execution(_, ref req) => req.check_outputs(f),
+			CheckedRequest::Signal(_, ref req) => req.check_outputs(f),
		}
	}

@@ -493,6 +506,7 @@ impl IncompleteRequest for CheckedRequest {
			CheckedRequest::Account(_, req) => req.complete().map(CompleteRequest::Account),
			CheckedRequest::Code(_, req) => req.complete().map(CompleteRequest::Code),
			CheckedRequest::Execution(_, req) => req.complete().map(CompleteRequest::Execution),
+			CheckedRequest::Signal(_, req) => req.complete().map(CompleteRequest::Signal),
		}
	}

@@ -544,6 +558,9 @@ impl net_request::CheckedRequest for CheckedRequest {
			CheckedRequest::Execution(ref prover, _) =>
				expect!((&NetResponse::Execution(ref res), _) =>
					prover.check_response(cache, &res.items).map(Response::Execution)),
+			CheckedRequest::Signal(ref prover, _) =>
+				expect!((&NetResponse::Signal(ref res), _) =>
+					prover.check_response(cache, &res.signal).map(Response::Signal)),
		}
	}
}
@@ -567,6 +584,8 @@ pub enum Response {
	Code(Vec<u8>),
	/// Response to a request for proved execution.
	Execution(super::ExecutionResult),
+	/// Response to a request for epoch change signal.
+	Signal(Vec<u8>),
}

impl net_request::ResponseLike for Response {
@@ -850,6 +869,27 @@ impl TransactionProof {
	}
}

+/// Request for epoch signal.
+/// Provide engine and state-dependent proof checker.
+#[derive(Clone)]
+pub struct Signal {
+	/// Block hash and number to fetch proof for.
+	pub hash: H256,
+	/// Consensus engine, used to check the proof.
+	pub engine: Arc<Engine>,
+	/// Special checker for the proof.
+	pub proof_check: Arc<StateDependentProof>,
+}
+
+impl Signal {
+	/// Check the signal, returning the signal or indicate that it's bad.
+	pub fn check_response(&self, _: &Mutex<::cache::Cache>, signal: &[u8]) -> Result<Vec<u8>, Error> {
+		self.proof_check.check_proof(&*self.engine, signal)
+			.map(|_| signal.to_owned())
+			.map_err(|_| Error::BadProof)
+	}
+}
+
#[cfg(test)]
mod tests {
	use super::*;
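`Signal::check_response` above delegates verification to a state-dependent checker and, on success, simply hands back the raw signal bytes. A self-contained sketch of that wrapper pattern, with a closure standing in for the engine-specific `StateDependentProof` checker (illustrative only, not the real types):

#[derive(Debug, PartialEq)]
enum Error { BadProof }

// Stand-in for the engine/proof-checker pair: returns Ok(()) if the bytes verify.
struct SignalChecker {
    check: Box<dyn Fn(&[u8]) -> Result<(), ()>>,
}

impl SignalChecker {
    // Mirrors the shape of check_response: validate, then return the owned bytes.
    fn check_response(&self, signal: &[u8]) -> Result<Vec<u8>, Error> {
        (self.check)(signal)
            .map(|_| signal.to_owned())
            .map_err(|_| Error::BadProof)
    }
}

fn main() {
    // accept only signals whose first byte is 1
    let checker = SignalChecker {
        check: Box::new(|s| if s.first() == Some(&1) { Ok(()) } else { Err(()) }),
    };

    assert_eq!(checker.check_response(&[1, 2, 3, 4]), Ok(vec![1, 2, 3, 4]));
    assert_eq!(checker.check_response(&[9]), Err(Error::BadProof));
}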
@@ -127,6 +127,9 @@ pub trait Provider: Send + Sync {
	/// Provide a proof-of-execution for the given transaction proof request.
	/// Returns a vector of all state items necessary to execute the transaction.
	fn transaction_proof(&self, req: request::CompleteExecutionRequest) -> Option<request::ExecutionResponse>;
+
+	/// Provide epoch signal data at given block hash. This should be just the
+	fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse>;
}

// Implementation of a light client data provider for a client.
@@ -265,6 +268,12 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
	fn ready_transactions(&self) -> Vec<PendingTransaction> {
		BlockChainClient::ready_transactions(self)
	}
+
+	fn epoch_signal(&self, req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
+		self.epoch_signal(req.block_hash).map(|signal| request::SignalResponse {
+			signal: signal,
+		})
+	}
}

/// The light client "provider" implementation. This wraps a `LightClient` and
@@ -330,6 +339,10 @@ impl<L: AsLightClient + Send + Sync> Provider for LightProvider<L> {
		None
	}

+	fn epoch_signal(&self, _req: request::CompleteSignalRequest) -> Option<request::SignalResponse> {
+		None
+	}
+
	fn ready_transactions(&self) -> Vec<PendingTransaction> {
		let chain_info = self.chain_info();
		self.txqueue.read().ready_transactions(chain_info.best_block_number, chain_info.best_block_timestamp)
@@ -67,6 +67,11 @@ pub use self::execution::{
	Incomplete as IncompleteExecutionRequest,
	Response as ExecutionResponse,
};
+pub use self::epoch_signal::{
+	Complete as CompleteSignalRequest,
+	Incomplete as IncompleteSignalRequest,
+	Response as SignalResponse,
+};

pub use self::builder::{RequestBuilder, Requests};

@@ -261,6 +266,8 @@ pub enum Request {
	Code(IncompleteCodeRequest),
	/// A request for proof of execution,
	Execution(IncompleteExecutionRequest),
+	/// A request for an epoch signal.
+	Signal(IncompleteSignalRequest),
}

/// All request types, in an answerable state.
@@ -284,6 +291,8 @@ pub enum CompleteRequest {
	Code(CompleteCodeRequest),
	/// A request for proof of execution,
	Execution(CompleteExecutionRequest),
+	/// A request for an epoch signal.
+	Signal(CompleteSignalRequest),
}

impl CompleteRequest {
@@ -299,6 +308,7 @@ impl CompleteRequest {
			CompleteRequest::Storage(_) => Kind::Storage,
			CompleteRequest::Code(_) => Kind::Code,
			CompleteRequest::Execution(_) => Kind::Execution,
+			CompleteRequest::Signal(_) => Kind::Signal,
		}
	}
}
@@ -316,6 +326,7 @@ impl Request {
			Request::Storage(_) => Kind::Storage,
			Request::Code(_) => Kind::Code,
			Request::Execution(_) => Kind::Execution,
+			Request::Signal(_) => Kind::Signal,
		}
	}
}
@@ -332,6 +343,7 @@ impl Decodable for Request {
			Kind::Storage => Ok(Request::Storage(rlp.val_at(1)?)),
			Kind::Code => Ok(Request::Code(rlp.val_at(1)?)),
			Kind::Execution => Ok(Request::Execution(rlp.val_at(1)?)),
+			Kind::Signal => Ok(Request::Signal(rlp.val_at(1)?)),
		}
	}
}
@@ -353,6 +365,7 @@ impl Encodable for Request {
			Request::Storage(ref req) => s.append(req),
			Request::Code(ref req) => s.append(req),
			Request::Execution(ref req) => s.append(req),
+			Request::Signal(ref req) => s.append(req),
		};
	}
}
@@ -374,6 +387,7 @@ impl IncompleteRequest for Request {
			Request::Storage(ref req) => req.check_outputs(f),
			Request::Code(ref req) => req.check_outputs(f),
			Request::Execution(ref req) => req.check_outputs(f),
+			Request::Signal(ref req) => req.check_outputs(f),
		}
	}

@@ -388,6 +402,7 @@ impl IncompleteRequest for Request {
			Request::Storage(ref req) => req.note_outputs(f),
			Request::Code(ref req) => req.note_outputs(f),
			Request::Execution(ref req) => req.note_outputs(f),
+			Request::Signal(ref req) => req.note_outputs(f),
		}
	}

@@ -402,6 +417,7 @@ impl IncompleteRequest for Request {
			Request::Storage(ref mut req) => req.fill(oracle),
			Request::Code(ref mut req) => req.fill(oracle),
			Request::Execution(ref mut req) => req.fill(oracle),
+			Request::Signal(ref mut req) => req.fill(oracle),
		}
	}

@@ -416,6 +432,7 @@ impl IncompleteRequest for Request {
			Request::Storage(req) => req.complete().map(CompleteRequest::Storage),
			Request::Code(req) => req.complete().map(CompleteRequest::Code),
			Request::Execution(req) => req.complete().map(CompleteRequest::Execution),
+			Request::Signal(req) => req.complete().map(CompleteRequest::Signal),
		}
	}

@@ -430,6 +447,7 @@ impl IncompleteRequest for Request {
			Request::Storage(ref mut req) => req.adjust_refs(mapping),
			Request::Code(ref mut req) => req.adjust_refs(mapping),
			Request::Execution(ref mut req) => req.adjust_refs(mapping),
+			Request::Signal(ref mut req) => req.adjust_refs(mapping),
		}
	}
}
@@ -471,6 +489,8 @@ pub enum Kind {
	Code = 7,
	/// A request for transaction execution + state proof.
	Execution = 8,
+	/// A request for epoch transition signal.
+	Signal = 9,
}

impl Decodable for Kind {
@@ -485,6 +505,7 @@ impl Decodable for Kind {
			6 => Ok(Kind::Storage),
			7 => Ok(Kind::Code),
			8 => Ok(Kind::Execution),
+			9 => Ok(Kind::Signal),
			_ => Err(DecoderError::Custom("Unknown PIP request ID.")),
		}
	}
}
@@ -517,6 +538,8 @@ pub enum Response {
	Code(CodeResponse),
	/// A response for proof of execution,
	Execution(ExecutionResponse),
+	/// A response for epoch change signal.
+	Signal(SignalResponse),
}

impl ResponseLike for Response {
@@ -532,6 +555,7 @@ impl ResponseLike for Response {
			Response::Storage(ref res) => res.fill_outputs(f),
			Response::Code(ref res) => res.fill_outputs(f),
			Response::Execution(ref res) => res.fill_outputs(f),
+			Response::Signal(ref res) => res.fill_outputs(f),
		}
	}
}
@@ -549,6 +573,7 @@ impl Response {
			Response::Storage(_) => Kind::Storage,
			Response::Code(_) => Kind::Code,
|
||||||
Response::Execution(_) => Kind::Execution,
|
Response::Execution(_) => Kind::Execution,
|
||||||
|
Response::Signal(_) => Kind::Signal,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -565,6 +590,7 @@ impl Decodable for Response {
|
|||||||
Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)),
|
Kind::Storage => Ok(Response::Storage(rlp.val_at(1)?)),
|
||||||
Kind::Code => Ok(Response::Code(rlp.val_at(1)?)),
|
Kind::Code => Ok(Response::Code(rlp.val_at(1)?)),
|
||||||
Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)),
|
Kind::Execution => Ok(Response::Execution(rlp.val_at(1)?)),
|
||||||
|
Kind::Signal => Ok(Response::Signal(rlp.val_at(1)?)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -586,6 +612,7 @@ impl Encodable for Response {
|
|||||||
Response::Storage(ref res) => s.append(res),
|
Response::Storage(ref res) => s.append(res),
|
||||||
Response::Code(ref res) => s.append(res),
|
Response::Code(ref res) => s.append(res),
|
||||||
Response::Execution(ref res) => s.append(res),
|
Response::Execution(ref res) => s.append(res),
|
||||||
|
Response::Signal(ref res) => s.append(res),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -760,8 +787,8 @@ pub mod header {
|
|||||||
pub mod header_proof {
|
pub mod header_proof {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
||||||
use bigint::hash::H256;
|
|
||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
|
use bigint::hash::H256;
|
||||||
use util::Bytes;
|
use util::Bytes;
|
||||||
|
|
||||||
/// Potentially incomplete header proof request.
|
/// Potentially incomplete header proof request.
|
||||||
@ -1091,8 +1118,8 @@ pub mod block_body {
|
|||||||
/// A request for an account proof.
|
/// A request for an account proof.
|
||||||
pub mod account {
|
pub mod account {
|
||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use bigint::hash::H256;
|
|
||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
|
use bigint::hash::H256;
|
||||||
use util::Bytes;
|
use util::Bytes;
|
||||||
|
|
||||||
/// Potentially incomplete request for an account proof.
|
/// Potentially incomplete request for an account proof.
|
||||||
@ -1388,8 +1415,8 @@ pub mod execution {
|
|||||||
use super::{Field, NoSuchOutput, OutputKind, Output};
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
use ethcore::transaction::Action;
|
use ethcore::transaction::Action;
|
||||||
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
||||||
use bigint::hash::H256;
|
|
||||||
use bigint::prelude::U256;
|
use bigint::prelude::U256;
|
||||||
|
use bigint::hash::H256;
|
||||||
use util::{Bytes, Address, DBValue};
|
use util::{Bytes, Address, DBValue};
|
||||||
|
|
||||||
/// Potentially incomplete execution proof request.
|
/// Potentially incomplete execution proof request.
|
||||||
@ -1509,6 +1536,105 @@ pub mod execution {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// A request for epoch signal data.
|
||||||
|
pub mod epoch_signal {
|
||||||
|
use super::{Field, NoSuchOutput, OutputKind, Output};
|
||||||
|
use rlp::{Encodable, Decodable, DecoderError, RlpStream, UntrustedRlp};
|
||||||
|
use bigint::hash::H256;
|
||||||
|
use util::Bytes;
|
||||||
|
|
||||||
|
/// Potentially incomplete epoch signal request.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct Incomplete {
|
||||||
|
/// The block hash to request the signal for.
|
||||||
|
pub block_hash: Field<H256>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Decodable for Incomplete {
|
||||||
|
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
||||||
|
Ok(Incomplete {
|
||||||
|
block_hash: rlp.val_at(0)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Encodable for Incomplete {
|
||||||
|
fn rlp_append(&self, s: &mut RlpStream) {
|
||||||
|
s.begin_list(1).append(&self.block_hash);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl super::IncompleteRequest for Incomplete {
|
||||||
|
type Complete = Complete;
|
||||||
|
type Response = Response;
|
||||||
|
|
||||||
|
fn check_outputs<F>(&self, mut f: F) -> Result<(), NoSuchOutput>
|
||||||
|
where F: FnMut(usize, usize, OutputKind) -> Result<(), NoSuchOutput>
|
||||||
|
{
|
||||||
|
if let Field::BackReference(req, idx) = self.block_hash {
|
||||||
|
f(req, idx, OutputKind::Hash)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn note_outputs<F>(&self, _: F) where F: FnMut(usize, OutputKind) {}
|
||||||
|
|
||||||
|
fn fill<F>(&mut self, oracle: F) where F: Fn(usize, usize) -> Result<Output, NoSuchOutput> {
|
||||||
|
if let Field::BackReference(req, idx) = self.block_hash {
|
||||||
|
self.block_hash = match oracle(req, idx) {
|
||||||
|
Ok(Output::Hash(block_hash)) => Field::Scalar(block_hash.into()),
|
||||||
|
_ => Field::BackReference(req, idx),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn complete(self) -> Result<Self::Complete, NoSuchOutput> {
|
||||||
|
Ok(Complete {
|
||||||
|
block_hash: self.block_hash.into_scalar()?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn adjust_refs<F>(&mut self, mut mapping: F) where F: FnMut(usize) -> usize {
|
||||||
|
self.block_hash.adjust_req(&mut mapping);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A complete request.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct Complete {
|
||||||
|
/// The block hash to request the epoch signal for.
|
||||||
|
pub block_hash: H256,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The output of a request for an epoch signal.
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub struct Response {
|
||||||
|
/// The requested epoch signal.
|
||||||
|
pub signal: Bytes,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl super::ResponseLike for Response {
|
||||||
|
/// Fill reusable outputs by providing them to the function.
|
||||||
|
fn fill_outputs<F>(&self, _: F) where F: FnMut(usize, Output) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Decodable for Response {
|
||||||
|
fn decode(rlp: &UntrustedRlp) -> Result<Self, DecoderError> {
|
||||||
|
|
||||||
|
Ok(Response {
|
||||||
|
signal: rlp.as_val()?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Encodable for Response {
|
||||||
|
fn rlp_append(&self, s: &mut RlpStream) {
|
||||||
|
s.append(&self.signal);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
@ -1797,4 +1923,22 @@ mod tests {
|
|||||||
let raw = ::rlp::encode_list(&reqs);
|
let raw = ::rlp::encode_list(&reqs);
|
||||||
assert_eq!(::rlp::decode_list::<Response>(&raw), reqs);
|
assert_eq!(::rlp::decode_list::<Response>(&raw), reqs);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn epoch_signal_roundtrip() {
|
||||||
|
let req = IncompleteSignalRequest {
|
||||||
|
block_hash: Field::Scalar(Default::default()),
|
||||||
|
};
|
||||||
|
|
||||||
|
let full_req = Request::Signal(req.clone());
|
||||||
|
let res = SignalResponse {
|
||||||
|
signal: vec![1, 2, 3, 4, 5, 6, 7, 6, 5, 4],
|
||||||
|
};
|
||||||
|
let full_res = Response::Signal(res.clone());
|
||||||
|
|
||||||
|
check_roundtrip(req);
|
||||||
|
check_roundtrip(full_req);
|
||||||
|
check_roundtrip(res);
|
||||||
|
check_roundtrip(full_res);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
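For illustration: the new `epoch_signal::Incomplete::fill` above resolves a back-referenced block hash through an oracle and keeps the reference when the output is unavailable. Below is a minimal, self-contained sketch of that scalar/back-reference pattern; the `Field`, `Output`, and `NoSuchOutput` types here are simplified stand-ins, not the crate's actual definitions.

// Simplified stand-ins mirroring the shapes used in the diff above.
#[derive(Debug, Clone, PartialEq)]
enum Field<T> {
	Scalar(T),
	BackReference(usize, usize), // (request index, output index)
}

#[derive(Debug, Clone)]
enum Output {
	Hash([u8; 32]),
}

struct NoSuchOutput;

// Resolve a back-reference through an oracle, leaving it untouched on failure,
// mirroring the `fill` implementation added for the signal request.
fn fill_hash<F>(field: &mut Field<[u8; 32]>, oracle: F)
	where F: Fn(usize, usize) -> Result<Output, NoSuchOutput>
{
	if let Field::BackReference(req, idx) = *field {
		*field = match oracle(req, idx) {
			Ok(Output::Hash(hash)) => Field::Scalar(hash),
			_ => Field::BackReference(req, idx),
		};
	}
}

fn main() {
	let mut block_hash = Field::BackReference(0, 0);
	// Oracle that only knows the output of request 0, output 0.
	fill_hash(&mut block_hash, |req, idx| {
		if (req, idx) == (0, 0) { Ok(Output::Hash([0xab; 32])) } else { Err(NoSuchOutput) }
	});
	assert_eq!(block_hash, Field::Scalar([0xab; 32]));
}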
@@ -44,6 +44,7 @@ use db::{self, Writable, Readable, CacheUpdatePolicy};
 use cache_manager::CacheManager;
 use encoded;
 use engines::epoch::{Transition as EpochTransition, PendingTransition as PendingEpochTransition};
+use rayon::prelude::*;
 use ansi_term::Colour;

 const LOG_BLOOMS_LEVELS: usize = 3;
@@ -152,7 +153,7 @@ pub trait BlockProvider {

 	/// Returns logs matching given filter.
 	fn logs<F>(&self, blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
-		where F: Fn(&LogEntry) -> bool, Self: Sized;
+		where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized;
 }

 macro_rules! otry {
@@ -363,13 +364,15 @@ impl BlockProvider for BlockChain {
 	}

 	fn logs<F>(&self, mut blocks: Vec<BlockNumber>, matches: F, limit: Option<usize>) -> Vec<LocalizedLogEntry>
-		where F: Fn(&LogEntry) -> bool, Self: Sized {
+		where F: Fn(&LogEntry) -> bool + Send + Sync, Self: Sized {
 		// sort in reverse order
 		blocks.sort_by(|a, b| b.cmp(a));

-		let mut log_index = 0;
-		let mut logs = blocks.into_iter()
-			.filter_map(|number| self.block_hash(number).map(|hash| (number, hash)))
+		let mut logs = blocks
+			.chunks(128)
+			.flat_map(move |blocks_chunk| {
+				blocks_chunk.into_par_iter()
+					.filter_map(|number| self.block_hash(*number).map(|hash| (*number, hash)))
 					.filter_map(|(number, hash)| self.block_receipts(&hash).map(|r| (number, hash, r.receipts)))
 					.filter_map(|(number, hash, receipts)| self.block_body(&hash).map(|ref b| (number, hash, receipts, b.transaction_hashes())))
 					.flat_map(|(number, hash, mut receipts, mut hashes)| {
@@ -377,7 +380,7 @@ impl BlockProvider for BlockChain {
 							warn!("Block {} ({}) has different number of receipts ({}) to transactions ({}). Database corrupt?", number, hash, receipts.len(), hashes.len());
 							assert!(false);
 						}
-						log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());
+						let mut log_index = receipts.iter().fold(0, |sum, receipt| sum + receipt.logs.len());

 						let receipts_len = receipts.len();
 						hashes.reverse();
@@ -405,9 +408,13 @@ impl BlockProvider for BlockChain {
 								log_index: current_log_index - i - 1,
 							})
 						})
-					})
 					.filter(|log_entry| matches(&log_entry.entry))
 					.take(limit.unwrap_or(::std::usize::MAX))
+					.collect::<Vec<_>>()
+					})
+					.collect::<Vec<_>>()
+			})
+			.take(limit.unwrap_or(::std::usize::MAX))
 			.collect::<Vec<LocalizedLogEntry>>();
 		logs.reverse();
 		logs
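For illustration: the reworked `logs` above walks the block list in fixed-size chunks and expands each chunk in parallel with rayon, which is why the `matches` closure now needs `Send + Sync`. A minimal, self-contained sketch of that chunked-parallel shape follows; plain integers stand in for the per-block receipt and log lookups.

// Requires the `rayon` crate (the diff adds `use rayon::prelude::*;`).
use rayon::prelude::*;

fn main() {
	let blocks: Vec<u64> = (0..1_000).collect();

	// The outer iterator walks 128-element chunks sequentially; each chunk is
	// expanded on the rayon thread pool, and per-chunk results are flattened.
	let processed: Vec<u64> = blocks
		.chunks(128)
		.flat_map(|chunk| {
			chunk.into_par_iter()
				.map(|n| n * 2) // stand-in for the per-block work
				.collect::<Vec<_>>()
		})
		.collect();

	assert_eq!(processed.len(), blocks.len());
}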
@@ -34,7 +34,7 @@ use util::kvdb::*;

 // other
 use bigint::prelude::U256;
-use bigint::hash::{H256, H2048};
+use bigint::hash::H256;
 use basic_types::Seal;
 use block::*;
 use blockchain::{BlockChain, BlockProvider, TreeRoute, ImportRoute};
@@ -43,7 +43,7 @@ use client::ancient_import::AncientVerifier;
 use client::Error as ClientError;
 use client::{
 	BlockId, TransactionId, UncleId, TraceId, ClientConfig, BlockChainClient,
-	MiningBlockChainClient, EngineClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
+	MiningBlockChainClient, TraceFilter, CallAnalytics, BlockImportError, Mode,
 	ChainNotify, PruningInfo, ProvingBlockChainClient,
 };
 use encoded;
@@ -771,7 +771,7 @@ impl Client {
 				res.map(|(output, proof)| (output, proof.into_iter().map(|x| x.into_vec()).collect()))
 			};

-			match (with_state)(&call) {
+			match with_state.generate_proof(&call) {
 				Ok(proof) => proof,
 				Err(e) => {
 					warn!(target: "client", "Failed to generate transition proof for block {}: {}", hash, e);
@@ -918,7 +918,7 @@ impl Client {
 			_ => {},
 		}

-		let block_number = match self.block_number(id.clone()) {
+		let block_number = match self.block_number(id) {
 			Some(num) => num,
 			None => return None,
 		};
@@ -1155,6 +1155,16 @@ impl Client {
 			(false, false) => call(state, env_info, engine, state_diff, t, TransactOptions::with_no_tracing()),
 		}
 	}
+
+	fn block_number_ref(&self, id: &BlockId) -> Option<BlockNumber> {
+		match *id {
+			BlockId::Number(number) => Some(number),
+			BlockId::Hash(ref hash) => self.chain.read().block_number(hash),
+			BlockId::Earliest => Some(0),
+			BlockId::Latest => Some(self.chain.read().best_block_number()),
+			BlockId::Pending => Some(self.chain.read().best_block_number() + 1),
+		}
+	}
 }

 impl snapshot::DatabaseRestore for Client {
@@ -1364,13 +1374,7 @@ impl BlockChainClient for Client {
 	}

 	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
-		match id {
-			BlockId::Number(number) => Some(number),
-			BlockId::Hash(ref hash) => self.chain.read().block_number(hash),
-			BlockId::Earliest => Some(0),
-			BlockId::Latest => Some(self.chain.read().best_block_number()),
-			BlockId::Pending => Some(self.chain.read().best_block_number() + 1),
-		}
+		self.block_number_ref(&id)
 	}

 	fn block_body(&self, id: BlockId) -> Option<encoded::Body> {
@@ -1651,16 +1655,17 @@ impl BlockChainClient for Client {
 		self.engine.additional_params().into_iter().collect()
 	}

-	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>> {
-		match (self.block_number(from_block), self.block_number(to_block)) {
-			(Some(from), Some(to)) => Some(self.chain.read().blocks_with_bloom(bloom, from, to)),
-			_ => None
-		}
-	}
-
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
+		let (from, to) = match (self.block_number_ref(&filter.from_block), self.block_number_ref(&filter.to_block)) {
+			(Some(from), Some(to)) => (from, to),
+			_ => return Vec::new(),
+		};
+
+		let chain = self.chain.read();
 		let blocks = filter.bloom_possibilities().iter()
-			.filter_map(|bloom| self.blocks_with_bloom(bloom, filter.from_block.clone(), filter.to_block.clone()))
+			.map(move |bloom| {
+				chain.blocks_with_bloom(bloom, from, to)
+			})
 			.flat_map(|m| m)
 			// remove duplicate elements
 			.collect::<HashSet<u64>>()
@@ -1937,7 +1942,7 @@ impl MiningBlockChainClient for Client {
 	}
 }

-impl EngineClient for Client {
+impl super::traits::EngineClient for Client {
 	fn update_sealing(&self) {
 		self.miner.update_sealing(self)
 	}
@@ -1955,6 +1960,16 @@ impl EngineClient for Client {
 	fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition> {
 		self.chain.read().epoch_transition_for(parent_hash)
 	}
+
+	fn chain_info(&self) -> BlockChainInfo {
+		BlockChainClient::chain_info(self)
+	}
+
+	fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) }
+
+	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
+		BlockChainClient::block_number(self, id)
+	}
 }

 impl ProvingBlockChainClient for Client {
@@ -1969,27 +1984,30 @@ impl ProvingBlockChainClient for Client {
 	}

 	fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
-		let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) {
+		let (header, mut env_info) = match (self.block_header(id), self.env_info(id)) {
 			(Some(s), Some(e)) => (s, e),
 			_ => return None,
 		};

 		env_info.gas_limit = transaction.gas.clone();
 		let mut jdb = self.state_db.lock().journal_db().boxed_clone();
-		let backend = state::backend::Proving::new(jdb.as_hashdb_mut());

-		let mut state = state.replace_backend(backend);
-		let options = TransactOptions::with_no_tracing().dont_check_nonce();
-		let res = Executive::new(&mut state, &env_info, &*self.engine).transact(&transaction, options);
+		state::prove_transaction(
+			jdb.as_hashdb_mut(),
+			header.state_root().clone(),
+			&transaction,
+			&*self.engine,
+			&env_info,
+			self.factories.clone(),
+			false,
+		)
+	}

-		match res {
-			Err(ExecutionError::Internal(_)) => None,
-			Err(e) => {
-				trace!(target: "client", "Proved call failed: {}", e);
-				Some((Vec::new(), state.drop().1.extract_proof()))
-			}
-			Ok(res) => Some((res.output, state.drop().1.extract_proof())),
-		}
+	fn epoch_signal(&self, hash: H256) -> Option<Vec<u8>> {
+		// pending transitions are never deleted, and do not contain
+		// finality proofs by definition.
+		self.chain.read().get_pending_transition(hash).map(|pending| pending.proof)
 	}
 }

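For illustration: `block_number_ref` centralizes `BlockId` resolution so `logs` can resolve the filter's range once before iterating. A minimal sketch of the same mapping against a toy chain; the `Chain` type and its best-block field are hypothetical stand-ins for the real `BlockChain`.

// Hypothetical, simplified stand-ins; the real helper consults the BlockChain.
#[allow(dead_code)]
enum BlockId {
	Number(u64),
	Hash([u8; 32]),
	Earliest,
	Latest,
	Pending,
}

struct Chain {
	best: u64,
}

impl Chain {
	fn block_number_of_hash(&self, _hash: &[u8; 32]) -> Option<u64> {
		None // hash lookup omitted in this sketch
	}

	// Mirrors the shape of the Client::block_number_ref helper in the diff.
	fn block_number_ref(&self, id: &BlockId) -> Option<u64> {
		match *id {
			BlockId::Number(number) => Some(number),
			BlockId::Hash(ref hash) => self.block_number_of_hash(hash),
			BlockId::Earliest => Some(0),
			BlockId::Latest => Some(self.best),
			BlockId::Pending => Some(self.best + 1),
		}
	}
}

fn main() {
	let chain = Chain { best: 42 };
	assert_eq!(chain.block_number_ref(&BlockId::Latest), Some(42));
	assert_eq!(chain.block_number_ref(&BlockId::Pending), Some(43));
	assert_eq!(chain.block_number_ref(&BlockId::Number(7)), Some(7));
}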
@@ -24,7 +24,7 @@ use itertools::Itertools;
 use rustc_hex::FromHex;
 use hash::keccak;
 use bigint::prelude::U256;
-use bigint::hash::{H256, H2048};
+use bigint::hash::H256;
 use parking_lot::RwLock;
 use util::*;
 use rlp::*;
@@ -33,7 +33,7 @@ use devtools::*;
 use transaction::{Transaction, LocalizedTransaction, PendingTransaction, SignedTransaction, Action};
 use blockchain::TreeRoute;
 use client::{
-	BlockChainClient, MiningBlockChainClient, EngineClient, BlockChainInfo, BlockStatus, BlockId,
+	BlockChainClient, MiningBlockChainClient, BlockChainInfo, BlockStatus, BlockId,
 	TransactionId, UncleId, TraceId, TraceFilter, LastHashes, CallAnalytics, BlockImportError,
 	ProvingBlockChainClient,
 };
@@ -508,10 +508,6 @@ impl BlockChainClient for TestBlockChainClient {
 		self.receipts.read().get(&id).cloned()
 	}

-	fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockId, _to_block: BlockId) -> Option<Vec<BlockNumber>> {
-		unimplemented!();
-	}
-
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry> {
 		let mut logs = self.logs.read().clone();
 		let len = logs.len();
@@ -801,9 +797,13 @@ impl ProvingBlockChainClient for TestBlockChainClient {
 	fn prove_transaction(&self, _: SignedTransaction, _: BlockId) -> Option<(Bytes, Vec<DBValue>)> {
 		None
 	}
+
+	fn epoch_signal(&self, _: H256) -> Option<Vec<u8>> {
+		None
+	}
 }

-impl EngineClient for TestBlockChainClient {
+impl super::traits::EngineClient for TestBlockChainClient {
 	fn update_sealing(&self) {
 		self.miner.update_sealing(self)
 	}
@@ -819,4 +819,14 @@ impl EngineClient for TestBlockChainClient {
 	fn epoch_transition_for(&self, _block_hash: H256) -> Option<::engines::EpochTransition> {
 		None
 	}
+
+	fn chain_info(&self) -> BlockChainInfo {
+		BlockChainClient::chain_info(self)
+	}
+
+	fn as_full_client(&self) -> Option<&BlockChainClient> { Some(self) }
+
+	fn block_number(&self, id: BlockId) -> Option<BlockNumber> {
+		BlockChainClient::block_number(self, id)
+	}
 }
@@ -35,7 +35,7 @@ use transaction::{LocalizedTransaction, PendingTransaction, SignedTransaction};
 use verification::queue::QueueInfo as BlockQueueInfo;

 use bigint::prelude::U256;
-use bigint::hash::{H256, H2048};
+use bigint::hash::H256;
 use util::{Address, Bytes};
 use util::hashdb::DBValue;

@@ -181,9 +181,6 @@ pub trait BlockChainClient : Sync + Send {
 	/// Get the best block header.
 	fn best_block_header(&self) -> encoded::Header;

-	/// Returns numbers of blocks containing given bloom.
-	fn blocks_with_bloom(&self, bloom: &H2048, from_block: BlockId, to_block: BlockId) -> Option<Vec<BlockNumber>>;
-
 	/// Returns logs matching given filter.
 	fn logs(&self, filter: Filter) -> Vec<LocalizedLogEntry>;

@@ -317,7 +314,7 @@ pub trait MiningBlockChainClient: BlockChainClient {
 }

 /// Client facilities used by internally sealing Engines.
-pub trait EngineClient: MiningBlockChainClient {
+pub trait EngineClient: Sync + Send {
 	/// Make a new block and seal it.
 	fn update_sealing(&self);

@@ -333,6 +330,15 @@ pub trait EngineClient: MiningBlockChainClient {
 	///
 	/// The block corresponding the the parent hash must be stored already.
 	fn epoch_transition_for(&self, parent_hash: H256) -> Option<::engines::EpochTransition>;
+
+	/// Get block chain info.
+	fn chain_info(&self) -> BlockChainInfo;
+
+	/// Attempt to cast the engine client to a full client.
+	fn as_full_client(&self) -> Option<&BlockChainClient>;
+
+	/// Get a block number by ID.
+	fn block_number(&self, id: BlockId) -> Option<BlockNumber>;
 }

 /// Extended client interface for providing proofs of the state.
@@ -352,4 +358,7 @@ pub trait ProvingBlockChainClient: BlockChainClient {
 	/// Returns the output of the call and a vector of database items necessary
 	/// to reproduce it.
 	fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option<(Bytes, Vec<DBValue>)>;
+
+	/// Get an epoch change signal by block hash.
+	fn epoch_signal(&self, hash: H256) -> Option<Vec<u8>>;
 }
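For illustration: `EngineClient` no longer inherits the full `BlockChainClient`; engines that really need the full interface must go through `as_full_client` and handle the `None` case, as the validator contract later in this diff does. A minimal sketch of that narrow-trait-plus-upcast pattern, with toy traits standing in for the real ones:

// Toy stand-ins for the two client traits; only the shape matters here.
trait FullClient {
	fn transact_contract(&self, data: &[u8]) -> Result<(), String>;
}

trait EngineClient: Send + Sync {
	fn update_sealing(&self);
	/// Engines that need the full interface must ask for it explicitly.
	fn as_full_client(&self) -> Option<&dyn FullClient>;
}

struct Client;

impl FullClient for Client {
	fn transact_contract(&self, _data: &[u8]) -> Result<(), String> { Ok(()) }
}

impl EngineClient for Client {
	fn update_sealing(&self) { /* trigger sealing */ }
	fn as_full_client(&self) -> Option<&dyn FullClient> { Some(self) }
}

// A light client would implement EngineClient too, but return None here.
fn report(client: &dyn EngineClient, data: &[u8]) -> Result<(), String> {
	match client.as_full_client() {
		Some(full) => full.transact_contract(data),
		None => Err("No full client!".into()),
	}
}

fn main() {
	let client = Client;
	client.update_sealing();
	assert!(report(&client, b"payload").is_ok());
}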
@@ -25,7 +25,7 @@ use std::cmp;
 use account_provider::AccountProvider;
 use block::*;
 use builtin::Builtin;
-use client::{Client, EngineClient};
+use client::EngineClient;
 use engines::{Call, Engine, Seal, EngineError, ConstructedVerifier};
 use error::{Error, TransactionError, BlockError};
 use ethjson;
@@ -647,6 +647,8 @@ impl Engine for AuthorityRound {
 			(&active_set as &_, epoch_manager.epoch_transition_number)
 		};

+		// always report with "self.validators" so that the report actually gets
+		// to the contract.
 		let report = |report| match report {
 			Report::Benign(address, block_number) =>
 				self.validators.report_benign(&address, set_number, block_number),
@@ -739,13 +741,18 @@ impl Engine for AuthorityRound {
 		{
 			if let Ok(finalized) = epoch_manager.finality_checker.push_hash(chain_head.hash(), *chain_head.author()) {
 				let mut finalized = finalized.into_iter();
-				while let Some(hash) = finalized.next() {
-					if let Some(pending) = transition_store(hash) {
-						let finality_proof = ::std::iter::once(hash)
+				while let Some(finalized_hash) = finalized.next() {
+					if let Some(pending) = transition_store(finalized_hash) {
+						let finality_proof = ::std::iter::once(finalized_hash)
 							.chain(finalized)
 							.chain(epoch_manager.finality_checker.unfinalized_hashes())
-							.map(|hash| chain(hash)
-								.expect("these headers fetched before when constructing finality checker; qed"))
+							.map(|h| if h == chain_head.hash() {
+								// chain closure only stores ancestry, but the chain head is also
+								// unfinalized.
+								chain_head.clone()
+							} else {
+								chain(h).expect("these headers fetched before when constructing finality checker; qed")
+							})
 							.collect::<Vec<Header>>();

 						// this gives us the block number for `hash`, assuming it's ancestry.
@@ -809,9 +816,9 @@ impl Engine for AuthorityRound {
 		Ok(())
 	}

-	fn register_client(&self, client: Weak<Client>) {
+	fn register_client(&self, client: Weak<EngineClient>) {
 		*self.client.write() = Some(client.clone());
-		self.validators.register_contract(client);
+		self.validators.register_client(client);
 	}

 	fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) {
@@ -34,7 +34,7 @@ use error::{BlockError, Error};
 use evm::Schedule;
 use ethjson;
 use header::{Header, BlockNumber};
-use client::Client;
+use client::EngineClient;
 use semantic_version::SemanticVersion;
 use super::signer::EngineSigner;
 use super::validator_set::{ValidatorSet, SimpleList, new_validator_set};
@@ -237,8 +237,8 @@ impl Engine for BasicAuthority {
 		}
 	}

-	fn register_client(&self, client: Weak<Client>) {
-		self.validators.register_contract(client);
+	fn register_client(&self, client: Weak<EngineClient>) {
+		self.validators.register_client(client);
 	}

 	fn set_signer(&self, ap: Arc<AccountProvider>, address: Address, password: String) {
@@ -44,7 +44,7 @@ use self::epoch::PendingTransition;
 use account_provider::AccountProvider;
 use block::ExecutedBlock;
 use builtin::Builtin;
-use client::Client;
+use client::EngineClient;
 use vm::{EnvInfo, LastHashes, Schedule, CreateContractAddress};
 use error::Error;
 use header::{Header, BlockNumber};
@@ -124,12 +124,22 @@ pub type Headers<'a> = Fn(H256) -> Option<Header> + 'a;
 /// Type alias for a function we can query pending transitions by block hash through.
 pub type PendingTransitionStore<'a> = Fn(H256) -> Option<PendingTransition> + 'a;

+/// Proof dependent on state.
+pub trait StateDependentProof: Send + Sync {
+	/// Generate a proof, given the state.
+	fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String>;
+	/// Check a proof generated elsewhere (potentially by a peer).
+	// `engine` needed to check state proofs, while really this should
+	// just be state machine params.
+	fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String>;
+}
+
 /// Proof generated on epoch change.
 pub enum Proof {
-	/// Known proof (exctracted from signal)
+	/// Known proof (extracted from signal)
 	Known(Vec<u8>),
-	/// Extract proof from caller.
-	WithState(Box<Fn(&Call) -> Result<Vec<u8>, String>>),
+	/// State dependent proof.
+	WithState(Arc<StateDependentProof>),
 }

 /// Generated epoch verifier.
@@ -361,7 +371,7 @@ pub trait Engine : Sync + Send {
 	fn sign(&self, _hash: H256) -> Result<Signature, Error> { unimplemented!() }

 	/// Add Client which can be used for sealing, querying the state and sending messages.
-	fn register_client(&self, _client: Weak<Client>) {}
+	fn register_client(&self, _client: Weak<EngineClient>) {}

 	/// Trigger next step of the consensus engine.
 	fn step(&self) {}
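For illustration: `Proof::WithState` now carries an `Arc<StateDependentProof>` rather than a boxed closure, so a state-dependent proof generator can be cloned and shared across threads and can also expose a verification path. A minimal sketch of the trait-object shape follows; the `SignalProof` type and the plain-function caller are illustrative only, and the real trait's `check_proof` additionally takes an `&Engine`.

use std::sync::Arc;

// Simplified stand-in for the StateDependentProof trait added above.
trait StateDependentProof: Send + Sync {
	/// Generate a proof by querying state through the supplied caller.
	fn generate_proof(&self, caller: &dyn Fn(&[u8]) -> Result<Vec<u8>, String>) -> Result<Vec<u8>, String>;
	/// Check a proof generated elsewhere (potentially by a peer).
	fn check_proof(&self, proof: &[u8]) -> Result<(), String>;
}

// Hypothetical proof generator for an epoch signal.
struct SignalProof {
	contract_input: Vec<u8>,
}

impl StateDependentProof for SignalProof {
	fn generate_proof(&self, caller: &dyn Fn(&[u8]) -> Result<Vec<u8>, String>) -> Result<Vec<u8>, String> {
		caller(&self.contract_input)
	}
	fn check_proof(&self, proof: &[u8]) -> Result<(), String> {
		if proof.is_empty() { Err("empty proof".into()) } else { Ok(()) }
	}
}

// Stand-in for a state-backed call; it just echoes its input.
fn echo_call(input: &[u8]) -> Result<Vec<u8>, String> {
	Ok(input.to_vec())
}

fn main() {
	// Arc makes the generator cheaply cloneable and shareable, unlike Box<Fn(..)>.
	let proof_gen: Arc<dyn StateDependentProof> = Arc::new(SignalProof { contract_input: vec![1, 2, 3] });
	let proof = proof_gen.generate_proof(&echo_call).unwrap();
	assert!(proof_gen.check_proof(&proof).is_ok());
}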
@@ -35,7 +35,7 @@ use bigint::hash::{H256, H520};
 use parking_lot::RwLock;
 use util::*;
 use unexpected::{OutOfBounds, Mismatch};
-use client::{Client, EngineClient};
+use client::EngineClient;
 use error::{Error, BlockError};
 use header::{Header, BlockNumber};
 use builtin::Builtin;
@@ -571,18 +571,35 @@ impl Engine for Tendermint {
 		Ok(())
 	}

-	/// Verify validators and gas limit.
+	/// Verify gas limit.
 	fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
 		if header.number() == 0 {
 			return Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }).into());
 		}

+		let gas_limit_divisor = self.params().gas_limit_bound_divisor;
+		let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
+		let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
+		if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
+			self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
+			return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
+		}
+
+		Ok(())
+	}
+
+	fn verify_block_external(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
 		if let Ok(proposal) = ConsensusMessage::new_proposal(header) {
 			let proposer = proposal.verify()?;
 			if !self.is_authority(&proposer) {
 				return Err(EngineError::NotAuthorized(proposer).into());
 			}
-			self.check_view_proposer(header.parent_hash(), proposal.vote_step.height, proposal.vote_step.view, &proposer)?;
+			self.check_view_proposer(
+				header.parent_hash(),
+				proposal.vote_step.height,
+				proposal.vote_step.view,
+				&proposer
+			).map_err(Into::into)
 		} else {
 			let vote_step = VoteStep::new(header.number() as usize, consensus_view(header)?, Step::Precommit);
 			let precommit_hash = message_hash(vote_step.clone(), header.bare_hash());
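For illustration: the gas-limit check that moved into `verify_block_family` above bounds the child's gas limit strictly inside `parent ± parent / gas_limit_bound_divisor`. A tiny sketch of that band with plain integers; the real code uses `U256` and also reports the author as malicious on failure.

// Returns true when the child's gas limit stays strictly inside the band
// (parent - parent/divisor, parent + parent/divisor), matching the check above.
fn gas_limit_ok(parent_gas_limit: u64, child_gas_limit: u64, bound_divisor: u64) -> bool {
	let min_gas = parent_gas_limit - parent_gas_limit / bound_divisor;
	let max_gas = parent_gas_limit + parent_gas_limit / bound_divisor;
	child_gas_limit > min_gas && child_gas_limit < max_gas
}

fn main() {
	// With a divisor of 1024, an 8_000_000 parent allows roughly +/- 7_812.
	assert!(gas_limit_ok(8_000_000, 8_000_000, 1024));
	assert!(gas_limit_ok(8_000_000, 8_007_000, 1024));
	assert!(!gas_limit_ok(8_000_000, 8_010_000, 1024)); // outside the band
}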
@@ -608,18 +625,8 @@ impl Engine for Tendermint {
 				}
 			}

-			self.check_above_threshold(origins.len())?
+			self.check_above_threshold(origins.len()).map_err(Into::into)
 		}
-
-		let gas_limit_divisor = self.params().gas_limit_bound_divisor;
-		let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
-		let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
-		if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
-			self.validators.report_malicious(header.author(), header.number(), header.number(), Default::default());
-			return Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }).into());
-		}
-
-		Ok(())
 	}

 	fn signals_epoch_end(&self, header: &Header, block: Option<&[u8]>, receipts: Option<&[::receipt::Receipt]>)
@@ -754,13 +761,12 @@ impl Engine for Tendermint {
 		self.to_step(next_step);
 	}

-	fn register_client(&self, client: Weak<Client>) {
-		use client::BlockChainClient;
+	fn register_client(&self, client: Weak<EngineClient>) {
 		if let Some(c) = client.upgrade() {
 			self.height.store(c.chain_info().best_block_number as usize + 1, AtomicOrdering::SeqCst);
 		}
 		*self.client.write() = Some(client.clone());
-		self.validators.register_contract(client);
+		self.validators.register_client(client);
 	}
 }

@@ -888,14 +894,14 @@ mod tests {
 		let seal = proposal_seal(&tap, &header, 0);
 		header.set_seal(seal);
 		// Good proposer.
-		assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
+		assert!(engine.verify_block_external(&header, None).is_ok());

 		let validator = insert_and_unlock(&tap, "0");
 		header.set_author(validator);
 		let seal = proposal_seal(&tap, &header, 0);
 		header.set_seal(seal);
 		// Bad proposer.
-		match engine.verify_block_family(&header, &parent_header, None) {
+		match engine.verify_block_external(&header, None) {
 			Err(Error::Engine(EngineError::NotProposer(_))) => {},
 			_ => panic!(),
 		}
@@ -905,7 +911,7 @@ mod tests {
 		let seal = proposal_seal(&tap, &header, 0);
 		header.set_seal(seal);
 		// Not authority.
-		match engine.verify_block_family(&header, &parent_header, None) {
+		match engine.verify_block_external(&header, None) {
 			Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
 			_ => panic!(),
 		};
@@ -935,7 +941,7 @@ mod tests {
 		header.set_seal(seal.clone());

 		// One good signature is not enough.
-		match engine.verify_block_family(&header, &parent_header, None) {
+		match engine.verify_block_external(&header, None) {
 			Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {},
 			_ => panic!(),
 		}
@@ -946,7 +952,7 @@ mod tests {
 		seal[2] = ::rlp::encode_list(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).into_vec();
 		header.set_seal(seal.clone());

-		assert!(engine.verify_block_family(&header, &parent_header, None).is_ok());
+		assert!(engine.verify_block_external(&header, None).is_ok());

 		let bad_voter = insert_and_unlock(&tap, "101");
 		let bad_signature = tap.sign(bad_voter, None, keccak(vote_info)).unwrap();
@@ -955,7 +961,7 @@ mod tests {
 		header.set_seal(seal);

 		// One good and one bad signature.
-		match engine.verify_block_family(&header, &parent_header, None) {
+		match engine.verify_block_external(&header, None) {
 			Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
 			_ => panic!(),
 		};
@@ -1001,7 +1007,7 @@ mod tests {
 		let client = generate_dummy_client(0);
 		let notify = Arc::new(TestNotify::default());
 		client.add_notify(notify.clone());
-		engine.register_client(Arc::downgrade(&client));
+		engine.register_client(Arc::downgrade(&client) as _);

 		let prevote_current = vote(engine.as_ref(), |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal);

@@ -1019,7 +1025,6 @@ mod tests {
 	fn seal_submission() {
 		use ethkey::{Generator, Random};
 		use transaction::{Transaction, Action};
-		use client::BlockChainClient;

 		let tap = Arc::new(AccountProvider::transient_provider());
 		// Accounts for signing votes.
@@ -1032,7 +1037,7 @@ mod tests {

 		let notify = Arc::new(TestNotify::default());
 		client.add_notify(notify.clone());
-		engine.register_client(Arc::downgrade(&client));
+		engine.register_client(Arc::downgrade(&client) as _);

 		let keypair = Random.generate().unwrap();
 		let transaction = Transaction {
@@ -25,7 +25,7 @@ use util::*;
 use futures::Future;
 use native_contracts::ValidatorReport as Provider;

-use client::{Client, BlockChainClient};
+use client::EngineClient;
 use engines::{Call, Engine};
 use header::{Header, BlockNumber};

@@ -36,7 +36,7 @@ use super::safe_contract::ValidatorSafeContract;
 pub struct ValidatorContract {
 	validators: ValidatorSafeContract,
 	provider: Provider,
-	client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
+	client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
 }

 impl ValidatorContract {
@@ -58,7 +58,13 @@ impl ValidatorContract {
 		Box::new(move |a, d| client.as_ref()
 			.and_then(Weak::upgrade)
 			.ok_or("No client!".into())
-			.and_then(|c| c.transact_contract(a, d).map_err(|e| format!("Transaction import error: {}", e)))
+			.and_then(|c| {
+				match c.as_full_client() {
+					Some(c) => c.transact_contract(a, d)
+						.map_err(|e| format!("Transaction import error: {}", e)),
+					None => Err("No full client!".into()),
+				}
+			})
 			.map(|_| Default::default()))
 	}
 }
@@ -120,8 +126,8 @@ impl ValidatorSet for ValidatorContract {
 		}
 	}

-	fn register_contract(&self, client: Weak<Client>) {
-		self.validators.register_contract(client.clone());
+	fn register_client(&self, client: Weak<EngineClient>) {
+		self.validators.register_client(client.clone());
 		*self.client.write() = Some(client);
 	}
 }
@@ -148,7 +154,7 @@ mod tests {
 	fn fetches_validators() {
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, None);
 		let vc = Arc::new(ValidatorContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap()));
-		vc.register_contract(Arc::downgrade(&client));
+		vc.register_client(Arc::downgrade(&client) as _);
 		let last_hash = client.best_block_header().hash();
 		assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap()));
 		assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap()));
@@ -159,7 +165,7 @@ mod tests {
 		let tap = Arc::new(AccountProvider::transient_provider());
 		let v1 = tap.insert_account(keccak("1").into(), "").unwrap();
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_contract, Some(tap.clone()));
-		client.engine().register_client(Arc::downgrade(&client));
+		client.engine().register_client(Arc::downgrade(&client) as _);
 		let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap();

 		// Make sure reporting can be done.
@@ -28,7 +28,7 @@ use ids::BlockId;
 use bigint::hash::H256;
 use util::{Bytes, Address};
 use ethjson::spec::ValidatorSet as ValidatorSpec;
-use client::Client;
+use client::EngineClient;
 use header::{Header, BlockNumber};

 #[cfg(test)]
@@ -142,5 +142,5 @@ pub trait ValidatorSet: Send + Sync {
 	/// Notifies about benign misbehaviour.
 	fn report_benign(&self, _validator: &Address, _set_block: BlockNumber, _block: BlockNumber) {}
 	/// Allows blockchain state access.
-	fn register_contract(&self, _client: Weak<Client>) {}
+	fn register_client(&self, _client: Weak<EngineClient>) {}
 }
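For illustration: with `register_client` taking `Weak<EngineClient>` instead of `Weak<Client>`, call sites pass `Arc::downgrade(&client) as _` so the concrete weak pointer coerces into a weak trait object, and consumers upgrade it on use. A minimal, self-contained sketch of that pattern; the trait and types here are simplified stand-ins (std's `RwLock` is used instead of parking_lot's).

use std::sync::{Arc, RwLock, Weak};

trait EngineClient: Send + Sync {
	fn best_block_number(&self) -> u64;
}

struct Client {
	best: u64,
}

impl EngineClient for Client {
	fn best_block_number(&self) -> u64 { self.best }
}

#[derive(Default)]
struct ValidatorSet {
	client: RwLock<Option<Weak<dyn EngineClient>>>,
}

impl ValidatorSet {
	// Store only a weak handle so the validator set never keeps the client alive.
	fn register_client(&self, client: Weak<dyn EngineClient>) {
		*self.client.write().unwrap() = Some(client);
	}

	fn best_block(&self) -> Option<u64> {
		self.client.read().unwrap()
			.as_ref()
			.and_then(Weak::upgrade)
			.map(|c| c.best_block_number())
	}
}

fn main() {
	let client = Arc::new(Client { best: 7 });
	let validators = ValidatorSet::default();
	// `as _` coerces Weak<Client> into Weak<dyn EngineClient>.
	validators.register_client(Arc::downgrade(&client) as _);
	assert_eq!(validators.best_block(), Some(7));
}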
|
@ -24,7 +24,7 @@ use parking_lot::RwLock;
|
|||||||
use util::{Bytes, Address};
|
use util::{Bytes, Address};
|
||||||
use ids::BlockId;
|
use ids::BlockId;
|
||||||
use header::{BlockNumber, Header};
|
use header::{BlockNumber, Header};
|
||||||
use client::{Client, BlockChainClient};
|
use client::EngineClient;
|
||||||
use super::{SystemCall, ValidatorSet};
|
use super::{SystemCall, ValidatorSet};
|
||||||
|
|
||||||
type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
|
type BlockNumberLookup = Box<Fn(BlockId) -> Result<BlockNumber, String> + Send + Sync + 'static>;
|
||||||
@ -131,9 +131,9 @@ impl ValidatorSet for Multi {
|
|||||||
self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block);
|
self.correct_set_by_number(set_block).1.report_benign(validator, set_block, block);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn register_contract(&self, client: Weak<Client>) {
|
fn register_client(&self, client: Weak<EngineClient>) {
|
||||||
for set in self.sets.values() {
|
for set in self.sets.values() {
|
||||||
set.register_contract(client.clone());
|
set.register_client(client.clone());
|
||||||
}
|
}
|
||||||
*self.block_number.write() = Box::new(move |id| client
|
*self.block_number.write() = Box::new(move |id| client
|
||||||
.upgrade()
|
.upgrade()
|
||||||
@@ -148,7 +148,7 @@ mod tests {
 	use std::collections::BTreeMap;
 	use hash::keccak;
 	use account_provider::AccountProvider;
-	use client::{BlockChainClient, EngineClient};
+	use client::BlockChainClient;
 	use engines::EpochChange;
 	use engines::validator_set::ValidatorSet;
 	use ethkey::Secret;
@@ -170,7 +170,7 @@ mod tests {
 		let v0 = tap.insert_account(s0.clone(), "").unwrap();
 		let v1 = tap.insert_account(keccak("1").into(), "").unwrap();
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_multi, Some(tap));
-		client.engine().register_client(Arc::downgrade(&client));
+		client.engine().register_client(Arc::downgrade(&client) as _);
 
 		// Make sure txs go through.
 		client.miner().set_gas_floor_target(1_000_000.into());
@@ -178,27 +178,27 @@ mod tests {
 		// Wrong signer for the first block.
 		client.miner().set_engine_signer(v1, "".into()).unwrap();
 		client.transact_contract(Default::default(), Default::default()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 0);
 		// Right signer for the first block.
 		client.miner().set_engine_signer(v0, "".into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 1);
 		// This time v0 is wrong.
 		client.transact_contract(Default::default(), Default::default()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 1);
 		client.miner().set_engine_signer(v1, "".into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 2);
 		// v1 is still good.
 		client.transact_contract(Default::default(), Default::default()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 3);
 
 		// Check syncing.
 		let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_multi, 0, 0, &[]);
-		sync_client.engine().register_client(Arc::downgrade(&sync_client));
+		sync_client.engine().register_client(Arc::downgrade(&sync_client) as _);
 		for i in 1..4 {
 			sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap();
 		}
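Note: the `register_contract` → `register_client` rename above also narrows the parameter to a trait object, `Weak<EngineClient>`, which is why call sites now pass `Arc::downgrade(&client) as _`. A minimal illustrative sketch of that unsizing coercion follows; it is not part of the diff, and `EngineClient`/`Client` here are stand-ins for the real types:

    use std::sync::{Arc, Weak};

    // Stand-ins: only the Weak<T> -> Weak<dyn Trait> coercion pattern matters here.
    trait EngineClient { fn update_sealing(&self); }
    struct Client;
    impl EngineClient for Client { fn update_sealing(&self) {} }

    // Engines keep a weak trait-object handle so they never keep the client alive.
    fn register_client(client: Weak<dyn EngineClient>) {
        if let Some(c) = client.upgrade() {
            c.update_sealing();
        }
    }

    fn main() {
        let client = Arc::new(Client);
        // `Weak<Client>` is unsized to `Weak<dyn EngineClient>`; the diff writes this
        // as `Arc::downgrade(&client) as _` and lets inference pick the target type.
        register_client(Arc::downgrade(&client) as Weak<dyn EngineClient>);
    }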
@@ -23,14 +23,15 @@ use hash::keccak;
 
 use bigint::prelude::U256;
 use bigint::hash::{H160, H256};
-use parking_lot::RwLock;
+use parking_lot::{Mutex, RwLock};
 
 use util::*;
 use util::cache::MemoryLruCache;
 use unexpected::Mismatch;
 use rlp::{UntrustedRlp, RlpStream};
 
 use basic_types::LogBloom;
-use client::{Client, BlockChainClient};
+use client::EngineClient;
 use engines::{Call, Engine};
 use header::Header;
 use ids::BlockId;
@@ -49,12 +50,35 @@ lazy_static! {
 	static ref EVENT_NAME_HASH: H256 = keccak(EVENT_NAME);
 }
 
+// state-dependent proofs for the safe contract:
+// only "first" proofs are such.
+struct StateProof {
+	header: Mutex<Header>,
+	provider: Provider,
+}
+
+impl ::engines::StateDependentProof for StateProof {
+	fn generate_proof(&self, caller: &Call) -> Result<Vec<u8>, String> {
+		prove_initial(&self.provider, &*self.header.lock(), caller)
+	}
+
+	fn check_proof(&self, engine: &Engine, proof: &[u8]) -> Result<(), String> {
+		let (header, state_items) = decode_first_proof(&UntrustedRlp::new(proof))
+			.map_err(|e| format!("proof incorrectly encoded: {}", e))?;
+		if &header != &*self.header.lock(){
+			return Err("wrong header in proof".into());
+		}
+
+		check_first_proof(engine, &self.provider, header, &state_items).map(|_| ())
+	}
+}
+
 /// The validator contract should have the following interface:
 pub struct ValidatorSafeContract {
 	pub address: Address,
 	validators: RwLock<MemoryLruCache<H256, SimpleList>>,
 	provider: Provider,
-	client: RwLock<Option<Weak<Client>>>, // TODO [keorn]: remove
+	client: RwLock<Option<Weak<EngineClient>>>, // TODO [keorn]: remove
 }
 
 // first proof is just a state proof call of `getValidators` at header's state.
@ -68,6 +92,59 @@ fn encode_first_proof(header: &Header, state_items: &[Vec<u8>]) -> Bytes {
|
|||||||
stream.out()
|
stream.out()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// check a first proof: fetch the validator set at the given block.
|
||||||
|
fn check_first_proof(engine: &Engine, provider: &Provider, old_header: Header, state_items: &[DBValue])
|
||||||
|
-> Result<Vec<Address>, String>
|
||||||
|
{
|
||||||
|
use transaction::{Action, Transaction};
|
||||||
|
|
||||||
|
// TODO: match client contract_call_tx more cleanly without duplication.
|
||||||
|
const PROVIDED_GAS: u64 = 50_000_000;
|
||||||
|
|
||||||
|
let env_info = ::vm::EnvInfo {
|
||||||
|
number: old_header.number(),
|
||||||
|
author: *old_header.author(),
|
||||||
|
difficulty: *old_header.difficulty(),
|
||||||
|
gas_limit: PROVIDED_GAS.into(),
|
||||||
|
timestamp: old_header.timestamp(),
|
||||||
|
last_hashes: {
|
||||||
|
// this will break if we don't inclue all 256 last hashes.
|
||||||
|
let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect();
|
||||||
|
last_hashes[255] = *old_header.parent_hash();
|
||||||
|
Arc::new(last_hashes)
|
||||||
|
},
|
||||||
|
gas_used: 0.into(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// check state proof using given engine.
|
||||||
|
let number = old_header.number();
|
||||||
|
provider.get_validators(move |a, d| {
|
||||||
|
let from = Address::default();
|
||||||
|
let tx = Transaction {
|
||||||
|
nonce: engine.account_start_nonce(number),
|
||||||
|
action: Action::Call(a),
|
||||||
|
gas: PROVIDED_GAS.into(),
|
||||||
|
gas_price: U256::default(),
|
||||||
|
value: U256::default(),
|
||||||
|
data: d,
|
||||||
|
}.fake_sign(from);
|
||||||
|
|
||||||
|
let res = ::state::check_proof(
|
||||||
|
state_items,
|
||||||
|
*old_header.state_root(),
|
||||||
|
&tx,
|
||||||
|
engine,
|
||||||
|
&env_info,
|
||||||
|
);
|
||||||
|
|
||||||
|
match res {
|
||||||
|
::state::ProvedExecution::BadProof => Err("Bad proof".into()),
|
||||||
|
::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)),
|
||||||
|
::state::ProvedExecution::Complete(e) => Ok(e.output),
|
||||||
|
}
|
||||||
|
}).wait()
|
||||||
|
}
|
||||||
|
|
||||||
fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<DBValue>), ::error::Error> {
|
fn decode_first_proof(rlp: &UntrustedRlp) -> Result<(Header, Vec<DBValue>), ::error::Error> {
|
||||||
let header = rlp.val_at(0)?;
|
let header = rlp.val_at(0)?;
|
||||||
let state_items = rlp.at(1)?.iter().map(|x| {
|
let state_items = rlp.at(1)?.iter().map(|x| {
|
||||||
@ -105,8 +182,7 @@ fn prove_initial(provider: &Provider, header: &Header, caller: &Call) -> Result<
|
|||||||
Ok(result)
|
Ok(result)
|
||||||
};
|
};
|
||||||
|
|
||||||
provider.get_validators(caller)
|
provider.get_validators(caller).wait()
|
||||||
.wait()
|
|
||||||
};
|
};
|
||||||
|
|
||||||
res.map(|validators| {
|
res.map(|validators| {
|
||||||
@ -235,7 +311,12 @@ impl ValidatorSet for ValidatorSafeContract {
|
|||||||
Box::new(move |addr, data| client.as_ref()
|
Box::new(move |addr, data| client.as_ref()
|
||||||
.and_then(Weak::upgrade)
|
.and_then(Weak::upgrade)
|
||||||
.ok_or("No client!".into())
|
.ok_or("No client!".into())
|
||||||
.and_then(|c| c.call_contract(id, addr, data))
|
.and_then(|c| {
|
||||||
|
match c.as_full_client() {
|
||||||
|
Some(c) => c.call_contract(id, addr, data),
|
||||||
|
None => Err("No full client!".into()),
|
||||||
|
}
|
||||||
|
})
|
||||||
.map(|out| (out, Vec::new()))) // generate no proofs in general
|
.map(|out| (out, Vec::new()))) // generate no proofs in general
|
||||||
}
|
}
|
||||||
|
|
||||||
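Note: a short illustrative sketch (not part of the diff) of the fallback used above, where an `EngineClient` handle only exposes contract calls when a full client is actually behind it; the traits below are stand-ins except for `as_full_client` and `call_contract`, which appear in the diff:

    // Stand-in traits; the real ones live in the `client` module.
    trait FullClient {
        fn call_contract(&self, data: Vec<u8>) -> Result<Vec<u8>, String>;
    }

    trait EngineClient {
        // `None` means only a light client is available behind this handle.
        fn as_full_client(&self) -> Option<&dyn FullClient>;
    }

    fn call_via_engine_client(c: &dyn EngineClient, data: Vec<u8>) -> Result<Vec<u8>, String> {
        match c.as_full_client() {
            Some(full) => full.call_contract(data),
            None => Err("No full client!".into()),
        }
    }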
@@ -260,9 +341,11 @@ impl ValidatorSet for ValidatorSafeContract {
 		// transition to the first block of a contract requires finality but has no log event.
 		if first {
 			debug!(target: "engine", "signalling transition to fresh contract.");
-			let (provider, header) = (self.provider.clone(), header.clone());
-			let with_caller: Box<Fn(&Call) -> _> = Box::new(move |caller| prove_initial(&provider, &header, caller));
-			return ::engines::EpochChange::Yes(::engines::Proof::WithState(with_caller))
+			let state_proof = Arc::new(StateProof {
+				header: Mutex::new(header.clone()),
+				provider: self.provider.clone(),
+			});
+			return ::engines::EpochChange::Yes(::engines::Proof::WithState(state_proof as Arc<_>));
 		}
 
 		// otherwise, we're checking for logs.
@@ -291,61 +374,16 @@ impl ValidatorSet for ValidatorSafeContract {
 	fn epoch_set(&self, first: bool, engine: &Engine, _number: ::header::BlockNumber, proof: &[u8])
 		-> Result<(SimpleList, Option<H256>), ::error::Error>
 	{
-		use transaction::{Action, Transaction};
-
 		let rlp = UntrustedRlp::new(proof);
 
 		if first {
 			trace!(target: "engine", "Recovering initial epoch set");
-
-			// TODO: match client contract_call_tx more cleanly without duplication.
-			const PROVIDED_GAS: u64 = 50_000_000;
-
 			let (old_header, state_items) = decode_first_proof(&rlp)?;
-			let old_hash = old_header.hash();
-
-			let env_info = ::vm::EnvInfo {
-				number: old_header.number(),
-				author: *old_header.author(),
-				difficulty: *old_header.difficulty(),
-				gas_limit: PROVIDED_GAS.into(),
-				timestamp: old_header.timestamp(),
-				last_hashes: {
-					// this will break if we don't inclue all 256 last hashes.
-					let mut last_hashes: Vec<_> = (0..256).map(|_| H256::default()).collect();
-					last_hashes[255] = *old_header.parent_hash();
-					Arc::new(last_hashes)
-				},
-				gas_used: 0.into(),
-			};
-
-			// check state proof using given engine.
 			let number = old_header.number();
-			let addresses = self.provider.get_validators(move |a, d| {
-				let from = Address::default();
-				let tx = Transaction {
-					nonce: engine.account_start_nonce(number),
-					action: Action::Call(a),
-					gas: PROVIDED_GAS.into(),
-					gas_price: U256::default(),
-					value: U256::default(),
-					data: d,
-				}.fake_sign(from);
-
-				let res = ::state::check_proof(
-					&state_items,
-					*old_header.state_root(),
-					&tx,
-					engine,
-					&env_info,
-				);
-
-				match res {
-					::state::ProvedExecution::BadProof => Err("Bad proof".into()),
-					::state::ProvedExecution::Failed(e) => Err(format!("Failed call: {}", e)),
-					::state::ProvedExecution::Complete(e) => Ok(e.output),
-				}
-			}).wait().map_err(::engines::EngineError::InsufficientProof)?;
+			let old_hash = old_header.hash();
+			let addresses = check_first_proof(engine, &self.provider, old_header, &state_items)
+				.map_err(::engines::EngineError::InsufficientProof)?;
 
 			trace!(target: "engine", "extracted epoch set at #{}: {} addresses",
 				number, addresses.len());
@@ -419,7 +457,7 @@ impl ValidatorSet for ValidatorSafeContract {
 		}))
 	}
 
-	fn register_contract(&self, client: Weak<Client>) {
+	fn register_client(&self, client: Weak<EngineClient>) {
 		trace!(target: "engine", "Setting up contract caller.");
 		*self.client.write() = Some(client);
 	}
@@ -435,7 +473,7 @@ mod tests {
 	use spec::Spec;
 	use account_provider::AccountProvider;
 	use transaction::{Transaction, Action};
-	use client::{BlockChainClient, EngineClient};
+	use client::BlockChainClient;
 	use ethkey::Secret;
 	use miner::MinerService;
 	use tests::helpers::{generate_dummy_client_with_spec_and_accounts, generate_dummy_client_with_spec_and_data};
@@ -446,7 +484,7 @@ mod tests {
 	fn fetches_validators() {
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, None);
 		let vc = Arc::new(ValidatorSafeContract::new("0000000000000000000000000000000000000005".parse::<Address>().unwrap()));
-		vc.register_contract(Arc::downgrade(&client));
+		vc.register_client(Arc::downgrade(&client) as _);
 		let last_hash = client.best_block_header().hash();
 		assert!(vc.contains(&last_hash, &"7d577a597b2742b498cb5cf0c26cdcd726d39e6e".parse::<Address>().unwrap()));
 		assert!(vc.contains(&last_hash, &"82a978b3f5962a5b0957d9ee9eef472ee55b42f1".parse::<Address>().unwrap()));
@@ -460,7 +498,7 @@ mod tests {
 		let v1 = tap.insert_account(keccak("0").into(), "").unwrap();
 		let chain_id = Spec::new_validator_safe_contract().chain_id();
 		let client = generate_dummy_client_with_spec_and_accounts(Spec::new_validator_safe_contract, Some(tap));
-		client.engine().register_client(Arc::downgrade(&client));
+		client.engine().register_client(Arc::downgrade(&client) as _);
 		let validator_contract = "0000000000000000000000000000000000000005".parse::<Address>().unwrap();
 
 		client.miner().set_engine_signer(v1, "".into()).unwrap();
@@ -474,7 +512,7 @@ mod tests {
 			data: "bfc708a000000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
 		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 1);
 		// Add "1" validator back in.
 		let tx = Transaction {
@@ -486,13 +524,13 @@ mod tests {
 			data: "4d238c8e00000000000000000000000082a978b3f5962a5b0957d9ee9eef472ee55b42f1".from_hex().unwrap(),
 		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		// The transaction is not yet included so still unable to seal.
 		assert_eq!(client.chain_info().best_block_number, 1);
 
 		// Switch to the validator that is still there.
 		client.miner().set_engine_signer(v0, "".into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		assert_eq!(client.chain_info().best_block_number, 2);
 		// Switch back to the added validator, since the state is updated.
 		client.miner().set_engine_signer(v1, "".into()).unwrap();
@@ -505,13 +543,13 @@ mod tests {
 			data: Vec::new(),
 		}.sign(&s0, Some(chain_id));
 		client.miner().import_own_transaction(client.as_ref(), tx.into()).unwrap();
-		client.update_sealing();
+		::client::EngineClient::update_sealing(&*client);
 		// Able to seal again.
 		assert_eq!(client.chain_info().best_block_number, 3);
 
 		// Check syncing.
 		let sync_client = generate_dummy_client_with_spec_and_data(Spec::new_validator_safe_contract, 0, 0, &[]);
-		sync_client.engine().register_client(Arc::downgrade(&sync_client));
+		sync_client.engine().register_client(Arc::downgrade(&sync_client) as _);
 		for i in 1..4 {
 			sync_client.import_block(client.block(BlockId::Number(i)).unwrap().into_inner()).unwrap();
 		}
@@ -40,7 +40,7 @@ use rlp::{self, UntrustedRlp};
 use vm::LastHashes;
 use semantic_version::SemanticVersion;
 use tx_filter::{TransactionFilter};
-use client::{Client, BlockChainClient};
+use client::EngineClient;
 
 /// Parity tries to round block.gas_limit to multiple of this constant
 pub const PARITY_GAS_LIMIT_DETERMINANT: U256 = U256([37, 0, 0, 0]);
@@ -460,9 +460,9 @@ impl Engine for Arc<Ethash> {
 		Some(Box::new(::snapshot::PowSnapshot::new(SNAPSHOT_BLOCKS, MAX_SNAPSHOT_BLOCKS)))
 	}
 
-	fn register_client(&self, client: Weak<Client>) {
+	fn register_client(&self, client: Weak<EngineClient>) {
 		if let Some(ref filter) = self.tx_filter {
-			filter.register_client(client as Weak<BlockChainClient>);
+			filter.register_client(client);
 		}
 	}
 
@@ -261,8 +261,13 @@ impl Header {
 		s.out()
 	}
 
-	/// Get the KECCAK (Keccak) of this header, optionally `with_seal`.
+	/// Get the SHA3 (Keccak) of this header, optionally `with_seal`.
 	pub fn rlp_keccak(&self, with_seal: Seal) -> H256 { keccak(self.rlp(with_seal)) }
+
+	/// Encode the header, getting a type-safe wrapper around the RLP.
+	pub fn encoded(&self) -> ::encoded::Header {
+		::encoded::Header::new(self.rlp(Seal::With))
+	}
 }
 
 impl Decodable for Header {
@@ -102,6 +102,7 @@ extern crate num;
 extern crate parking_lot;
 extern crate price_info;
 extern crate rand;
+extern crate rayon;
 extern crate rlp;
 extern crate hash;
 extern crate heapsize;
@@ -116,7 +116,7 @@ impl ClientService {
 		});
 		io_service.register_handler(client_io)?;
 
-		spec.engine.register_client(Arc::downgrade(&client));
+		spec.engine.register_client(Arc::downgrade(&client) as _);
 
 		let stop_guard = ::devtools::StopGuard::new();
 		run_ipc(ipc_path, client.clone(), snapshot.clone(), stop_guard.share());
@@ -93,7 +93,7 @@ fn make_chain(accounts: Arc<AccountProvider>, blocks_beyond: usize, transitions:
 	let mut cur_signers = vec![*RICH_ADDR];
 	{
 		let engine = client.engine();
-		engine.register_client(Arc::downgrade(&client));
+		engine.register_client(Arc::downgrade(&client) as _);
 	}
 
 	{
@@ -36,7 +36,6 @@ use factory::Factories;
 use header::{BlockNumber, Header};
 use pod_state::*;
 use rlp::{Rlp, RlpStream};
-use state_db::StateDB;
 use state::{Backend, State, Substate};
 use state::backend::Basic as BasicBackend;
 use trace::{NoopTracer, NoopVMTracer};
@@ -465,7 +464,7 @@ impl Spec {
 	}
 
 	/// Ensure that the given state DB has the trie nodes in for the genesis state.
-	pub fn ensure_db_good(&self, db: StateDB, factories: &Factories) -> Result<StateDB, Error> {
+	pub fn ensure_db_good<T: Backend>(&self, db: T, factories: &Factories) -> Result<T, Error> {
 		if db.as_hashdb().contains(&self.state_root()) {
 			return Ok(db)
 		}
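Note: a stand-alone sketch (not part of the diff) of the generalization made to `ensure_db_good`, which goes from the concrete `StateDB` to any `T: Backend` so the new genesis-epoch code in the next hunk can pass an in-memory backend; the `Backend` trait below is a stand-in, not the real one:

    // Stand-in backend abstraction: expose only what the function needs.
    trait Backend {
        fn contains(&self, key: &[u8; 32]) -> bool;
    }

    struct StateDB;
    impl Backend for StateDB {
        fn contains(&self, _key: &[u8; 32]) -> bool { true }
    }

    // Take the backend by value and hand it back, so callers keep using
    // whichever concrete type they passed in (StateDB, an in-memory DB, ...).
    fn ensure_db_good<T: Backend>(db: T, root: &[u8; 32]) -> Result<T, String> {
        if db.contains(root) {
            return Ok(db);
        }
        // ... otherwise populate the genesis state here ...
        Ok(db)
    }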
@@ -487,6 +486,63 @@ impl Spec {
 			.and_then(|x| load_from(cache_dir, x).map_err(fmt))
 	}
 
+	/// initialize genesis epoch data, using in-memory database for
+	/// constructor.
+	pub fn genesis_epoch_data(&self) -> Result<Vec<u8>, String> {
+		use transaction::{Action, Transaction};
+		use util::{journaldb, kvdb};
+
+		let genesis = self.genesis_header();
+
+		let factories = Default::default();
+		let mut db = journaldb::new(
+			Arc::new(kvdb::in_memory(0)),
+			journaldb::Algorithm::Archive,
+			None,
+		);
+
+		self.ensure_db_good(BasicBackend(db.as_hashdb_mut()), &factories)
+			.map_err(|e| format!("Unable to initialize genesis state: {}", e))?;
+
+		let call = |a, d| {
+			let mut db = db.boxed_clone();
+			let env_info = ::evm::EnvInfo {
+				number: 0,
+				author: *genesis.author(),
+				timestamp: genesis.timestamp(),
+				difficulty: *genesis.difficulty(),
+				gas_limit: *genesis.gas_limit(),
+				last_hashes: Arc::new(Vec::new()),
+				gas_used: 0.into()
+			};
+
+			let from = Address::default();
+			let tx = Transaction {
+				nonce: self.engine.account_start_nonce(0),
+				action: Action::Call(a),
+				gas: U256::from(50_000_000), // TODO: share with client.
+				gas_price: U256::default(),
+				value: U256::default(),
+				data: d,
+			}.fake_sign(from);
+
+			let res = ::state::prove_transaction(
+				db.as_hashdb_mut(),
+				*genesis.state_root(),
+				&tx,
+				&*self.engine,
+				&env_info,
+				factories.clone(),
+				true,
+			);
+
+			res.map(|(out, proof)| (out, proof.into_iter().map(|x| x.into_vec()).collect()))
+				.ok_or_else(|| "Failed to prove call: insufficient state".into())
+		};
+
+		self.engine.genesis_epoch_data(&genesis, &call)
+	}
+
 	/// Create a new Spec which conforms to the Frontier-era Morden chain except that it's a NullEngine consensus.
 	pub fn new_test() -> Spec { load_bundled!("null_morden") }
 
@@ -19,10 +19,10 @@
 use std::sync::Weak;
 use std::collections::HashMap;
 use std::collections::hash_map::Entry;
-use native_contracts::TransactAcl as Contract;
-use client::{BlockChainClient, BlockId, ChainNotify};
-use util::{Address, Bytes};
 use bigint::hash::H256;
+use native_contracts::TransactAcl as Contract;
+use client::{EngineClient, BlockId, ChainNotify};
+use util::{Address, Bytes};
 use parking_lot::{Mutex, RwLock};
 use futures::{self, Future};
 use spec::CommonParams;
@@ -43,7 +43,7 @@ mod tx_permissions {
 /// Connection filter that uses a contract to manage permissions.
 pub struct TransactionFilter {
 	contract: Mutex<Option<Contract>>,
-	client: RwLock<Option<Weak<BlockChainClient>>>,
+	client: RwLock<Option<Weak<EngineClient>>>,
 	contract_address: Address,
 	permission_cache: Mutex<HashMap<(H256, Address), u32>>,
 }
@@ -67,7 +67,7 @@ impl TransactionFilter {
 	}
 
 	/// Set client reference to be used for contract call.
-	pub fn register_client(&self, client: Weak<BlockChainClient>) {
+	pub fn register_client(&self, client: Weak<EngineClient>) {
 		*self.client.write() = Some(client);
 	}
 
@@ -79,6 +79,12 @@ impl TransactionFilter {
 			Some(client) => client,
 			_ => return false,
 		};
 
+		let client = match client.as_full_client() {
+			Some(client) => client,
+			_ => return false, // TODO: how to handle verification for light clients?
+		};
+
 		let tx_type = match transaction.action {
 			Action::Create => tx_permissions::CREATE,
 			Action::Call(address) => if client.code_hash(&address, BlockId::Hash(*parent_hash)).map_or(false, |c| c != KECCAK_EMPTY) {
@@ -205,7 +211,7 @@ mod test {
 		let key4 = KeyPair::from_secret(Secret::from("0000000000000000000000000000000000000000000000000000000000000004")).unwrap();
 
 		let filter = TransactionFilter::from_params(spec.params()).unwrap();
-		filter.register_client(Arc::downgrade(&client) as Weak<BlockChainClient>);
+		filter.register_client(Arc::downgrade(&client) as Weak<_>);
 		let mut basic_tx = Transaction::default();
 		basic_tx.action = Action::Call(Address::from("000000000000000000000000000000000000032"));
 		let create_tx = Transaction::default();
@@ -392,14 +392,13 @@ mod tests {
 		self.numbers.get(&index).cloned()
 	}
 
-	fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec<BlockNumber> {
-		unimplemented!()
-	}
-
 	fn block_receipts(&self, _hash: &H256) -> Option<BlockReceipts> {
 		unimplemented!()
 	}
 
+	fn blocks_with_bloom(&self, _bloom: &H2048, _from_block: BlockNumber, _to_block: BlockNumber) -> Vec<BlockNumber> {
+		unimplemented!()
+	}
+
 	fn logs<F>(&self, _blocks: Vec<BlockNumber>, _matches: F, _limit: Option<usize>) -> Vec<LocalizedLogEntry>
 		where F: Fn(&LogEntry) -> bool, Self: Sized {
@@ -8,7 +8,7 @@ byteorder = "1.0"
 ethcore-util = { path = "../../util" }
 ethcore-bigint = { path = "../../util/bigint" }
 log = "0.3"
-parity-wasm = "0.12"
+parity-wasm = "0.14"
 wasm-utils = { git = "https://github.com/paritytech/wasm-utils" }
 vm = { path = "../vm" }
 ethcore-logger = { path = "../../logger" }
@@ -19,7 +19,7 @@
 use parity_wasm::elements::ValueType::*;
 use parity_wasm::interpreter::{self, UserFunctionDescriptor};
 use parity_wasm::interpreter::UserFunctionDescriptor::*;
-use super::runtime::Runtime;
+use super::runtime::{Runtime, UserTrap};
 
 pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
 	Static(
@@ -87,6 +87,41 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
 		&[I32; 3],
 		Some(I32),
 	),
+	Static(
+		"_panic",
+		&[I32; 2],
+		None,
+	),
+	Static(
+		"_blockhash",
+		&[I32; 3],
+		Some(I32),
+	),
+	Static(
+		"_coinbase",
+		&[I32],
+		None,
+	),
+	Static(
+		"_timestamp",
+		&[],
+		Some(I32),
+	),
+	Static(
+		"_blocknumber",
+		&[],
+		Some(I32),
+	),
+	Static(
+		"_difficulty",
+		&[I32],
+		None,
+	),
+	Static(
+		"_gaslimit",
+		&[I32],
+		None,
+	),
 
 	// TODO: Get rid of it also somehow?
 	Static(
@@ -102,9 +137,10 @@ pub const SIGNATURES: &'static [UserFunctionDescriptor] = &[
 	),
 ];
 
-pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserFunctions<'a> {
-	interpreter::UserFunctions {
-		executor: runtime,
+pub fn native_bindings<'a>(runtime: &'a mut Runtime) -> interpreter::UserDefinedElements<'a, UserTrap> {
+	interpreter::UserDefinedElements {
+		executor: Some(runtime),
+		globals: ::std::collections::HashMap::new(),
 		functions: ::std::borrow::Cow::from(SIGNATURES),
 	}
 }
@@ -39,21 +39,41 @@ use parity_wasm::{interpreter, elements};
 use parity_wasm::interpreter::ModuleInstanceInterface;
 
 use vm::{GasLeft, ReturnData, ActionParams};
-use self::runtime::{Runtime, RuntimeContext};
+use self::runtime::{Runtime, RuntimeContext, UserTrap};
 
-pub use self::runtime::Error as RuntimeError;
+pub use self::runtime::InterpreterError;
 
 const DEFAULT_RESULT_BUFFER: usize = 1024;
 
+/// Wrapped interpreter error
+#[derive(Debug)]
+pub struct Error(InterpreterError);
+
+impl From<InterpreterError> for Error {
+	fn from(e: InterpreterError) -> Self {
+		Error(e)
+	}
+}
+
+impl From<Error> for vm::Error {
+	fn from(e: Error) -> Self {
+		vm::Error::Wasm(format!("Wasm runtime error: {:?}", e.0))
+	}
+}
+
+impl From<UserTrap> for vm::Error {
+	fn from(e: UserTrap) -> Self { e.into() }
+}
+
 /// Wasm interpreter instance
 pub struct WasmInterpreter {
-	program: interpreter::ProgramInstance,
+	program: runtime::InterpreterProgramInstance,
 	result: Vec<u8>,
 }
 
 impl WasmInterpreter {
 	/// New wasm interpreter instance
-	pub fn new() -> Result<WasmInterpreter, RuntimeError> {
+	pub fn new() -> Result<WasmInterpreter, Error> {
 		Ok(WasmInterpreter {
 			program: interpreter::ProgramInstance::new()?,
 			result: Vec::with_capacity(DEFAULT_RESULT_BUFFER),
@@ -109,7 +129,7 @@ impl vm::Vm for WasmInterpreter {
 				params.value.value(),
 				params.data.unwrap_or(Vec::with_capacity(0)),
 			)
-		)?;
+		).map_err(|e| Error(e))?;
 
 		{
 			let execution_params = runtime.execution_params()
@@ -118,27 +138,30 @@ impl vm::Vm for WasmInterpreter {
 			let module_instance = self.program.add_module("contract", contract_module, Some(&execution_params.externals))
 				.map_err(|err| {
 					trace!(target: "wasm", "Error adding contract module: {:?}", err);
-					vm::Error::from(RuntimeError::Interpreter(err))
+					vm::Error::from(Error(err))
 				})?;
 
-			module_instance.execute_export("_call", execution_params)
-				.map_err(|err| {
+			match module_instance.execute_export("_call", execution_params) {
+				Ok(_) => { },
+				Err(interpreter::Error::User(UserTrap::Suicide)) => { },
+				Err(err) => {
 					trace!(target: "wasm", "Error executing contract: {:?}", err);
-					vm::Error::from(RuntimeError::Interpreter(err))
-				})?;
+					return Err(vm::Error::from(Error(err)))
+				}
+			}
 		}
 
 		let result = result::WasmResult::new(d_ptr);
-		if result.peek_empty(&*runtime.memory())? {
+		if result.peek_empty(&*runtime.memory()).map_err(|e| Error(e))? {
 			trace!(target: "wasm", "Contract execution result is empty.");
 			Ok(GasLeft::Known(runtime.gas_left()?.into()))
 		} else {
 			self.result.clear();
 			// todo: use memory views to avoid copy
-			self.result.extend(result.pop(&*runtime.memory())?);
+			self.result.extend(result.pop(&*runtime.memory()).map_err(|e| Error(e.into()))?);
 			let len = self.result.len();
 			Ok(GasLeft::NeedsReturn {
-				gas_left: runtime.gas_left()?.into(),
+				gas_left: runtime.gas_left().map_err(|e| Error(e.into()))?.into(),
 				data: ReturnData::new(
 					::std::mem::replace(&mut self.result, Vec::with_capacity(DEFAULT_RESULT_BUFFER)),
 					0,
@@ -149,9 +172,3 @@ impl vm::Vm for WasmInterpreter {
 		}
 	}
 }
-
-impl From<runtime::Error> for vm::Error {
-	fn from(err: runtime::Error) -> vm::Error {
-		vm::Error::Wasm(format!("WASM runtime-error: {:?}", err))
-	}
-}
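Note: the `Error(InterpreterError)` newtype introduced above mostly exists so `?` and `map_err` can funnel interpreter failures into `vm::Error` without tripping over the orphan rule. A compact, self-contained sketch of the same plumbing (not part of the diff; `InterpreterError` and `VmError` here are stand-ins):

    // Stand-in for the error type produced by the interpreter crate.
    #[derive(Debug)]
    struct InterpreterError(String);

    // Stand-in for the VM-facing error type callers expect.
    #[derive(Debug)]
    enum VmError { Wasm(String) }

    // Local newtype: we own it, so we may convert both into and out of it.
    #[derive(Debug)]
    struct Error(InterpreterError);

    impl From<InterpreterError> for Error {
        fn from(e: InterpreterError) -> Self { Error(e) }
    }

    impl From<Error> for VmError {
        fn from(e: Error) -> Self { VmError::Wasm(format!("Wasm runtime error: {:?}", e.0)) }
    }

    fn run() -> Result<(), VmError> {
        let failing: Result<(), InterpreterError> = Err(InterpreterError("trap".into()));
        // map_err into the newtype, then `?` converts the newtype into VmError.
        failing.map_err(Error)?;
        Ok(())
    }

    fn main() { let _ = run(); }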
@@ -16,7 +16,7 @@
 
 //! Wasm bound-checked ptr
 
-use parity_wasm::interpreter;
+use super::runtime::{InterpreterMemoryInstance, InterpreterError, UserTrap};
 
 /// Bound-checked wrapper for webassembly memory
 pub struct WasmPtr(u32);
@@ -33,10 +33,16 @@ impl From<u32> for WasmPtr {
 	}
 }
 
+impl From<Error> for InterpreterError {
+	fn from(_e: Error) -> Self {
+		UserTrap::MemoryAccessViolation.into()
+	}
+}
+
 impl WasmPtr {
 	// todo: use memory view when they are on
 	/// Check memory range and return data with given length starting from the current pointer value
-	pub fn slice(&self, len: u32, mem: &interpreter::MemoryInstance) -> Result<Vec<u8>, Error> {
+	pub fn slice(&self, len: u32, mem: &InterpreterMemoryInstance) -> Result<Vec<u8>, Error> {
 		mem.get(self.0, len as usize).map_err(|_| Error::AccessViolation)
 	}
 
@@ -18,10 +18,8 @@
 
 use byteorder::{LittleEndian, ByteOrder};
 
-use parity_wasm::interpreter;
-
 use super::ptr::WasmPtr;
-use super::runtime::Error as RuntimeError;
+use super::runtime::{InterpreterError, InterpreterMemoryInstance};
 
 /// Wrapper for wasm contract call result
 pub struct WasmResult {
@@ -35,13 +33,13 @@ impl WasmResult {
 	}
 
 	/// Check if the result contains any data
-	pub fn peek_empty(&self, mem: &interpreter::MemoryInstance) -> Result<bool, RuntimeError> {
+	pub fn peek_empty(&self, mem: &InterpreterMemoryInstance) -> Result<bool, InterpreterError> {
 		let result_len = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[12..16]);
 		Ok(result_len == 0)
 	}
 
 	/// Consume the result ptr and return the actual data from wasm linear memory
-	pub fn pop(self, mem: &interpreter::MemoryInstance) -> Result<Vec<u8>, RuntimeError> {
+	pub fn pop(self, mem: &InterpreterMemoryInstance) -> Result<Vec<u8>, InterpreterError> {
 		let result_ptr = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[8..12]);
 		let result_len = LittleEndian::read_u32(&self.ptr.slice(16, mem)?[12..16]);
 		trace!(target: "wasm", "contract result: {} bytes at @{}", result_len, result_ptr);
@@ -30,31 +30,68 @@ use vm::CallType;
 use super::ptr::{WasmPtr, Error as PtrError};
 use super::call_args::CallArgs;
 
-/// Wasm runtime error
-#[derive(Debug)]
-pub enum Error {
-	/// Storage error
-	Storage,
-	/// Allocator error
-	Allocator,
-	/// Invalid gas state during the call
-	InvalidGasState,
+/// User trap in native code
+#[derive(Debug, Clone, PartialEq)]
+pub enum UserTrap {
+	/// Storage read error
+	StorageReadError,
+	/// Storage update error
+	StorageUpdateError,
 	/// Memory access violation
-	AccessViolation,
-	/// Interpreter runtime error
-	Interpreter(interpreter::Error),
+	MemoryAccessViolation,
+	/// Native code resulted in suicide
+	Suicide,
+	/// Suicide was requested but coudn't complete
+	SuicideAbort,
+	/// Invalid gas state inside interpreter
+	InvalidGasState,
+	/// Query of the balance resulted in an error
+	BalanceQueryError,
+	/// Failed allocation
+	AllocationFailed,
+	/// Gas limit reached
+	GasLimit,
+	/// Unknown runtime function
+	Unknown,
+	/// Passed string had invalid utf-8 encoding
+	BadUtf8,
+	/// Other error in native code
+	Other,
+	/// Panic with message
+	Panic(String),
 }
 
-impl From<interpreter::Error> for Error {
-	fn from(err: interpreter::Error) -> Self {
-		Error::Interpreter(err)
+impl ::std::fmt::Display for UserTrap {
+	fn fmt(&self, f: &mut ::std::fmt::Formatter) -> Result<(), ::std::fmt::Error> {
+		match *self {
+			UserTrap::StorageReadError => write!(f, "Storage read error"),
+			UserTrap::StorageUpdateError => write!(f, "Storage update error"),
+			UserTrap::MemoryAccessViolation => write!(f, "Memory access violation"),
+			UserTrap::SuicideAbort => write!(f, "Attempt to suicide resulted in an error"),
+			UserTrap::InvalidGasState => write!(f, "Invalid gas state"),
+			UserTrap::BalanceQueryError => write!(f, "Balance query resulted in an error"),
+			UserTrap::Suicide => write!(f, "Suicide result"),
+			UserTrap::Unknown => write!(f, "Unknown runtime function invoked"),
+			UserTrap::AllocationFailed => write!(f, "Memory allocation failed (OOM)"),
+			UserTrap::BadUtf8 => write!(f, "String encoding is bad utf-8 sequence"),
+			UserTrap::GasLimit => write!(f, "Invocation resulted in gas limit violated"),
+			UserTrap::Other => write!(f, "Other unspecified error"),
+			UserTrap::Panic(ref msg) => write!(f, "Panic: {}", msg),
+		}
 	}
 }
 
-impl From<PtrError> for Error {
+impl interpreter::UserError for UserTrap { }
+
+pub type InterpreterError = interpreter::Error<UserTrap>;
+pub type InterpreterMemoryInstance = interpreter::MemoryInstance<UserTrap>;
+pub type InterpreterProgramInstance = interpreter::ProgramInstance<UserTrap>;
+pub type InterpreterCallerContext<'a> = interpreter::CallerContext<'a, UserTrap>;
+
+impl From<PtrError> for UserTrap {
 	fn from(err: PtrError) -> Self {
 		match err {
-			PtrError::AccessViolation => Error::AccessViolation,
+			PtrError::AccessViolation => UserTrap::MemoryAccessViolation,
 		}
 	}
 }
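Note: a self-contained sketch (not part of the diff) of the user-trap pattern adopted above, where the runtime's own `UserTrap` enum is carried inside the interpreter's error type and host functions `map_err` into it; `InterpreterError` below is a stand-in for `interpreter::Error<UserTrap>`:

    use std::fmt;

    // Stand-in for the interpreter error, generic over a user-defined trap type.
    #[derive(Debug)]
    enum InterpreterError<U> { Trap(U) }

    impl From<UserTrap> for InterpreterError<UserTrap> {
        fn from(u: UserTrap) -> Self { InterpreterError::Trap(u) }
    }

    // The runtime's trap vocabulary; Display gives readable trace output.
    #[derive(Debug, Clone, PartialEq)]
    enum UserTrap { StorageUpdateError, GasLimit }

    impl fmt::Display for UserTrap {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            match *self {
                UserTrap::StorageUpdateError => write!(f, "Storage update error"),
                UserTrap::GasLimit => write!(f, "Gas limit reached"),
            }
        }
    }

    // A host function converts low-level failures into typed traps; `.into()`
    // lifts them into the interpreter error, as in `Err(UserTrap::GasLimit.into())`.
    fn charge(gas_left: &mut u64, amount: u64) -> Result<(), InterpreterError<UserTrap>> {
        if *gas_left < amount {
            return Err(UserTrap::GasLimit.into());
        }
        *gas_left -= amount;
        Ok(())
    }

    fn main() {
        let mut gas = 10u64;
        assert!(charge(&mut gas, 5).is_ok());
        assert!(charge(&mut gas, 50).is_err());
    }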
@@ -79,20 +116,20 @@ pub struct Runtime<'a, 'b> {
 	gas_limit: u64,
 	dynamic_top: u32,
 	ext: &'a mut vm::Ext,
-	memory: Arc<interpreter::MemoryInstance>,
+	memory: Arc<InterpreterMemoryInstance>,
 	context: RuntimeContext,
-	instance: &'b interpreter::ProgramInstance,
+	instance: &'b InterpreterProgramInstance,
 }
 
 impl<'a, 'b> Runtime<'a, 'b> {
 	/// New runtime for wasm contract with specified params
 	pub fn with_params<'c, 'd>(
 		ext: &'c mut vm::Ext,
-		memory: Arc<interpreter::MemoryInstance>,
+		memory: Arc<InterpreterMemoryInstance>,
 		stack_space: u32,
 		gas_limit: u64,
 		context: RuntimeContext,
-		program_instance: &'d interpreter::ProgramInstance,
+		program_instance: &'d InterpreterProgramInstance,
 	) -> Runtime<'c, 'd> {
 		Runtime {
 			gas_counter: 0,
@@ -106,30 +143,28 @@ impl<'a, 'b> Runtime<'a, 'b> {
 	}
 
 	/// Write to the storage from wasm memory
-	pub fn storage_write(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn storage_write(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		let mut context = context;
 		let val = self.pop_h256(&mut context)?;
 		let key = self.pop_h256(&mut context)?;
 		trace!(target: "wasm", "storage_write: value {} at @{}", &val, &key);
 
-		self.ext.set_storage(key, val)
-			.map_err(|_| interpreter::Error::Trap("Storage update error".to_owned()))?;
+		self.ext.set_storage(key, val).map_err(|_| UserTrap::StorageUpdateError)?;
 
 		Ok(Some(0i32.into()))
 	}
 
 	/// Read from the storage to wasm memory
-	pub fn storage_read(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn storage_read(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		let mut context = context;
 		let val_ptr = context.value_stack.pop_as::<i32>()?;
 		let key = self.pop_h256(&mut context)?;
 
-		let val = self.ext.storage_at(&key)
-			.map_err(|_| interpreter::Error::Trap("Storage read error".to_owned()))?;
+		let val = self.ext.storage_at(&key).map_err(|_| UserTrap::StorageReadError)?;
 
 		self.memory.set(val_ptr as u32, &*val)?;
 
@@ -137,21 +172,21 @@ impl<'a, 'b> Runtime<'a, 'b> {
 	}
 
 	/// Pass suicide to state runtime
-	pub fn suicide(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn suicide(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		let mut context = context;
 		let refund_address = self.pop_address(&mut context)?;
 
-		self.ext.suicide(&refund_address)
-			.map_err(|_| interpreter::Error::Trap("Suicide error".to_owned()))?;
+		self.ext.suicide(&refund_address).map_err(|_| UserTrap::SuicideAbort)?;
 
-		Ok(None)
+		// We send trap to interpreter so it should abort further execution
+		Err(UserTrap::Suicide.into())
 	}
 
 	/// Invoke create in the state runtime
-	pub fn create(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn create(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		//
 		// method signature:
@@ -172,7 +207,7 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		let code = self.memory.get(code_ptr, code_len as usize)?;
 
 		let gas_left = self.gas_left()
-			.map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))?
+			.map_err(|_| UserTrap::InvalidGasState)?
 			.into();
 
 		match self.ext.create(&gas_left, &endowment, &code, vm::CreateContractAddress::FromSenderAndCodeHash) {
@@ -189,8 +224,8 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		}
 	}
 
-	pub fn call(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn call(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		//
 		// method signature:
@@ -207,8 +242,8 @@ impl<'a, 'b> Runtime<'a, 'b> {
 	}
 
 
-	fn call_code(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	fn call_code(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		//
 		// signature (same as static call):
@@ -227,9 +262,9 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		&mut self,
 		use_val: bool,
 		call_type: CallType,
-		context: interpreter::CallerContext,
+		context: InterpreterCallerContext,
 	)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 
 		trace!(target: "wasm", "runtime: call code");
@@ -255,7 +290,7 @@ impl<'a, 'b> Runtime<'a, 'b> {
 
 		if let Some(ref val) = val {
 			let address_balance = self.ext.balance(&self.context.address)
-				.map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))?;
+				.map_err(|_| UserTrap::BalanceQueryError)?;
 
 			if &address_balance < val {
 				trace!(target: "wasm", "runtime: call failed due to balance check");
@@ -266,7 +301,7 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		let mut result = Vec::with_capacity(result_alloc_len as usize);
 		result.resize(result_alloc_len as usize, 0);
 		let gas = self.gas_left()
-			.map_err(|_| interpreter::Error::Trap("Gas state error".to_owned()))?
+			.map_err(|_| UserTrap::InvalidGasState)?
 			.into();
 		// todo: optimize to use memory views once it's in
 		let payload = self.memory.get(input_ptr, input_len as usize)?;
@@ -294,8 +329,8 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		}
 	}
 
-	pub fn static_call(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn static_call(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		// signature (same as code call):
 		// fn (
@@ -311,8 +346,8 @@ impl<'a, 'b> Runtime<'a, 'b> {
 
 
 	/// Allocate memory using the wasm stack params
-	pub fn malloc(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	pub fn malloc(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		let amount = context.value_stack.pop_as::<i32>()? as u32;
 		let previous_top = self.dynamic_top;
@@ -321,21 +356,21 @@ impl<'a, 'b> Runtime<'a, 'b> {
 	}
 
 	/// Allocate memory in wasm memory instance
-	pub fn alloc(&mut self, amount: u32) -> Result<u32, Error> {
+	pub fn alloc(&mut self, amount: u32) -> Result<u32, UserTrap> {
 		let previous_top = self.dynamic_top;
 		self.dynamic_top = previous_top + amount;
 		Ok(previous_top.into())
 	}
 
 	/// Report gas cost with the params passed in wasm stack
-	fn gas(&mut self, context: interpreter::CallerContext)
-		-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
+	fn gas(&mut self, context: InterpreterCallerContext)
+		-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
 	{
 		let amount = context.value_stack.pop_as::<i32>()? as u64;
 		if self.charge_gas(amount) {
 			Ok(None)
 		} else {
-			Err(interpreter::Error::Trap(format!("Gas exceeds limits of {}", self.gas_limit)))
+			Err(UserTrap::GasLimit.into())
 		}
 	}
 
@@ -350,50 +385,50 @@ impl<'a, 'b> Runtime<'a, 'b> {
 		}
 	}
 
-	fn h256_at(&self, ptr: WasmPtr) -> Result<H256, interpreter::Error> {
+	fn h256_at(&self, ptr: WasmPtr) -> Result<H256, InterpreterError> {
 		Ok(H256::from_slice(&ptr.slice(32, &*self.memory)
-			.map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?
+			.map_err(|_| UserTrap::MemoryAccessViolation)?
 		))
 	}
 
-	fn pop_h256(&self, context: &mut interpreter::CallerContext) -> Result<H256, interpreter::Error> {
+	fn pop_h256(&self, context: &mut InterpreterCallerContext) -> Result<H256, InterpreterError> {
 		let ptr = WasmPtr::from_i32(context.value_stack.pop_as::<i32>()?)
-			.map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?;
+			.map_err(|_| UserTrap::MemoryAccessViolation)?;
 		self.h256_at(ptr)
 	}
 
-	fn pop_u256(&self, context: &mut interpreter::CallerContext) -> Result<U256, interpreter::Error> {
+	fn pop_u256(&self, context: &mut InterpreterCallerContext) -> Result<U256, InterpreterError> {
 		let ptr = WasmPtr::from_i32(context.value_stack.pop_as::<i32>()?)
-			.map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?;
+			.map_err(|_| UserTrap::MemoryAccessViolation)?;
 		self.h256_at(ptr).map(Into::into)
 	}
 
-	fn address_at(&self, ptr: WasmPtr) -> Result<Address, interpreter::Error> {
+	fn address_at(&self, ptr: WasmPtr) -> Result<Address, InterpreterError> {
 		Ok(Address::from_slice(&ptr.slice(20, &*self.memory)
-			.map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?
+			.map_err(|_| UserTrap::MemoryAccessViolation)?
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
fn pop_address(&self, context: &mut interpreter::CallerContext) -> Result<Address, interpreter::Error> {
|
fn pop_address(&self, context: &mut InterpreterCallerContext) -> Result<Address, InterpreterError> {
|
||||||
let ptr = WasmPtr::from_i32(context.value_stack.pop_as::<i32>()?)
|
let ptr = WasmPtr::from_i32(context.value_stack.pop_as::<i32>()?)
|
||||||
.map_err(|_| interpreter::Error::Trap("Memory access violation".to_owned()))?;
|
.map_err(|_| UserTrap::MemoryAccessViolation)?;
|
||||||
self.address_at(ptr)
|
self.address_at(ptr)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn user_trap(&mut self, _context: interpreter::CallerContext)
|
fn unknown_trap(&mut self, _context: InterpreterCallerContext)
|
||||||
-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
|
-> Result<Option<interpreter::RuntimeValue>, UserTrap>
|
||||||
{
|
{
|
||||||
Err(interpreter::Error::Trap("unknown trap".to_owned()))
|
Err(UserTrap::Unknown)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn user_noop(&mut self,
|
fn user_noop(&mut self,
|
||||||
_context: interpreter::CallerContext
|
_context: InterpreterCallerContext
|
||||||
) -> Result<Option<interpreter::RuntimeValue>, interpreter::Error> {
|
) -> Result<Option<interpreter::RuntimeValue>, InterpreterError> {
|
||||||
Ok(None)
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write call descriptor to wasm memory
|
/// Write call descriptor to wasm memory
|
||||||
pub fn write_descriptor(&mut self, call_args: CallArgs) -> Result<WasmPtr, Error> {
|
pub fn write_descriptor(&mut self, call_args: CallArgs) -> Result<WasmPtr, InterpreterError> {
|
||||||
let d_ptr = self.alloc(16)?;
|
let d_ptr = self.alloc(16)?;
|
||||||
|
|
||||||
let args_len = call_args.len();
|
let args_len = call_args.len();
|
||||||
@ -417,14 +452,14 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
Ok(d_ptr.into())
|
Ok(d_ptr.into())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn debug_log(&mut self, context: interpreter::CallerContext)
|
fn debug_log(&mut self, context: InterpreterCallerContext)
|
||||||
-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
{
|
{
|
||||||
let msg_len = context.value_stack.pop_as::<i32>()? as u32;
|
let msg_len = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
let msg_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
let msg_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
|
||||||
let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?)
|
let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?)
|
||||||
.map_err(|_| interpreter::Error::Trap("Debug log utf-8 decoding error".to_owned()))?;
|
.map_err(|_| UserTrap::BadUtf8)?;
|
||||||
|
|
||||||
trace!(target: "wasm", "Contract debug message: {}", msg);
|
trace!(target: "wasm", "Contract debug message: {}", msg);
|
||||||
|
|
||||||
@ -432,18 +467,18 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Query current gas left for execution
|
/// Query current gas left for execution
|
||||||
pub fn gas_left(&self) -> Result<u64, Error> {
|
pub fn gas_left(&self) -> Result<u64, UserTrap> {
|
||||||
if self.gas_counter > self.gas_limit { return Err(Error::InvalidGasState); }
|
if self.gas_counter > self.gas_limit { return Err(UserTrap::InvalidGasState); }
|
||||||
Ok(self.gas_limit - self.gas_counter)
|
Ok(self.gas_limit - self.gas_counter)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Shared memory reference
|
/// Shared memory reference
|
||||||
pub fn memory(&self) -> &interpreter::MemoryInstance {
|
pub fn memory(&self) -> &InterpreterMemoryInstance {
|
||||||
&*self.memory
|
&*self.memory
|
||||||
}
|
}
|
||||||
|
|
||||||
fn mem_copy(&self, context: interpreter::CallerContext)
|
fn mem_copy(&self, context: InterpreterCallerContext)
|
||||||
-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
{
|
{
|
||||||
let len = context.value_stack.pop_as::<i32>()? as u32;
|
let len = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
let dst = context.value_stack.pop_as::<i32>()? as u32;
|
let dst = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
@ -459,8 +494,8 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
x >> 24 | x >> 8 & 0xff00 | x << 8 & 0xff0000 | x << 24
|
x >> 24 | x >> 8 & 0xff00 | x << 8 & 0xff0000 | x << 24
|
||||||
}
|
}
|
||||||
|
|
||||||
fn bitswap_i64(&mut self, context: interpreter::CallerContext)
|
fn bitswap_i64(&mut self, context: InterpreterCallerContext)
|
||||||
-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
{
|
{
|
||||||
let x1 = context.value_stack.pop_as::<i32>()?;
|
let x1 = context.value_stack.pop_as::<i32>()?;
|
||||||
let x2 = context.value_stack.pop_as::<i32>()?;
|
let x2 = context.value_stack.pop_as::<i32>()?;
|
||||||
@ -471,13 +506,83 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
self.return_i64(result)
|
self.return_i64(result)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn return_i64(&mut self, val: i64) -> Result<Option<interpreter::RuntimeValue>, interpreter::Error> {
|
fn user_panic(&mut self, context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let msg_len = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
let msg_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
|
||||||
|
let msg = String::from_utf8(self.memory.get(msg_ptr, msg_len as usize)?)
|
||||||
|
.map_err(|_| UserTrap::BadUtf8)?;
|
||||||
|
|
||||||
|
trace!(target: "wasm", "Contract custom panic message: {}", msg);
|
||||||
|
|
||||||
|
Err(UserTrap::Panic(msg).into())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_hash(&mut self, context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let return_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
let block_hi = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
let block_lo = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
|
||||||
|
let block_num = (block_hi as u64) << 32 | block_lo as u64;
|
||||||
|
|
||||||
|
trace!("Requesting block hash for block #{}", block_num);
|
||||||
|
let hash = self.ext.blockhash(&U256::from(block_num));
|
||||||
|
|
||||||
|
self.memory.set(return_ptr, &*hash)?;
|
||||||
|
|
||||||
|
Ok(Some(0i32.into()))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn coinbase(&mut self, context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let return_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
self.memory.set(return_ptr, &*self.ext.env_info().author)?;
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn timestamp(&mut self, _context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let timestamp = self.ext.env_info().timestamp as i64;
|
||||||
|
self.return_i64(timestamp)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn block_number(&mut self, _context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let block_number: u64 = self.ext.env_info().number.into();
|
||||||
|
self.return_i64(block_number as i64)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn difficulty(&mut self, context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let return_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
let difficulty: H256 = self.ext.env_info().difficulty.into();
|
||||||
|
self.memory.set(return_ptr, &*difficulty)?;
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ext_gas_limit(&mut self, context: InterpreterCallerContext)
|
||||||
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
|
{
|
||||||
|
let return_ptr = context.value_stack.pop_as::<i32>()? as u32;
|
||||||
|
let gas_limit: H256 = self.ext.env_info().gas_limit.into();
|
||||||
|
self.memory.set(return_ptr, &*gas_limit)?;
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn return_i64(&mut self, val: i64) -> Result<Option<interpreter::RuntimeValue>, InterpreterError> {
|
||||||
let uval = val as u64;
|
let uval = val as u64;
|
||||||
let hi = (uval >> 32) as i32;
|
let hi = (uval >> 32) as i32;
|
||||||
let lo = (uval << 32 >> 32) as i32;
|
let lo = (uval << 32 >> 32) as i32;
|
||||||
|
|
||||||
let target = self.instance.module("contract")
|
let target = self.instance.module("contract").ok_or(UserTrap::Other)?;
|
||||||
.ok_or(interpreter::Error::Trap("Error locating main execution entry".to_owned()))?;
|
|
||||||
target.execute_export(
|
target.execute_export(
|
||||||
"setTempRet0",
|
"setTempRet0",
|
||||||
self.execution_params().add_argument(
|
self.execution_params().add_argument(
|
||||||
@ -489,7 +594,7 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
))
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn execution_params(&mut self) -> interpreter::ExecutionParams {
|
pub fn execution_params(&mut self) -> interpreter::ExecutionParams<UserTrap> {
|
||||||
use super::env;
|
use super::env;
|
||||||
|
|
||||||
let env_instance = self.instance.module("env")
|
let env_instance = self.instance.module("env")
|
||||||
@ -505,9 +610,9 @@ impl<'a, 'b> Runtime<'a, 'b> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> {
|
impl<'a, 'b> interpreter::UserFunctionExecutor<UserTrap> for Runtime<'a, 'b> {
|
||||||
fn execute(&mut self, name: &str, context: interpreter::CallerContext)
|
fn execute(&mut self, name: &str, context: InterpreterCallerContext)
|
||||||
-> Result<Option<interpreter::RuntimeValue>, interpreter::Error>
|
-> Result<Option<interpreter::RuntimeValue>, InterpreterError>
|
||||||
{
|
{
|
||||||
match name {
|
match name {
|
||||||
"_malloc" => {
|
"_malloc" => {
|
||||||
@ -551,10 +656,31 @@ impl<'a, 'b> interpreter::UserFunctionExecutor for Runtime<'a, 'b> {
|
|||||||
"_llvm_bswap_i64" => {
|
"_llvm_bswap_i64" => {
|
||||||
self.bitswap_i64(context)
|
self.bitswap_i64(context)
|
||||||
},
|
},
|
||||||
|
"_panic" => {
|
||||||
|
self.user_panic(context)
|
||||||
|
},
|
||||||
|
"_blockhash" => {
|
||||||
|
self.block_hash(context)
|
||||||
|
},
|
||||||
|
"_coinbase" => {
|
||||||
|
self.coinbase(context)
|
||||||
|
},
|
||||||
|
"_timestamp" => {
|
||||||
|
self.timestamp(context)
|
||||||
|
},
|
||||||
|
"_blocknumber" => {
|
||||||
|
self.block_number(context)
|
||||||
|
},
|
||||||
|
"_difficulty" => {
|
||||||
|
self.difficulty(context)
|
||||||
|
},
|
||||||
|
"_gaslimit" => {
|
||||||
|
self.ext_gas_limit(context)
|
||||||
|
},
|
||||||
_ => {
|
_ => {
|
||||||
trace!(target: "wasm", "Trapped due to unhandled function: '{}'", name);
|
trace!(target: "wasm", "Trapped due to unhandled function: '{}'", name);
|
||||||
self.user_trap(context)
|
Ok(self.unknown_trap(context)?)
|
||||||
}
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
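Throughout the hunks above, ad-hoc interpreter::Error::Trap(String) values are replaced with a typed UserTrap error that converts into the interpreter error via .into() and the ? operator. The UserTrap definition itself is not part of this section; what follows is only a minimal, self-contained sketch of the shape the call sites above rely on, using a stand-in InterpreterError rather than the real parity-wasm type.

// Sketch only: variant names are taken from the call sites above; the real
// definitions live elsewhere in this change set.
#[derive(Debug, Clone, PartialEq)]
pub enum UserTrap {
	BalanceQueryError,
	InvalidGasState,
	MemoryAccessViolation,
	GasLimit,
	BadUtf8,
	Panic(String),
	Unknown,
	Other,
}

// Stand-in for the interpreter error type aliased as InterpreterError above.
#[derive(Debug)]
pub enum InterpreterError {
	Trap(String),
}

// This conversion is what lets the runtime write `Err(UserTrap::GasLimit.into())`
// and use `?` on results carrying a UserTrap.
impl From<UserTrap> for InterpreterError {
	fn from(trap: UserTrap) -> Self {
		InterpreterError::Trap(format!("{:?}", trap))
	}
}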
@@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use std::sync::Arc;
+use std::collections::HashMap;
 use byteorder::{LittleEndian, ByteOrder};
 use bigint::prelude::U256;
 use bigint::hash::H256;
@@ -87,7 +88,7 @@ fn logger() {
	};

	println!("ext.store: {:?}", ext.store);
-	assert_eq!(gas_left, U256::from(99327));
+	assert_eq!(gas_left, U256::from(99529));
	let address_val: H256 = address.into();
	assert_eq!(
		ext.store.get(&"0100000000000000000000000000000000000000000000000000000000000000".parse().unwrap()).expect("storage key to exist"),
@@ -138,7 +139,7 @@ fn identity() {
		}
	};

-	assert_eq!(gas_left, U256::from(99_672));
+	assert_eq!(gas_left, U256::from(99_762));

	assert_eq!(
		Address::from_slice(&result),
@@ -172,7 +173,7 @@ fn dispersion() {
		}
	};

-	assert_eq!(gas_left, U256::from(99_270));
+	assert_eq!(gas_left, U256::from(99_360));

	assert_eq!(
		result,
@@ -201,7 +202,7 @@ fn suicide_not() {
		}
	};

-	assert_eq!(gas_left, U256::from(99_578));
+	assert_eq!(gas_left, U256::from(99_668));

	assert_eq!(
		result,
@@ -235,7 +236,7 @@ fn suicide() {
		}
	};

-	assert_eq!(gas_left, U256::from(99_621));
+	assert_eq!(gas_left, U256::from(99_699));
	assert!(ext.suicides.contains(&refund));
}

@@ -266,7 +267,7 @@ fn create() {
	assert!(ext.calls.contains(
		&FakeCall {
			call_type: FakeCallType::Create,
-			gas: U256::from(99_674),
+			gas: U256::from(99_734),
			sender_address: None,
			receive_address: None,
			value: Some(1_000_000_000.into()),
@@ -274,7 +275,7 @@ fn create() {
			code_address: None,
		}
	));
-	assert_eq!(gas_left, U256::from(99_596));
+	assert_eq!(gas_left, U256::from(99_686));
}


@@ -308,7 +309,7 @@ fn call_code() {
	assert!(ext.calls.contains(
		&FakeCall {
			call_type: FakeCallType::Call,
-			gas: U256::from(99_069),
+			gas: U256::from(99_129),
			sender_address: Some(sender),
			receive_address: Some(receiver),
			value: None,
@@ -316,7 +317,7 @@ fn call_code() {
			code_address: Some("0d13710000000000000000000000000000000000".parse().unwrap()),
		}
	));
-	assert_eq!(gas_left, U256::from(94144));
+	assert_eq!(gas_left, U256::from(94262));

	// siphash result
	let res = LittleEndian::read_u32(&result[..]);
@@ -353,7 +354,7 @@ fn call_static() {
	assert!(ext.calls.contains(
		&FakeCall {
			call_type: FakeCallType::Call,
-			gas: U256::from(99_069),
+			gas: U256::from(99_129),
			sender_address: Some(sender),
			receive_address: Some(receiver),
			value: None,
@@ -361,7 +362,7 @@ fn call_static() {
			code_address: Some("13077bfb00000000000000000000000000000000".parse().unwrap()),
		}
	));
-	assert_eq!(gas_left, U256::from(94144));
+	assert_eq!(gas_left, U256::from(94262));

	// siphash result
	let res = LittleEndian::read_u32(&result[..]);
@@ -387,7 +388,7 @@ fn realloc() {
			GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
		}
	};
-	assert_eq!(gas_left, U256::from(99432));
+	assert_eq!(gas_left, U256::from(99522));
	assert_eq!(result, vec![0u8; 2]);
}

@@ -413,12 +414,15 @@ fn storage_read() {
		}
	};

-	assert_eq!(gas_left, U256::from(99682));
+	assert_eq!(gas_left, U256::from(99800));
	assert_eq!(Address::from(&result[12..32]), address);
}

macro_rules! reqrep_test {
	($name: expr, $input: expr) => {
+		reqrep_test!($name, $input, vm::EnvInfo::default(), HashMap::new())
+	};
+	($name: expr, $input: expr, $info: expr, $block_hashes: expr) => {
		{
			::ethcore_logger::init_log();
			let code = load_sample!($name);
@@ -428,18 +432,18 @@ macro_rules! reqrep_test {
			params.code = Some(Arc::new(code));
			params.data = Some($input);

-			let (gas_left, result) = {
+			let mut fake_ext = FakeExt::new();
+			fake_ext.info = $info;
+			fake_ext.blockhashes = $block_hashes;
+
			let mut interpreter = wasm_interpreter();
-			let result = interpreter.exec(params, &mut FakeExt::new()).expect("Interpreter to execute without any errors");
-			match result {
+			interpreter.exec(params, &mut fake_ext)
+				.map(|result| match result {
					GasLeft::Known(_) => { panic!("Test is expected to return payload to check"); },
					GasLeft::NeedsReturn { gas_left: gas, data: result, apply_state: _apply } => (gas, result.to_vec()),
+				})
			}
		};
-
-			(gas_left, result)
-			}
-		}
	}
}

// math_* tests check the ability of wasm contract to perform big integer operations
@@ -462,9 +466,9 @@ fn math_add() {
		arg_b.to_big_endian(&mut args[33..65]);
		args.to_vec()
		}
-	);
+	).expect("Interpreter to execute without any errors");

-	assert_eq!(gas_left, U256::from(98087));
+	assert_eq!(gas_left, U256::from(98177));
	assert_eq!(
		U256::from_dec_str("1888888888888888888888888888887").unwrap(),
		(&result[..]).into()
@@ -484,16 +488,16 @@ fn math_mul() {
		arg_b.to_big_endian(&mut args[33..65]);
		args.to_vec()
		}
-	);
+	).expect("Interpreter to execute without any errors");

-	assert_eq!(gas_left, U256::from(97236));
+	assert_eq!(gas_left, U256::from(97326));
	assert_eq!(
		U256::from_dec_str("888888888888888888888888888887111111111111111111111111111112").unwrap(),
		(&result[..]).into()
	);
}

-// substraction
+// subtraction
#[test]
fn math_sub() {
	let (gas_left, result) = reqrep_test!(
@@ -506,15 +510,33 @@ fn math_sub() {
		arg_b.to_big_endian(&mut args[33..65]);
		args.to_vec()
		}
-	);
+	).expect("Interpreter to execute without any errors");

-	assert_eq!(gas_left, U256::from(98131));
+	assert_eq!(gas_left, U256::from(98221));
	assert_eq!(
		U256::from_dec_str("111111111111111111111111111111").unwrap(),
		(&result[..]).into()
	);
}

+// subtraction with overflow
+#[test]
+fn math_sub_with_overflow() {
+	let result = reqrep_test!(
+		"math.wasm",
+		{
+			let mut args = [2u8; 65];
+			let arg_a = U256::from_dec_str("888888888888888888888888888888").unwrap();
+			let arg_b = U256::from_dec_str("999999999999999999999999999999").unwrap();
+			arg_a.to_big_endian(&mut args[1..33]);
+			arg_b.to_big_endian(&mut args[33..65]);
+			args.to_vec()
+		}
+	);
+
+	assert_eq!(result, Err(vm::Error::Wasm("Wasm runtime error: User(Panic(\"arithmetic operation overflow\"))".into())));
+}
+
#[test]
fn math_div() {
	let (gas_left, result) = reqrep_test!(
@@ -527,11 +549,99 @@ fn math_div() {
		arg_b.to_big_endian(&mut args[33..65]);
		args.to_vec()
		}
-	);
+	).expect("Interpreter to execute without any errors");

-	assert_eq!(gas_left, U256::from(91420));
+	assert_eq!(gas_left, U256::from(91510));
	assert_eq!(
		U256::from_dec_str("1125000").unwrap(),
		(&result[..]).into()
	);
}

+// This test checks the ability of wasm contract to invoke
+// varios blockchain runtime methods
+#[test]
+fn externs() {
+	let (gas_left, result) = reqrep_test!(
+		"externs.wasm",
+		Vec::new(),
+		vm::EnvInfo {
+			number: 0x9999999999u64.into(),
+			author: "efefefefefefefefefefefefefefefefefefefef".parse().unwrap(),
+			timestamp: 0x8888888888u64.into(),
+			difficulty: H256::from("0f1f2f3f4f5f6f7f8f9fafbfcfdfefff0d1d2d3d4d5d6d7d8d9dadbdcdddedfd").into(),
+			gas_limit: 0x777777777777u64.into(),
+			last_hashes: Default::default(),
+			gas_used: 0.into(),
+		},
+		{
+			let mut hashes = HashMap::new();
+			hashes.insert(
+				U256::from(0),
+				H256::from("9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d9d")
+			);
+			hashes.insert(
+				U256::from(1),
+				H256::from("7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b7b")
+			);
+			hashes
+		}
+	).expect("Interpreter to execute without any errors");
+
+	assert_eq!(
+		&result[0..64].to_vec(),
+		&vec![
+			0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d, 0x9d,
+			0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,0x7b, 0x7b, 0x7b, 0x7b, 0x7b, 0x7b,
+		],
+		"Block hashes requested and returned do not match"
+	);
+
+	assert_eq!(
+		&result[64..84].to_vec(),
+		&vec![
+			0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef, 0xef,
+		],
+		"Coinbase requested and returned does not match"
+	);
+
+	assert_eq!(
+		&result[84..92].to_vec(),
+		&vec![
+			0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00
+		],
+		"Timestamp requested and returned does not match"
+	);
+
+	assert_eq!(
+		&result[92..100].to_vec(),
+		&vec![
+			0x99, 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00
+		],
+		"Block number requested and returned does not match"
+	);
+
+	assert_eq!(
+		&result[100..132].to_vec(),
+		&vec![
+			0x0f, 0x1f, 0x2f, 0x3f, 0x4f, 0x5f, 0x6f, 0x7f,
+			0x8f, 0x9f, 0xaf, 0xbf, 0xcf, 0xdf, 0xef, 0xff,
+			0x0d, 0x1d, 0x2d, 0x3d, 0x4d, 0x5d, 0x6d, 0x7d,
+			0x8d, 0x9d, 0xad, 0xbd, 0xcd, 0xdd, 0xed, 0xfd,
+		],
+		"Difficulty requested and returned does not match"
+	);
+
+	assert_eq!(
+		&result[132..164].to_vec(),
+		&vec![
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+			0x00, 0x00, 0x77, 0x77, 0x77, 0x77, 0x77, 0x77,
+		],
+		"Gas limit requested and returned does not match"
+	);
+
+	assert_eq!(gas_left, U256::from(97588));
+}
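A note on the expected byte patterns in the externs test above: judging by the assertions, timestamp and block number come back as little-endian u64 values, while difficulty and gas limit come back as 32-byte big-endian hashes. Using the byteorder crate the test file already imports, the timestamp expectation can be reproduced like this:

use byteorder::{ByteOrder, LittleEndian};

fn main() {
	// 0x8888888888 written as a little-endian u64 gives five 0x88 bytes followed
	// by three zero bytes, exactly the slice checked at result[84..92] above.
	let mut buf = [0u8; 8];
	LittleEndian::write_u64(&mut buf, 0x8888888888);
	assert_eq!(buf, [0x88, 0x88, 0x88, 0x88, 0x88, 0x00, 0x00, 0x00]);
}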
js/package-lock.json (generated, 2 lines changed)
@@ -1,6 +1,6 @@
 {
	"name": "Parity",
-	"version": "1.8.19",
+	"version": "1.8.20",
	"lockfileVersion": 1,
	"requires": true,
	"dependencies": {
@@ -1,6 +1,6 @@
 {
	"name": "Parity",
-	"version": "1.8.19",
+	"version": "1.8.20",
	"main": "src/index.parity.js",
	"jsnext:main": "src/index.parity.js",
	"author": "Parity Team <admin@parity.io>",
@ -24,10 +24,8 @@ export default class AccountStore {
|
|||||||
constructor (api) {
|
constructor (api) {
|
||||||
this._api = api;
|
this._api = api;
|
||||||
|
|
||||||
this.loadDefaultAccount()
|
this.subscribeDefaultAccount()
|
||||||
.then(() => this.loadAccounts());
|
.then(() => this.loadAccounts());
|
||||||
|
|
||||||
this.subscribeDefaultAccount();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@action setAccounts = (accounts) => {
|
@action setAccounts = (accounts) => {
|
||||||
@ -60,12 +58,6 @@ export default class AccountStore {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
loadDefaultAccount () {
|
|
||||||
return this._api.parity
|
|
||||||
.defaultAccount()
|
|
||||||
.then((address) => this.setDefaultAccount(address));
|
|
||||||
}
|
|
||||||
|
|
||||||
loadAccounts () {
|
loadAccounts () {
|
||||||
this.setLoading(true);
|
this.setLoading(true);
|
||||||
|
|
||||||
|
@ -61,6 +61,11 @@ class Requests extends Component {
|
|||||||
|
|
||||||
renderRequest (request, extras = {}) {
|
renderRequest (request, extras = {}) {
|
||||||
const { show, transaction } = request;
|
const { show, transaction } = request;
|
||||||
|
|
||||||
|
if (!transaction) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
|
||||||
const state = this.getTransactionState(request);
|
const state = this.getTransactionState(request);
|
||||||
const displayedTransaction = { ...transaction };
|
const displayedTransaction = { ...transaction };
|
||||||
|
|
||||||
@ -164,9 +169,9 @@ class Requests extends Component {
|
|||||||
return (
|
return (
|
||||||
<FormattedMessage
|
<FormattedMessage
|
||||||
id='requests.status.transactionMined'
|
id='requests.status.transactionMined'
|
||||||
defaultMessage='Transaction mined at block #{blockNumber} ({blockHeight} blocks ago)'
|
defaultMessage='Transaction mined at block #{blockNumber} ({blockHeight} confirmations)'
|
||||||
values={ {
|
values={ {
|
||||||
blockHeight: +request.blockHeight,
|
blockHeight: (+request.blockHeight || 0).toString(),
|
||||||
blockNumber: +transactionReceipt.blockNumber
|
blockNumber: +transactionReceipt.blockNumber
|
||||||
} }
|
} }
|
||||||
/>
|
/>
|
||||||
@@ -208,7 +208,9 @@ fn execute_import_light(cmd: ImportBlockchain) -> Result<(), String> {
	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
	config.queue.verifier_settings = cmd.verifier_settings;

-	let service = LightClientService::start(config, &spec, &client_path, cache)
+	// TODO: could epoch signals be avilable at the end of the file?
+	let fetch = ::light::client::fetch::unavailable();
+	let service = LightClientService::start(config, &spec, fetch, &client_path, cache)
		.map_err(|e| format!("Failed to start client: {}", e))?;

	// free up the spec in memory.
@@ -25,7 +25,7 @@ use futures::{future, IntoFuture, Future, BoxFuture};
 use hash_fetch::fetch::Client as FetchClient;
 use hash_fetch::urlhint::ContractClient;
 use helpers::replace_home;
-use light::client::Client as LightClient;
+use light::client::LightChainClient;
 use light::on_demand::{self, OnDemand};
 use node_health::{SyncStatus, NodeHealth};
 use rpc;
@@ -87,16 +88,16 @@ impl ContractClient for FullRegistrar {
}

/// Registrar implementation for the light client.
-pub struct LightRegistrar {
+pub struct LightRegistrar<T> {
	/// The light client.
-	pub client: Arc<LightClient>,
+	pub client: Arc<T>,
	/// Handle to the on-demand service.
	pub on_demand: Arc<OnDemand>,
	/// Handle to the light network service.
	pub sync: Arc<LightSync>,
}

-impl ContractClient for LightRegistrar {
+impl<T: LightChainClient + 'static> ContractClient for LightRegistrar<T> {
	fn registrar(&self) -> Result<Address, String> {
		self.client.engine().additional_params().get("registrar")
			.ok_or_else(|| "Registrar not defined.".into())
@@ -106,7 +106,14 @@ impl ContractClient for LightRegistrar {
	}

	fn call(&self, address: Address, data: Bytes) -> BoxFuture<Bytes, String> {
-		let (header, env_info) = (self.client.best_block_header(), self.client.latest_env_info());
+		let header = self.client.best_block_header();
+		let env_info = self.client.env_info(BlockId::Hash(header.hash()))
+			.ok_or_else(|| format!("Cannot fetch env info for header {}", header.hash()));
+
+		let env_info = match env_info {
+			Ok(x) => x,
+			Err(e) => return future::err(e).boxed(),
+		};
+
		let maybe_future = self.sync.with_context(move |ctx| {
			self.on_demand
@@ -22,7 +22,7 @@ use std::sync::{Arc};
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
 use std::time::{Instant, Duration};

-use ethcore::client::*;
+use ethcore::client::{BlockId, BlockChainClient, BlockChainInfo, BlockQueueInfo, ChainNotify, ClientReport, Client};
 use ethcore::header::BlockNumber;
 use ethcore::service::ClientIoMessage;
 use ethcore::snapshot::{RestorationStatus, SnapshotService as SS};
parity/light_helpers/epoch_fetch.rs (new file, 90 lines)
@@ -0,0 +1,90 @@
+// Copyright 2015-2017 Parity Technologies (UK) Ltd.
+// This file is part of Parity.
+
+// Parity is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Parity is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Parity. If not, see <http://www.gnu.org/licenses/>.
+
+use std::sync::{Arc, Weak};
+
+use ethcore::encoded;
+use ethcore::engines::{Engine, StateDependentProof};
+use ethcore::header::Header;
+use ethcore::receipt::Receipt;
+use ethsync::LightSync;
+
+use futures::{future, Future, BoxFuture};
+
+use light::client::fetch::ChainDataFetcher;
+use light::on_demand::{request, OnDemand};
+
+use parking_lot::RwLock;
+use bigint::hash::H256;
+
+const ALL_VALID_BACKREFS: &str = "no back-references, therefore all back-references valid; qed";
+
+/// Allows on-demand fetch of data useful for the light client.
+pub struct EpochFetch {
+	/// A handle to the sync service.
+	pub sync: Arc<RwLock<Weak<LightSync>>>,
+	/// The on-demand request service.
+	pub on_demand: Arc<OnDemand>,
+}
+
+impl EpochFetch {
+	fn request<T>(&self, req: T) -> BoxFuture<T::Out, &'static str>
+		where T: Send + request::RequestAdapter + 'static, T::Out: Send + 'static
+	{
+		match self.sync.read().upgrade() {
+			Some(sync) => {
+				let on_demand = &self.on_demand;
+				let maybe_future = sync.with_context(move |ctx| {
+					on_demand.request(ctx, req).expect(ALL_VALID_BACKREFS)
+				});
+
+				match maybe_future {
+					Some(x) => x.map_err(|_| "Request canceled").boxed(),
+					None => future::err("Unable to access network.").boxed(),
+				}
+			}
+			None => future::err("Unable to access network").boxed(),
+		}
+	}
+}
+
+impl ChainDataFetcher for EpochFetch {
+	type Error = &'static str;
+
+	type Body = BoxFuture<encoded::Block, &'static str>;
+	type Receipts = BoxFuture<Vec<Receipt>, &'static str>;
+	type Transition = BoxFuture<Vec<u8>, &'static str>;
+
+	fn block_body(&self, header: &Header) -> Self::Body {
+		self.request(request::Body(header.encoded().into()))
+	}
+
+	/// Fetch block receipts.
+	fn block_receipts(&self, header: &Header) -> Self::Receipts {
+		self.request(request::BlockReceipts(header.encoded().into()))
+	}
+
+	/// Fetch epoch transition proof at given header.
+	fn epoch_transition(&self, hash: H256, engine: Arc<Engine>, checker: Arc<StateDependentProof>)
+		-> Self::Transition
+	{
+		self.request(request::Signal {
+			hash: hash,
+			engine: engine,
+			proof_check: checker,
+		})
+	}
+}
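The two public fields are wired up by the caller: the weak sync handle starts out empty and is back-filled once the LightSync service exists. The sketch below condenses how the parity/run.rs hunk later in this diff does it; the helper function and its name are illustrative only, while the types are the ones used by EpochFetch above.

use std::sync::{Arc, Weak};
use parking_lot::RwLock;

// Illustrative helper (not in the code base): create the fetcher with an empty
// weak handle, start the client with it, then point the handle at LightSync.
fn wire_epoch_fetch(on_demand: Arc<OnDemand>) -> (EpochFetch, Arc<RwLock<Weak<LightSync>>>) {
	let sync_handle = Arc::new(RwLock::new(Weak::new()));
	let fetch = EpochFetch {
		on_demand: on_demand,
		sync: sync_handle.clone(),
	};
	// later, once the sync service is built:
	// *sync_handle.write() = Arc::downgrade(&light_sync);
	(fetch, sync_handle)
}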
@@ -16,6 +16,8 @@

 //! Utilities and helpers for the light client.

+mod epoch_fetch;
 mod queue_cull;

+pub use self::epoch_fetch::EpochFetch;
 pub use self::queue_cull::QueueCull;
@@ -23,7 +23,7 @@ use ethcore::service::ClientIoMessage;
 use ethsync::LightSync;
 use io::{IoContext, IoHandler, TimerToken};

-use light::client::Client;
+use light::client::LightChainClient;
 use light::on_demand::{request, OnDemand};
 use light::TransactionQueue;

@@ -41,9 +41,9 @@ const TIMEOUT_MS: u64 = 1000 * 60 * 10;
 const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9;

 /// Periodically culls the transaction queue of mined transactions.
-pub struct QueueCull {
+pub struct QueueCull<T> {
	/// A handle to the client, for getting the latest block header.
-	pub client: Arc<Client>,
+	pub client: Arc<T>,
	/// A handle to the sync service.
	pub sync: Arc<LightSync>,
	/// The on-demand request service.
@@ -54,7 +54,7 @@ pub struct QueueCull {
	pub remote: Remote,
}

-impl IoHandler<ClientIoMessage> for QueueCull {
+impl<T: LightChainClient + 'static> IoHandler<ClientIoMessage> for QueueCull<T> {
	fn initialize(&self, io: &IoContext<ClientIoMessage>) {
		io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer");
	}
@ -32,6 +32,7 @@ use ethsync::{ManageNetwork, SyncProvider, LightSync};
|
|||||||
use hash_fetch::fetch::Client as FetchClient;
|
use hash_fetch::fetch::Client as FetchClient;
|
||||||
use jsonrpc_core::{self as core, MetaIoHandler};
|
use jsonrpc_core::{self as core, MetaIoHandler};
|
||||||
use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
|
use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache};
|
||||||
|
use light::client::LightChainClient;
|
||||||
use node_health::NodeHealth;
|
use node_health::NodeHealth;
|
||||||
use parity_reactor;
|
use parity_reactor;
|
||||||
use parity_rpc::dispatch::{FullDispatcher, LightDispatcher};
|
use parity_rpc::dispatch::{FullDispatcher, LightDispatcher};
|
||||||
@ -353,14 +354,14 @@ impl FullDependencies {
|
|||||||
},
|
},
|
||||||
Api::Whisper => {
|
Api::Whisper => {
|
||||||
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
||||||
let whisper = whisper_rpc.make_handler();
|
let whisper = whisper_rpc.make_handler(self.net.clone());
|
||||||
handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper));
|
handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Api::WhisperPubSub => {
|
Api::WhisperPubSub => {
|
||||||
if !for_generic_pubsub {
|
if !for_generic_pubsub {
|
||||||
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
||||||
let whisper = whisper_rpc.make_handler();
|
let whisper = whisper_rpc.make_handler(self.net.clone());
|
||||||
handler.extend_with(
|
handler.extend_with(
|
||||||
::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper)
|
::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper)
|
||||||
);
|
);
|
||||||
@ -398,9 +399,9 @@ impl ActivityNotifier for LightClientNotifier {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// RPC dependencies for a light client.
|
/// RPC dependencies for a light client.
|
||||||
pub struct LightDependencies {
|
pub struct LightDependencies<T> {
|
||||||
pub signer_service: Arc<SignerService>,
|
pub signer_service: Arc<SignerService>,
|
||||||
pub client: Arc<::light::client::Client>,
|
pub client: Arc<T>,
|
||||||
pub sync: Arc<LightSync>,
|
pub sync: Arc<LightSync>,
|
||||||
pub net: Arc<ManageNetwork>,
|
pub net: Arc<ManageNetwork>,
|
||||||
pub secret_store: Arc<AccountProvider>,
|
pub secret_store: Arc<AccountProvider>,
|
||||||
@ -419,7 +420,7 @@ pub struct LightDependencies {
|
|||||||
pub whisper_rpc: Option<::whisper::RpcFactory>,
|
pub whisper_rpc: Option<::whisper::RpcFactory>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LightDependencies {
|
impl<C: LightChainClient + 'static> LightDependencies<C> {
|
||||||
fn extend_api<T: core::Middleware<Metadata>>(
|
fn extend_api<T: core::Middleware<Metadata>>(
|
||||||
&self,
|
&self,
|
||||||
handler: &mut MetaIoHandler<Metadata, T>,
|
handler: &mut MetaIoHandler<Metadata, T>,
|
||||||
@ -553,13 +554,13 @@ impl LightDependencies {
|
|||||||
},
|
},
|
||||||
Api::Whisper => {
|
Api::Whisper => {
|
||||||
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
||||||
let whisper = whisper_rpc.make_handler();
|
let whisper = whisper_rpc.make_handler(self.net.clone());
|
||||||
handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper));
|
handler.extend_with(::parity_whisper::rpc::Whisper::to_delegate(whisper));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Api::WhisperPubSub => {
|
Api::WhisperPubSub => {
|
||||||
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
if let Some(ref whisper_rpc) = self.whisper_rpc {
|
||||||
let whisper = whisper_rpc.make_handler();
|
let whisper = whisper_rpc.make_handler(self.net.clone());
|
||||||
handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper));
|
handler.extend_with(::parity_whisper::rpc::WhisperPubSub::to_delegate(whisper));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -568,7 +569,7 @@ impl LightDependencies {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Dependencies for LightDependencies {
|
impl<T: LightChainClient + 'static> Dependencies for LightDependencies<T> {
|
||||||
type Notifier = LightClientNotifier;
|
type Notifier = LightClientNotifier;
|
||||||
|
|
||||||
fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier }
|
fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier }
|
||||||
|
@@ -223,7 +223,16 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
	config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024;
	config.queue.verifier_settings = cmd.verifier_settings;

-	let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm), cache.clone())
+	// start on_demand service.
+	let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
+
+	let sync_handle = Arc::new(RwLock::new(Weak::new()));
+	let fetch = ::light_helpers::EpochFetch {
+		on_demand: on_demand.clone(),
+		sync: sync_handle.clone(),
+	};
+
+	let service = light_client::Service::start(config, &spec, fetch, &db_dirs.client_path(algorithm), cache.clone())
		.map_err(|e| format!("Error starting light client: {}", e))?;
	let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default()));
	let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone());
@@ -235,15 +244,10 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
		net_conf.boot_nodes = spec.nodes.clone();
	}

-	// start on_demand service.
-	let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone()));
-
	let mut attached_protos = Vec::new();
	let whisper_factory = if cmd.whisper.enabled {
-		let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size)
+		let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos)
			.map_err(|e| format!("Failed to initialize whisper: {}", e))?;

-		attached_protos.push(whisper_net);
		whisper_factory
	} else {
		None
@@ -261,6 +265,7 @@ fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) ->
	};
	let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?;
	let light_sync = Arc::new(light_sync);
+	*sync_handle.write() = Arc::downgrade(&light_sync);

	// spin up event loop
	let event_loop = EventLoop::spawn();
@@ -631,10 +636,9 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc<RotatingLogger>) -> R
	let mut attached_protos = Vec::new();

	let whisper_factory = if cmd.whisper.enabled {
-		let (whisper_net, whisper_factory) = ::whisper::setup(cmd.whisper.target_message_pool_size)
+		let whisper_factory = ::whisper::setup(cmd.whisper.target_message_pool_size, &mut attached_protos)
			.map_err(|e| format!("Failed to initialize whisper: {}", e))?;

-		attached_protos.push(whisper_net);
		whisper_factory
	} else {
		None
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::io;
|
use std::io;
|
||||||
|
|
||||||
use ethsync::AttachedProtocol;
|
use ethsync::{AttachedProtocol, ManageNetwork};
|
||||||
use parity_rpc::Metadata;
|
use parity_rpc::Metadata;
|
||||||
use parity_whisper::net::{self as whisper_net, PoolHandle, Network as WhisperNetwork};
|
use parity_whisper::message::Message;
|
||||||
use parity_whisper::rpc::{WhisperClient, FilterManager};
|
use parity_whisper::net::{self as whisper_net, Network as WhisperNetwork};
|
||||||
|
use parity_whisper::rpc::{WhisperClient, PoolHandle, FilterManager};
|
||||||
|
|
||||||
/// Whisper config.
|
/// Whisper config.
|
||||||
#[derive(Debug, PartialEq, Eq)]
|
#[derive(Debug, PartialEq, Eq)]
|
||||||
@ -38,6 +39,31 @@ impl Default for Config {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Standard pool handle.
|
||||||
|
pub struct NetPoolHandle {
|
||||||
|
/// Pool handle.
|
||||||
|
handle: Arc<WhisperNetwork<Arc<FilterManager>>>,
|
||||||
|
/// Network manager.
|
||||||
|
net: Arc<ManageNetwork>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PoolHandle for NetPoolHandle {
|
||||||
|
fn relay(&self, message: Message) -> bool {
|
||||||
|
let mut res = false;
|
||||||
|
let mut message = Some(message);
|
||||||
|
self.net.with_proto_context(whisper_net::PROTOCOL_ID, &mut move |ctx| {
|
||||||
|
if let Some(message) = message.take() {
|
||||||
|
res = self.handle.post_message(message, ctx);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
res
|
||||||
|
}
|
||||||
|
|
||||||
|
fn pool_status(&self) -> whisper_net::PoolStatus {
|
||||||
|
self.handle.pool_status()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Factory for standard whisper RPC.
|
/// Factory for standard whisper RPC.
|
||||||
pub struct RpcFactory {
|
pub struct RpcFactory {
|
||||||
net: Arc<WhisperNetwork<Arc<FilterManager>>>,
|
net: Arc<WhisperNetwork<Arc<FilterManager>>>,
|
||||||
@ -45,8 +71,9 @@ pub struct RpcFactory {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl RpcFactory {
|
impl RpcFactory {
|
||||||
pub fn make_handler(&self) -> WhisperClient<PoolHandle, Metadata> {
|
pub fn make_handler(&self, net: Arc<ManageNetwork>) -> WhisperClient<NetPoolHandle, Metadata> {
|
||||||
WhisperClient::new(self.net.handle(), self.manager.clone())
|
let handle = NetPoolHandle { handle: self.net.clone(), net: net };
|
||||||
|
WhisperClient::new(handle, self.manager.clone())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,24 +81,36 @@ impl RpcFactory {
|
|||||||
///
|
///
|
||||||
/// Will target the given pool size.
|
/// Will target the given pool size.
|
||||||
#[cfg(not(feature = "ipc"))]
|
#[cfg(not(feature = "ipc"))]
|
||||||
pub fn setup(target_pool_size: usize) -> io::Result<(AttachedProtocol, Option<RpcFactory>)> {
|
pub fn setup(target_pool_size: usize, protos: &mut Vec<AttachedProtocol>)
|
||||||
|
-> io::Result<Option<RpcFactory>>
|
||||||
|
{
|
||||||
let manager = Arc::new(FilterManager::new()?);
|
let manager = Arc::new(FilterManager::new()?);
|
||||||
let net = Arc::new(WhisperNetwork::new(target_pool_size, manager.clone()));
|
let net = Arc::new(WhisperNetwork::new(target_pool_size, manager.clone()));
|
||||||
|
|
||||||
let proto = AttachedProtocol {
|
protos.push(AttachedProtocol {
|
||||||
handler: net.clone() as Arc<_>,
|
handler: net.clone() as Arc<_>,
|
||||||
packet_count: whisper_net::PACKET_COUNT,
|
packet_count: whisper_net::PACKET_COUNT,
|
||||||
versions: whisper_net::SUPPORTED_VERSIONS,
|
versions: whisper_net::SUPPORTED_VERSIONS,
|
||||||
protocol_id: *b"shh",
|
protocol_id: whisper_net::PROTOCOL_ID,
|
||||||
};
|
});
|
||||||
|
|
||||||
|
// parity-only extensions to whisper.
|
||||||
|
protos.push(AttachedProtocol {
|
||||||
|
handler: Arc::new(whisper_net::ParityExtensions),
|
||||||
|
packet_count: whisper_net::PACKET_COUNT,
|
||||||
|
versions: whisper_net::SUPPORTED_VERSIONS,
|
||||||
|
protocol_id: whisper_net::PARITY_PROTOCOL_ID,
|
||||||
|
});
|
||||||
|
|
||||||
let factory = RpcFactory { net: net, manager: manager };
|
let factory = RpcFactory { net: net, manager: manager };
|
||||||
|
|
||||||
Ok((proto, Some(factory)))
|
Ok(Some(factory))
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: make it possible to attach generic protocols in IPC.
|
// TODO: make it possible to attach generic protocols in IPC.
|
||||||
#[cfg(feature = "ipc")]
|
#[cfg(feature = "ipc")]
|
||||||
pub fn setup(_pool: usize) -> (AttachedProtocol, Option<RpcFactory>) {
|
pub fn setup(_target_pool_size: usize, _protos: &mut Vec<AttachedProtocol>)
|
||||||
Ok((AttachedProtocol, None))
|
-> io::Result<Option<RpcFactory>>
|
||||||
|
{
|
||||||
|
Ok(None)
|
||||||
}
|
}
|
||||||
|
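For callers, the practical difference is that setup() now registers both whisper protocols on the Vec it is given and only returns the RPC factory, and make_handler() needs a network handle so relayed messages can reach the devp2p layer. A condensed usage sketch, matching what the run.rs hunks above do (error handling trimmed, the helper name and locals are illustrative):

// Assumes the imports of this module (Arc, io, ManageNetwork, Metadata,
// WhisperClient, NetPoolHandle, AttachedProtocol).
fn start_whisper(pool_size: usize, net: Arc<ManageNetwork>)
	-> io::Result<Option<WhisperClient<NetPoolHandle, Metadata>>>
{
	let mut attached_protos = Vec::new();
	// Pushes the standard and parity-only whisper protocols onto the Vec.
	let factory = setup(pool_size, &mut attached_protos)?;
	Ok(factory.map(|factory| factory.make_handler(net)))
}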
@@ -62,5 +62,8 @@ hash = { path = "../util/hash" }
 clippy = { version = "0.0.103", optional = true}
 pretty_assertions = "0.1"

+[dev-dependencies]
+ethcore-network = { path = "../util/network" }
+
 [features]
 dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev"]
@@ -211,11 +211,18 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {
	}

	fn filter_logs(&self, index: Index) -> BoxFuture<Vec<Log>, Error> {
+		let filter = {
			let mut polls = self.polls().lock();
+
			match polls.poll(&index.value()) {
-				Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => {
+				Some(&PollFilter::Logs(ref _block_number, ref _previous_log, ref filter)) => filter.clone(),
+				// just empty array
+				_ => return future::ok(Vec::new()).boxed(),
+			}
+		};
+
		let include_pending = filter.to_block == Some(BlockNumber::Pending);
-		let filter: EthcoreFilter = filter.clone().into();
+		let filter: EthcoreFilter = filter.into();

		// fetch pending logs.
		let pending = if include_pending {
@@ -227,14 +234,12 @@ impl<T: Filterable + Send + Sync + 'static> EthFilter for T {

		// retrieve logs asynchronously, appending pending logs.
		let limit = filter.limit;
-		self.logs(filter)
+		let logs = self.logs(filter);
+		let res = logs
			.map(move |mut logs| { logs.extend(pending); logs })
			.map(move |logs| limit_logs(logs, limit))
-			.boxed()
-			},
-			// just empty array
-			_ => future::ok(Vec::new()).boxed()
-		}
+			.boxed();
+		res
	}

	fn uninstall_filter(&self, index: Index) -> Result<bool, Error> {
@ -25,7 +25,7 @@ use jsonrpc_core::Error;
|
|||||||
use jsonrpc_macros::Trailing;
|
use jsonrpc_macros::Trailing;
|
||||||
|
|
||||||
use light::cache::Cache as LightDataCache;
|
use light::cache::Cache as LightDataCache;
|
||||||
use light::client::{Client as LightClient, LightChainClient};
|
use light::client::LightChainClient;
|
||||||
use light::{cht, TransactionQueue};
|
use light::{cht, TransactionQueue};
|
||||||
use light::on_demand::{request, OnDemand};
|
use light::on_demand::{request, OnDemand};
|
||||||
|
|
||||||
@ -63,9 +63,9 @@ use util::Address;
|
|||||||
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
|
const NO_INVALID_BACK_REFS: &'static str = "Fails only on invalid back-references; back-references here known to be valid; qed";
|
||||||
|
|
||||||
/// Light client `ETH` (and filter) RPC.
|
/// Light client `ETH` (and filter) RPC.
|
||||||
pub struct EthClient {
|
pub struct EthClient<T> {
|
||||||
sync: Arc<LightSync>,
|
sync: Arc<LightSync>,
|
||||||
client: Arc<LightClient>,
|
client: Arc<T>,
|
||||||
on_demand: Arc<OnDemand>,
|
on_demand: Arc<OnDemand>,
|
||||||
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
||||||
accounts: Arc<AccountProvider>,
|
accounts: Arc<AccountProvider>,
|
||||||
@ -73,7 +73,7 @@ pub struct EthClient {
|
|||||||
polls: Mutex<PollManager<PollFilter>>,
|
polls: Mutex<PollManager<PollFilter>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Clone for EthClient {
|
impl<T> Clone for EthClient<T> {
|
||||||
fn clone(&self) -> Self {
|
fn clone(&self) -> Self {
|
||||||
// each instance should have its own poll manager.
|
// each instance should have its own poll manager.
|
||||||
EthClient {
|
EthClient {
|
||||||
@ -89,12 +89,12 @@ impl Clone for EthClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
impl EthClient {
|
impl<T: LightChainClient + 'static> EthClient<T> {
|
||||||
/// Create a new `EthClient` with a handle to the light sync instance, client,
|
/// Create a new `EthClient` with a handle to the light sync instance, client,
|
||||||
/// and on-demand request service, which is assumed to be attached as a handler.
|
/// and on-demand request service, which is assumed to be attached as a handler.
|
||||||
pub fn new(
|
pub fn new(
|
||||||
sync: Arc<LightSync>,
|
sync: Arc<LightSync>,
|
||||||
client: Arc<LightClient>,
|
client: Arc<T>,
|
||||||
on_demand: Arc<OnDemand>,
|
on_demand: Arc<OnDemand>,
|
||||||
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
transaction_queue: Arc<RwLock<TransactionQueue>>,
|
||||||
accounts: Arc<AccountProvider>,
|
accounts: Arc<AccountProvider>,
|
||||||
@ -209,7 +209,7 @@ impl EthClient {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Eth for EthClient {
|
impl<T: LightChainClient + 'static> Eth for EthClient<T> {
|
||||||
type Metadata = Metadata;
|
type Metadata = Metadata;
|
||||||
|
|
||||||
fn protocol_version(&self) -> Result<String, Error> {
|
fn protocol_version(&self) -> Result<String, Error> {
|
||||||
@ -466,7 +466,7 @@ impl Eth for EthClient {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// This trait implementation triggers a blanked impl of `EthFilter`.
|
// This trait implementation triggers a blanked impl of `EthFilter`.
|
||||||
impl Filterable for EthClient {
|
impl<T: LightChainClient + 'static> Filterable for EthClient<T> {
|
||||||
fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number }
|
fn best_block_number(&self) -> u64 { self.client.chain_info().best_block_number }
|
||||||
|
|
||||||
fn block_hash(&self, id: BlockId) -> Option<RpcH256> {
|
fn block_hash(&self, id: BlockId) -> Option<RpcH256> {
|
||||||
|
@ -15,6 +15,9 @@
|
|||||||
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
use ethsync::{ManageNetwork, NetworkConfiguration};
|
use ethsync::{ManageNetwork, NetworkConfiguration};
|
||||||
|
use self::ethcore_network::{ProtocolId, NetworkContext};
|
||||||
|
|
||||||
|
extern crate ethcore_network;
|
||||||
|
|
||||||
pub struct TestManageNetwork;
|
pub struct TestManageNetwork;
|
||||||
|
|
||||||
@ -27,4 +30,5 @@ impl ManageNetwork for TestManageNetwork {
|
|||||||
fn start_network(&self) {}
|
fn start_network(&self) {}
|
||||||
fn stop_network(&self) {}
|
fn stop_network(&self) {}
|
||||||
fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::new_local() }
|
fn network_config(&self) -> NetworkConfiguration { NetworkConfiguration::new_local() }
|
||||||
|
fn with_proto_context(&self, _: ProtocolId, _: &mut FnMut(&NetworkContext)) { }
|
||||||
}
|
}
|
||||||
|
@ -497,6 +497,8 @@ pub trait ManageNetwork : Send + Sync {
|
|||||||
fn stop_network(&self);
|
fn stop_network(&self);
|
||||||
/// Query the current configuration of the network
|
/// Query the current configuration of the network
|
||||||
fn network_config(&self) -> NetworkConfiguration;
|
fn network_config(&self) -> NetworkConfiguration;
|
||||||
|
/// Get network context for protocol.
|
||||||
|
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -538,6 +540,10 @@ impl ManageNetwork for EthSync {
|
|||||||
fn network_config(&self) -> NetworkConfiguration {
|
fn network_config(&self) -> NetworkConfiguration {
|
||||||
NetworkConfiguration::from(self.network.config().clone())
|
NetworkConfiguration::from(self.network.config().clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) {
|
||||||
|
self.network.with_context_eval(proto, f);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||||
@ -808,6 +814,10 @@ impl ManageNetwork for LightSync {
|
|||||||
fn network_config(&self) -> NetworkConfiguration {
|
fn network_config(&self) -> NetworkConfiguration {
|
||||||
NetworkConfiguration::from(self.network.config().clone())
|
NetworkConfiguration::from(self.network.config().clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn with_proto_context(&self, proto: ProtocolId, f: &mut FnMut(&NetworkContext)) {
|
||||||
|
self.network.with_context_eval(proto, f);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl LightSyncProvider for LightSync {
|
impl LightSyncProvider for LightSync {
|
||||||
|
@ -2244,7 +2244,7 @@ mod tests {
|
|||||||
use super::{PeerInfo, PeerAsking};
|
use super::{PeerInfo, PeerAsking};
|
||||||
use ethkey;
|
use ethkey;
|
||||||
use ethcore::header::*;
|
use ethcore::header::*;
|
||||||
use ethcore::client::*;
|
use ethcore::client::{BlockChainClient, EachBlockWith, TestBlockChainClient};
|
||||||
use ethcore::transaction::UnverifiedTransaction;
|
use ethcore::transaction::UnverifiedTransaction;
|
||||||
use ethcore::miner::MinerService;
|
use ethcore::miner::MinerService;
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ use tests::helpers::{TestNet, Peer as PeerLike, TestPacket};
|
|||||||
use ethcore::client::TestBlockChainClient;
|
use ethcore::client::TestBlockChainClient;
|
||||||
use ethcore::spec::Spec;
|
use ethcore::spec::Spec;
|
||||||
use io::IoChannel;
|
use io::IoChannel;
|
||||||
use light::client::Client as LightClient;
|
use light::client::fetch::{self, Unavailable};
|
||||||
use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams};
|
use light::net::{LightProtocol, IoContext, Capabilities, Params as LightParams};
|
||||||
use light::provider::LightProvider;
|
use light::provider::LightProvider;
|
||||||
use network::{NodeId, PeerId};
|
use network::{NodeId, PeerId};
|
||||||
@ -36,6 +36,8 @@ use light::cache::Cache;
|
|||||||
|
|
||||||
const NETWORK_ID: u64 = 0xcafebabe;
|
const NETWORK_ID: u64 = 0xcafebabe;
|
||||||
|
|
||||||
|
pub type LightClient = ::light::client::Client<Unavailable>;
|
||||||
|
|
||||||
struct TestIoContext<'a> {
|
struct TestIoContext<'a> {
|
||||||
queue: &'a RwLock<VecDeque<TestPacket>>,
|
queue: &'a RwLock<VecDeque<TestPacket>>,
|
||||||
sender: Option<PeerId>,
|
sender: Option<PeerId>,
|
||||||
@ -216,7 +218,14 @@ impl TestNet<Peer> {
|
|||||||
// skip full verification because the blocks are bad.
|
// skip full verification because the blocks are bad.
|
||||||
config.verify_full = false;
|
config.verify_full = false;
|
||||||
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
let cache = Arc::new(Mutex::new(Cache::new(Default::default(), Duration::hours(6))));
|
||||||
let client = LightClient::in_memory(config, &Spec::new_test(), IoChannel::disconnected(), cache);
|
let client = LightClient::in_memory(
|
||||||
|
config,
|
||||||
|
&Spec::new_test(),
|
||||||
|
fetch::unavailable(), // TODO: allow fetch from full nodes.
|
||||||
|
IoChannel::disconnected(),
|
||||||
|
cache
|
||||||
|
);
|
||||||
|
|
||||||
peers.push(Arc::new(Peer::new_light(Arc::new(client))))
|
peers.push(Arc::new(Peer::new_light(Arc::new(client))))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -71,8 +71,8 @@ fn authority_round() {
|
|||||||
// Push transaction to both clients. Only one of them gets lucky to produce a block.
|
// Push transaction to both clients. Only one of them gets lucky to produce a block.
|
||||||
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
|
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
|
||||||
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
|
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
|
||||||
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain));
|
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _);
|
||||||
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain));
|
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _);
|
||||||
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
|
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
|
||||||
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
|
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
|
||||||
// exchange statuses
|
// exchange statuses
|
||||||
@ -160,8 +160,8 @@ fn tendermint() {
|
|||||||
trace!(target: "poa", "Peer 0 is {}.", s0.address());
|
trace!(target: "poa", "Peer 0 is {}.", s0.address());
|
||||||
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
|
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
|
||||||
trace!(target: "poa", "Peer 1 is {}.", s1.address());
|
trace!(target: "poa", "Peer 1 is {}.", s1.address());
|
||||||
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain));
|
net.peer(0).chain.engine().register_client(Arc::downgrade(&net.peer(0).chain) as _);
|
||||||
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain));
|
net.peer(1).chain.engine().register_client(Arc::downgrade(&net.peer(1).chain) as _);
|
||||||
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
|
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
|
||||||
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
|
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
|
||||||
// Exhange statuses
|
// Exhange statuses
|
||||||
|
2
test.sh
2
test.sh
@ -1,5 +1,5 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
# Running Parity Full Test Sute
|
# Running Parity Full Test Suite
|
||||||
|
|
||||||
FEATURES="json-tests"
|
FEATURES="json-tests"
|
||||||
OPTIONS="--release"
|
OPTIONS="--release"
|
||||||
|
@ -56,23 +56,18 @@ impl Topic {
|
|||||||
/// this takes 3 sets of 9 bits, treating each as an index in the range
|
/// this takes 3 sets of 9 bits, treating each as an index in the range
|
||||||
/// 0..512 into the bloom and setting the corresponding bit in the bloom to 1.
|
/// 0..512 into the bloom and setting the corresponding bit in the bloom to 1.
|
||||||
pub fn bloom_into(&self, bloom: &mut H512) {
|
pub fn bloom_into(&self, bloom: &mut H512) {
|
||||||
let mut set_bit = |idx: usize| {
|
|
||||||
let idx = idx & 511;
|
|
||||||
bloom[idx / 8] |= 1 << idx % 8;
|
|
||||||
};
|
|
||||||
|
|
||||||
let data = &self.0;
|
let data = &self.0;
|
||||||
let mut combined = ((data[0] as usize) << 24) |
|
for i in 0..3 {
|
||||||
((data[1] as usize) << 16) |
|
let mut idx = data[i] as usize;
|
||||||
((data[2] as usize) << 8) |
|
|
||||||
data[3] as usize;
|
|
||||||
|
|
||||||
// take off the last 5 bits as we only use 27.
|
if data[3] & (1 << i) != 0 {
|
||||||
combined >>= 5;
|
idx += 256;
|
||||||
|
}
|
||||||
|
|
||||||
set_bit(combined);
|
debug_assert!(idx <= 511);
|
||||||
set_bit(combined >> 9);
|
bloom[idx / 8] |= 1 << (7 - idx % 8);
|
||||||
set_bit(combined >> 18);
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get bloom for single topic.
|
/// Get bloom for single topic.
|
||||||
@ -118,6 +113,7 @@ pub fn bloom_topics(topics: &[Topic]) -> H512 {
|
|||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub enum Error {
|
pub enum Error {
|
||||||
Decoder(DecoderError),
|
Decoder(DecoderError),
|
||||||
|
EmptyTopics,
|
||||||
LivesTooLong,
|
LivesTooLong,
|
||||||
IssuedInFuture,
|
IssuedInFuture,
|
||||||
ZeroTTL,
|
ZeroTTL,
|
||||||
@ -136,10 +132,27 @@ impl fmt::Display for Error {
|
|||||||
Error::LivesTooLong => write!(f, "Message claims to be issued before the unix epoch."),
|
Error::LivesTooLong => write!(f, "Message claims to be issued before the unix epoch."),
|
||||||
Error::IssuedInFuture => write!(f, "Message issued in future."),
|
Error::IssuedInFuture => write!(f, "Message issued in future."),
|
||||||
Error::ZeroTTL => write!(f, "Message live for zero time."),
|
Error::ZeroTTL => write!(f, "Message live for zero time."),
|
||||||
|
Error::EmptyTopics => write!(f, "Message has no topics."),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn append_topics<'a>(s: &'a mut RlpStream, topics: &[Topic]) -> &'a mut RlpStream {
|
||||||
|
if topics.len() == 1 {
|
||||||
|
s.append(&topics[0])
|
||||||
|
} else {
|
||||||
|
s.append_list(&topics)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn decode_topics(rlp: UntrustedRlp) -> Result<SmallVec<[Topic; 4]>, DecoderError> {
|
||||||
|
if rlp.is_list() {
|
||||||
|
rlp.iter().map(|r| r.as_val::<Topic>()).collect()
|
||||||
|
} else {
|
||||||
|
rlp.as_val().map(|t| SmallVec::from_slice(&[t]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Raw envelope struct.
|
// Raw envelope struct.
|
||||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||||
pub struct Envelope {
|
pub struct Envelope {
|
||||||
@ -156,15 +169,20 @@ pub struct Envelope {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Envelope {
|
impl Envelope {
|
||||||
|
/// Whether the message is multi-topic. Only relay these to Parity peers.
|
||||||
|
pub fn is_multitopic(&self) -> bool {
|
||||||
|
self.topics.len() != 1
|
||||||
|
}
|
||||||
|
|
||||||
fn proving_hash(&self) -> H256 {
|
fn proving_hash(&self) -> H256 {
|
||||||
use byteorder::{BigEndian, ByteOrder};
|
use byteorder::{BigEndian, ByteOrder};
|
||||||
|
|
||||||
let mut buf = [0; 32];
|
let mut buf = [0; 32];
|
||||||
|
|
||||||
let mut stream = RlpStream::new_list(4);
|
let mut stream = RlpStream::new_list(4);
|
||||||
stream.append(&self.expiry)
|
stream.append(&self.expiry).append(&self.ttl);
|
||||||
.append(&self.ttl)
|
|
||||||
.append_list(&self.topics)
|
append_topics(&mut stream, &self.topics)
|
||||||
.append(&self.data);
|
.append(&self.data);
|
||||||
|
|
||||||
let mut digest = Keccak::new_keccak256();
|
let mut digest = Keccak::new_keccak256();
|
||||||
@ -185,8 +203,9 @@ impl rlp::Encodable for Envelope {
|
|||||||
fn rlp_append(&self, s: &mut RlpStream) {
|
fn rlp_append(&self, s: &mut RlpStream) {
|
||||||
s.begin_list(5)
|
s.begin_list(5)
|
||||||
.append(&self.expiry)
|
.append(&self.expiry)
|
||||||
.append(&self.ttl)
|
.append(&self.ttl);
|
||||||
.append_list(&self.topics)
|
|
||||||
|
append_topics(s, &self.topics)
|
||||||
.append(&self.data)
|
.append(&self.data)
|
||||||
.append(&self.nonce);
|
.append(&self.nonce);
|
||||||
}
|
}
|
||||||
@ -199,13 +218,17 @@ impl rlp::Decodable for Envelope {
|
|||||||
Ok(Envelope {
|
Ok(Envelope {
|
||||||
expiry: rlp.val_at(0)?,
|
expiry: rlp.val_at(0)?,
|
||||||
ttl: rlp.val_at(1)?,
|
ttl: rlp.val_at(1)?,
|
||||||
topics: rlp.at(2)?.iter().map(|x| x.as_val()).collect::<Result<_, _>>()?,
|
topics: decode_topics(rlp.at(2)?)?,
|
||||||
data: rlp.val_at(3)?,
|
data: rlp.val_at(3)?,
|
||||||
nonce: rlp.val_at(4)?,
|
nonce: rlp.val_at(4)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Error indicating no topics.
|
||||||
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
pub struct EmptyTopics;
|
||||||
|
|
||||||
/// Message creation parameters.
|
/// Message creation parameters.
|
||||||
/// Pass this to `Message::create` to make a message.
|
/// Pass this to `Message::create` to make a message.
|
||||||
pub struct CreateParams {
|
pub struct CreateParams {
|
||||||
@ -213,7 +236,7 @@ pub struct CreateParams {
|
|||||||
pub ttl: u64,
|
pub ttl: u64,
|
||||||
/// payload data.
|
/// payload data.
|
||||||
pub payload: Vec<u8>,
|
pub payload: Vec<u8>,
|
||||||
/// Topics.
|
/// Topics. May not be empty.
|
||||||
pub topics: Vec<Topic>,
|
pub topics: Vec<Topic>,
|
||||||
/// How many milliseconds to spend proving work.
|
/// How many milliseconds to spend proving work.
|
||||||
pub work: u64,
|
pub work: u64,
|
||||||
@ -231,10 +254,12 @@ pub struct Message {
|
|||||||
impl Message {
|
impl Message {
|
||||||
/// Create a message from creation parameters.
|
/// Create a message from creation parameters.
|
||||||
/// Panics if TTL is 0.
|
/// Panics if TTL is 0.
|
||||||
pub fn create(params: CreateParams) -> Self {
|
pub fn create(params: CreateParams) -> Result<Self, EmptyTopics> {
|
||||||
use byteorder::{BigEndian, ByteOrder};
|
use byteorder::{BigEndian, ByteOrder};
|
||||||
use rand::{Rng, SeedableRng, XorShiftRng};
|
use rand::{Rng, SeedableRng, XorShiftRng};
|
||||||
|
|
||||||
|
if params.topics.is_empty() { return Err(EmptyTopics) }
|
||||||
|
|
||||||
let mut rng = {
|
let mut rng = {
|
||||||
let mut thread_rng = ::rand::thread_rng();
|
let mut thread_rng = ::rand::thread_rng();
|
||||||
|
|
||||||
@ -254,10 +279,8 @@ impl Message {
|
|||||||
|
|
||||||
let start_digest = {
|
let start_digest = {
|
||||||
let mut stream = RlpStream::new_list(4);
|
let mut stream = RlpStream::new_list(4);
|
||||||
stream.append(&expiry)
|
stream.append(&expiry).append(¶ms.ttl);
|
||||||
.append(¶ms.ttl)
|
append_topics(&mut stream, ¶ms.topics).append(¶ms.payload);
|
||||||
.append_list(¶ms.topics)
|
|
||||||
.append(¶ms.payload);
|
|
||||||
|
|
||||||
let mut digest = Keccak::new_keccak256();
|
let mut digest = Keccak::new_keccak256();
|
||||||
digest.update(&*stream.drain());
|
digest.update(&*stream.drain());
|
||||||
@ -300,12 +323,12 @@ impl Message {
|
|||||||
|
|
||||||
let encoded = ::rlp::encode(&envelope);
|
let encoded = ::rlp::encode(&envelope);
|
||||||
|
|
||||||
Message::from_components(
|
Ok(Message::from_components(
|
||||||
envelope,
|
envelope,
|
||||||
encoded.len(),
|
encoded.len(),
|
||||||
H256(keccak256(&encoded)),
|
H256(keccak256(&encoded)),
|
||||||
SystemTime::now(),
|
SystemTime::now(),
|
||||||
).expect("Message generated here known to be valid; qed")
|
).expect("Message generated here known to be valid; qed"))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Decode message from RLP and check for validity against system time.
|
/// Decode message from RLP and check for validity against system time.
|
||||||
@ -327,6 +350,8 @@ impl Message {
|
|||||||
if envelope.expiry <= envelope.ttl { return Err(Error::LivesTooLong) }
|
if envelope.expiry <= envelope.ttl { return Err(Error::LivesTooLong) }
|
||||||
if envelope.ttl == 0 { return Err(Error::ZeroTTL) }
|
if envelope.ttl == 0 { return Err(Error::ZeroTTL) }
|
||||||
|
|
||||||
|
if envelope.topics.is_empty() { return Err(Error::EmptyTopics) }
|
||||||
|
|
||||||
let issue_time_adjusted = Duration::from_secs(
|
let issue_time_adjusted = Duration::from_secs(
|
||||||
(envelope.expiry - envelope.ttl).saturating_sub(LEEWAY_SECONDS)
|
(envelope.expiry - envelope.ttl).saturating_sub(LEEWAY_SECONDS)
|
||||||
);
|
);
|
||||||
@ -394,6 +419,7 @@ mod tests {
|
|||||||
use super::*;
|
use super::*;
|
||||||
use std::time::{self, Duration, SystemTime};
|
use std::time::{self, Duration, SystemTime};
|
||||||
use rlp::UntrustedRlp;
|
use rlp::UntrustedRlp;
|
||||||
|
use smallvec::SmallVec;
|
||||||
|
|
||||||
fn unix_time(x: u64) -> SystemTime {
|
fn unix_time(x: u64) -> SystemTime {
|
||||||
time::UNIX_EPOCH + Duration::from_secs(x)
|
time::UNIX_EPOCH + Duration::from_secs(x)
|
||||||
@ -401,12 +427,12 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn create_message() {
|
fn create_message() {
|
||||||
let _ = Message::create(CreateParams {
|
assert!(Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: vec![1, 2, 3, 4],
|
payload: vec![1, 2, 3, 4],
|
||||||
topics: Vec::new(),
|
topics: vec![Topic([1, 2, 1, 2])],
|
||||||
work: 50,
|
work: 50,
|
||||||
});
|
}).is_ok());
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -415,7 +441,23 @@ mod tests {
|
|||||||
expiry: 100_000,
|
expiry: 100_000,
|
||||||
ttl: 30,
|
ttl: 30,
|
||||||
data: vec![9; 256],
|
data: vec![9; 256],
|
||||||
topics: Default::default(),
|
topics: SmallVec::from_slice(&[Default::default()]),
|
||||||
|
nonce: 1010101,
|
||||||
|
};
|
||||||
|
|
||||||
|
let encoded = ::rlp::encode(&envelope);
|
||||||
|
let decoded = ::rlp::decode(&encoded);
|
||||||
|
|
||||||
|
assert_eq!(envelope, decoded)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn round_trip_multitopic() {
|
||||||
|
let envelope = Envelope {
|
||||||
|
expiry: 100_000,
|
||||||
|
ttl: 30,
|
||||||
|
data: vec![9; 256],
|
||||||
|
topics: SmallVec::from_slice(&[Default::default(), Topic([1, 2, 3, 4])]),
|
||||||
nonce: 1010101,
|
nonce: 1010101,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -431,7 +473,7 @@ mod tests {
|
|||||||
expiry: 100_000,
|
expiry: 100_000,
|
||||||
ttl: 30,
|
ttl: 30,
|
||||||
data: vec![9; 256],
|
data: vec![9; 256],
|
||||||
topics: Default::default(),
|
topics: SmallVec::from_slice(&[Default::default()]),
|
||||||
nonce: 1010101,
|
nonce: 1010101,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -450,7 +492,7 @@ mod tests {
|
|||||||
expiry: 100_000,
|
expiry: 100_000,
|
||||||
ttl: 30,
|
ttl: 30,
|
||||||
data: vec![9; 256],
|
data: vec![9; 256],
|
||||||
topics: Default::default(),
|
topics: SmallVec::from_slice(&[Default::default()]),
|
||||||
nonce: 1010101,
|
nonce: 1010101,
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -467,7 +509,7 @@ mod tests {
|
|||||||
expiry: 100_000,
|
expiry: 100_000,
|
||||||
ttl: 200_000,
|
ttl: 200_000,
|
||||||
data: vec![9; 256],
|
data: vec![9; 256],
|
||||||
topics: Default::default(),
|
topics: SmallVec::from_slice(&[Default::default()]),
|
||||||
nonce: 1010101,
|
nonce: 1010101,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -23,31 +23,45 @@ use std::time::{Duration, SystemTime};
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use bigint::hash::{H256, H512};
|
use bigint::hash::{H256, H512};
|
||||||
use network::{HostInfo, NetworkContext, NetworkError, NodeId, PeerId, TimerToken};
|
use network::{HostInfo, NetworkContext, NetworkError, NodeId, PeerId, ProtocolId, TimerToken};
|
||||||
use ordered_float::OrderedFloat;
|
use ordered_float::OrderedFloat;
|
||||||
use parking_lot::{Mutex, RwLock};
|
use parking_lot::{Mutex, RwLock};
|
||||||
use rlp::{DecoderError, RlpStream, UntrustedRlp};
|
use rlp::{DecoderError, RlpStream, UntrustedRlp};
|
||||||
|
|
||||||
use message::{Message, Error as MessageError};
|
use message::{Message, Error as MessageError};
|
||||||
|
|
||||||
|
// how often periodic relays are. when messages are imported
|
||||||
|
// we directly broadcast.
|
||||||
const RALLY_TOKEN: TimerToken = 1;
|
const RALLY_TOKEN: TimerToken = 1;
|
||||||
const RALLY_TIMEOUT_MS: u64 = 750; // supposed to be at least once per second.
|
const RALLY_TIMEOUT_MS: u64 = 2500;
|
||||||
|
|
||||||
const PROTOCOL_VERSION: usize = 2;
|
/// Current protocol version.
|
||||||
|
pub const PROTOCOL_VERSION: usize = 6;
|
||||||
|
|
||||||
/// Supported protocol versions.
|
/// Supported protocol versions.
|
||||||
pub const SUPPORTED_VERSIONS: &'static [u8] = &[PROTOCOL_VERSION as u8];
|
pub const SUPPORTED_VERSIONS: &'static [u8] = &[PROTOCOL_VERSION as u8];
|
||||||
|
|
||||||
// maximum tolerated delay between messages packets.
|
// maximum tolerated delay between messages packets.
|
||||||
const MAX_TOLERATED_DELAY_MS: u64 = 2000;
|
const MAX_TOLERATED_DELAY_MS: u64 = 5000;
|
||||||
|
|
||||||
/// Number of packets.
|
/// Number of packets. A bunch are reserved.
|
||||||
pub const PACKET_COUNT: u8 = 3;
|
pub const PACKET_COUNT: u8 = 128;
|
||||||
|
|
||||||
|
/// Whisper protocol ID
|
||||||
|
pub const PROTOCOL_ID: ::network::ProtocolId = *b"shh";
|
||||||
|
|
||||||
|
/// Parity-whisper protocol ID
|
||||||
|
/// Current parity-specific extensions:
|
||||||
|
/// - Multiple topics in packet.
|
||||||
|
pub const PARITY_PROTOCOL_ID: ::network::ProtocolId = *b"pwh";
|
||||||
|
|
||||||
mod packet {
|
mod packet {
|
||||||
pub const STATUS: u8 = 0;
|
pub const STATUS: u8 = 0;
|
||||||
pub const MESSAGES: u8 = 1;
|
pub const MESSAGES: u8 = 1;
|
||||||
pub const TOPIC_FILTER: u8 = 2;
|
pub const POW_REQUIREMENT: u8 = 2;
|
||||||
|
pub const TOPIC_FILTER: u8 = 3;
|
||||||
|
|
||||||
|
// 126, 127 for mail server stuff we will never implement here.
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handles messages within a single packet.
|
/// Handles messages within a single packet.
|
||||||
@ -67,11 +81,9 @@ enum Error {
|
|||||||
Decoder(DecoderError),
|
Decoder(DecoderError),
|
||||||
Network(NetworkError),
|
Network(NetworkError),
|
||||||
Message(MessageError),
|
Message(MessageError),
|
||||||
UnknownPacket(u8),
|
|
||||||
UnknownPeer(PeerId),
|
UnknownPeer(PeerId),
|
||||||
ProtocolVersionMismatch(usize),
|
|
||||||
SameNodeKey,
|
|
||||||
UnexpectedMessage,
|
UnexpectedMessage,
|
||||||
|
InvalidPowReq,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl From<DecoderError> for Error {
|
impl From<DecoderError> for Error {
|
||||||
@ -98,12 +110,9 @@ impl fmt::Display for Error {
|
|||||||
Error::Decoder(ref err) => write!(f, "Failed to decode packet: {}", err),
|
Error::Decoder(ref err) => write!(f, "Failed to decode packet: {}", err),
|
||||||
Error::Network(ref err) => write!(f, "Network error: {}", err),
|
Error::Network(ref err) => write!(f, "Network error: {}", err),
|
||||||
Error::Message(ref err) => write!(f, "Error decoding message: {}", err),
|
Error::Message(ref err) => write!(f, "Error decoding message: {}", err),
|
||||||
Error::UnknownPacket(ref id) => write!(f, "Unknown packet kind: {}", id),
|
|
||||||
Error::UnknownPeer(ref id) => write!(f, "Message received from unknown peer: {}", id),
|
Error::UnknownPeer(ref id) => write!(f, "Message received from unknown peer: {}", id),
|
||||||
Error::ProtocolVersionMismatch(ref proto) =>
|
|
||||||
write!(f, "Unknown protocol version: {}", proto),
|
|
||||||
Error::UnexpectedMessage => write!(f, "Unexpected message."),
|
Error::UnexpectedMessage => write!(f, "Unexpected message."),
|
||||||
Error::SameNodeKey => write!(f, "Peer and us have same node key."),
|
Error::InvalidPowReq => write!(f, "Peer sent invalid PoW requirement."),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -298,15 +307,18 @@ impl Messages {
|
|||||||
|
|
||||||
enum State {
|
enum State {
|
||||||
Unconfirmed(SystemTime), // awaiting status packet.
|
Unconfirmed(SystemTime), // awaiting status packet.
|
||||||
TheirTurn(SystemTime), // it has been their turn to send since stored time.
|
Confirmed,
|
||||||
OurTurn,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)] // for node key. this will be useful for topic routing.
|
||||||
struct Peer {
|
struct Peer {
|
||||||
node_key: NodeId,
|
node_key: NodeId,
|
||||||
state: State,
|
state: State,
|
||||||
known_messages: HashSet<H256>,
|
known_messages: HashSet<H256>,
|
||||||
topic_filter: Option<H512>,
|
topic_filter: Option<H512>,
|
||||||
|
pow_requirement: f64,
|
||||||
|
is_parity: bool,
|
||||||
|
_protocol_version: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Peer {
|
impl Peer {
|
||||||
@ -319,12 +331,14 @@ impl Peer {
|
|||||||
|
|
||||||
// whether this peer will accept the message.
|
// whether this peer will accept the message.
|
||||||
fn will_accept(&self, message: &Message) -> bool {
|
fn will_accept(&self, message: &Message) -> bool {
|
||||||
let known = self.known_messages.contains(message.hash());
|
if self.known_messages.contains(message.hash()) { return false }
|
||||||
|
|
||||||
let matches_bloom = self.topic_filter.as_ref()
|
// only parity peers will accept multitopic messages.
|
||||||
.map_or(true, |topic| topic & message.bloom() == message.bloom().clone());
|
if message.envelope().is_multitopic() && !self.is_parity { return false }
|
||||||
|
if message.work_proved() < self.pow_requirement { return false }
|
||||||
|
|
||||||
!known && matches_bloom
|
self.topic_filter.as_ref()
|
||||||
|
.map_or(true, |filter| &(filter & message.bloom()) == message.bloom())
|
||||||
}
|
}
|
||||||
|
|
||||||
// note a message as known. returns true if it was already
|
// note a message as known. returns true if it was already
|
||||||
@ -337,10 +351,14 @@ impl Peer {
|
|||||||
self.topic_filter = Some(topic);
|
self.topic_filter = Some(topic);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn set_pow_requirement(&mut self, pow_requirement: f64) {
|
||||||
|
self.pow_requirement = pow_requirement;
|
||||||
|
}
|
||||||
|
|
||||||
fn can_send_messages(&self) -> bool {
|
fn can_send_messages(&self) -> bool {
|
||||||
match self.state {
|
match self.state {
|
||||||
State::Unconfirmed(_) | State::OurTurn => false,
|
State::Unconfirmed(_) => false,
|
||||||
State::TheirTurn(_) => true,
|
State::Confirmed => true,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -357,21 +375,41 @@ pub struct PoolStatus {
|
|||||||
pub target_size: usize,
|
pub target_size: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Handle to the pool, for posting messages or getting info.
|
/// Generic network context.
|
||||||
#[derive(Clone)]
|
pub trait Context {
|
||||||
pub struct PoolHandle {
|
/// Disconnect a peer.
|
||||||
messages: Arc<RwLock<Messages>>,
|
fn disconnect_peer(&self, PeerId);
|
||||||
|
/// Disable a peer.
|
||||||
|
fn disable_peer(&self, PeerId);
|
||||||
|
/// Get a peer's node key.
|
||||||
|
fn node_key(&self, PeerId) -> Option<NodeId>;
|
||||||
|
/// Get a peer's protocol version for given protocol.
|
||||||
|
fn protocol_version(&self, ProtocolId, PeerId) -> Option<u8>;
|
||||||
|
/// Send message to peer.
|
||||||
|
fn send(&self, PeerId, u8, Vec<u8>);
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PoolHandle {
|
impl<'a> Context for NetworkContext<'a> {
|
||||||
/// Post a message to the whisper network to be relayed.
|
fn disconnect_peer(&self, peer: PeerId) {
|
||||||
pub fn post_message(&self, message: Message) -> bool {
|
NetworkContext::disconnect_peer(self, peer);
|
||||||
self.messages.write().insert(message)
|
}
|
||||||
|
fn disable_peer(&self, peer: PeerId) {
|
||||||
|
NetworkContext::disable_peer(self, peer)
|
||||||
|
}
|
||||||
|
fn node_key(&self, peer: PeerId) -> Option<NodeId> {
|
||||||
|
self.session_info(peer).and_then(|info| info.id)
|
||||||
|
}
|
||||||
|
fn protocol_version(&self, proto_id: ProtocolId, peer: PeerId) -> Option<u8> {
|
||||||
|
NetworkContext::protocol_version(self, proto_id, peer)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get number of messages and amount of memory used by them.
|
fn send(&self, peer: PeerId, packet_id: u8, message: Vec<u8>) {
|
||||||
pub fn pool_status(&self) -> PoolStatus {
|
if let Err(e) = NetworkContext::send(self, peer, packet_id, message) {
|
||||||
self.messages.read().status()
|
debug!(target: "whisper", "Failed to send packet {} to peer {}: {}",
|
||||||
|
packet_id, peer, e);
|
||||||
|
|
||||||
|
self.disconnect_peer(peer)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -395,15 +433,23 @@ impl<T> Network<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Acquire a sender to asynchronously feed messages to the whisper
|
/// Post a message to the whisper network to be relayed.
|
||||||
/// network.
|
pub fn post_message<C: Context>(&self, message: Message, context: &C) -> bool
|
||||||
pub fn handle(&self) -> PoolHandle {
|
where T: MessageHandler
|
||||||
PoolHandle { messages: self.messages.clone() }
|
{
|
||||||
|
let ok = self.messages.write().insert(message);
|
||||||
|
if ok { self.rally(context) }
|
||||||
|
ok
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get number of messages and amount of memory used by them.
|
||||||
|
pub fn pool_status(&self) -> PoolStatus {
|
||||||
|
self.messages.read().status()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: MessageHandler> Network<T> {
|
impl<T: MessageHandler> Network<T> {
|
||||||
fn rally(&self, io: &NetworkContext) {
|
fn rally<C: Context>(&self, io: &C) {
|
||||||
// cannot be greater than 16MB (protocol limitation)
|
// cannot be greater than 16MB (protocol limitation)
|
||||||
const MAX_MESSAGES_PACKET_SIZE: usize = 8 * 1024 * 1024;
|
const MAX_MESSAGES_PACKET_SIZE: usize = 8 * 1024 * 1024;
|
||||||
|
|
||||||
@ -428,11 +474,11 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
|
|
||||||
// check timeouts and skip peers who we can't send a rally to.
|
// check timeouts and skip peers who we can't send a rally to.
|
||||||
match peer_data.state {
|
match peer_data.state {
|
||||||
State::Unconfirmed(ref time) | State::TheirTurn(ref time) => {
|
State::Unconfirmed(ref time) => {
|
||||||
punish_timeout(time);
|
punish_timeout(time);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
State::OurTurn => {}
|
State::Confirmed => {}
|
||||||
}
|
}
|
||||||
|
|
||||||
// construct packet, skipping messages the peer won't accept.
|
// construct packet, skipping messages the peer won't accept.
|
||||||
@ -452,39 +498,19 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
|
|
||||||
stream.complete_unbounded_list();
|
stream.complete_unbounded_list();
|
||||||
|
|
||||||
peer_data.state = State::TheirTurn(SystemTime::now());
|
io.send(*peer_id, packet::MESSAGES, stream.out());
|
||||||
if let Err(e) = io.send(*peer_id, packet::MESSAGES, stream.out()) {
|
|
||||||
debug!(target: "whisper", "Failed to send messages packet to peer {}: {}", peer_id, e);
|
|
||||||
io.disconnect_peer(*peer_id);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// handle status packet from peer.
|
// handle status packet from peer.
|
||||||
fn on_status(&self, peer: &PeerId, status: UntrustedRlp)
|
fn on_status(&self, peer: &PeerId, _status: UntrustedRlp)
|
||||||
-> Result<(), Error>
|
-> Result<(), Error>
|
||||||
{
|
{
|
||||||
let proto: usize = status.as_val()?;
|
|
||||||
if proto != PROTOCOL_VERSION { return Err(Error::ProtocolVersionMismatch(proto)) }
|
|
||||||
|
|
||||||
let peers = self.peers.read();
|
let peers = self.peers.read();
|
||||||
|
|
||||||
match peers.get(peer) {
|
match peers.get(peer) {
|
||||||
Some(peer) => {
|
Some(peer) => {
|
||||||
let mut peer = peer.lock();
|
peer.lock().state = State::Confirmed;
|
||||||
let our_node_key = self.node_key.read().clone();
|
|
||||||
|
|
||||||
// handle this basically impossible edge case gracefully.
|
|
||||||
if peer.node_key == our_node_key {
|
|
||||||
return Err(Error::SameNodeKey);
|
|
||||||
}
|
|
||||||
|
|
||||||
// peer with lower node key begins the rally.
|
|
||||||
if peer.node_key > our_node_key {
|
|
||||||
peer.state = State::OurTurn;
|
|
||||||
} else {
|
|
||||||
peer.state = State::TheirTurn(SystemTime::now());
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
None => {
|
None => {
|
||||||
@ -513,8 +539,6 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
return Err(Error::UnexpectedMessage);
|
return Err(Error::UnexpectedMessage);
|
||||||
}
|
}
|
||||||
|
|
||||||
peer.state = State::OurTurn;
|
|
||||||
|
|
||||||
let now = SystemTime::now();
|
let now = SystemTime::now();
|
||||||
let mut messages_vec = message_packet.iter().map(|rlp| Message::decode(rlp, now))
|
let mut messages_vec = message_packet.iter().map(|rlp| Message::decode(rlp, now))
|
||||||
.collect::<Result<Vec<_>, _>>()?;
|
.collect::<Result<Vec<_>, _>>()?;
|
||||||
@ -541,6 +565,42 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn on_pow_requirement(&self, peer: &PeerId, requirement: UntrustedRlp)
|
||||||
|
-> Result<(), Error>
|
||||||
|
{
|
||||||
|
use byteorder::{ByteOrder, BigEndian};
|
||||||
|
|
||||||
|
let peers = self.peers.read();
|
||||||
|
match peers.get(peer) {
|
||||||
|
Some(peer) => {
|
||||||
|
let mut peer = peer.lock();
|
||||||
|
|
||||||
|
if let State::Unconfirmed(_) = peer.state {
|
||||||
|
return Err(Error::UnexpectedMessage);
|
||||||
|
}
|
||||||
|
let bytes: Vec<u8> = requirement.as_val()?;
|
||||||
|
if bytes.len() != ::std::mem::size_of::<f64>() {
|
||||||
|
return Err(Error::InvalidPowReq);
|
||||||
|
}
|
||||||
|
|
||||||
|
// as of byteorder 1.1.0, this is always defined.
|
||||||
|
let req = BigEndian::read_f64(&bytes[..]);
|
||||||
|
|
||||||
|
if !req.is_normal() {
|
||||||
|
return Err(Error::InvalidPowReq);
|
||||||
|
}
|
||||||
|
|
||||||
|
peer.set_pow_requirement(req);
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
debug!(target: "whisper", "Received message from unknown peer.");
|
||||||
|
return Err(Error::UnknownPeer(*peer));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
fn on_topic_filter(&self, peer: &PeerId, filter: UntrustedRlp)
|
fn on_topic_filter(&self, peer: &PeerId, filter: UntrustedRlp)
|
||||||
-> Result<(), Error>
|
-> Result<(), Error>
|
||||||
{
|
{
|
||||||
@ -564,10 +624,10 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
fn on_connect(&self, io: &NetworkContext, peer: &PeerId) {
|
fn on_connect<C: Context>(&self, io: &C, peer: &PeerId) {
|
||||||
trace!(target: "whisper", "Connecting peer {}", peer);
|
trace!(target: "whisper", "Connecting peer {}", peer);
|
||||||
|
|
||||||
let node_key = match io.session_info(*peer).and_then(|info| info.id) {
|
let node_key = match io.node_key(*peer) {
|
||||||
Some(node_key) => node_key,
|
Some(node_key) => node_key,
|
||||||
None => {
|
None => {
|
||||||
debug!(target: "whisper", "Disconnecting peer {}, who has no node key.", peer);
|
debug!(target: "whisper", "Disconnecting peer {}, who has no node key.", peer);
|
||||||
@ -576,17 +636,25 @@ impl<T: MessageHandler> Network<T> {
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let version = match io.protocol_version(PROTOCOL_ID, *peer) {
|
||||||
|
Some(version) => version as usize,
|
||||||
|
None => {
|
||||||
|
io.disable_peer(*peer);
|
||||||
|
return
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
self.peers.write().insert(*peer, Mutex::new(Peer {
|
self.peers.write().insert(*peer, Mutex::new(Peer {
|
||||||
node_key: node_key,
|
node_key: node_key,
|
||||||
state: State::Unconfirmed(SystemTime::now()),
|
state: State::Unconfirmed(SystemTime::now()),
|
||||||
known_messages: HashSet::new(),
|
known_messages: HashSet::new(),
|
||||||
topic_filter: None,
|
topic_filter: None,
|
||||||
|
pow_requirement: 0f64,
|
||||||
|
is_parity: io.protocol_version(PARITY_PROTOCOL_ID, *peer).is_some(),
|
||||||
|
_protocol_version: version,
|
||||||
}));
|
}));
|
||||||
|
|
||||||
if let Err(e) = io.send(*peer, packet::STATUS, ::rlp::encode(&PROTOCOL_VERSION).to_vec()) {
|
io.send(*peer, packet::STATUS, ::rlp::EMPTY_LIST_RLP.to_vec());
|
||||||
debug!(target: "whisper", "Error sending status: {}", e);
|
|
||||||
io.disconnect_peer(*peer);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn on_disconnect(&self, peer: &PeerId) {
|
fn on_disconnect(&self, peer: &PeerId) {
|
||||||
@ -609,8 +677,9 @@ impl<T: MessageHandler> ::network::NetworkProtocolHandler for Network<T> {
|
|||||||
let res = match packet_id {
|
let res = match packet_id {
|
||||||
packet::STATUS => self.on_status(peer, rlp),
|
packet::STATUS => self.on_status(peer, rlp),
|
||||||
packet::MESSAGES => self.on_messages(peer, rlp),
|
packet::MESSAGES => self.on_messages(peer, rlp),
|
||||||
|
packet::POW_REQUIREMENT => self.on_pow_requirement(peer, rlp),
|
||||||
packet::TOPIC_FILTER => self.on_topic_filter(peer, rlp),
|
packet::TOPIC_FILTER => self.on_topic_filter(peer, rlp),
|
||||||
other => Err(Error::UnknownPacket(other)),
|
_ => Ok(()), // ignore unknown packets.
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Err(e) = res {
|
if let Err(e) = res {
|
||||||
@ -636,3 +705,19 @@ impl<T: MessageHandler> ::network::NetworkProtocolHandler for Network<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Dummy subprotocol used for parity extensions.
|
||||||
|
#[derive(Debug, Copy, Clone)]
|
||||||
|
pub struct ParityExtensions;
|
||||||
|
|
||||||
|
impl ::network::NetworkProtocolHandler for ParityExtensions {
|
||||||
|
fn initialize(&self, _io: &NetworkContext, _host_info: &HostInfo) { }
|
||||||
|
|
||||||
|
fn read(&self, _io: &NetworkContext, _peer: &PeerId, _id: u8, _msg: &[u8]) { }
|
||||||
|
|
||||||
|
fn connected(&self, _io: &NetworkContext, _peer: &PeerId) { }
|
||||||
|
|
||||||
|
fn disconnected(&self, _io: &NetworkContext, _peer: &PeerId) { }
|
||||||
|
|
||||||
|
fn timeout(&self, _io: &NetworkContext, _timer: TimerToken) { }
|
||||||
|
}
|
||||||
|
@ -307,7 +307,7 @@ impl Filter {
|
|||||||
|
|
||||||
#[cfg(test)]
|
#[cfg(test)]
|
||||||
mod tests {
|
mod tests {
|
||||||
use message::{CreateParams, Message};
|
use message::{CreateParams, Message, Topic};
|
||||||
use rpc::types::{FilterRequest, HexEncode};
|
use rpc::types::{FilterRequest, HexEncode};
|
||||||
use rpc::abridge_topic;
|
use rpc::abridge_topic;
|
||||||
use super::*;
|
use super::*;
|
||||||
@ -325,38 +325,40 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn basic_match() {
|
fn basic_match() {
|
||||||
let topics = vec![vec![1, 2, 3], vec![4, 5, 6]];
|
let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]];
|
||||||
|
let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect();
|
||||||
|
|
||||||
let req = FilterRequest {
|
let req = FilterRequest {
|
||||||
decrypt_with: Default::default(),
|
decrypt_with: Default::default(),
|
||||||
from: None,
|
from: None,
|
||||||
topics: topics.iter().cloned().map(HexEncode).collect(),
|
topics: topics.into_iter().map(HexEncode).collect(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let filter = Filter::new(req).unwrap();
|
let filter = Filter::new(req).unwrap();
|
||||||
let message = Message::create(CreateParams {
|
let message = Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: vec![1, 3, 5, 7, 9],
|
payload: vec![1, 3, 5, 7, 9],
|
||||||
topics: topics.iter().map(|x| abridge_topic(&x)).collect(),
|
topics: abridged_topics.clone(),
|
||||||
work: 0,
|
work: 0,
|
||||||
});
|
}).unwrap();
|
||||||
|
|
||||||
assert!(filter.basic_matches(&message));
|
assert!(filter.basic_matches(&message));
|
||||||
|
|
||||||
let message = Message::create(CreateParams {
|
let message = Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: vec![1, 3, 5, 7, 9],
|
payload: vec![1, 3, 5, 7, 9],
|
||||||
topics: topics.iter().take(1).map(|x| abridge_topic(&x)).collect(),
|
topics: abridged_topics.clone(),
|
||||||
work: 0,
|
work: 0,
|
||||||
});
|
}).unwrap();
|
||||||
|
|
||||||
assert!(filter.basic_matches(&message));
|
assert!(filter.basic_matches(&message));
|
||||||
|
|
||||||
let message = Message::create(CreateParams {
|
let message = Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: vec![1, 3, 5, 7, 9],
|
payload: vec![1, 3, 5, 7, 9],
|
||||||
topics: Vec::new(),
|
topics: vec![Topic([1, 8, 3, 99])],
|
||||||
work: 0,
|
work: 0,
|
||||||
});
|
}).unwrap();
|
||||||
|
|
||||||
assert!(!filter.basic_matches(&message));
|
assert!(!filter.basic_matches(&message));
|
||||||
}
|
}
|
||||||
@ -366,6 +368,9 @@ mod tests {
|
|||||||
use rpc::payload::{self, EncodeParams};
|
use rpc::payload::{self, EncodeParams};
|
||||||
use rpc::key_store::{Key, KeyStore};
|
use rpc::key_store::{Key, KeyStore};
|
||||||
|
|
||||||
|
let topics = vec![vec![1, 2, 3, 4], vec![5, 6, 7, 8]];
|
||||||
|
let abridged_topics: Vec<_> = topics.iter().map(|x| abridge_topic(&x)).collect();
|
||||||
|
|
||||||
let mut store = KeyStore::new().unwrap();
|
let mut store = KeyStore::new().unwrap();
|
||||||
let signing_pair = Key::new_asymmetric(store.rng());
|
let signing_pair = Key::new_asymmetric(store.rng());
|
||||||
let encrypting_key = Key::new_symmetric(store.rng());
|
let encrypting_key = Key::new_symmetric(store.rng());
|
||||||
@ -386,24 +391,25 @@ mod tests {
|
|||||||
let message = Message::create(CreateParams {
|
let message = Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: encrypted,
|
payload: encrypted,
|
||||||
topics: vec![abridge_topic(&[9; 32])],
|
topics: abridged_topics.clone(),
|
||||||
work: 0,
|
work: 0,
|
||||||
});
|
}).unwrap();
|
||||||
|
|
||||||
let message2 = Message::create(CreateParams {
|
let message2 = Message::create(CreateParams {
|
||||||
ttl: 100,
|
ttl: 100,
|
||||||
payload: vec![3, 5, 7, 9],
|
payload: vec![3, 5, 7, 9],
|
||||||
topics: vec![abridge_topic(&[9; 32])],
|
topics: abridged_topics,
|
||||||
work: 0,
|
work: 0,
|
||||||
});
|
}).unwrap();
|
||||||
|
|
||||||
let filter = Filter::new(FilterRequest {
|
let filter = Filter::new(FilterRequest {
|
||||||
decrypt_with: Some(HexEncode(decrypt_id)),
|
decrypt_with: Some(HexEncode(decrypt_id)),
|
||||||
from: Some(HexEncode(signing_pair.public().unwrap().clone())),
|
from: Some(HexEncode(signing_pair.public().unwrap().clone())),
|
||||||
topics: vec![HexEncode(vec![9; 32])],
|
topics: topics.into_iter().map(HexEncode).collect(),
|
||||||
}).unwrap();
|
}).unwrap();
|
||||||
|
|
||||||
assert!(filter.basic_matches(&message));
|
assert!(filter.basic_matches(&message));
|
||||||
|
assert!(filter.basic_matches(&message2));
|
||||||
|
|
||||||
let items = ::std::cell::Cell::new(0);
|
let items = ::std::cell::Cell::new(0);
|
||||||
let on_match = |_| { items.set(items.get() + 1); };
|
let on_match = |_| { items.set(items.get() + 1); };
|
||||||
|
@ -155,16 +155,6 @@ pub trait PoolHandle: Send + Sync {
|
|||||||
fn pool_status(&self) -> ::net::PoolStatus;
|
fn pool_status(&self) -> ::net::PoolStatus;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PoolHandle for ::net::PoolHandle {
|
|
||||||
fn relay(&self, message: Message) -> bool {
|
|
||||||
self.post_message(message)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn pool_status(&self) -> ::net::PoolStatus {
|
|
||||||
::net::PoolHandle::pool_status(self)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Default, simple metadata implementation.
|
/// Default, simple metadata implementation.
|
||||||
#[derive(Clone, Default)]
|
#[derive(Clone, Default)]
|
||||||
pub struct Meta {
|
pub struct Meta {
|
||||||
@ -339,7 +329,7 @@ impl<P: PoolHandle + 'static, M: Send + Sync + 'static> Whisper for WhisperClien
|
|||||||
payload: encrypted,
|
payload: encrypted,
|
||||||
topics: req.topics.into_iter().map(|x| abridge_topic(&x.into_inner())).collect(),
|
topics: req.topics.into_iter().map(|x| abridge_topic(&x.into_inner())).collect(),
|
||||||
work: req.priority,
|
work: req.priority,
|
||||||
});
|
}).map_err(|_| whisper_error("Empty topics"))?;
|
||||||
|
|
||||||
if !self.pool.relay(message) {
|
if !self.pool.relay(message) {
|
||||||
Err(whisper_error("PoW too low to compete with other messages"))
|
Err(whisper_error("PoW too low to compete with other messages"))
|
||||||
|
@ -221,7 +221,7 @@ pub struct FilterItem {
|
|||||||
/// Time to live in seconds.
|
/// Time to live in seconds.
|
||||||
pub ttl: u64,
|
pub ttl: u64,
|
||||||
|
|
||||||
/// Abridged topics that matched the filter.
|
/// Topics that matched the filter.
|
||||||
pub topics: Vec<Bytes>,
|
pub topics: Vec<Bytes>,
|
||||||
|
|
||||||
/// Unix timestamp of the message generation.
|
/// Unix timestamp of the message generation.
|
||||||
|
Loading…
Reference in New Issue
Block a user