Backports to 2.0.0-beta (#9094)

* parity-version: betalize 2.0

* Multiple improvements to discovery ping handling (#8771)

* discovery: Only add nodes to routing table after receiving pong.

Previously the discovery algorithm would add nodes to the routing table
before confirming that the endpoint is participating in the protocol. This
now tracks in-flight pings and adds to the routing table only after receiving
a response.
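
A rough sketch of that flow, for illustration only (the `Discovery`, `PendingPing`, `in_flight`, and `add_to_bucket` names are placeholders, not the actual parity-ethereum discovery types):

```rust
use std::collections::HashMap;
use std::time::Instant;

type NodeId = [u8; 32]; // simplified stand-in for the real node id

struct PendingPing {
    sent_at: Instant, // lets a later pass expire pings that never get answered
}

#[derive(Default)]
struct Discovery {
    // Pings sent, pongs not yet received.
    in_flight: HashMap<NodeId, PendingPing>,
}

impl Discovery {
    fn ping(&mut self, id: NodeId) {
        // Send the PING packet here, but do not touch the routing table yet.
        self.in_flight.insert(id, PendingPing { sent_at: Instant::now() });
    }

    fn on_pong(&mut self, id: NodeId) {
        // Only a node that answered our ping gets promoted into a k-bucket.
        if self.in_flight.remove(&id).is_some() {
            self.add_to_bucket(id);
        }
    }

    fn add_to_bucket(&mut self, _id: NodeId) {
        // Insert into the appropriate k-bucket (omitted here).
    }
}
```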

* discovery: Refactor packet creation into its own function.

This function is useful inside unit tests.

* discovery: Additional testing for new add_node behavior.

* discovery: Track expiration of pings to not-yet-in-bucket nodes.

Now that we may ping nodes before adding to a k-bucket, the timeout tracking
must be separate from BucketEntry.
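
Continuing the sketch above (still illustrative, reusing the assumed `in_flight` map rather than the real data structures), the separate expiry pass could be as simple as:

```rust
use std::time::Duration;

const PING_TIMEOUT: Duration = Duration::from_millis(300); // assumed value

impl Discovery {
    fn check_expired(&mut self) {
        // These nodes were never added to a bucket, so expiring them only
        // drops the in-flight entry; no BucketEntry has to be touched.
        self.in_flight
            .retain(|_id, ping| ping.sent_at.elapsed() <= PING_TIMEOUT);
    }
}
```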

* discovery: Verify echo hash on pong packets.

Stores packet hash with in-flight requests and matches with pong response.
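
A minimal sketch of that matching step, with simplified placeholder types (`PingTracker`, `PacketHash`) rather than the real ones:

```rust
use std::collections::HashMap;

type NodeId = [u8; 32];
type PacketHash = [u8; 32];

#[derive(Default)]
struct PingTracker {
    // Hash of the PING packet we sent, keyed by the node we pinged.
    sent: HashMap<NodeId, PacketHash>,
}

impl PingTracker {
    fn record_ping(&mut self, id: NodeId, packet_hash: PacketHash) {
        self.sent.insert(id, packet_hash);
    }

    /// Accept a PONG only if it echoes the hash of the PING we actually sent,
    /// which proves the peer saw that exact packet.
    fn verify_pong(&mut self, id: NodeId, echoed: PacketHash) -> bool {
        match self.sent.remove(&id) {
            Some(expected) => expected == echoed,
            None => false,
        }
    }
}
```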

* discovery: Track timeouts on FIND_NODE requests.

* discovery: Retry failed pings with exponential backoff.

UDP packets may get dropped, so instead of immediately booting nodes that fail
to respond to a ping, retry 4 times with exponential backoff.
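
For instance, the backoff schedule can be kept as a fixed slice (cf. the fixup below); the durations here are made up for illustration:

```rust
use std::time::Duration;

// Illustrative schedule: 4 retries with doubling delays.
const REQUEST_BACKOFF: [Duration; 4] = [
    Duration::from_secs(1),
    Duration::from_secs(2),
    Duration::from_secs(4),
    Duration::from_secs(8),
];

/// Delay before the next retry, or None once all retries are used up
/// and the node should finally be evicted.
fn next_backoff(failed_attempts: usize) -> Option<Duration> {
    REQUEST_BACKOFF.get(failed_attempts).copied()
}

fn main() {
    for attempt in 0..5 {
        match next_backoff(attempt) {
            Some(delay) => println!("attempt {}: retry after {:?}", attempt, delay),
            None => println!("attempt {}: giving up, evicting node", attempt),
        }
    }
}
```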

* !fixup Use slice instead of Vec for request_backoff.

* Add separate database directory for light client (#8927) (#9064)

* Add separate default DB path for light client (#8927)

* Improve readability

* Revert "Replace `std::env::home_dir` with `dirs::home_dir` (#9077)" (#9097)

* Revert "Replace `std::env::home_dir` with `dirs::home_dir` (#9077)"

This reverts commit 7e779327eb.

* Restore some of the changes

* Update parity-common

* Offload cull to IoWorker. (#9099)

* Fix work-notify. (#9104)

* Update hidapi, fixes #7542 (#9108)

* docker: add cmake dependency (#9111)

* Update light client hardcoded headers (#9098)

* Insert Kovan hardcoded headers until #7690241

* Insert Kovan hardcoded headers until block 7690241

* Insert Ropsten hardcoded headers until #3612673

* Insert Mainnet hardcoded headers until block 5941249

* Make sure to produce full blocks. (#9115)

* Insert ETC (classic) hardcoded headers until block #6170625 (#9121)

* fix verification in ethcore-sync collect_blocks (#9135)

* Completely remove all dapps struct from rpc (#9107)

* Completely remove all dapps struct from rpc

* Remove unused pub use

* `evm bench` fix broken dependencies (#9134)

* `evm bench` use valid dependencies

Benchmarks of the `evm` used stale versions of a couple of crates; this commit
fixes that.

* fix warnings

* Update snapcraft.yaml (#9132)

Afri Schoedon, 2018-07-17 13:47:14 +02:00, committed by GitHub
parent 484ecfaf47
commit 6eae372524
47 changed files with 3947 additions and 461 deletions

Cargo.lock (generated)

@@ -351,20 +351,10 @@ name = "dir"
version = "0.1.1"
dependencies = [
"app_dirs 1.2.1 (git+https://github.com/paritytech/app-dirs-rs)",
-"dirs 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"journaldb 0.2.0",
]
-[[package]]
-name = "dirs"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
-"libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
-"winapi 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
name = "docopt"
version = "0.8.3"
@@ -1173,7 +1163,7 @@ dependencies = [
[[package]]
name = "hashdb"
version = "0.2.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1203,7 +1193,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "hidapi"
version = "0.3.1"
-source = "git+https://github.com/paritytech/hidapi-rs#70ec4bd1b755ec5dd32ad2be0c8345864147c8bc"
+source = "git+https://github.com/paritytech/hidapi-rs#d4d323767d6f27cf5a3d73fbae0b0f2134d579bf"
dependencies = [
"cc 1.0.17 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1437,7 +1427,7 @@ dependencies = [
[[package]]
name = "keccak-hash"
version = "0.1.2"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"tiny-keccak 1.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1465,7 +1455,7 @@ dependencies = [
[[package]]
name = "kvdb"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parity-bytes 0.1.0 (git+https://github.com/paritytech/parity-common)",
@@ -1474,7 +1464,7 @@ dependencies = [
[[package]]
name = "kvdb-memorydb"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"kvdb 0.1.0 (git+https://github.com/paritytech/parity-common)",
"parking_lot 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1483,7 +1473,7 @@ dependencies = [
[[package]]
name = "kvdb-rocksdb"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1654,7 +1644,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "memorydb"
version = "0.2.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hashdb 0.2.0 (git+https://github.com/paritytech/parity-common)",
@@ -1951,7 +1941,7 @@ dependencies = [
[[package]]
name = "parity-bytes"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
[[package]]
name = "parity-clib"
@@ -1963,7 +1953,7 @@ dependencies = [
[[package]]
name = "parity-crypto"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2328,15 +2318,12 @@ dependencies = [
[[package]]
name = "path"
version = "0.1.1"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
-dependencies = [
-"dirs 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
[[package]]
name = "patricia-trie"
version = "0.2.1"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"hashdb 0.2.0 (git+https://github.com/paritytech/parity-common)",
@@ -2411,7 +2398,7 @@ dependencies = [
[[package]]
name = "plain_hasher"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"crunchy 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2647,7 +2634,7 @@ dependencies = [
[[package]]
name = "rlp"
version = "0.2.1"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"byteorder 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3324,7 +3311,7 @@ dependencies = [
[[package]]
name = "trie-standardmap"
version = "0.1.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"keccak-hash 0.1.2 (git+https://github.com/paritytech/parity-common)",
@@ -3335,7 +3322,7 @@ dependencies = [
[[package]]
name = "triehash"
version = "0.2.0"
-source = "git+https://github.com/paritytech/parity-common#a72c34f82ff7ccc0f49827bb7f8c5d1fbff794bb"
+source = "git+https://github.com/paritytech/parity-common#5f05acd90cf173f2e1ce477665be2a40502fef42"
dependencies = [
"elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
"ethereum-types 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3668,7 +3655,6 @@ dependencies = [
"checksum custom_derive 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "ef8ae57c4978a2acd8b869ce6b9ca1dfe817bff704c220209fdef2c0b75a01b9"
"checksum daemonize 0.2.3 (git+https://github.com/paritytech/daemonize)" = "<none>"
"checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8"
-"checksum dirs 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "37a76dd8b997af7107d0bb69d43903cf37153a18266f8b3fdb9911f28efb5444"
"checksum docopt 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d8acd393692c503b168471874953a2531df0e9ab77d0b6bbc582395743300a4a"
"checksum dtoa 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)" = "09c3753c3db574d215cba4ea76018483895d7bff25a31b49ba45db21c48e50ab"
"checksum edit-distance 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a34f5204fbc13582de418611cf3a7dcdd07c6d312a5b631597ba72c06b9d9c9"


@@ -32,7 +32,7 @@ futures-cpupool = "0.1"
fdlimit = "0.1"
ctrlc = { git = "https://github.com/paritytech/rust-ctrlc.git" }
jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.11" }
-ethcore = { path = "ethcore", features = ["work-notify", "price-info", "stratum"] }
+ethcore = { path = "ethcore", features = ["parity"] }
parity-bytes = { git = "https://github.com/paritytech/parity-common" }
ethcore-io = { path = "util/io" }
ethcore-light = { path = "ethcore/light" }
@@ -88,6 +88,7 @@ winapi = { version = "0.3.4", features = ["winsock2", "winuser", "shellapi"] }
daemonize = { git = "https://github.com/paritytech/daemonize" }
[features]
+miner-debug = ["ethcore/miner-debug"]
json-tests = ["ethcore/json-tests"]
test-heavy = ["ethcore/test-heavy"]
evm-debug = ["ethcore/evm-debug"]


@@ -5,7 +5,7 @@ WORKDIR /build
# install tools and dependencies
RUN apk add --no-cache gcc musl-dev pkgconfig g++ make curl \
eudev-dev rust cargo git file binutils \
-libusb-dev linux-headers perl
+libusb-dev linux-headers perl cmake
# show backtraces
ENV RUST_BACKTRACE 1


@@ -3,7 +3,7 @@ WORKDIR /build
# install tools and dependencies
RUN yum -y update&& \
-yum install -y git make gcc-c++ gcc file binutils
+yum install -y git make gcc-c++ gcc file binutils cmake
# install rustup
RUN curl -sSf https://static.rust-lang.org/rustup.sh -o rustup.sh &&\


@@ -13,6 +13,7 @@ RUN apt-get update && \
# add-apt-repository
software-properties-common \
make \
+cmake \
curl \
wget \
git \


@@ -6,7 +6,7 @@ RUN apt-get -y update && \
apt-get install -y --force-yes --no-install-recommends \
curl git make g++ gcc-aarch64-linux-gnu g++-aarch64-linux-gnu \
libc6-arm64-cross libc6-dev-arm64-cross wget file ca-certificates \
-binutils-aarch64-linux-gnu \
+binutils-aarch64-linux-gnu cmake \
&& \
apt-get clean


@@ -6,7 +6,7 @@ RUN apt-get -y update && \
apt-get install -y --force-yes --no-install-recommends \
curl git make g++ gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf \
libc6-dev-armhf-cross wget file ca-certificates \
-binutils-arm-linux-gnueabihf \
+binutils-arm-linux-gnueabihf cmake \
&& \
apt-get clean


@@ -6,6 +6,7 @@ RUN apt-get update && \
apt-get install -y \
g++ \
build-essential \
+cmake \
curl \
git \
file \


@@ -80,6 +80,17 @@ tempdir = "0.3"
trie-standardmap = { git = "https://github.com/paritytech/parity-common" }
[features]
+parity = ["work-notify", "price-info", "stratum"]
+# Large optional features that are enabled by default for Parity,
+# but might be omitted for other dependent crates.
+work-notify = ["ethcore-miner/work-notify"]
+price-info = ["ethcore-miner/price-info"]
+stratum = ["ethcore-stratum"]
+# Disables seal verification for mined blocks.
+# This allows you to submit any seal via RPC to test and benchmark
+# how fast pending block get's created while running on the mainnet.
+miner-debug = []
# Display EVM debug traces.
evm-debug = ["evm/evm-debug"]
# Display EVM debug traces when running tests.
@@ -97,6 +108,3 @@ test-heavy = []
benches = []
# Compile test helpers
test-helpers = ["tempdir"]
-work-notify = ["ethcore-miner/work-notify"]
-price-info = ["ethcore-miner/price-info"]
-stratum = ["ethcore-stratum"]


@@ -16,17 +16,16 @@
#![feature(test)]
-extern crate test;
-extern crate ethcore_util as util;
-extern crate rand;
extern crate bn;
-extern crate parity_crypto;
+extern crate ethereum_types;
extern crate ethkey;
+extern crate parity_crypto;
+extern crate rand;
extern crate rustc_hex;
-extern crate ethcore_bigint;
+extern crate test;
-use self::test::{Bencher};
+use self::test::Bencher;
-use rand::{StdRng};
+use rand::StdRng;
#[bench]
fn bn_128_pairing(b: &mut Bencher) {
@@ -62,8 +61,7 @@ fn bn_128_mul(b: &mut Bencher) {
fn sha256(b: &mut Bencher) {
use parity_crypto::digest::sha256;
-let mut input: [u8; 256] = [0; 256];
-let mut out = [0; 32];
+let input = [0_u8; 256];
b.iter(|| {
sha256(&input);
@@ -74,7 +72,7 @@ fn sha256(b: &mut Bencher) {
fn ecrecover(b: &mut Bencher) {
use rustc_hex::FromHex;
use ethkey::{Signature, recover as ec_recover};
-use ethcore_bigint::hash::H256;
+use ethereum_types::H256;
let input = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap(); let input = FromHex::from_hex("47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad000000000000000000000000000000000000000000000000000000000000001b650acf9d3f5f0a2c799776a1254355d5f4061762a237396a99a0e0e3fc2bcd6729514a0dacb2e623ac4abd157cb18163ff942280db4d5caad66ddf941ba12e03").unwrap();
let hash = H256::from_slice(&input[0..32]);
let v = H256::from_slice(&input[32..64]);

File diff suppressed because it is too large


@@ -174,8 +174,8 @@
"stateRoot": "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
},
"hardcodedSync": {
"header": "f90207a0cdaf426b80edd4363b336b351cbfec8c38b44847cdbd814aa92e92bc9ec05333a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347949435d50503aee35c8757ae4933f7a0ab56597805a03f28b2b384dbfd29bc0a10343e8a419e61b92782f880046170bf1d11455e94bba0f3712ef3ff24efe1afc7da11ffb2ca495a94fe7958f42e9f1599d66ed72af13ba06d01d03a15f807da601bd2dfde7490bbe91d9bb11c11eda435db9daad6e7b1efb9010000008c0000c000000000440100108040000082000800000000000000040801001004001000000010000000001000043300001000008000800000002000200040000000580c0004108000040c0008000006000000000280800080000800000000402000000a000000a810226200002881004000208006020000000510030000100040010100000086c20000000009000100000190008c80060000008000202080420008056040000000001000400001100010140822800220000c804004002000108000160001400200088082008000000412100010080205011000000800a0000810021005000000000002840000000400000000880000006000000000200002870bfca554dbc6398358b001837a137083473c8e845b27f4fd86436f72746578a0dbf31fd28bd8f69f1103196e1782a9dfb636bcfa726362ab0767235cb8d56e7188264402501145497f", "header": "f9020ba0bb120488b73cb04a3c423dfa6760eb631165fa3d6d8e0b1be360d3e2a00add78a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479452e44f279f4203dcf680395379e5f9990a69f13ca02d2cbb3c43370257122898259f1e06da38fd23031f74b40d6bd022b037ecd3daa0107b3a01662ca77aa1c72cde45bd66c062d781310d7a364e5b6442bd791431cea011e451bfe7b89addb96020182e0e7eb448d0a66303924a2835a149247bea4188b90100000000200004820000130000020000322004002000140000801000081208000880800200100000000a080000000800400000000000080240800000020028a100000400410000001088008008400080000100000000200000000220804028000000302000000180200c004644000000000101800000040040020200100020100220200a00000000280002011040000000000080a00000002002048000100001000206000000c000002010000004800030000000000300884008121000208020080000020280000000010104002000004000002084000c08402820000004000001841109008410040410080080004121044080800800000000004858040000c000870c64944ccfd130835aa801837a212d8320dc6b845b452c758a7777772e62772e636f6da02078861f3b30aaea6fad290d86919dd7542433a56edc1af557426cbd2eacd60d88a68a26940894b23f",
"totalDifficulty": "4838383800145139949936", "totalDifficulty": "5282739680501645457616",
"CHTs": [ "CHTs": [
"0x0eb474b7721727204978e92e27d31cddff56471911e424a4c8271c35f9c982cc", "0x0eb474b7721727204978e92e27d31cddff56471911e424a4c8271c35f9c982cc",
"0xe10e94515fb5ffb7ffa9bf50db4a959b3f50c2ff75e0b8bd5f5e038749e52a11", "0xe10e94515fb5ffb7ffa9bf50db4a959b3f50c2ff75e0b8bd5f5e038749e52a11",
@ -3014,7 +3014,70 @@
"0x118b3ae7ad25ab96fe8a63973312c758d4ce9ecd39cc24913c26a65b4b5534de", "0x118b3ae7ad25ab96fe8a63973312c758d4ce9ecd39cc24913c26a65b4b5534de",
"0x19cd088af8dbe2d3e6ca7987d9ee1564ea2256f482840b1d2f0da85060de9a86", "0x19cd088af8dbe2d3e6ca7987d9ee1564ea2256f482840b1d2f0da85060de9a86",
"0x98bc07422cf8b0c4d1428afb759300d9a7637de2518528d34f7d237be7e863be", "0x98bc07422cf8b0c4d1428afb759300d9a7637de2518528d34f7d237be7e863be",
"0x0c64526b393066911c7da3f17f9e652cfa38112ae324e3c84416e811d3fe7cad" "0x0c64526b393066911c7da3f17f9e652cfa38112ae324e3c84416e811d3fe7cad",
"0xa30cbfaf518996ba776b426a7068faad4ee49775db45565ebd327f9c679a45b4",
"0xd3e1a807f5940ee1a321b20b7931bef90515132ea9959df94e55529e05802cab",
"0x338aef579d9ec8acc1a0411c1674bcf213d03aa7d4bbc56707e081829ce30004",
"0x68c7a603089a220273f019001a39bfa9194590a6fa6d8ba960ddf4888b105a6b",
"0xd6a9d2c354e1dd77322800d24774eb03b589dd94bcdb3cc2b70437ed70411e6a",
"0x39c017c42ad571564792bf5741b3ae786ff0c24ebcb5ee46882ce0545b8a2262",
"0xf5caea6b23f4085c9f94c880d89b1c23eb69c03dc098b426143ae4b28969a2d8",
"0x959eebc05ff0dfae8c7e6699f069b38b5f2e5bc8c155bc35fc7f578d2d112993",
"0x199e90557d4d9e13c3e7a4b5b4ef6fe52cd2c724c36eaa44b7fa151efebbbeee",
"0x0bfe35d253227696e76f92ff13e4c545c57fca51186a16f687e76d2e6707d34f",
"0xadf8b7678f98b0e5009130d9d5d77add6e460b67b0529abc5315c44dadea0cb4",
"0x86582f3a98b218939aefd7eea438ee278d1faaf41920e8c72922c46fd56f1c32",
"0x001b728a4737fdd53cde20341fa0adec9aa8ab7c7c1db244fbd509a6c4f3f364",
"0x9207900bfdd6c87e2ba8498c0372706799604a207930eeb331580459d17f89cb",
"0xd2192fcf74cad70e6f7986a0b088de8658a14638a4c03d7ae616a88ceea00ba2",
"0xbf6f6b91742eebe70204eb7a70196ef636fc2db4d4c4f89cd5826fbf990a945d",
"0x5c1210951949402fe3b012577f1af0d3e285a0b39c3fe19c84f7930e003c06de",
"0xa16d57f777f94f032f7f2f75b2e25ebd11559effee98b39a5a1e7cc804cfbf06",
"0x1b9561fb8035ec6955454d6710f053d7ad3d8e0753aaac568ac3bc98f874465e",
"0x1a622da786425e0b65b9083a451a419c75e16908fa04d89ddc2c11d94ffe65a0",
"0xbfcb9b1d847eb40b6808e45bf3d2fb8f6588d6f103167be65f246d0733afd1f7",
"0x2317640589ab9d52e7f5e8dda95ff3b1beceacc5832341e9053b71209bfad07f",
"0xa10611b829dbb533e565ad01632b26b1fd642a4393e7fdd9b8f235f11bb606c8",
"0xe4ea4173982f342e396356b0bb0eb47e6748461ecce0f34dbe8fc084cf6a9fbc",
"0xd58399c0d0ae878338d2915eaf2d65f2d1e29eb8d551d254e68bce7a8235adcb",
"0xc0c8b73ffc675a207f73903c49d81131e6831e4c8e071b988ed9f2a5d2277024",
"0xe1ca77bafa66bb055c671978b3d1bd5df32f8e269330507f071afd627012b6af",
"0x67a1093cbddf41264009d1dbbc33fbc25a337339be0727e70c512b585897749c",
"0x0fd615782db5cf4c0a3686721cbc6245696c1bc9b403a9eaffae00968d2c8ece",
"0xc3c2dcaee8954ca86d9b3e4e98c4bb4f6b6bc183f9eb062016c5a25b2280717b",
"0x70265915f5ed94d589afafa3d0d0eab6310195cc690aa82bd65e4a488b398c58",
"0xde3fead8ce29c04a86dcb081da2cabda5366d28de5ebfe2f8064780413f71edf",
"0x0ae43394fcf6ebdabdcc1ede314fe779eb61a12eab807a9d3437d9167e2247e3",
"0x3241127e2c7fbb3db9fe0c602b0c94e22c684b7910ffcf09c1c443e567f95e4d",
"0xcb94ba286eeaa1129b490bb5891e603fd35e85c97c2132c1216d1774f9017f35",
"0xbf396cd23c29ef21fb535880ef621457fb71981f856f2a09c494cd005d38f981",
"0x3df0c90ed7aba260e820ca1d28ce1778149e163524e306309aa346bbadedcf2b",
"0x66a2724f7481aa3ab83a8f1bae2caef8a7bc607c7ffd5bbb1cf3766741db804f",
"0x7a0c3492f4022322e10d81bc10a5db9aceec81d1ab70cdffd31418b79d750fd2",
"0x96a826cb667924ed75ec708bf07cf4c7c05f84a0132e154b71eaf6e193590e87",
"0xcc7030fe617c318a31984d04e3d7f2ff2196894bc429f3f64bfd69b969dc9b56",
"0x3fa94aae223f5aeb593246f1a93d6d694b48946c09d879e791f1188a9dedc4c0",
"0xd49e51fc324fe58159575c0c24171f4eb1aaf58ed8e1311c3849538c8cec3ce1",
"0xc474123906eae5cd48a10e0d93fcbbd653f0c6d25275f827d4fac51e696c3d91",
"0xacd7b790a19026fa3f3b0a354878d4fcd79dc600f7ba5cedd2eefffe1ceda76c",
"0x41b311a188cd7ce1444d258dc379994b81a895226276c7bfe5e6cb29a5f92142",
"0xda0ae01db7f73f07fce46b94c24d6e400598378a6baf01123dd710c5425fb8c9",
"0xe62e08175a02b575e28b9e029a838c365e3ce4278f60c2d3b5d529768a4b47c4",
"0x99b804c8d0feda7f9df961527d634fb8b2d477a362e1d8158856885a13425fa9",
"0xe73597be8ef7d78f862c7a94a8ccff17f559816eba2f830821c6f6436898f9fb",
"0x36de9ee4c80853c865b16904cd8f6c0e9a99ff9e7bf05100bfbc76789cedd4d1",
"0xf480b762872373102393461ff3a21323a1df799c315fd167780a45d7bfaae84e",
"0x303b66babd21e72449cad413e04bdb0bc3ebcbb84a79dd30ed7c972c5341b82b",
"0xbf111684fbe44a973594f31cdee2c94e807bff9cf7584c22dcd609d8234f6e62",
"0x79b26cc3bbf49b6f25afbff7e97e4e45f2dcb359095fdbeb7fb7addee692afc3",
"0x2839d620cc140ba838ecba6e7e52db8cf7b5cd4cf4857f72f3bfbc9b1cf0fbd9",
"0x93074136f4eec367adcf27955d38efc0dc6da514693bfc97935c7871793e35ea",
"0x21f5af18a4cf0096b6e6a3d4c98f4043cfee5c4ee085ce106f86b713160144b8",
"0x90d16b403e2deca6cd5c80e52eba0b84b2875e1dfd75fffb1a2f82bc91eb6942",
"0x8a5cb6854c19a865f51e3ee9eaf8e843a97b272f6467634ba40e547a435ef624",
"0x9afe42a0dffca8ec063c83908fd6237d6130c9dfeab57078bdd02b6ac6d0ea07",
"0xa05cc6108b475d3e68e280e98f514cfb6df4f004e1b7708fcfd4528d346bea6b",
"0x71f10879b875caefab46669e8525b9c0487bbe3247e43a6cdb1dedbfb4d4ba33"
]
},
"nodes": [


@@ -57,7 +57,7 @@
},
"hardcodedSync": {
"header": "f9023ea070413bfe3ceb9160c7dee87bf060a0cc5e324f7c539cfce4e78802ff805063b6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479400e4a10650e5a6d6001c38ff8e64f97016a1645ca0f8ac12c30b4fd0d27a1a50c090659014574b554ba6e9cdb76f57efbcfbd390a9a0b474ac6cc4673c17c5f511a8b43cc44dbb01bb028735830163667d7a3a2582b9a0bcd44b7c04fa24760df7d733ca8ecd99e8da89de0716e6017fffa434bfd7519ab901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090fffffffffffffffffffffffffffffffd83755801837a11f88301d8de845b27471c96d583010b038650617269747986312e32362e31826c698416c9d1c7b8418bc805f23fb01fdd498b37df5519f49691d65160fe6a6794b8106e2ecc4782407f0dae3a512546b7d93e89bbb2a761c750553deeea1f9401231f56ae0ccb059201", "header": "f9023ea070413bfe3ceb9160c7dee87bf060a0cc5e324f7c539cfce4e78802ff805063b6a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d493479400e4a10650e5a6d6001c38ff8e64f97016a1645ca0f8ac12c30b4fd0d27a1a50c090659014574b554ba6e9cdb76f57efbcfbd390a9a0b474ac6cc4673c17c5f511a8b43cc44dbb01bb028735830163667d7a3a2582b9a0bcd44b7c04fa24760df7d733ca8ecd99e8da89de0716e6017fffa434bfd7519ab901000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000090fffffffffffffffffffffffffffffffd83755801837a11f88301d8de845b27471c96d583010b038650617269747986312e32362e31826c698416c9d1c7b8418bc805f23fb01fdd498b37df5519f49691d65160fe6a6794b8106e2ecc4782407f0dae3a512546b7d93e89bbb2a761c750553deeea1f9401231f56ae0ccb059201",
"totalDifficulty": "2566410291882451733317698215999610733750372193", "totalDifficulty": "2654916374389120143910668097894183918476475680",
"CHTs": [ "CHTs": [
"0xdb9557458495268ddd69409fc1f66631ed5ff9bf6c479be6eabe5d83a460acac", "0xdb9557458495268ddd69409fc1f66631ed5ff9bf6c479be6eabe5d83a460acac",
"0xd413800c22172be6e0b7a36348c90098955991f119ddad32c5b928e8db4deb02", "0xd413800c22172be6e0b7a36348c90098955991f119ddad32c5b928e8db4deb02",
@ -3813,7 +3813,134 @@
"0x5d71aff2a12d4f1d05c7f760d07b417a39eb0eaa72a01333befc6a2eb6b7d72a", "0x5d71aff2a12d4f1d05c7f760d07b417a39eb0eaa72a01333befc6a2eb6b7d72a",
"0x8dd7de9195d2852aeb6812638ba22e73ff5ca0a8ad921c6e924cae1dd5952255", "0x8dd7de9195d2852aeb6812638ba22e73ff5ca0a8ad921c6e924cae1dd5952255",
"0x8f1828b4cdc6c38c112b1ffee7790953112dd2225ec82581a5095e5ae4d71cae", "0x8f1828b4cdc6c38c112b1ffee7790953112dd2225ec82581a5095e5ae4d71cae",
"0xfeac88ae6c8529e87a55a259f475b7d162d01e8fa5f36c90d4665dd6105b1743" "0xfeac88ae6c8529e87a55a259f475b7d162d01e8fa5f36c90d4665dd6105b1743",
"0x2e37011bd97c6e8a24e130fdc2c60c39b14ab3eb426a4f654bf3158a19aca88b",
"0xdb59b565de21902c50e2e204374ae1ce487656eb74145c103a86707b45a63eaf",
"0xf75c26a7214acf2d050ff5c7cc8b76e1a90540410b5c8b2bc9edbfe8fb2268e6",
"0x6ca7554f2abfd22951bec80f9d280abb6f060dde4a9a829ff7a0457d67a99edd",
"0x0324bceb8b61fa7092396764d7e1933697806c6d785446e3bbab3fc3be0ab259",
"0xeb4880f177e3e673f8ee04be1451a38bd8a2c0bac681d82a19327ae2d9769d32",
"0xec6a868cd9fba9e4f5b0d4276f44aee71056fbb7f425f717d0ce9d1fa5442ded",
"0xdc12b36d165eae197487ec930e35489545d2867b6fb9f8604d279337b6a8f949",
"0xcf1241b1c9b054df34638e99447bb0359aa01ad13c38650b872f2d727e6f68f6",
"0x713765e9b76c73c2de58c480600a7125972246fdcce2324993cd6bbe49cce67f",
"0x81e096e97dd8bf1d206d0ed41c9feaad24d344323ff74707112ee8fac218994e",
"0x8943c2246d5ae3e8db5fc012e9613642c6b713e5f2a89d00f09fa73246f88d5f",
"0x5b2d0bcbd893fab4e58d4ef698d1e8d3001799b61e758b7711f319e2b8eaa645",
"0xf3e4da2cf4579f52b7e4d632d93b79714487ef179a8f5d5c46af9154efad20fd",
"0xa2348ea2cd7a5a32779e9d292a9428fa475fae790f08e42f7699ef5eb2489188",
"0x62dd923966e02db7d0d27cfdd4aeac081f9827288c8b54d9f19035331a109f53",
"0xaa1aafa8f9d9d3e0668eff761fbfc2657d1e1906c077d93200ec000643c6c272",
"0xbf40cb21561989004434d6d908451d5b63045c89c2ac9b1eb617ec0054dc18be",
"0xdae53edbb07fe84623451da0e25da631ad3465e5bc12ef8fd8323c8a72f57130",
"0x685115b38f307f984c7c90e85d167d62b0ec2c924a0bca5f23b1cac12a8f72fa",
"0x2ad7e8f86c872504d7c2c48b0db141955d770acc84222fd725ffbd2dc4095b1d",
"0x9268b3b175aa6025b959b8fcdb416c39dd339e6f1fde3c427bd1bada36e4384c",
"0x502ee814cecc8454abb95591b0d07ed5170db94af7fda8878b9b4287fd68c9f8",
"0x6b19baf6b7eae36c0b1d2754645ff282fc2905802ca6394ac00bc22eb1582eb9",
"0xecad17ae39f897ab38850cac40480b4fafcafd6624f8fdfcaa69849f3fa101ec",
"0xb0804719b391b4b5f554a49f99df3a9d1c3a9884cefc268dcb27e3821aebe385",
"0x3018401c9a31f97881852c1bd65a964bfd3011659725c849a8de4b5ad8f26490",
"0x48a6b687e62a42dc44ecb56f4187e293d6f87d328b1edf409c8f2fa6568dfddc",
"0x8e3bc1ec926a68e22ed525485186a4d9160a54bd3e80107bb77c09911564effa",
"0xd7a7cce632e1746120476a3271ea09689380eb833d5538310fa0029f9174e0de",
"0x7bae074c51f3b547568d18e85b73fcb9b5f8040ed5f96f3523f53197150517a4",
"0x9ba39f376b9444dab04e0a52e4728ec842a0aa4880d9aa9819de3f0694f46e60",
"0x40a2b84bd3d05d28d51a39deae5f23b4f7370c70ac70d1cc81224eac4939d69f",
"0x2b9f57c8c43284ba929df8f896a966afbdd341f145b6b2b2fa98382950ad915d",
"0xcaf3d2a336cf17c9b2d7116a14e654bcece012b07bc020ac30feb71d7f6cead1",
"0xd3b68cf2337ca26a9c4bf6b6b286dc65bf66641d5e9c241f4c1b147994253ac5",
"0xefc11a5944a8061c87f274515e810fee13f4c350c625a988c27ed276b6c55b6d",
"0x7a93732151f7145059424aa823b24e26341cfa57f612e6de3bddbe23562ae918",
"0x2594b625d0f5ed52425ccda4e7898a8a554300791027af6f3a19239a15868ea8",
"0x1db00091145b1b0830983b5eaa5cd3d0e4ad71c09d1b2dc20c47815bc2de5917",
"0x60563bce11f028691cf78da7326f22a4ab01d980020e61bcf2e4bdb5912b7b1d",
"0xe6515bdf1f22469a4218f54791d12698d1bc555b3e54b04cf46b10effa8ce74c",
"0x990831a56958a6bf131697e2f35ab2a45fa228eb7435c7e65814ba28778d513f",
"0x1b6e4085f0e291a8ee4d7e90158dcf15702b4e6e634d1d3bb5c4bab11bf70068",
"0xd0d4a6061bcab0f8e645ea16b285eee7f2ff84c7765d7543aa318edbced2408d",
"0x2da1a609eb1b572a47f187dd5a1e9f4cb1e1885c841f91ca82137e01a9eb4288",
"0x1293686df427f9ee1c2116000735642b3d09511cf2889dde21e1bee427d8c273",
"0x8a990d66370eaab3a46d9e4fa9b7d0c621020cd3b897d9e5b3e5fea6a6979f3c",
"0x10030534d5a06bce47d72998ad4c042f5e445a505920723d14b1d93a3a23af82",
"0xf29a399e8879386e4c2bd8e873dc8aed612cfa8dfa9e5b56f9c51d4c4d1774ad",
"0xb252062ccbb11c3181338d9912ed0d4dabcfa4d61860c211f2f702a641fe936e",
"0x822c7abd11b80c862bd67d39e16a208c8462936ca86b5cd5dd20c51d35cbfbb1",
"0x6b62aea651f1407f94906db704a364f70d827e0efba981a8515d1f515a46b266",
"0xa57b7661c0471cbd7eb35becdf468622df8338a48a075b722ebc5550bbf6b9ae",
"0xefc593d38afead5ee1fea5d8b41a52bd2a5b5a059774d0b951bb7eadaa41a46a",
"0xa556e684c26e7fd6f902b82ecdf721fc292c35e0d2240c1f362c4861c366c4c6",
"0xc9f274efc308d1e97ba9b59c92735d1ab2a72d033ed02a543dab301610e96e33",
"0xd7b43559126f88c59392fc54e2b416d2a67014abce12ddf61764df989a897bd3",
"0xc860842132de3d8c1d0ba2cdc0a7c2853ee568789f1190738862667e3539a958",
"0x2f330354084635eb507cd54549ab89fecc41886d295b1d8c91efd8ca27fe4f7d",
"0xda6edc97997b9cea1bc90662c6a8180fa03cef6189e09c86f36adb91400abe74",
"0x0d2853b5a3f02ae9e4a1c5a0534a5329d2e877d4d77f60dddb36b84b4fdf7e2f",
"0xf048b5acc9e9191ce0ab8c390bfe03d89cbed91db5b9e7c7452b01bf56bcf5ac",
"0x30fb6f6093bf59794d37ddf850c315dd9491a4cf5df378b4468dca96acf78e77",
"0xa04eeaa9d1d767c0f93553af3a259390a9576e8b6015ffa8f0e4fdf37f41f28e",
"0x02411cd53ac55407e0d31520a8c3274a2f4d1fbb2541bb140f30f25843c76860",
"0x65bc790d632d5fe2446c65325df875ba59af3c3aa7357bbbbf47c7e5f7663a7d",
"0x5f364562fed351d932eff956c3ac489ea51ad2cb12a81bd8db4fe0b76e3cbb92",
"0x104f7070d5da1aca5c8b20a96d36638e4b5f8be4e86be83f0aed7234fcece445",
"0xb2fc7a73d8d859d531e51532928671ea59ae6538d4572fd5c2d76c70920aff7f",
"0x7ebecf446825dd0010fbf47afce8b9c3b3901c839ec46269eb8744bd799699ca",
"0xc64002b5a70f18b7c51ea3d3f5246fe8db16781e146d777aaa12f3b765d108e9",
"0x9d136068c7002d2780abfefcaedc79424d3e89b17718ec369d9e64ab7b63a81f",
"0x7b9819e62e92dcead89329bcb3f1a1d6bd10794d1a22d30c3d0369a264029543",
"0x8a8c9e66f343d09d5b7897c491c851143ca4f337ddcb5c2b1462b150e22c6f47",
"0x9d86faa1a5d355d6071e09c8cca50e2dd7e7dc117c2e4f6c0136a2789e84aa8b",
"0x234e156c10a80422ca1aafd49d614e02b698a0d89a7080f87db5dd5169b419bf",
"0xafbb572b8d9119ba8126fdd5a593663db3a3e6165a60f1f439902a6321a8d243",
"0xcbfeb8af8c93b11eaddb05df2f2a8772da694f9cc16878d3154d0857f17274ae",
"0x7253cf6bd2b4d158c2bd60acf5fda9dff6cbc6468ca9b549efcd824410d2e719",
"0x67fa14af20fe738ec5c9414401149a1f198207969768a5b982454146ef720b85",
"0x6c0ddef53c7d58839c3013f2accb72190b7dcc52fbe5d71f4a20f849dd72725f",
"0x89e77c0dd9fcb4b4d4d10c25903ff5b905f5d64b6bed9a0736e9dc2035ade3eb",
"0x42466344821fc29dcdb5fe7dcbda00b3779736ca90f87ca1802207f61633877b",
"0x6e2303e8ed41e6d39488c57dff8d50c548a7079a26c3fb86d925c0aca5b8f67b",
"0xb8a14969606442bd94048f2b2dc7b87662d801097537f51c8eb8f026c52f1fe9",
"0xd7ee25e0bfd080d89c475942575fa055ce5ae268a6f01b916b399f6ff7e94a39",
"0xaf5e7da13dc0ab08ba5185074657acaa2ce753a20a364f3a97d230c247bc8d1c",
"0x92f9106bc9ccbb4e0b4c0d1b96c5bf347d856884eca91a0bcf29e896789fd9c2",
"0x4bc12bfadacd6af0737646b813b1a4a9c005e97ef4b090bb74b7475b50c85dfe",
"0x91e4ef72d0bdef33e2e2c8acbdd7926182568f559397bfc086b96979e7c4f53b",
"0xb2f57e050021b238e0b47b0b46a641644645db5bbd987e8d957038655d1fa83a",
"0xaf221401f4cfa3db7879237b40c68300060df9f65dbf68bb02694837eaa8af7a",
"0x2dd280e98a0d6bc950b7f3f0bd11a9e74105d4d15463184a3f8c03d40f3193ef",
"0x2538560e5b802a89a021685f994acbdb3181f25d0cae17160aedfc619ca2954b",
"0x133cfa3f5d6a00f7392a6b0c6a97a9ba10ec50b24cfbbe6028995258cbaa065e",
"0xc120807872f07adeb144905ff323ea79880389a2815f8441befbfa82835555b2",
"0xa2c5c5a3cbf6db506dfc13285b07c24ca7e70d9e75d80fe711c3082ba119d77a",
"0x837622b7bbb8bb18b3d42058426ef97e3fbc2842d2b64b339733b52313557562",
"0x29eda2468556699747beef75f015e6772980056cf367b819d05a4822b7d54712",
"0xc6ec29323368dc29a1d4c3c11e945ef7fdb56db78808c4c9688ab5f9eb7f27cd",
"0x44b368ab63e1fe0e0da031a4e6628ed1a4fc69fa4083cc056dd9443400be3326",
"0xf153dbc5fc03a89e80d2808ecd45e9fe6453d112508d32ffc6305d5a577d14d8",
"0x32452c0d51df7a85eac3edec46bbfe376c971ac4df4512d7e050e6fa711373ca",
"0x70cb25f3d60126cf0104a40b2a49b44ef8a42f60b93f929594938ac00647b639",
"0xed63fb33d707b93e45e958d5d5f9d3f0898c3676e43eeca55c788cbb2df3170d",
"0x93ff103a026350b0e9c2b30c36e079145892e8e3756678e8ae4b0065fb6a04ec",
"0xa466cdf6063bfe155ed75c115484ce113227385eff2cdc07dd90405239842f3f",
"0x2883c296d9ce6b1e6e7b4f471e841437603ecbb867570cbc46d86f0f26871600",
"0xea5e5b84183a3a709a51c1c6f3e7b039c4663d7495b9bdcfbd4ecb0a95ea994c",
"0xc0e97318bd3ee2957c09d6acedac77ce6b219608e5dd63ef512aa08d63c3a114",
"0xd3ccc7fcd24076afdf6249f671de402b515a131913df2da17118be47b3720b33",
"0x4f5be5a6edd66ec496207b33cefd00722f3167b9e6f2a44d9613c4c7d6541aba",
"0xfecdd1844517d0523a7b45d7b51769728902b881b7fc308f486e850c870eef5b",
"0x01a22dea1d0e25885e0a03e56489bcaf6747e712ceb5a42c74430fa6ffcc1c21",
"0x06cc521a05ce856dc55041b90dc4bbafffee578c1c315b5cfa4cc2d1ccba891f",
"0xac21d4b08fbf2891e0b3645b870695434fe703101f74e7fcf0d0e1304ce65b54",
"0xd62584c47aa1d8554cc08cf675bed128e541df54900fbfae958877595ad168ff",
"0xd8f1806ddaee8e729218fea1911efc5e663666ffb3acad4a2fda3757700d6d88",
"0x9dab2acfe01506a185276145deeecdb5c8fe0937feacffd40fb25a83e8eecc72",
"0xf0b74c6b1a441fc2ee8b2f25d2f03307164014d42a0784683a9d6cb7d2179064",
"0x7d590f0bcc891e30996adf8583803a9dd1271442c3f0e69502addbd371437767",
"0x3a66601fff95b0aa0d0660c12788ad56d2383cae290ceb2fb9ff41794abbc55a",
"0x36e94b03402f18c689f5234973ce1e626a82aac085dbdd682b51cce21f8c1872",
"0x00abd1d34c7e55f58681866558cb844c11faa55e8cac70ede75811f55341cfde",
"0x9983fc20e63e77ec0680522035b03167403681674ec62293cd6b7fe360c69157",
"0xe98b658fb8b6b7fba7463562f86348bf1e3534bc9148e8559423b3ee5ab68472"
]
},
"accounts": {


@@ -52,9 +52,9 @@
"extraData": "0x3535353535353535353535353535353535353535353535353535353535353535",
"gasLimit": "0x1000000"
},
-"hardcodedSync": {
+"hardcodedSync":{
"header": "f90214a04d45aaeb1f0e00495b99f5fdc46c2c1e6b0fd48c693678de72afc1cb6f47a086a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794120c78af68df5957e776554d138a6b75f2c34b6ca0f2746b213421be4ec9cab40fa41aabfb4a3e8acccb6506d6cea4863774374dafa0225a348dfb2ef00db09da39b1a11b31741b6d14fd4456cdf4c2528961f398b74a09528322c1ce98449eed355ddabe0192fac920910a0d88965444b9efc1ac218eab901008000000000000000000000000000000000000000000000000000000000000000000000000000000000c0000000000000000000000000040000000000000000000000200000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000400000000000000100000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000100000000000000000400000000000000000000000000000000000000000000000000000000000000000000008426e4d4518334e0018389545c8306529d845b284db896d583010a068650617269747986312e32362e32826c69a066f990a9ad374c2cb0017e96d3776e6c787f679d77c9079fe2b046279453d0f88851bec8da753bda19", "header": "f90213a0f6a1b2e8155af1d1d77879826e2535cb6023ba35705934380ab05f65bcbfb107a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d4934794f3af96f89b3d7cdcbe0c083690a28185feb0b3cea015ca95dffe4c5de6d9c02d9282df0db94855b0d602738f4b6fcb2268694cd92aa07ecb0900077c45bd4d3ca910218099f726fea18461f90be18897710767a51559a0251f2cb798e965c5d9b11c882f37c69fd2c42b314fabe64d2b4998c76eb93ae8b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008439b475f9833720018389769d82f618845b45928b96d583010b038650617269747986312e32362e31826c69a0fbd0db05012df54423a6b25395ec4f6e66d9f11af8b9c492c4fb7197fcd6a5ba8877d4a227c2bdf4de",
"totalDifficulty": "8635599198809021", "totalDifficulty": "8809217991079619",
"CHTs": [ "CHTs": [
"0x614648fc0a459451850bdfe353a932b5ff824e1b568478394f78b3ed5427e37a", "0x614648fc0a459451850bdfe353a932b5ff824e1b568478394f78b3ed5427e37a",
"0x1eae561c582dbb7f4e041998e084e165d0332c915d3a6da367638a8d24f3fafc", "0x1eae561c582dbb7f4e041998e084e165d0332c915d3a6da367638a8d24f3fafc",
@ -1747,7 +1747,79 @@
"0x8baa0703e1a050c40f85dc850fe477881f432c951b1cc1b2b71ffb68ab7fe0d7", "0x8baa0703e1a050c40f85dc850fe477881f432c951b1cc1b2b71ffb68ab7fe0d7",
"0x14ca94dfd343548e32ef5659c043d6e28f0e577fd38da1ee12f11c08e281d775", "0x14ca94dfd343548e32ef5659c043d6e28f0e577fd38da1ee12f11c08e281d775",
"0xef25357970c76a8b72a6e52f49bc30651f711c7df70444d4667e80febc0e3b2a", "0xef25357970c76a8b72a6e52f49bc30651f711c7df70444d4667e80febc0e3b2a",
"0x41b8b4ebd5919dad3bc609ded524b97403d88f019367f0f4f622561131644ffb" "0x41b8b4ebd5919dad3bc609ded524b97403d88f019367f0f4f622561131644ffb",
"0xb29a8ad1157621d0120aedbd8dfeb4b318979bd43a5b018bf7b9ce33d85da312",
"0x9ddf78b5d67ef40454867ed33de83a01cbd8c18fe09da3d9f991a196811dfccd",
"0x3604121f9cfbb5cf552cf8bfc9a7958332eb97131158e4f40f4eda481e553991",
"0xf1778830f694720a6f990f9d476b0f365e8a74880253b55ee16f5cbd6c8082a4",
"0x89831626d154fbe84a4c62c3e2638cd00e42b3844c7c7b98cfad113abdbc5347",
"0x650573f5ef274b2aeb40642e25fbd661cb0eff66245d7cc8f6fb9e9daa80fc12",
"0x479f6c652173efe94abaa850bffe1557847b26f286467013a4d72973e05e8e54",
"0x7096619d5716c34592ac2d9907ac28a74c6f6b1ebb1962a0217df82bf3e714d4",
"0xb7c7edbd8ae7eed58e973dc0750adfd04042ed56baabf372b111fce3e4b4469a",
"0x25529b597bd15317e55767b3fbfecad0657aaeda99f098186b41b70811f7af2a",
"0xd790747b09f925fd155b7bbdb5ccd89d873277163e4fe7054bbf71c0b26b8072",
"0x3aa0b221d1c4743a06692f645f38a8128d55f1c07cfa6e9711b0d2e0f2e0e738",
"0x26fb5017218cbe4250d2ceae751e99b9a34d7befa162dc248ac008c5d1221e71",
"0x0e4ada59854027601f8f81fbdceb95228db667eb65fed97cefbb35dba21d3b52",
"0xd3be75ae2da3e271dd85cd8226d789aa12d108a4b0d2681462f6539637572e50",
"0x6f041891c8219b508138f67c95a1765d08c5ec06b9ce585f52f837806fce0609",
"0x9e106515d0e80b41397b2e8e98adbb7333e76265917339a62063a07d9a7ed311",
"0x25f47483ecc5ec32f94b3dbcb4d42a4cdbf5c279e93567e14afc742a4619d3e0",
"0x5d81afc54f6b68bd820dcc629ff7e9d8397da56ffedef9addab0eec620de0757",
"0x36e192130485f248925d4b0d2fd98745a76099eb13f53093621d216d0aff0d6c",
"0xb906e5e33b63f6cc355c13b8461a260ee25376ee96909fad2c6ac121ad831496",
"0xd68e7e0d136e30f67ccb7c21e4dc43b0ad5536584e4b77c9da8d903a04e9d212",
"0x5aa7847a4bddda7fbffa62325da920a21a11524ac1b691bc3b55e4d1790f24bc",
"0x331f95f062ed1d38dd02b5ab6a95dd238ef97c5ee9777c938697340c902b4b5d",
"0x8c6a580c8b07567f747bf20946023c3b581c51075cce5cd5d47b0d81d8922135",
"0x8b89483810a49626f90846a0b49ad3e2172657dfa3003d58fa2a43d12c8f4090",
"0x944f9a5754800a33d903c8a464d602fc9da6af8ff3990c3eb669ac9cd17891f9",
"0xc75c82a2c1ecd875d16f343a58835d756740902c246d3b1deae97e49aa19f98b",
"0x1d2e0f2f87ff2b08514b18855c343966d42e0f1a048ddd3d316dca6e06292db9",
"0xa6508507463e53b3a840dd55ed9be57c8a56e1533c9001276750bdf19796f8ea",
"0x01eb6b636b1852e8f9066c12d4e6b7b06b90a325be4d97e08f7f560bab4796a1",
"0x8a0d77fb41f50808ff0a46dc9f3831a2b5093f55ec2e94a3d2e92373ff3b5695",
"0xea3383ba1d30891d1e236db2b31373541f51a9e5c4b4d017cc4960480dd20311",
"0x01129e8d7eff516a225cc0db090e4e38362d9eb2d0571ce00f4417836de2e375",
"0x92feaefb7f9466814a0220e536fe5ee73560507d071e059827d406329e609f87",
"0xac149150c11b3bdc660320a0e955f154fa3137549a73951207659e2b903c145b",
"0xcb68cdb224f9b3b0b0f3ca0056e70817146c9ebc75876dd952e6ca8ea896f2ac",
"0x157565282a12d790452e343c9762c2124456039729f3a8f97a2cee60d85628fa",
"0x42eadc181d59d8d8b26b37e0e9c9052e45bde72090d330bf9cf21d9d3c7d9048",
"0x1ea0ec8879b200e259a3a2a0f2a7aa292301784fa422f7c32ed5d945183948b2",
"0x06aeb2956be9d74ae4ff0b8a6c1874ed8ba46a186616356dc060bea1cbe5c628",
"0x814b0382b52a155a4e35639aeb3d8c859afc4fe5d151de3b0f1bac646e40f2eb",
"0xb30bf3e85be41a2a9e53321ee9f03c7078516c72c7e2d8e7e3134de709b61c36",
"0x1f97f5d334b5e6ebc72f5b846f24c7911f4fd1653f89b3477ce4b8108342810f",
"0x84c6fd181c28ad159ff18d203d14f966668468c9ef0a5d6dbd863886a7e0af1e",
"0x4b2e6947d55ea504bf205bae9dfc0e5402efd33757eea4da00a8ed2a6a3838ae",
"0x85f31d45128bb91cd3490b58a0a641ef77246ea9c83de30fa89b621307fd96f3",
"0xd362f5e6f8cbb216e66eaf49e4df25e01504ac729da86c530871a34e11d302f6",
"0xb7860983b043bc13ce5a27135eea12ffaeff71879404b18af3079b98da156bf2",
"0xf2ff82a679b2b90cb9f4a3bb903eb7ab36ee1c47cbe40024d8d570f5e16bbf4e",
"0x7e34a7e6673146b6bb7f78593b6093ef15b8e9fd1271b33dc5f7d17876b31871",
"0x725c97f83b4cf213296ef353e1c8d64854ef08983fd61320088b8d9e2ab33849",
"0x18085800d10fc7845148835d0ef0ac980a82eeafc44e12bfa296f9c38fc6e19d",
"0xc6c3cf95310cfd0254f0f8e93a3c25bad2b17df04f9c51a25927b80d02e06b69",
"0x822213c1b03cf68ecadc0b7572d37266207d5fe4efd5e56a924b0a1aab8a8e84",
"0x1ff46ffd2dd880cca76244f6af1fd8bddbb4b9ec58f86639821a16f2ff08f3a8",
"0xe9d00df19d716dc859922f2e6c907263191c8e531498ea557869ea1115317c95",
"0x6d3f1edebd562e9d1a236ed7a1d9104fd8f5a086cd78d35c7a65f27c269d98ca",
"0xfea701ced5bca0d5043512700598d3eafa0b89dc02f3c157cd1d52bcf4d84d9b",
"0x556c1cd8ff3ebc2ccd4eee9f1ad3837e346ecda961da17c0ee9cd4d084a47653",
"0x5606be2fba065424af76c94d4156ea82f77d9872ddac7a4c2517957a169e58f9",
"0x8d0223425b48487db1b371c966c7688435f4b9fcda75b088f0aac203d6657cb1",
"0xfceb55d8f3048a3f2255562e0a9ee342439253abcd048fac151ef4b910048e22",
"0x360f76e4f2ef49632e3bf8cfc3afeccff6917e98a48d3568148c3bb13f9d2d7e",
"0xd87bbf8397204cc2af883362646b0ae95392303935ec1997ab052c194e0ef117",
"0x9f1dad9dfecaaf117ab5277caf672b70540578e703c2024d3f23bb7cf8d6410b",
"0x5e130ccb23b7b66dd2fbdd912d6006d2820071dafe2890f593f952028aaa19c0",
"0xccd2f182107992fb9b002b87cdf7990cb2810b202b2ae5d6ef5e0b3bd69632e2",
"0x4b40cd83205f8b946ca9f11fc3306872650e658e631511fd4080bc8ca749d913",
"0x652acc59b71ca20bb65ca195d1a4b3e177f6a3985bdcd6120e1a45b7d4a0c7ca",
"0x49a5e2580ceb329665244e489592aea27d54da8189a665d9435e037ea70c46a5",
"0x379801356beb3a8e5fa7311792c69c7ac1f675a9c08c837f9f0e9f53c243d6a7"
]
},
"nodes": [


@@ -94,6 +94,7 @@ impl ClientService {
let pruning = config.pruning;
let client = Client::new(config, &spec, blockchain_db.clone(), miner.clone(), io_service.channel())?;
+miner.set_io_channel(io_service.channel());
let snapshot_params = SnapServiceParams {
engine: spec.engine.clone(),


@@ -200,7 +200,7 @@ pub struct Client {
/// Flag changed by `sleep` and `wake_up` methods. Not to be confused with `enabled`.
liveness: AtomicBool,
-io_channel: Mutex<IoChannel<ClientIoMessage>>,
+io_channel: RwLock<IoChannel<ClientIoMessage>>,
/// List of actors to be notified on certain chain events
notify: RwLock<Vec<Weak<ChainNotify>>>,
@@ -761,7 +761,7 @@ impl Client {
db: RwLock::new(db.clone()),
state_db: RwLock::new(state_db),
report: RwLock::new(Default::default()),
-io_channel: Mutex::new(message_channel),
+io_channel: RwLock::new(message_channel),
notify: RwLock::new(Vec::new()),
queue_transactions: IoChannelQueue::new(config.transaction_verification_queue_size),
queue_ancient_blocks: IoChannelQueue::new(MAX_ANCIENT_BLOCKS_QUEUE_SIZE),
@@ -995,7 +995,7 @@ impl Client {
/// Replace io channel. Useful for testing.
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
-*self.io_channel.lock() = io_channel;
+*self.io_channel.write() = io_channel;
}
/// Get a copy of the best block's state.
@@ -2011,7 +2011,7 @@ impl IoClient for Client {
fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize) {
trace_time!("queue_transactions");
let len = transactions.len();
-self.queue_transactions.queue(&mut self.io_channel.lock(), len, move |client| {
+self.queue_transactions.queue(&self.io_channel.read(), len, move |client| {
trace_time!("import_queued_transactions");
let txs: Vec<UnverifiedTransaction> = transactions
@@ -2060,7 +2060,7 @@ impl IoClient for Client {
let queued = self.queued_ancient_blocks.clone();
let lock = self.ancient_blocks_import_lock.clone();
-match self.queue_ancient_blocks.queue(&mut self.io_channel.lock(), 1, move |client| {
+match self.queue_ancient_blocks.queue(&self.io_channel.read(), 1, move |client| {
trace_time!("import_ancient_block");
// Make sure to hold the lock here to prevent importing out of order.
// We use separate lock, cause we don't want to block queueing.
@@ -2092,7 +2092,7 @@ impl IoClient for Client {
}
fn queue_consensus_message(&self, message: Bytes) {
-match self.queue_consensus_message.queue(&mut self.io_channel.lock(), 1, move |client| {
+match self.queue_consensus_message.queue(&self.io_channel.read(), 1, move |client| {
if let Err(e) = client.engine().handle_message(&message) {
debug!(target: "poa", "Invalid message received: {}", e);
}
@@ -2202,7 +2202,14 @@ impl ImportSealedBlock for Client {
route
};
let route = ChainRoute::from([route].as_ref());
-self.importer.miner.chain_new_blocks(self, &[h.clone()], &[], route.enacted(), route.retracted(), self.engine.seals_internally().is_some());
+self.importer.miner.chain_new_blocks(
+self,
+&[h.clone()],
+&[],
+route.enacted(),
+route.retracted(),
+self.engine.seals_internally().is_some(),
+);
self.notify(|notify| {
notify.new_blocks(
vec![h.clone()],
@@ -2526,7 +2533,7 @@ impl IoChannelQueue {
}
}
-pub fn queue<F>(&self, channel: &mut IoChannel<ClientIoMessage>, count: usize, fun: F) -> Result<(), QueueError> where
+pub fn queue<F>(&self, channel: &IoChannel<ClientIoMessage>, count: usize, fun: F) -> Result<(), QueueError> where
F: Fn(&Client) + Send + Sync + 'static,
{
let queue_size = self.currently_queued.load(AtomicOrdering::Relaxed);


@ -280,11 +280,18 @@ impl Engine<EthereumMachine> for Arc<Ethash> {
block_reward::apply_block_rewards(&rewards, block, &self.machine) block_reward::apply_block_rewards(&rewards, block, &self.machine)
} }
#[cfg(not(feature = "miner-debug"))]
fn verify_local_seal(&self, header: &Header) -> Result<(), Error> { fn verify_local_seal(&self, header: &Header) -> Result<(), Error> {
self.verify_block_basic(header) self.verify_block_basic(header)
.and_then(|_| self.verify_block_unordered(header)) .and_then(|_| self.verify_block_unordered(header))
} }
#[cfg(feature = "miner-debug")]
fn verify_local_seal(&self, _header: &Header) -> Result<(), Error> {
warn!("Skipping seal verification, running in miner testing mode.");
Ok(())
}
fn verify_block_basic(&self, header: &Header) -> Result<(), Error> { fn verify_block_basic(&self, header: &Header) -> Result<(), Error> {
// check the seal fields. // check the seal fields.
let seal = Seal::parse_seal(header.seal())?; let seal = Seal::parse_seal(header.seal())?;
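The hunk above picks one of two `verify_local_seal` bodies at compile time via the `miner-debug` feature. A minimal standalone illustration of the cfg-gated-function pattern, with a hypothetical feature name `debug-skip`:

// Cargo.toml would need: [features] debug-skip = []
#[cfg(not(feature = "debug-skip"))]
fn verify(value: u32) -> Result<(), String> {
    if value % 2 == 0 { Ok(()) } else { Err("odd value".into()) }
}

// Compiled instead of the function above when the feature is enabled;
// both definitions share one name, so callers are unaffected.
#[cfg(feature = "debug-skip")]
fn verify(_value: u32) -> Result<(), String> {
    println!("skipping verification, debug feature enabled");
    Ok(())
}

fn main() {
    println!("{:?}", verify(4));
}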


@ -28,6 +28,7 @@ use ethcore_miner::pool::{self, TransactionQueue, VerifiedTransaction, QueueStat
#[cfg(feature = "work-notify")] #[cfg(feature = "work-notify")]
use ethcore_miner::work_notify::NotifyWork; use ethcore_miner::work_notify::NotifyWork;
use ethereum_types::{H256, U256, Address}; use ethereum_types::{H256, U256, Address};
use io::IoChannel;
use parking_lot::{Mutex, RwLock}; use parking_lot::{Mutex, RwLock};
use rayon::prelude::*; use rayon::prelude::*;
use transaction::{ use transaction::{
@ -44,7 +45,7 @@ use block::{ClosedBlock, IsBlock, Block, SealedBlock};
use client::{ use client::{
BlockChain, ChainInfo, CallContract, BlockProducer, SealedBlockImporter, Nonce BlockChain, ChainInfo, CallContract, BlockProducer, SealedBlockImporter, Nonce
}; };
use client::BlockId; use client::{BlockId, ClientIoMessage};
use executive::contract_address; use executive::contract_address;
use header::{Header, BlockNumber}; use header::{Header, BlockNumber};
use miner; use miner;
@ -96,7 +97,7 @@ const DEFAULT_MINIMAL_GAS_PRICE: u64 = 20_000_000_000;
/// before stopping attempts to push more transactions to the block. /// before stopping attempts to push more transactions to the block.
/// This is an optimization that prevents traversing the entire pool /// This is an optimization that prevents traversing the entire pool
/// in case we have only a fraction of available block gas limit left. /// in case we have only a fraction of available block gas limit left.
-const MAX_SKIPPED_TRANSACTIONS: usize = 8;
+const MAX_SKIPPED_TRANSACTIONS: usize = 128;
/// Configures the behaviour of the miner. /// Configures the behaviour of the miner.
#[derive(Debug, PartialEq)] #[derive(Debug, PartialEq)]
@ -211,6 +212,7 @@ pub struct Miner {
transaction_queue: Arc<TransactionQueue>, transaction_queue: Arc<TransactionQueue>,
engine: Arc<EthEngine>, engine: Arc<EthEngine>,
accounts: Option<Arc<AccountProvider>>, accounts: Option<Arc<AccountProvider>>,
io_channel: RwLock<Option<IoChannel<ClientIoMessage>>>,
} }
impl Miner { impl Miner {
@ -227,7 +229,12 @@ impl Miner {
} }
/// Creates new instance of miner Arc. /// Creates new instance of miner Arc.
-pub fn new(options: MinerOptions, gas_pricer: GasPricer, spec: &Spec, accounts: Option<Arc<AccountProvider>>) -> Self {
+pub fn new(
options: MinerOptions,
gas_pricer: GasPricer,
spec: &Spec,
accounts: Option<Arc<AccountProvider>>,
) -> Self {
let limits = options.pool_limits.clone(); let limits = options.pool_limits.clone();
let verifier_options = options.pool_verification_options.clone(); let verifier_options = options.pool_verification_options.clone();
let tx_queue_strategy = options.tx_queue_strategy; let tx_queue_strategy = options.tx_queue_strategy;
@ -251,6 +258,7 @@ impl Miner {
transaction_queue: Arc::new(TransactionQueue::new(limits, verifier_options, tx_queue_strategy)), transaction_queue: Arc::new(TransactionQueue::new(limits, verifier_options, tx_queue_strategy)),
accounts, accounts,
engine: spec.engine.clone(), engine: spec.engine.clone(),
io_channel: RwLock::new(None),
} }
} }
@ -270,6 +278,11 @@ impl Miner {
}, GasPricer::new_fixed(minimal_gas_price), spec, accounts) }, GasPricer::new_fixed(minimal_gas_price), spec, accounts)
} }
/// Sets `IoChannel`
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
*self.io_channel.write() = Some(io_channel);
}
/// Clear all pending block states /// Clear all pending block states
pub fn clear(&self) { pub fn clear(&self) {
self.sealing.lock().queue.reset(); self.sealing.lock().queue.reset();
@ -384,7 +397,7 @@ impl Miner {
let max_transactions = if min_tx_gas.is_zero() { let max_transactions = if min_tx_gas.is_zero() {
usize::max_value() usize::max_value()
} else { } else {
-(*open_block.block().header().gas_limit() / min_tx_gas).as_u64() as usize
+MAX_SKIPPED_TRANSACTIONS.saturating_add((*open_block.block().header().gas_limit() / min_tx_gas).as_u64() as usize)
}; };
let pending: Vec<Arc<_>> = self.transaction_queue.pending( let pending: Vec<Arc<_>> = self.transaction_queue.pending(
@ -1176,9 +1189,34 @@ impl miner::MinerService for Miner {
// (thanks to Ready), but culling can take significant amount of time, // (thanks to Ready), but culling can take significant amount of time,
// so best to leave it after we create some work for miners to prevent increased // so best to leave it after we create some work for miners to prevent increased
// uncle rate. // uncle rate.
// If the io_channel is available attempt to offload culling to a separate task
// to avoid blocking chain_new_blocks
if let Some(ref channel) = *self.io_channel.read() {
let queue = self.transaction_queue.clone();
let nonce_cache = self.nonce_cache.clone();
let engine = self.engine.clone();
let accounts = self.accounts.clone();
let refuse_service_transactions = self.options.refuse_service_transactions;
let cull = move |chain: &::client::Client| {
let client = PoolClient::new(
chain,
&nonce_cache,
&*engine,
accounts.as_ref().map(|x| &**x),
refuse_service_transactions,
);
queue.cull(client);
};
if let Err(e) = channel.send(ClientIoMessage::execute(cull)) {
warn!(target: "miner", "Error queueing cull: {:?}", e);
}
} else {
self.transaction_queue.cull(client); self.transaction_queue.cull(client);
} }
} }
}
fn pending_state(&self, latest_block_number: BlockNumber) -> Option<Self::State> { fn pending_state(&self, latest_block_number: BlockNumber) -> Option<Self::State> {
self.map_existing_pending_block(|b| b.state().clone(), latest_block_number) self.map_existing_pending_block(|b| b.state().clone(), latest_block_number)
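In the cull hunk above, everything the cull needs is cloned into a closure and sent to the IO worker through the registered `IoChannel`, with an inline cull as the fallback when no channel has been set. A rough standalone sketch of the same offloading shape using a plain thread and mpsc channel (names are illustrative, not the Parity API):

use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;

struct Pool;
impl Pool {
    fn cull(&self) { println!("culled stale transactions"); }
}

fn main() {
    let pool = Arc::new(Pool);
    let (tx, rx) = channel::<Box<dyn FnOnce() + Send>>();

    // Worker standing in for the IO service: runs whatever jobs get queued.
    let worker = thread::spawn(move || {
        for job in rx {
            job();
        }
    });

    // Clone the shared state into the closure so the caller returns immediately
    // instead of blocking its new-block handling on the cull.
    let queue = pool.clone();
    let cull = move || queue.cull();
    if tx.send(Box::new(cull)).is_err() {
        // No worker available: fall back to culling inline, as the diff does.
        pool.cull();
    }

    drop(tx); // close the channel so the worker loop ends
    worker.join().unwrap();
}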


@ -16,8 +16,11 @@
//! Blockchain access for transaction pool. //! Blockchain access for transaction pool.
-use std::fmt;
-use std::collections::HashMap;
+use std::{
+collections::HashMap,
fmt,
sync::Arc,
};
use ethereum_types::{H256, U256, Address}; use ethereum_types::{H256, U256, Address};
use ethcore_miner::pool; use ethcore_miner::pool;
@ -37,9 +40,9 @@ use miner;
use miner::service_transaction_checker::ServiceTransactionChecker; use miner::service_transaction_checker::ServiceTransactionChecker;
/// Cache for state nonces. /// Cache for state nonces.
-#[derive(Debug)]
+#[derive(Debug, Clone)]
pub struct NonceCache { pub struct NonceCache {
-nonces: RwLock<HashMap<Address, U256>>,
+nonces: Arc<RwLock<HashMap<Address, U256>>>,
limit: usize limit: usize
} }
@ -47,7 +50,7 @@ impl NonceCache {
/// Create new cache with a limit of `limit` entries. /// Create new cache with a limit of `limit` entries.
pub fn new(limit: usize) -> Self { pub fn new(limit: usize) -> Self {
NonceCache { NonceCache {
-nonces: RwLock::new(HashMap::with_capacity(limit / 2)),
+nonces: Arc::new(RwLock::new(HashMap::with_capacity(limit / 2))),
limit, limit,
} }
} }
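Wrapping the nonce map in `Arc<RwLock<...>>` is what lets `NonceCache` derive `Clone` and travel into the cull closure in the miner hunk above while every clone still reads and writes the same entries. A small standalone demonstration of that sharing, using `std::sync::RwLock` rather than parking_lot:

use std::collections::HashMap;
use std::sync::{Arc, RwLock};

#[derive(Clone)]
struct Cache {
    // Clones copy the Arc, not the map, so all clones share one underlying HashMap.
    entries: Arc<RwLock<HashMap<String, u64>>>,
}

impl Cache {
    fn new() -> Self {
        Cache { entries: Arc::new(RwLock::new(HashMap::new())) }
    }
}

fn main() {
    let a = Cache::new();
    let b = a.clone();
    a.entries.write().unwrap().insert("0xdeadbeef".to_string(), 7);
    // The clone observes the insert made through the original.
    assert_eq!(b.entries.read().unwrap().get("0xdeadbeef"), Some(&7));
    println!("shared entries: {}", b.entries.read().unwrap().len());
}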


@ -477,18 +477,19 @@ impl BlockDownloader {
for block_and_receipts in blocks { for block_and_receipts in blocks {
let block = block_and_receipts.block; let block = block_and_receipts.block;
let receipts = block_and_receipts.receipts; let receipts = block_and_receipts.receipts;
// Perform basic block verification
if !Block::is_good(&block) {
debug!(target: "sync", "Bad block rlp: {:?}", block);
bad = true;
break;
}
let (h, number, parent) = { let (h, number, parent) = {
let header = view!(BlockView, &block).header_view(); let header = view!(BlockView, &block).header_view();
(header.hash(), header.number(), header.parent_hash()) (header.hash(), header.number(), header.parent_hash())
}; };
// Perform basic block verification
if !Block::is_good(&block) {
debug!(target: "sync", "Bad block rlp {:?} : {:?}", h, block);
bad = true;
break;
}
if self.target_hash.as_ref().map_or(false, |t| t == &h) { if self.target_hash.as_ref().map_or(false, |t| t == &h) {
self.state = State::Complete; self.state = State::Complete;
trace!(target: "sync", "Sync target reached"); trace!(target: "sync", "Sync target reached");


@ -939,7 +939,13 @@ impl Configuration {
let is_using_base_path = self.args.arg_base_path.is_some(); let is_using_base_path = self.args.arg_base_path.is_some();
// If base_path is set and db_path is not we default to base path subdir instead of LOCAL. // If base_path is set and db_path is not we default to base path subdir instead of LOCAL.
let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() { let base_db_path = if is_using_base_path && self.args.arg_db_path.is_none() {
if self.args.flag_light {
"$BASE/chains_light"
} else {
"$BASE/chains" "$BASE/chains"
}
} else if self.args.flag_light {
self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH_LIGHT, |s| &s)
} else { } else {
self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s) self.args.arg_db_path.as_ref().map_or(dir::CHAINS_PATH, |s| &s)
}; };


@ -20,7 +20,6 @@ use std::str::FromStr;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
pub use parity_rpc::signer::SignerService; pub use parity_rpc::signer::SignerService;
pub use parity_rpc::dapps::LocalDapp;
use ethcore_service::PrivateTxService; use ethcore_service::PrivateTxService;
use ethcore::account_provider::AccountProvider; use ethcore::account_provider::AccountProvider;


@ -33,7 +33,6 @@ use ethcore_logger::{Config as LogConfig, RotatingLogger};
use ethcore_service::ClientService; use ethcore_service::ClientService;
use ethereum_types::Address; use ethereum_types::Address;
use sync::{self, SyncConfig}; use sync::{self, SyncConfig};
#[cfg(feature = "work-notify")]
use miner::work_notify::WorkPoster; use miner::work_notify::WorkPoster;
use futures::IntoFuture; use futures::IntoFuture;
use futures_cpupool::CpuPool; use futures_cpupool::CpuPool;
@ -504,20 +503,18 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
cmd.miner_options, cmd.miner_options,
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), cpu_pool.clone()), cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), cpu_pool.clone()),
&spec, &spec,
Some(account_provider.clone()) Some(account_provider.clone()),
)); ));
miner.set_author(cmd.miner_extras.author, None).expect("Fails only if password is Some; password is None; qed"); miner.set_author(cmd.miner_extras.author, None).expect("Fails only if password is Some; password is None; qed");
miner.set_gas_range_target(cmd.miner_extras.gas_range_target); miner.set_gas_range_target(cmd.miner_extras.gas_range_target);
miner.set_extra_data(cmd.miner_extras.extra_data); miner.set_extra_data(cmd.miner_extras.extra_data);
#[cfg(feature = "work-notify")]
{
if !cmd.miner_extras.work_notify.is_empty() { if !cmd.miner_extras.work_notify.is_empty() {
miner.add_work_listener(Box::new( miner.add_work_listener(Box::new(
WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), event_loop.remote()) WorkPoster::new(&cmd.miner_extras.work_notify, fetch.clone(), event_loop.remote())
)); ));
} }
}
let engine_signer = cmd.miner_extras.engine_signer; let engine_signer = cmd.miner_extras.engine_signer;
if engine_signer != Default::default() { if engine_signer != Default::default() {


@ -17,12 +17,13 @@
//! Parity upgrade logic //! Parity upgrade logic
use semver::{Version, SemVerError}; use semver::{Version, SemVerError};
-use std::collections::HashMap;
+use std::collections::*;
use std::fs::{self, File, create_dir_all}; use std::fs::{self, File, create_dir_all};
use std::env;
use std::io; use std::io;
use std::io::{Read, Write}; use std::io::{Read, Write};
use std::path::{PathBuf, Path}; use std::path::{PathBuf, Path};
-use dir::{DatabaseDirectories, default_data_path, home_dir};
+use dir::{DatabaseDirectories, default_data_path};
use dir::helpers::replace_home; use dir::helpers::replace_home;
use journaldb::Algorithm; use journaldb::Algorithm;
@ -105,7 +106,7 @@ fn with_locked_version<F>(db_path: Option<&str>, script: F) -> Result<usize, Err
where F: Fn(&Version) -> Result<usize, Error> where F: Fn(&Version) -> Result<usize, Error>
{ {
let mut path = db_path.map_or({ let mut path = db_path.map_or({
-let mut path = home_dir().expect("Applications should have a home dir");
+let mut path = env::home_dir().expect("Applications should have a home dir");
path.push(".parity"); path.push(".parity");
path path
}, PathBuf::from); }, PathBuf::from);


@ -118,7 +118,7 @@ pub use http::{
AccessControlAllowOrigin, Host, DomainsValidation AccessControlAllowOrigin, Host, DomainsValidation
}; };
-pub use v1::{NetworkSettings, Metadata, Origin, informant, dispatch, signer, dapps};
+pub use v1::{NetworkSettings, Metadata, Origin, informant, dispatch, signer};
pub use v1::block_import::is_major_importing; pub use v1::block_import::is_major_importing;
pub use v1::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher}; pub use v1::extractors::{RpcExtractor, WsExtractor, WsStats, WsDispatcher};
pub use authcodes::{AuthCodes, TimeProvider}; pub use authcodes::{AuthCodes, TimeProvider};


@ -1,27 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Dapps Service
use v1::types::LocalDapp;
/// Dapps Server service.
pub trait DappsService: Send + Sync + 'static {
/// List available local dapps.
fn list_dapps(&self) -> Vec<LocalDapp>;
/// Refresh local dapps list
fn refresh_local_dapps(&self) -> bool;
}


@ -211,14 +211,6 @@ pub fn signer_disabled() -> Error {
} }
} }
pub fn dapps_disabled() -> Error {
Error {
code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST),
message: "Dapps Server is disabled. This API is not available.".into(),
data: None,
}
}
pub fn ws_disabled() -> Error { pub fn ws_disabled() -> Error {
Error { Error {
code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST), code: ErrorCode::ServerError(codes::UNSUPPORTED_REQUEST),


@ -18,7 +18,6 @@
pub mod errors; pub mod errors;
pub mod block_import; pub mod block_import;
pub mod dapps;
pub mod dispatch; pub mod dispatch;
pub mod fake_sign; pub mod fake_sign;
pub mod ipfs; pub mod ipfs;


@ -326,10 +326,6 @@ impl Parity for ParityClient {
Ok(map) Ok(map)
} }
fn dapps_url(&self) -> Result<String> {
Err(errors::dapps_disabled())
}
fn ws_url(&self) -> Result<String> { fn ws_url(&self) -> Result<String> {
helpers::to_url(&self.ws_address) helpers::to_url(&self.ws_address)
.ok_or_else(|| errors::ws_disabled()) .ok_or_else(|| errors::ws_disabled())


@ -29,7 +29,7 @@ use jsonrpc_core::{Result, BoxFuture};
use jsonrpc_core::futures::Future; use jsonrpc_core::futures::Future;
use v1::helpers::errors; use v1::helpers::errors;
use v1::traits::ParitySet; use v1::traits::ParitySet;
-use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp};
+use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction};
/// Parity-specific rpc interface for operations altering the settings. /// Parity-specific rpc interface for operations altering the settings.
pub struct ParitySetClient<F> { pub struct ParitySetClient<F> {
@ -137,14 +137,6 @@ impl<F: Fetch> ParitySet for ParitySetClient<F> {
Box::new(self.pool.spawn(future)) Box::new(self.pool.spawn(future))
} }
fn dapps_refresh(&self) -> Result<bool> {
Err(errors::dapps_disabled())
}
fn dapps_list(&self) -> Result<Vec<LocalDapp>> {
Err(errors::dapps_disabled())
}
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> { fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {
Err(errors::light_unimplemented(None)) Err(errors::light_unimplemented(None))
} }


@ -347,10 +347,6 @@ impl<C, M, U, S> Parity for ParityClient<C, M, U> where
) )
} }
fn dapps_url(&self) -> Result<String> {
Err(errors::dapps_disabled())
}
fn ws_url(&self) -> Result<String> { fn ws_url(&self) -> Result<String> {
helpers::to_url(&self.ws_address) helpers::to_url(&self.ws_address)
.ok_or_else(|| errors::ws_disabled()) .ok_or_else(|| errors::ws_disabled())


@ -31,7 +31,7 @@ use jsonrpc_core::{BoxFuture, Result};
use jsonrpc_core::futures::Future; use jsonrpc_core::futures::Future;
use v1::helpers::errors; use v1::helpers::errors;
use v1::traits::ParitySet; use v1::traits::ParitySet;
-use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp};
+use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction};
/// Parity-specific rpc interface for operations altering the settings. /// Parity-specific rpc interface for operations altering the settings.
pub struct ParitySetClient<C, M, U, F = fetch::Client> { pub struct ParitySetClient<C, M, U, F = fetch::Client> {
@ -182,14 +182,6 @@ impl<C, M, U, F> ParitySet for ParitySetClient<C, M, U, F> where
Box::new(self.pool.spawn(future)) Box::new(self.pool.spawn(future))
} }
fn dapps_refresh(&self) -> Result<bool> {
Err(errors::dapps_disabled())
}
fn dapps_list(&self) -> Result<Vec<LocalDapp>> {
Err(errors::dapps_disabled())
}
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> { fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>> {
Ok(self.updater.upgrade_ready().map(Into::into)) Ok(self.updater.upgrade_ready().map(Into::into))
} }


@ -53,9 +53,3 @@ pub mod signer {
pub use super::helpers::{SigningQueue, SignerService, ConfirmationsQueue}; pub use super::helpers::{SigningQueue, SignerService, ConfirmationsQueue};
pub use super::types::{ConfirmationRequest, TransactionModification, U256, TransactionCondition}; pub use super::types::{ConfirmationRequest, TransactionModification, U256, TransactionCondition};
} }
/// Dapps integration utilities
pub mod dapps {
pub use super::helpers::dapps::DappsService;
pub use super::types::LocalDapp;
}


@ -425,20 +425,6 @@ fn rpc_parity_ws_address() {
assert_eq!(io2.handle_request_sync(request), Some(response2.to_owned())); assert_eq!(io2.handle_request_sync(request), Some(response2.to_owned()));
} }
#[test]
fn rpc_parity_dapps_address() {
// given
let deps = Dependencies::new();
let io1 = deps.default_client();
// when
let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsUrl", "params": [], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Dapps Server is disabled. This API is not available."},"id":1}"#;
// then
assert_eq!(io1.handle_request_sync(request), Some(response.to_owned()));
}
#[test] #[test]
fn rpc_parity_next_nonce() { fn rpc_parity_next_nonce() {
let deps = Dependencies::new(); let deps = Dependencies::new();


@ -238,18 +238,3 @@ fn rpc_parity_remove_transaction() {
miner.pending_transactions.lock().insert(hash, signed); miner.pending_transactions.lock().insert(hash, signed);
assert_eq!(io.handle_request_sync(&request), Some(response.to_owned())); assert_eq!(io.handle_request_sync(&request), Some(response.to_owned()));
} }
#[test]
fn rpc_parity_set_dapps_list() {
let miner = miner_service();
let client = client_service();
let network = network_service();
let updater = updater_service();
let mut io = IoHandler::new();
io.extend_with(parity_set_client(&client, &miner, &updater, &network).to_delegate());
let request = r#"{"jsonrpc": "2.0", "method": "parity_dappsList", "params":[], "id": 1}"#;
let response = r#"{"jsonrpc":"2.0","error":{"code":-32000,"message":"Dapps Server is disabled. This API is not available."},"id":1}"#;
assert_eq!(io.handle_request_sync(request), Some(response.to_owned()));
}


@ -161,11 +161,6 @@ build_rpc_trait! {
#[rpc(name = "parity_localTransactions")] #[rpc(name = "parity_localTransactions")]
fn local_transactions(&self) -> Result<BTreeMap<H256, LocalTransactionStatus>>; fn local_transactions(&self) -> Result<BTreeMap<H256, LocalTransactionStatus>>;
/// Returns current Dapps Server interface and port or an error if dapps server is disabled.
/// (deprecated, should always return an error now).
#[rpc(name = "parity_dappsUrl")]
fn dapps_url(&self) -> Result<String>;
/// Returns current WS Server interface and port or an error if ws server is disabled. /// Returns current WS Server interface and port or an error if ws server is disabled.
#[rpc(name = "parity_wsUrl")] #[rpc(name = "parity_wsUrl")]
fn ws_url(&self) -> Result<String>; fn ws_url(&self) -> Result<String>;


@ -18,7 +18,7 @@
use jsonrpc_core::{BoxFuture, Result}; use jsonrpc_core::{BoxFuture, Result};
-use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction, LocalDapp};
+use v1::types::{Bytes, H160, H256, U256, ReleaseInfo, Transaction};
build_rpc_trait! { build_rpc_trait! {
/// Parity-specific rpc interface for operations altering the settings. /// Parity-specific rpc interface for operations altering the settings.
@ -95,15 +95,6 @@ build_rpc_trait! {
#[rpc(name = "parity_hashContent")] #[rpc(name = "parity_hashContent")]
fn hash_content(&self, String) -> BoxFuture<H256>; fn hash_content(&self, String) -> BoxFuture<H256>;
/// Returns true if refresh successful, error if unsuccessful or server is disabled
/// (deprecated, should always return an error now).
#[rpc(name = "parity_dappsRefresh")]
fn dapps_refresh(&self) -> Result<bool>;
/// Returns a list of local dapps (deprecated, should always return an error now).
#[rpc(name = "parity_dappsList")]
fn dapps_list(&self) -> Result<Vec<LocalDapp>>;
/// Is there a release ready for install? /// Is there a release ready for install?
#[rpc(name = "parity_upgradeReady")] #[rpc(name = "parity_upgradeReady")]
fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>>; fn upgrade_ready(&self) -> Result<Option<ReleaseInfo>>;


@ -1,61 +0,0 @@
// Copyright 2015-2018 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// Local Dapp
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
#[serde(deny_unknown_fields)]
pub struct LocalDapp {
/// ID of local dapp
pub id: String,
/// Dapp name
pub name: String,
/// Dapp description
pub description: String,
/// Dapp version string
pub version: String,
/// Dapp author
pub author: String,
/// Dapp icon
#[serde(rename="iconUrl")]
pub icon_url: String,
/// Local development Url
#[serde(rename="localUrl")]
pub local_url: Option<String>,
}
#[cfg(test)]
mod tests {
use serde_json;
use super::LocalDapp;
#[test]
fn dapp_serialization() {
let s = r#"{"id":"skeleton","name":"Skeleton","description":"A skeleton dapp","version":"0.1","author":"Parity Technologies Ltd","iconUrl":"title.png","localUrl":"http://localhost:5000"}"#;
let dapp = LocalDapp {
id: "skeleton".into(),
name: "Skeleton".into(),
description: "A skeleton dapp".into(),
version: "0.1".into(),
author: "Parity Technologies Ltd".into(),
icon_url: "title.png".into(),
local_url: Some("http://localhost:5000".into()),
};
let serialized = serde_json::to_string(&dapp).unwrap();
assert_eq!(serialized, s);
}
}


@ -23,7 +23,6 @@ mod bytes;
mod call_request; mod call_request;
mod confirmations; mod confirmations;
mod consensus_status; mod consensus_status;
mod dapps;
mod derivation; mod derivation;
mod filter; mod filter;
mod hash; mod hash;
@ -57,7 +56,6 @@ pub use self::confirmations::{
TransactionModification, SignRequest, DecryptRequest, Either TransactionModification, SignRequest, DecryptRequest, Either
}; };
pub use self::consensus_status::*; pub use self::consensus_status::*;
pub use self::dapps::LocalDapp;
pub use self::derivation::{DeriveHash, DeriveHierarchical, Derive}; pub use self::derivation::{DeriveHash, DeriveHierarchical, Derive};
pub use self::filter::{Filter, FilterChanges}; pub use self::filter::{Filter, FilterChanges};
pub use self::hash::{H64, H160, H256, H512, H520, H2048}; pub use self::hash::{H64, H160, H256, H512, H520, H2048};


@ -33,7 +33,7 @@ parts:
# rust-channel: stable # @TODO enable after https://bugs.launchpad.net/snapcraft/+bug/1778530 # rust-channel: stable # @TODO enable after https://bugs.launchpad.net/snapcraft/+bug/1778530
rust-revision: 1.26.2 # @TODO remove after https://bugs.launchpad.net/snapcraft/+bug/1778530 rust-revision: 1.26.2 # @TODO remove after https://bugs.launchpad.net/snapcraft/+bug/1778530
build-attributes: [no-system-libraries] build-attributes: [no-system-libraries]
-build-packages: [g++, libudev-dev, libssl-dev, make, pkg-config]
+build-packages: [g++, libudev-dev, libssl-dev, make, pkg-config, cmake]
stage-packages: [libc6, libssl1.0.0, libudev1, libstdc++6] stage-packages: [libc6, libssl1.0.0, libudev1, libstdc++6]
df: df:
plugin: nil plugin: nil


@ -8,4 +8,3 @@ license = "GPL3"
ethereum-types = "0.3" ethereum-types = "0.3"
journaldb = { path = "../journaldb" } journaldb = { path = "../journaldb" }
app_dirs = { git = "https://github.com/paritytech/app-dirs-rs" } app_dirs = { git = "https://github.com/paritytech/app-dirs-rs" }
dirs = "1.0.2"


@ -15,11 +15,12 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>. // along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Directory helper functions //! Directory helper functions
use std::env;
/// Replaces `$HOME` str with home directory path. /// Replaces `$HOME` str with home directory path.
pub fn replace_home(base: &str, arg: &str) -> String { pub fn replace_home(base: &str, arg: &str) -> String {
// the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support`
let r = arg.replace("$HOME", ::dirs::home_dir().unwrap().to_str().unwrap()); let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap());
let r = r.replace("$BASE", base); let r = r.replace("$BASE", base);
r.replace("/", &::std::path::MAIN_SEPARATOR.to_string()) r.replace("/", &::std::path::MAIN_SEPARATOR.to_string())
} }


@ -18,12 +18,11 @@
//! Dir utilities for platform-specific operations //! Dir utilities for platform-specific operations
extern crate app_dirs; extern crate app_dirs;
extern crate dirs;
extern crate ethereum_types; extern crate ethereum_types;
extern crate journaldb; extern crate journaldb;
pub mod helpers; pub mod helpers;
-use std::fs;
+use std::{env, fs};
use std::path::{PathBuf, Path}; use std::path::{PathBuf, Path};
use ethereum_types::{H64, H256}; use ethereum_types::{H64, H256};
use journaldb::Algorithm; use journaldb::Algorithm;
@ -32,12 +31,14 @@ use app_dirs::{AppInfo, get_app_root, AppDataType};
// re-export platform-specific functions // re-export platform-specific functions
use platform::*; use platform::*;
-pub use dirs::home_dir;
-/// Platform-specific chains path - Windows only
+/// Platform-specific chains path for standard client - Windows only
#[cfg(target_os = "windows")] pub const CHAINS_PATH: &str = "$LOCAL/chains";
-/// Platform-specific chains path
+/// Platform-specific chains path for light client - Windows only
+#[cfg(target_os = "windows")] pub const CHAINS_PATH_LIGHT: &str = "$LOCAL/chains_light";
+/// Platform-specific chains path for standard client
#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH: &str = "$BASE/chains";
+/// Platform-specific chains path for light client
+#[cfg(not(target_os = "windows"))] pub const CHAINS_PATH_LIGHT: &str = "$BASE/chains_light";
/// Platform-specific cache path - Windows only /// Platform-specific cache path - Windows only
#[cfg(target_os = "windows")] pub const CACHE_PATH: &str = "$LOCAL/cache"; #[cfg(target_os = "windows")] pub const CACHE_PATH: &str = "$LOCAL/cache";
@ -236,7 +237,7 @@ pub fn default_hypervisor_path() -> PathBuf {
/// Get home directory. /// Get home directory.
fn home() -> PathBuf { fn home() -> PathBuf {
dirs::home_dir().expect("Failed to get home dir") env::home_dir().expect("Failed to get home dir")
} }
/// Geth path /// Geth path
@ -259,9 +260,9 @@ pub fn parity(chain: &str) -> PathBuf {
#[cfg(target_os = "macos")] #[cfg(target_os = "macos")]
mod platform { mod platform {
use std::path::PathBuf; use std::path::PathBuf;
pub const AUTHOR: &'static str = "Parity"; pub const AUTHOR: &str = "Parity";
pub const PRODUCT: &'static str = "io.parity.ethereum"; pub const PRODUCT: &str = "io.parity.ethereum";
pub const PRODUCT_HYPERVISOR: &'static str = "io.parity.ethereum-updates"; pub const PRODUCT_HYPERVISOR: &str = "io.parity.ethereum-updates";
pub fn parity_base() -> PathBuf { pub fn parity_base() -> PathBuf {
let mut home = super::home(); let mut home = super::home();
@ -283,9 +284,9 @@ mod platform {
#[cfg(windows)] #[cfg(windows)]
mod platform { mod platform {
use std::path::PathBuf; use std::path::PathBuf;
pub const AUTHOR: &'static str = "Parity"; pub const AUTHOR: &str = "Parity";
pub const PRODUCT: &'static str = "Ethereum"; pub const PRODUCT: &str = "Ethereum";
pub const PRODUCT_HYPERVISOR: &'static str = "EthereumUpdates"; pub const PRODUCT_HYPERVISOR: &str = "EthereumUpdates";
pub fn parity_base() -> PathBuf { pub fn parity_base() -> PathBuf {
let mut home = super::home(); let mut home = super::home();


@ -17,11 +17,12 @@
use parity_bytes::Bytes; use parity_bytes::Bytes;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::collections::{HashSet, HashMap, VecDeque}; use std::collections::{HashSet, HashMap, VecDeque};
use std::collections::hash_map::Entry;
use std::default::Default; use std::default::Default;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use hash::keccak; use hash::keccak;
use ethereum_types::{H256, H520}; use ethereum_types::{H256, H520};
-use rlp::{Rlp, RlpStream, encode_list};
+use rlp::{Rlp, RlpStream};
use node_table::*; use node_table::*;
use network::{Error, ErrorKind}; use network::{Error, ErrorKind};
use ethkey::{Secret, KeyPair, sign, recover}; use ethkey::{Secret, KeyPair, sign, recover};
@ -42,7 +43,15 @@ const PACKET_FIND_NODE: u8 = 3;
const PACKET_NEIGHBOURS: u8 = 4; const PACKET_NEIGHBOURS: u8 = 4;
const PING_TIMEOUT: Duration = Duration::from_millis(300); const PING_TIMEOUT: Duration = Duration::from_millis(300);
const FIND_NODE_TIMEOUT: Duration = Duration::from_secs(2);
const EXPIRY_TIME: Duration = Duration::from_secs(60);
const MAX_NODES_PING: usize = 32; // Max nodes to add/ping at once const MAX_NODES_PING: usize = 32; // Max nodes to add/ping at once
const REQUEST_BACKOFF: [Duration; 4] = [
Duration::from_secs(1),
Duration::from_secs(4),
Duration::from_secs(16),
Duration::from_secs(64)
];
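REQUEST_BACKOFF above is the table of retry delays applied after consecutive ping timeouts; once a node's failure count walks off the end of the table it is evicted from its bucket. A standalone sketch (not from the diff) of mapping a consecutive-failure count to the next retry deadline:

use std::time::{Duration, Instant};

const BACKOFF: [Duration; 4] = [
    Duration::from_secs(1),
    Duration::from_secs(4),
    Duration::from_secs(16),
    Duration::from_secs(64),
];

// Returns when the node may be contacted again, or None once the
// failure count exceeds the table (i.e. the node should be dropped).
fn next_retry(fail_count: usize, now: Instant) -> Option<Instant> {
    BACKOFF.get(fail_count).map(|delay| now + *delay)
}

fn main() {
    let now = Instant::now();
    for fails in 0..5 {
        match next_retry(fails, now) {
            Some(at) => println!("after {} failure(s): retry in {:?}", fails + 1, at - now),
            None => println!("after {} failure(s): evict node", fails + 1),
        }
    }
}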
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct NodeEntry { pub struct NodeEntry {
@ -53,13 +62,35 @@ pub struct NodeEntry {
pub struct BucketEntry { pub struct BucketEntry {
pub address: NodeEntry, pub address: NodeEntry,
pub id_hash: H256, pub id_hash: H256,
-pub timeout: Option<Instant>,
+pub last_seen: Instant,
backoff_until: Instant,
fail_count: usize,
}
impl BucketEntry {
fn new(address: NodeEntry) -> Self {
let now = Instant::now();
BucketEntry {
id_hash: keccak(address.id),
address: address,
last_seen: now,
backoff_until: now,
fail_count: 0,
}
}
} }
pub struct NodeBucket { pub struct NodeBucket {
nodes: VecDeque<BucketEntry>, //sorted by last active nodes: VecDeque<BucketEntry>, //sorted by last active
} }
struct PendingRequest {
packet_id: u8,
sent_at: Instant,
packet_hash: H256,
response_count: usize, // Some requests (eg. FIND_NODE) have multi-packet responses
}
impl Default for NodeBucket { impl Default for NodeBucket {
fn default() -> Self { fn default() -> Self {
NodeBucket::new() NodeBucket::new()
@ -79,7 +110,7 @@ pub struct Datagram {
pub address: SocketAddr, pub address: SocketAddr,
} }
-pub struct Discovery {
+pub struct Discovery<'a> {
id: NodeId, id: NodeId,
id_hash: H256, id_hash: H256,
secret: Secret, secret: Secret,
@ -88,10 +119,14 @@ pub struct Discovery {
discovery_id: NodeId, discovery_id: NodeId,
discovery_nodes: HashSet<NodeId>, discovery_nodes: HashSet<NodeId>,
node_buckets: Vec<NodeBucket>, node_buckets: Vec<NodeBucket>,
in_flight_requests: HashMap<NodeId, PendingRequest>,
expiring_pings: VecDeque<(NodeId, Instant)>,
expiring_finds: VecDeque<(NodeId, Instant)>,
send_queue: VecDeque<Datagram>, send_queue: VecDeque<Datagram>,
check_timestamps: bool, check_timestamps: bool,
adding_nodes: Vec<NodeEntry>, adding_nodes: Vec<NodeEntry>,
ip_filter: IpFilter, ip_filter: IpFilter,
request_backoff: &'a [Duration],
} }
pub struct TableUpdates { pub struct TableUpdates {
@ -99,8 +134,8 @@ pub struct TableUpdates {
pub removed: HashSet<NodeId>, pub removed: HashSet<NodeId>,
} }
-impl Discovery {
+impl<'a> Discovery<'a> {
-pub fn new(key: &KeyPair, public: NodeEndpoint, ip_filter: IpFilter) -> Discovery {
+pub fn new(key: &KeyPair, public: NodeEndpoint, ip_filter: IpFilter) -> Discovery<'static> {
Discovery { Discovery {
id: key.public().clone(), id: key.public().clone(),
id_hash: keccak(key.public()), id_hash: keccak(key.public()),
@ -110,86 +145,80 @@ impl Discovery {
discovery_id: NodeId::new(), discovery_id: NodeId::new(),
discovery_nodes: HashSet::new(), discovery_nodes: HashSet::new(),
node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(), node_buckets: (0..ADDRESS_BITS).map(|_| NodeBucket::new()).collect(),
in_flight_requests: HashMap::new(),
expiring_pings: VecDeque::new(),
expiring_finds: VecDeque::new(),
send_queue: VecDeque::new(), send_queue: VecDeque::new(),
check_timestamps: true, check_timestamps: true,
adding_nodes: Vec::new(), adding_nodes: Vec::new(),
ip_filter: ip_filter, ip_filter: ip_filter,
request_backoff: &REQUEST_BACKOFF,
} }
} }
/// Add a new node to discovery table. Pings the node. /// Add a new node to discovery table. Pings the node.
pub fn add_node(&mut self, e: NodeEntry) { pub fn add_node(&mut self, e: NodeEntry) {
-if self.is_allowed(&e) {
-let endpoint = e.endpoint.clone();
-self.update_node(e);
-self.ping(&endpoint);
+// If distance returns None, then we are trying to add ourself.
+let id_hash = keccak(e.id);
+if let Some(dist) = Discovery::distance(&self.id_hash, &id_hash) {
+if self.node_buckets[dist].nodes.iter().any(|n| n.id_hash == id_hash) {
+return;
+}
+self.try_ping(e);
} }
} }
/// Add a list of nodes. Pings a few nodes each round /// Add a list of nodes. Pings a few nodes each round
pub fn add_node_list(&mut self, nodes: Vec<NodeEntry>) { pub fn add_node_list(&mut self, nodes: Vec<NodeEntry>) {
-self.adding_nodes = nodes;
-self.update_new_nodes();
+for node in nodes {
+self.add_node(node);
}
} }
/// Add a list of known nodes to the table. /// Add a list of known nodes to the table.
-pub fn init_node_list(&mut self, mut nodes: Vec<NodeEntry>) {
-for n in nodes.drain(..) {
+pub fn init_node_list(&mut self, nodes: Vec<NodeEntry>) {
+for n in nodes {
if self.is_allowed(&n) { if self.is_allowed(&n) {
self.update_node(n); self.update_node(n);
} }
} }
} }
-fn update_node(&mut self, e: NodeEntry) {
+fn update_node(&mut self, e: NodeEntry) -> Option<TableUpdates> {
trace!(target: "discovery", "Inserting {:?}", &e); trace!(target: "discovery", "Inserting {:?}", &e);
let id_hash = keccak(e.id); let id_hash = keccak(e.id);
let dist = match Discovery::distance(&self.id_hash, &id_hash) { let dist = match Discovery::distance(&self.id_hash, &id_hash) {
Some(dist) => dist, Some(dist) => dist,
None => { None => {
debug!(target: "discovery", "Attempted to update own entry: {:?}", e); debug!(target: "discovery", "Attempted to update own entry: {:?}", e);
-return;
+return None;
} }
}; };
let mut added_map = HashMap::new();
let ping = { let ping = {
let bucket = &mut self.node_buckets[dist]; let bucket = &mut self.node_buckets[dist];
let updated = if let Some(node) = bucket.nodes.iter_mut().find(|n| n.address.id == e.id) { let updated = if let Some(node) = bucket.nodes.iter_mut().find(|n| n.address.id == e.id) {
node.address = e.clone(); node.address = e.clone();
-node.timeout = None;
+node.last_seen = Instant::now();
node.backoff_until = Instant::now();
node.fail_count = 0;
true true
} else { false }; } else { false };
if !updated { if !updated {
bucket.nodes.push_front(BucketEntry { address: e, timeout: None, id_hash: id_hash, }); added_map.insert(e.id, e.clone());
} bucket.nodes.push_front(BucketEntry::new(e));
if bucket.nodes.len() > BUCKET_SIZE { if bucket.nodes.len() > BUCKET_SIZE {
//ping least active node select_bucket_ping(bucket.nodes.iter())
let last = bucket.nodes.back_mut().expect("Last item is always present when len() > 0"); } else { None }
last.timeout = Some(Instant::now());
Some(last.address.endpoint.clone())
} else { None } } else { None }
}; };
-if let Some(endpoint) = ping {
-self.ping(&endpoint);
+if let Some(node) = ping {
+self.try_ping(node);
}
}
/// Removes the timeout of a given NodeId if it can be found in one of the discovery buckets
fn clear_ping(&mut self, id: &NodeId) {
let dist = match Discovery::distance(&self.id_hash, &keccak(id)) {
Some(dist) => dist,
None => {
debug!(target: "discovery", "Received ping from self");
return
}
};
let bucket = &mut self.node_buckets[dist];
if let Some(node) = bucket.nodes.iter_mut().find(|n| &n.address.id == id) {
node.timeout = None;
} }
Some(TableUpdates { added: added_map, removed: HashSet::new() })
} }
/// Starts the discovery process at round 0 /// Starts the discovery process at round 0
@ -201,11 +230,11 @@ impl Discovery {
} }
fn update_new_nodes(&mut self) { fn update_new_nodes(&mut self) {
-let mut count = 0usize;
-while !self.adding_nodes.is_empty() && count < MAX_NODES_PING {
-let node = self.adding_nodes.pop().expect("pop is always Some if not empty; qed");
-self.add_node(node);
-count += 1;
+while self.in_flight_requests.len() < MAX_NODES_PING {
+match self.adding_nodes.pop() {
+Some(next) => self.try_ping(next),
+None => break,
+}
} }
} }
@ -219,13 +248,17 @@ impl Discovery {
{ {
let nearest = self.nearest_node_entries(&self.discovery_id).into_iter(); let nearest = self.nearest_node_entries(&self.discovery_id).into_iter();
let nearest = nearest.filter(|x| !self.discovery_nodes.contains(&x.id)).take(ALPHA).collect::<Vec<_>>(); let nearest = nearest.filter(|x| !self.discovery_nodes.contains(&x.id)).take(ALPHA).collect::<Vec<_>>();
let target = self.discovery_id.clone();
for r in nearest { for r in nearest {
let rlp = encode_list(&(&[self.discovery_id.clone()][..])); match self.send_find_node(&r, &target) {
self.send_packet(PACKET_FIND_NODE, &r.endpoint.udp_address(), &rlp) Ok(()) => {
.unwrap_or_else(|e| warn!("Error sending node discovery packet for {:?}: {:?}", &r.endpoint, e));
self.discovery_nodes.insert(r.id.clone()); self.discovery_nodes.insert(r.id.clone());
tried_count += 1; tried_count += 1;
trace!(target: "discovery", "Sent FindNode to {:?}", &r.endpoint); },
Err(e) => {
warn!(target: "discovery", "Error sending node discovery packet for {:?}: {:?}", &r.endpoint, e);
},
};
} }
} }
@ -251,46 +284,71 @@ impl Discovery {
None // a and b are equal, so log distance is -inf None // a and b are equal, so log distance is -inf
} }
-fn ping(&mut self, node: &NodeEndpoint) {
-let mut rlp = RlpStream::new_list(3);
+fn try_ping(&mut self, node: NodeEntry) {
+if !self.is_allowed(&node) ||
self.in_flight_requests.contains_key(&node.id) ||
self.adding_nodes.iter().any(|n| n.id == node.id)
{
return;
}
if self.in_flight_requests.len() < MAX_NODES_PING {
self.ping(&node)
.unwrap_or_else(|e| {
warn!(target: "discovery", "Error sending Ping packet: {:?}", e);
});
} else {
self.adding_nodes.push(node);
}
}
fn ping(&mut self, node: &NodeEntry) -> Result<(), Error> {
let mut rlp = RlpStream::new_list(4);
rlp.append(&PROTOCOL_VERSION); rlp.append(&PROTOCOL_VERSION);
self.public_endpoint.to_rlp_list(&mut rlp); self.public_endpoint.to_rlp_list(&mut rlp);
node.to_rlp_list(&mut rlp); node.endpoint.to_rlp_list(&mut rlp);
trace!(target: "discovery", "Sent Ping to {:?}", &node); append_expiration(&mut rlp);
self.send_packet(PACKET_PING, &node.udp_address(), &rlp.drain()) let hash = self.send_packet(PACKET_PING, &node.endpoint.udp_address(), &rlp.drain())?;
.unwrap_or_else(|e| warn!("Error sending Ping packet: {:?}", e))
}
fn send_packet(&mut self, packet_id: u8, address: &SocketAddr, payload: &[u8]) -> Result<(), Error> { let request_info = PendingRequest {
let mut rlp = RlpStream::new(); packet_id: PACKET_PING,
rlp.append_raw(&[packet_id], 1); sent_at: Instant::now(),
let source = Rlp::new(payload); packet_hash: hash,
rlp.begin_list(source.item_count()? + 1); response_count: 0,
for i in 0 .. source.item_count()? {
rlp.append_raw(source.at(i)?.as_raw(), 1);
}
let timestamp = 60 + SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() as u32;
rlp.append(&timestamp);
let bytes = rlp.drain();
let hash = keccak(bytes.as_ref());
let signature = match sign(&self.secret, &hash) {
Ok(s) => s,
Err(e) => {
warn!("Error signing UDP packet");
return Err(Error::from(e));
}
}; };
let mut packet = Bytes::with_capacity(bytes.len() + 32 + 65); self.expiring_pings.push_back((node.id, request_info.sent_at));
packet.extend(hash.iter()); self.in_flight_requests.insert(node.id, request_info);
packet.extend(signature.iter());
packet.extend(bytes.iter()); trace!(target: "discovery", "Sent Ping to {:?}", &node.endpoint);
let signed_hash = keccak(&packet[32..]);
packet[0..32].clone_from_slice(&signed_hash);
self.send_to(packet, address.clone());
Ok(()) Ok(())
} }
fn send_find_node(&mut self, node: &NodeEntry, target: &NodeId) -> Result<(), Error> {
let mut rlp = RlpStream::new_list(2);
rlp.append(target);
append_expiration(&mut rlp);
let hash = self.send_packet(PACKET_FIND_NODE, &node.endpoint.udp_address(), &rlp.drain())?;
let request_info = PendingRequest {
packet_id: PACKET_FIND_NODE,
sent_at: Instant::now(),
packet_hash: hash,
response_count: 0,
};
self.expiring_finds.push_back((node.id, request_info.sent_at));
self.in_flight_requests.insert(node.id, request_info);
trace!(target: "discovery", "Sent FindNode to {:?}", &node.endpoint);
Ok(())
}
fn send_packet(&mut self, packet_id: u8, address: &SocketAddr, payload: &[u8]) -> Result<H256, Error> {
let packet = assemble_packet(packet_id, payload, &self.secret)?;
let hash = H256::from(&packet[0..32]);
self.send_to(packet, address.clone());
Ok(hash)
}
fn nearest_node_entries(&self, target: &NodeId) -> Vec<NodeEntry> { fn nearest_node_entries(&self, target: &NodeId) -> Vec<NodeEntry> {
let target_hash = keccak(target); let target_hash = keccak(target);
let target_distance = self.id_hash ^ target_hash; let target_distance = self.id_hash ^ target_hash;
@ -396,38 +454,58 @@ impl Discovery {
let dest = NodeEndpoint::from_rlp(&rlp.at(2)?)?; let dest = NodeEndpoint::from_rlp(&rlp.at(2)?)?;
let timestamp: u64 = rlp.val_at(3)?; let timestamp: u64 = rlp.val_at(3)?;
self.check_timestamp(timestamp)?; self.check_timestamp(timestamp)?;
let mut added_map = HashMap::new();
let mut response = RlpStream::new_list(3);
dest.to_rlp_list(&mut response);
response.append(&echo_hash);
append_expiration(&mut response);
self.send_packet(PACKET_PONG, from, &response.drain())?;
let entry = NodeEntry { id: node.clone(), endpoint: source.clone() }; let entry = NodeEntry { id: node.clone(), endpoint: source.clone() };
if !entry.endpoint.is_valid() { if !entry.endpoint.is_valid() {
debug!(target: "discovery", "Got bad address: {:?}", entry); debug!(target: "discovery", "Got bad address: {:?}", entry);
} else if !self.is_allowed(&entry) { } else if !self.is_allowed(&entry) {
debug!(target: "discovery", "Address not allowed: {:?}", entry); debug!(target: "discovery", "Address not allowed: {:?}", entry);
} else { } else {
self.update_node(entry.clone()); self.add_node(entry.clone());
added_map.insert(node.clone(), entry);
}
let mut response = RlpStream::new_list(2);
dest.to_rlp_list(&mut response);
response.append(&echo_hash);
self.send_packet(PACKET_PONG, from, &response.drain())?;
Ok(Some(TableUpdates { added: added_map, removed: HashSet::new() }))
} }
fn on_pong(&mut self, rlp: &Rlp, node: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> { Ok(None)
}
fn on_pong(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
trace!(target: "discovery", "Got Pong from {:?}", &from); trace!(target: "discovery", "Got Pong from {:?}", &from);
// TODO: validate pong packet in rlp.val_at(1)
let dest = NodeEndpoint::from_rlp(&rlp.at(0)?)?; let dest = NodeEndpoint::from_rlp(&rlp.at(0)?)?;
let echo_hash: H256 = rlp.val_at(1)?;
let timestamp: u64 = rlp.val_at(2)?; let timestamp: u64 = rlp.val_at(2)?;
self.check_timestamp(timestamp)?; self.check_timestamp(timestamp)?;
let mut entry = NodeEntry { id: node.clone(), endpoint: dest }; let mut node = NodeEntry { id: node_id.clone(), endpoint: dest };
if !entry.endpoint.is_valid() { if !node.endpoint.is_valid() {
debug!(target: "discovery", "Bad address: {:?}", entry); debug!(target: "discovery", "Bad address: {:?}", node);
entry.endpoint.address = from.clone(); node.endpoint.address = from.clone();
} }
self.clear_ping(node);
let is_expected = match self.in_flight_requests.entry(*node_id) {
Entry::Occupied(entry) => {
let is_expected = {
let request = entry.get();
request.packet_id == PACKET_PING && request.packet_hash == echo_hash
};
if is_expected {
entry.remove();
}
is_expected
},
Entry::Vacant(_) => false
};
if is_expected {
Ok(self.update_node(node))
} else {
debug!(target: "discovery", "Got unexpected Pong from {:?}", &from);
Ok(None) Ok(None)
} }
}
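The reworked on_pong above accepts a pong only when its echoed hash matches the hash recorded for an in-flight PING to that node, using the HashMap entry API so the request is removed exactly when it matches. A standalone sketch of that check with simplified types (a plain byte array stands in for H256):

use std::collections::hash_map::Entry;
use std::collections::HashMap;

type NodeId = u64;
type Hash = [u8; 32];

struct Pending { packet_hash: Hash }

// Returns true (and clears the entry) only if the pong echoes the hash we sent.
fn accept_pong(in_flight: &mut HashMap<NodeId, Pending>, node: NodeId, echo: Hash) -> bool {
    match in_flight.entry(node) {
        Entry::Occupied(entry) => {
            let matches = entry.get().packet_hash == echo;
            if matches {
                entry.remove();
            }
            matches
        }
        Entry::Vacant(_) => false, // pong we never asked for
    }
}

fn main() {
    let mut in_flight = HashMap::new();
    in_flight.insert(1u64, Pending { packet_hash: [0xab; 32] });

    assert!(!accept_pong(&mut in_flight, 1, [0xcd; 32])); // wrong echo hash: keep waiting
    assert!(accept_pong(&mut in_flight, 1, [0xab; 32]));  // matching echo: request completed
    assert!(!accept_pong(&mut in_flight, 1, [0xab; 32])); // already consumed
    println!("in-flight requests left: {}", in_flight.len());
}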
fn on_find_node(&mut self, rlp: &Rlp, _node: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> { fn on_find_node(&mut self, rlp: &Rlp, _node: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
trace!(target: "discovery", "Got FindNode from {:?}", &from); trace!(target: "discovery", "Got FindNode from {:?}", &from);
@ -450,22 +528,49 @@ impl Discovery {
let limit = (MAX_DATAGRAM_SIZE - 109) / 90; let limit = (MAX_DATAGRAM_SIZE - 109) / 90;
let chunks = nearest.chunks(limit); let chunks = nearest.chunks(limit);
let packets = chunks.map(|c| { let packets = chunks.map(|c| {
-let mut rlp = RlpStream::new_list(1);
+let mut rlp = RlpStream::new_list(2);
rlp.begin_list(c.len()); rlp.begin_list(c.len());
for n in 0 .. c.len() { for n in 0 .. c.len() {
rlp.begin_list(4); rlp.begin_list(4);
c[n].endpoint.to_rlp(&mut rlp); c[n].endpoint.to_rlp(&mut rlp);
rlp.append(&c[n].id); rlp.append(&c[n].id);
} }
append_expiration(&mut rlp);
rlp.out() rlp.out()
}); });
packets.collect() packets.collect()
} }
-fn on_neighbours(&mut self, rlp: &Rlp, _node: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
-// TODO: validate packet
-let mut added = HashMap::new();
-trace!(target: "discovery", "Got {} Neighbours from {:?}", rlp.at(0)?.item_count()?, &from);
+fn on_neighbours(&mut self, rlp: &Rlp, node_id: &NodeId, from: &SocketAddr) -> Result<Option<TableUpdates>, Error> {
+let results_count = rlp.at(0)?.item_count()?;
+let is_expected = match self.in_flight_requests.entry(*node_id) {
Entry::Occupied(mut entry) => {
let result = {
let request = entry.get_mut();
if request.packet_id == PACKET_FIND_NODE &&
request.response_count + results_count <= BUCKET_SIZE
{
request.response_count += results_count;
true
} else {
false
}
};
if entry.get().response_count == BUCKET_SIZE {
entry.remove();
}
result
}
Entry::Vacant(_) => false,
};
if !is_expected {
debug!(target: "discovery", "Got unexpected Neighbors from {:?}", &from);
return Ok(None);
}
trace!(target: "discovery", "Got {} Neighbours from {:?}", results_count, &from);
for r in rlp.at(0)?.iter() { for r in rlp.at(0)?.iter() {
let endpoint = NodeEndpoint::from_rlp(&r)?; let endpoint = NodeEndpoint::from_rlp(&r)?;
if !endpoint.is_valid() { if !endpoint.is_valid() {
@ -481,35 +586,62 @@ impl Discovery {
debug!(target: "discovery", "Address not allowed: {:?}", entry); debug!(target: "discovery", "Address not allowed: {:?}", entry);
continue; continue;
} }
-added.insert(node_id, entry.clone());
+self.add_node(entry);
self.ping(&entry.endpoint);
self.update_node(entry);
} }
-Ok(Some(TableUpdates { added: added, removed: HashSet::new() }))
+Ok(None)
} }
-fn check_expired(&mut self, force: bool) -> HashSet<NodeId> {
+fn check_expired(&mut self, time: Instant) -> HashSet<NodeId> {
let now = Instant::now();
let mut removed: HashSet<NodeId> = HashSet::new(); let mut removed: HashSet<NodeId> = HashSet::new();
for bucket in &mut self.node_buckets { while let Some((node_id, sent_at)) = self.expiring_pings.pop_front() {
bucket.nodes.retain(|node| { if time.duration_since(sent_at) <= PING_TIMEOUT {
if let Some(timeout) = node.timeout { self.expiring_pings.push_front((node_id, sent_at));
if !force && now.duration_since(timeout) < PING_TIMEOUT { break;
true
} }
else { self.expire_in_flight_request(node_id, sent_at, &mut removed);
trace!(target: "discovery", "Removed expired node {:?}", &node.address);
removed.insert(node.address.id.clone());
false
} }
} else { true } while let Some((node_id, sent_at)) = self.expiring_finds.pop_front() {
}); if time.duration_since(sent_at) <= FIND_NODE_TIMEOUT {
self.expiring_finds.push_front((node_id, sent_at));
break;
}
self.expire_in_flight_request(node_id, sent_at, &mut removed);
} }
removed removed
} }
fn expire_in_flight_request(&mut self, node_id: NodeId, sent_at: Instant, removed: &mut HashSet<NodeId>) {
if let Entry::Occupied(entry) = self.in_flight_requests.entry(node_id) {
if entry.get().sent_at == sent_at {
entry.remove();
// Attempt to remove from bucket if in one.
let id_hash = keccak(&node_id);
let dist = Discovery::distance(&self.id_hash, &id_hash)
.expect("distance is None only if id hashes are equal; will never send request to self; qed");
let bucket = &mut self.node_buckets[dist];
if let Some(index) = bucket.nodes.iter().position(|n| n.id_hash == id_hash) {
if bucket.nodes[index].fail_count < self.request_backoff.len() {
let node = &mut bucket.nodes[index];
node.backoff_until = Instant::now() + self.request_backoff[node.fail_count];
node.fail_count += 1;
trace!(
target: "discovery",
"Requests to node {:?} timed out {} consecutive time(s)",
&node.address, node.fail_count
);
} else {
removed.insert(node_id);
let node = bucket.nodes.remove(index).expect("index was located in if condition");
debug!(target: "discovery", "Removed expired node {:?}", &node.address);
}
}
}
}
}
pub fn round(&mut self) -> Option<TableUpdates> { pub fn round(&mut self) -> Option<TableUpdates> {
-let removed = self.check_expired(false);
+let removed = self.check_expired(Instant::now());
self.discover(); self.discover();
if !removed.is_empty() { if !removed.is_empty() {
Some(TableUpdates { added: HashMap::new(), removed: removed }) Some(TableUpdates { added: HashMap::new(), removed: removed })
@ -533,10 +665,48 @@ impl Discovery {
} }
} }
fn append_expiration(rlp: &mut RlpStream) {
let expiry = SystemTime::now() + EXPIRY_TIME;
let timestamp = expiry.duration_since(UNIX_EPOCH).unwrap_or_default().as_secs() as u32;
rlp.append(&timestamp);
}
fn assemble_packet(packet_id: u8, bytes: &[u8], secret: &Secret) -> Result<Bytes, Error> {
let mut packet = Bytes::with_capacity(bytes.len() + 32 + 65 + 1);
packet.resize(32 + 65, 0); // Filled in below
packet.push(packet_id);
packet.extend_from_slice(bytes);
let hash = keccak(&packet[(32 + 65)..]);
let signature = match sign(secret, &hash) {
Ok(s) => s,
Err(e) => {
warn!(target: "discovery", "Error signing UDP packet");
return Err(Error::from(e));
}
};
packet[32..(32 + 65)].copy_from_slice(&signature[..]);
let signed_hash = keccak(&packet[32..]);
packet[0..32].copy_from_slice(&signed_hash);
Ok(packet)
}
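assemble_packet above frames a discovery datagram as hash (32 bytes), signature (65 bytes), packet id (1 byte), then the RLP payload: it signs the keccak of the id-plus-payload region and stores the keccak of everything after the leading hash field in the first 32 bytes. A standalone sketch of the same framing with dummy hash and sign functions (the real code uses keccak-256 and recoverable secp256k1 signatures):

// Stand-ins for keccak-256 and a recoverable ECDSA signature; real field lengths kept.
fn dummy_hash(data: &[u8]) -> [u8; 32] {
    let mut out = [0u8; 32];
    for (i, b) in data.iter().enumerate() {
        out[i % 32] ^= *b;
    }
    out
}
fn dummy_sign(data: &[u8]) -> [u8; 65] {
    let h = dummy_hash(data);
    let mut sig = [0u8; 65];
    sig[..32].copy_from_slice(&h);
    sig
}

fn assemble(packet_id: u8, payload: &[u8]) -> Vec<u8> {
    let mut packet = Vec::with_capacity(32 + 65 + 1 + payload.len());
    packet.resize(32 + 65, 0);       // reserve space for hash and signature
    packet.push(packet_id);          // 1-byte packet type
    packet.extend_from_slice(payload);

    let to_sign = dummy_hash(&packet[32 + 65..]);   // hash of id + payload
    let signature = dummy_sign(&to_sign);
    packet[32..32 + 65].copy_from_slice(&signature);

    let signed_hash = dummy_hash(&packet[32..]);    // hash of signature + id + payload
    packet[..32].copy_from_slice(&signed_hash);
    packet
}

fn main() {
    let packet = assemble(1, b"ping-payload");
    println!("total len = {}, payload starts at byte {}", packet.len(), 32 + 65 + 1);
}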
// Selects the next node in a bucket to ping. Chooses the eligible node least recently seen.
fn select_bucket_ping<'a, I>(nodes: I) -> Option<NodeEntry>
where
I: Iterator<Item=&'a BucketEntry>
{
let now = Instant::now();
nodes
.filter(|n| n.backoff_until < now)
.min_by_key(|n| n.last_seen)
.map(|n| n.address.clone())
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
-use std::net::{SocketAddr};
+use std::net::{IpAddr,Ipv4Addr};
use node_table::{Node, NodeId, NodeEndpoint}; use node_table::{Node, NodeId, NodeEndpoint};
use std::str::FromStr; use std::str::FromStr;
@ -560,50 +730,151 @@ mod tests {
assert!(packets.last().unwrap().len() > 0); assert!(packets.last().unwrap().len() > 0);
} }
#[test]
fn ping_queue() {
let key = Random.generate().unwrap();
let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40445").unwrap(), udp_port: 40445 };
let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default());
for i in 1..(MAX_NODES_PING+1) {
discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
assert_eq!(discovery.in_flight_requests.len(), i);
assert_eq!(discovery.send_queue.len(), i);
assert_eq!(discovery.adding_nodes.len(), 0);
}
for i in 1..20 {
discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
assert_eq!(discovery.in_flight_requests.len(), MAX_NODES_PING);
assert_eq!(discovery.send_queue.len(), MAX_NODES_PING);
assert_eq!(discovery.adding_nodes.len(), i);
}
}
#[test] #[test]
fn discovery() { fn discovery() {
let key1 = Random.generate().unwrap(); let mut discovery_handlers = (0..5).map(|i| {
let key2 = Random.generate().unwrap(); let key = Random.generate().unwrap();
let ep1 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40444").unwrap(), udp_port: 40444 }; let ep = NodeEndpoint {
let ep2 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40445").unwrap(), udp_port: 40445 }; address: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 41000 + i),
let mut discovery1 = Discovery::new(&key1, ep1.clone(), IpFilter::default()); udp_port: 41000 + i,
let mut discovery2 = Discovery::new(&key2, ep2.clone(), IpFilter::default()); };
Discovery::new(&key, ep, IpFilter::default())
})
.collect::<Vec<_>>();
let node1 = Node::from_str("enode://a979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7770").unwrap(); // Sort inversely by XOR distance to the 0 hash.
let node2 = Node::from_str("enode://b979fb575495b8d6db44f750317d0f4622bf4c2aa3365d6af7c284339968eef29b69ad0dce72a4d8db5ebb4968de0e3bec910127f134779fbcb0cb6d3331163c@127.0.0.1:7771").unwrap(); discovery_handlers.sort_by(|a, b| b.id_hash.cmp(&a.id_hash));
discovery1.add_node(NodeEntry { id: node1.id.clone(), endpoint: node1.endpoint.clone() });
discovery1.add_node(NodeEntry { id: node2.id.clone(), endpoint: node2.endpoint.clone() });
discovery2.add_node(NodeEntry { id: key1.public().clone(), endpoint: ep1.clone() }); // Initialize the routing table of each with the next one in order.
discovery2.refresh(); for i in 0 .. 5 {
let node = NodeEntry {
id: discovery_handlers[(i + 1) % 5].id,
endpoint: discovery_handlers[(i + 1) % 5].public_endpoint.clone(),
};
discovery_handlers[i].update_node(node);
}
for _ in 0 .. 10 { // After 4 discovery rounds, the first one should have learned about the rest.
while let Some(datagram) = discovery1.dequeue_send() { for _round in 0 .. 4 {
if datagram.address == ep2.address { discovery_handlers[0].round();
discovery2.on_packet(&datagram.payload, ep1.address.clone()).ok();
let mut continue_loop = true;
while continue_loop {
continue_loop = false;
// Process all queued messages.
for i in 0 .. 5 {
let src = discovery_handlers[i].public_endpoint.address.clone();
while let Some(datagram) = discovery_handlers[i].dequeue_send() {
let dest = discovery_handlers.iter_mut()
.find(|disc| datagram.address == disc.public_endpoint.address)
.unwrap();
dest.on_packet(&datagram.payload, src).ok();
continue_loop = true;
} }
} }
while let Some(datagram) = discovery2.dequeue_send() {
if datagram.address == ep1.address {
discovery1.on_packet(&datagram.payload, ep2.address.clone()).ok();
} }
} }
discovery2.round();
} let results = discovery_handlers[0].nearest_node_entries(&NodeId::new());
assert_eq!(discovery2.nearest_node_entries(&NodeId::new()).len(), 3) assert_eq!(results.len(), 4);
} }
	#[test]
	fn removes_expired() {
		let key = Random.generate().unwrap();
		let ep = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40446").unwrap(), udp_port: 40447 };
		let discovery = Discovery::new(&key, ep.clone(), IpFilter::default());
		let mut discovery = Discovery { request_backoff: &[], ..discovery };

		let total_bucket_nodes = |node_buckets: &Vec<NodeBucket>| -> usize {
			node_buckets.iter().map(|bucket| bucket.nodes.len()).sum()
		};

		let node_entries = (0..1200)
			.map(|_| NodeEntry { id: NodeId::random(), endpoint: ep.clone() })
			.collect::<Vec<_>>();

		discovery.init_node_list(node_entries.clone());
		assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200);

		// Requests have not expired yet.
		let removed = discovery.check_expired(Instant::now()).len();
		assert_eq!(removed, 0);

		// Expiring pings to bucket nodes removes them from bucket.
		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
		assert!(removed > 0);
		assert_eq!(total_bucket_nodes(&discovery.node_buckets), 1200 - removed);

		for _ in 0..100 {
			discovery.add_node(NodeEntry { id: NodeId::random(), endpoint: ep.clone() });
		}
		assert!(discovery.in_flight_requests.len() > 0);

		// Expire pings to nodes that are not in buckets.
		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
		assert_eq!(removed, 0);
		assert_eq!(discovery.in_flight_requests.len(), 0);

		let from = SocketAddr::from_str("99.99.99.99:40445").unwrap();

		// FIND_NODE times out because it doesn't receive k results.
		let key = Random.generate().unwrap();
		discovery.send_find_node(&node_entries[100], key.public()).unwrap();
		for payload in Discovery::prepare_neighbours_packets(&node_entries[101..116]) {
			let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap();
			discovery.on_packet(&packet, from.clone()).unwrap();
		}

		let removed = discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT).len();
		assert!(removed > 0);

		// FIND_NODE does not time out because it receives k results.
		discovery.send_find_node(&node_entries[100], key.public()).unwrap();
		for payload in Discovery::prepare_neighbours_packets(&node_entries[101..117]) {
			let packet = assemble_packet(PACKET_NEIGHBOURS, &payload, &key.secret()).unwrap();
			discovery.on_packet(&packet, from.clone()).unwrap();
		}

		let removed = discovery.check_expired(Instant::now() + FIND_NODE_TIMEOUT).len();
		assert_eq!(removed, 0);

		// Test bucket evictions with retries.
		let request_backoff = [Duration::new(0, 0); 2];
		let mut discovery = Discovery { request_backoff: &request_backoff, ..discovery };

		for _ in 0..2 {
			discovery.ping(&node_entries[101]).unwrap();
			let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
			assert_eq!(removed, 0);
		}

		discovery.ping(&node_entries[101]).unwrap();
		let removed = discovery.check_expired(Instant::now() + PING_TIMEOUT).len();
		assert_eq!(removed, 1);
	}
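The tail of removes_expired drives the new retry behaviour: a ping that times out is re-sent according to request_backoff, and the node is only evicted once the retries are exhausted. The sketch below shows that schedule in isolation; the delays, the 4-entry table and the on_ping_timeout helper are illustrative assumptions, not the crate's actual values or names.

use std::time::{Duration, Instant};

const REQUEST_BACKOFF: [Duration; 4] = [
	Duration::from_secs(1),
	Duration::from_secs(2),
	Duration::from_secs(4),
	Duration::from_secs(8),
];

struct PendingPing {
	deadline: Instant,
	backoff_index: usize,
}

/// Returns true if the node should be evicted, false if another retry was scheduled.
fn on_ping_timeout(ping: &mut PendingPing, now: Instant) -> bool {
	if ping.backoff_index < REQUEST_BACKOFF.len() {
		// Re-send the PING after the next backoff delay.
		ping.deadline = now + REQUEST_BACKOFF[ping.backoff_index];
		ping.backoff_index += 1;
		false
	} else {
		// Retries exhausted: drop the node from its bucket.
		true
	}
}

fn main() {
	let mut ping = PendingPing { deadline: Instant::now(), backoff_index: 0 };
	let mut evictions = 0;
	for _ in 0..=REQUEST_BACKOFF.len() {
		if on_ping_timeout(&mut ping, Instant::now()) {
			evictions += 1;
		}
	}
	// Only the timeout after the final retry evicts the node.
	assert_eq!(evictions, 1);
}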
	#[test]
@ -615,11 +886,8 @@ mod tests {
		let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default());
		for _ in 0..(16 + 10) {
			let entry = BucketEntry::new(NodeEntry { id: NodeId::new(), endpoint: ep.clone() });
			discovery.node_buckets[0].nodes.push_back(entry);
		}
		let nearest = discovery.nearest_node_entries(&NodeId::new());
		assert_eq!(nearest.len(), 16)
@ -674,7 +942,7 @@ mod tests {
			.unwrap();
		let mut discovery = Discovery::new(&key, ep.clone(), IpFilter::default());
		discovery.init_node_list(node_entries.clone());
		let expected_bucket_sizes = vec![
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@ -782,17 +1050,70 @@ mod tests {
	fn test_ping() {
		let key1 = Random.generate().unwrap();
		let key2 = Random.generate().unwrap();
		let key3 = Random.generate().unwrap();
		let ep1 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40344").unwrap(), udp_port: 40344 };
		let ep2 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40345").unwrap(), udp_port: 40345 };
		let ep3 = NodeEndpoint { address: SocketAddr::from_str("127.0.0.1:40346").unwrap(), udp_port: 40345 };
		let mut discovery1 = Discovery::new(&key1, ep1.clone(), IpFilter::default());
		let mut discovery2 = Discovery::new(&key2, ep2.clone(), IpFilter::default());

		discovery1.ping(&NodeEntry { id: discovery2.id, endpoint: ep2.clone() }).unwrap();
		let ping_data = discovery1.dequeue_send().unwrap();
		assert!(!discovery1.any_sends_queued());
		let data = &ping_data.payload[(32 + 65)..];
		assert_eq!(data[0], PACKET_PING);
		let rlp = Rlp::new(&data[1..]);
		assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap());
		assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap());

		if let Some(_) = discovery2.on_packet(&ping_data.payload, ep1.address.clone()).unwrap() {
			panic!("Expected no changes to discovery2's table");
		}

		let pong_data = discovery2.dequeue_send().unwrap();
		let data = &pong_data.payload[(32 + 65)..];
		assert_eq!(data[0], PACKET_PONG);
		let rlp = Rlp::new(&data[1..]);
		assert_eq!(ping_data.payload[0..32], rlp.val_at::<Vec<u8>>(1).unwrap()[..]);

		// Create a pong packet with incorrect echo hash and assert that it is rejected.
		let mut incorrect_pong_rlp = RlpStream::new_list(3);
		ep1.to_rlp_list(&mut incorrect_pong_rlp);
		incorrect_pong_rlp.append(&H256::default());
		append_expiration(&mut incorrect_pong_rlp);
		let incorrect_pong_data = assemble_packet(
			PACKET_PONG, &incorrect_pong_rlp.drain(), &discovery2.secret
		).unwrap();
		if let Some(_) = discovery1.on_packet(&incorrect_pong_data, ep2.address.clone()).unwrap() {
			panic!("Expected no changes to discovery1's table because pong hash is incorrect");
		}

		// Delivery of valid pong response should add to routing table.
		if let Some(table_updates) = discovery1.on_packet(&pong_data.payload, ep2.address.clone()).unwrap() {
			assert_eq!(table_updates.added.len(), 1);
			assert_eq!(table_updates.removed.len(), 0);
			assert!(table_updates.added.contains_key(&discovery2.id));
		} else {
			panic!("Expected discovery1 to be added to discovery1's table");
		}

		let ping_back = discovery2.dequeue_send().unwrap();
		assert!(!discovery2.any_sends_queued());
		let data = &ping_back.payload[(32 + 65)..];
		assert_eq!(data[0], PACKET_PING);
		let rlp = Rlp::new(&data[1..]);
		assert_eq!(ep2, NodeEndpoint::from_rlp(&rlp.at(1).unwrap()).unwrap());
		assert_eq!(ep1, NodeEndpoint::from_rlp(&rlp.at(2).unwrap()).unwrap());

		// Deliver an unexpected PONG message to discovery1.
		let mut unexpected_pong_rlp = RlpStream::new_list(3);
		ep3.to_rlp_list(&mut unexpected_pong_rlp);
		unexpected_pong_rlp.append(&H256::default());
		append_expiration(&mut unexpected_pong_rlp);
		let unexpected_pong = assemble_packet(
			PACKET_PONG, &unexpected_pong_rlp.drain(), key3.secret()
		).unwrap();
		if let Some(_) = discovery1.on_packet(&unexpected_pong, ep3.address.clone()).unwrap() {
			panic!("Expected no changes to discovery1's table for unexpected pong");
		}
	}
}
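test_ping asserts that the pong's first RLP field echoes the first 32 bytes of the ping datagram (its hash), and that a pong with a wrong or unknown echo leaves the table untouched. A compact sketch of that bookkeeping follows, with u64 node ids and [u8; 32] hashes standing in for the real types; it is not the parity-ethereum code.

use std::collections::HashMap;

type NodeId = u64;
type Hash = [u8; 32];

struct InFlightPing {
	// Hash of the PING datagram we sent; the pong must echo it.
	echo_hash: Hash,
}

struct Discovery {
	in_flight_requests: HashMap<NodeId, InFlightPing>,
}

impl Discovery {
	/// Returns true when the pong matches a pending ping, i.e. the sender
	/// may now be added to the routing table.
	fn on_pong(&mut self, from: NodeId, echoed: &Hash) -> bool {
		let matches = self
			.in_flight_requests
			.get(&from)
			.map_or(false, |req| &req.echo_hash == echoed);
		if matches {
			self.in_flight_requests.remove(&from);
		}
		matches
	}
}

fn main() {
	let mut d = Discovery { in_flight_requests: HashMap::new() };
	d.in_flight_requests.insert(7, InFlightPing { echo_hash: [0xab; 32] });
	assert!(!d.on_pong(7, &[0u8; 32]));  // wrong echo hash: rejected
	assert!(!d.on_pong(9, &[0xab; 32])); // no ping in flight: rejected
	assert!(d.on_pong(7, &[0xab; 32]));  // matching echo hash: accepted
}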

View File

@ -243,7 +243,7 @@ pub struct Host {
	udp_socket: Mutex<Option<UdpSocket>>,
	tcp_listener: Mutex<TcpListener>,
	sessions: Arc<RwLock<Slab<SharedSession>>>,
	discovery: Mutex<Option<Discovery<'static>>>,
	nodes: RwLock<NodeTable>,
	handlers: RwLock<HashMap<ProtocolId, Arc<NetworkProtocolHandler + Sync>>>,
	timers: RwLock<HashMap<TimerToken, ProtocolTimer>>,
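The only change to Host is the 'static parameter on Discovery. Judging from the test code above (request_backoff: &[...]), Discovery now borrows its retry schedule as a slice, so the type is generic over that borrow and Host pins it to 'static. The sketch below shows the shape of that change only; the names and the default schedule are illustrative assumptions, not the actual definitions.

use std::time::Duration;

static DEFAULT_BACKOFF: [Duration; 4] = [
	Duration::from_secs(1),
	Duration::from_secs(2),
	Duration::from_secs(4),
	Duration::from_secs(8),
];

// Generic over the lifetime of the borrowed backoff slice.
struct Discovery<'a> {
	request_backoff: &'a [Duration],
}

// A long-lived owner can only hold Discovery<'static>, i.e. a schedule that
// lives for the whole program, while tests can substitute a local slice
// such as &[].
struct Host {
	discovery: Option<Discovery<'static>>,
}

fn main() {
	let host = Host {
		discovery: Some(Discovery { request_backoff: &DEFAULT_BACKOFF }),
	};
	assert_eq!(host.discovery.unwrap().request_backoff.len(), 4);
}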

View File

@ -33,7 +33,7 @@ use rand::{self, Rng};
/// Node public key
pub type NodeId = H512;

#[derive(Debug, Clone, PartialEq)]
/// Node address info
pub struct NodeEndpoint {
	/// IP(V4 or V6) address
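The added PartialEq derive is what lets the tests above write assert_eq!(ep1, NodeEndpoint::from_rlp(...)). A trivial sketch with a simplified stand-in struct, not the real NodeEndpoint:

#[derive(Debug, Clone, PartialEq)]
struct Endpoint {
	address: String,
	udp_port: u16,
}

fn main() {
	let a = Endpoint { address: "127.0.0.1:40344".to_string(), udp_port: 40344 };
	let b = a.clone();
	// Structural equality only compiles because PartialEq (and Debug) are derived.
	assert_eq!(a, b);
}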

View File

@ -10,7 +10,7 @@ build = "build.rs"
[package.metadata]
# This versions track. Should be changed to `stable` or `beta` when on respective branches.
# Used by auto-updater and for Parity version string.
track = "beta"
# Network specific settings, used ONLY by auto-updater.
# Latest supported fork blocks.