Backports for beta 2.2.2 (#9976)
* version: bump beta to 2.2.2
* Add experimental RPCs flag (#9928)
* WiP
* Enable experimental RPCs.
* Keep existing blocks when restoring a Snapshot (#8643)
* Rename db_restore => client
* First step: make it compile!
* Second step: working implementation!
* Refactoring
* Fix tests
* PR Grumbles
* PR Grumbles WIP
* Migrate ancient blocks iterating backward
* Early return in block migration if snapshot is aborted
* Remove RwLock getter (PR Grumble I)
* Remove dependency on `Client`: only used Traits
* Add test for recovering aborted snapshot recovery
* Add test for migrating old blocks
* Fix build
* PR Grumble I
* PR Grumble II
* PR Grumble III
* PR Grumble IV
* PR Grumble V
* PR Grumble VI
* Fix one test
* Fix test
* PR Grumble
* PR Grumbles
* PR Grumbles II
* Fix tests
* Release RwLock earlier
* Revert Cargo.lock
* Update _update ancient block_ logic: set local in `commit`
* Update typo in ethcore/src/snapshot/service.rs (Co-Authored-By: ngotchac <ngotchac@gmail.com>)
* Adjust requests costs for light client (#9925)
* PIP Table Cost relative to average peers instead of max peers
* Add tracing in PIP new_cost_table
* Update stat peer_count
* Use number of leeching peers for Light serve costs
* Fix test::light_params_load_share_depends_on_max_peers (wrong type)
* Remove (now) useless test
* Remove `load_share` from LightParams.Config; prevent div. by 0
* Add LEECHER_COUNT_FACTOR
* PR Grumble: u64 to u32 for f64 casting
* Prevent u32 overflow for avg_peer_count
* Add tests for LightSync::Statistics
* Fix empty steps (#9939)
* Don't send empty step twice or empty step then block.
* Perform basic validation of locally sealed blocks.
* Don't include empty step twice.
* Prevent silent errors in daemon mode, closes #9367 (#9946)
* Fix a deadlock (#9952)
* Update informant: decimal in Mgas/s; print every 5s (not randomly between 5s and 10s)
* Fix deadlock in `blockchain.rs`
* Update locks ordering
* Fix light client informant while syncing (#9932)
* Add `is_idle` to LightSync to check importing status
* Use SyncStateWrapper to make sure is_idle gets updated
* Update is_major_import to use verified queue size as well
* Add comment for `is_idle`
* Add Debug to `SyncStateWrapper`
* `fn get` -> `fn into_inner`
* ci: rearrange pipeline by logic (#9970)
* ci: rename docs script
* fix docker build (#9971)
* Deny unknown fields for chainspec (#9972)
* Add deny_unknown_fields to chainspec
* Add tests and fix existing one
* Remove serde_ignored dependency for chainspec
* Fix rpc test eth chain spec
* Fix starting_nonce_test spec
* Improve block and transaction propagation (#9954)
* Refactor sync to add priority tasks.
* Send priority tasks notifications.
* Propagate blocks, optimize transactions.
* Implement transaction propagation. Use sync_channel.
* Tone down info.
* Prevent deadlock by not waiting forever for sync lock.
* Fix lock order.
* Don't use sync_channel to prevent deadlocks.
* Fix tests.
* Fix unstable peers and slowness in sync (#9967)
* Don't sync all peers after each response
* Update formatting
* Fix tests: add `continue_sync` to `Sync_step`
* Update ethcore/sync/src/chain/mod.rs (Co-Authored-By: ngotchac <ngotchac@gmail.com>)
* fix rpc middlewares
* fix Cargo.lock
* json: resolve merge in spec
* rpc: fix starting_nonce_test
* ci: allow nightly job to fail
Parent: 5c56fc5023
Commit: 78ceec6c6e
@@ -44,6 +44,13 @@ test-linux:
   tags:
     - rust-stable
 
+test-audit:
+  stage: test
+  script:
+    - scripts/gitlab/cargo-audit.sh
+  tags:
+    - rust-stable
+
 build-linux:
   stage: build
   only: *releaseable_branches
@@ -104,25 +111,18 @@ publish-awss3:
   tags:
     - shell
 
-docs-jsonrpc:
-  stage: optional
+publish-docs:
+  stage: publish
   only:
     - tags
   except:
     - nightly
   cache: {}
   script:
-    - scripts/gitlab/docs-jsonrpc.sh
+    - scripts/gitlab/publish-docs.sh
   tags:
     - shell
 
-cargo-audit:
-  stage: optional
-  script:
-    - scripts/gitlab/cargo-audit.sh
-  tags:
-    - rust-stable
-
 build-android:
   stage: optional
   image: parity/rust-android:gitlab-ci
@@ -132,6 +132,7 @@ build-android:
     - scripts/gitlab/build-unix.sh
   tags:
     - rust-arm
+  allow_failure: true
 
 test-beta:
   stage: optional
@@ -141,6 +142,7 @@ test-beta:
     - scripts/gitlab/test-all.sh beta
   tags:
     - rust-beta
+  allow_failure: true
 
 test-nightly:
   stage: optional
@@ -150,3 +152,4 @@ test-nightly:
     - scripts/gitlab/test-all.sh nightly
   tags:
     - rust-nightly
+  allow_failure: true
Cargo.lock (generated): 54 changes
@@ -205,7 +205,6 @@ name = "chainspec"
 version = "0.1.0"
 dependencies = [
  "ethjson 0.1.0",
- "serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -460,6 +459,15 @@ dependencies = [
  "heapsize 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
+[[package]]
+name = "env_logger"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "env_logger"
 version = "0.5.13"
@@ -483,10 +491,11 @@ dependencies = [
 [[package]]
 name = "eth-secp256k1"
 version = "0.5.7"
-source = "git+https://github.com/paritytech/rust-secp256k1#db81cfea59014b4d176f10f86ed52e1a130b6822"
+source = "git+https://github.com/paritytech/rust-secp256k1#ccc06e7480148b723eb44ac56cf4d20eec380b6f"
 dependencies = [
  "arrayvec 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -558,6 +567,7 @@ dependencies = [
  "byteorder 1.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "common-types 0.1.0",
  "crossbeam 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "ethabi-contract 6.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1552,7 +1562,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-core"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "futures 0.1.25 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1564,7 +1574,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-http-server"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "hyper 0.12.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
@@ -1577,7 +1587,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-ipc-server"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
  "jsonrpc-server-utils 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
@@ -1590,7 +1600,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-macros"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
  "jsonrpc-pubsub 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
@@ -1600,7 +1610,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-pubsub"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
  "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1610,7 +1620,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-server-utils"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "bytes 0.4.10 (registry+https://github.com/rust-lang/crates.io-index)",
  "globset 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1626,7 +1636,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-tcp-server"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
  "jsonrpc-server-utils 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
@@ -1638,7 +1648,7 @@ dependencies = [
 [[package]]
 name = "jsonrpc-ws-server"
 version = "9.0.0"
-source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#207a277b098943864ecaf22dbab7a5e309866d6b"
+source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2#f8a54f46f7f1d68b4e7899ca1e929803bf966a5b"
 dependencies = [
  "error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "jsonrpc-core 9.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-2.2)",
@@ -2048,7 +2058,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "num-integer 0.1.39 (registry+https://github.com/rust-lang/crates.io-index)",
  "num-traits 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
@@ -2152,7 +2162,7 @@ version = "1.12.0"
 dependencies = [
  "jni 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "panic_hook 0.1.0",
- "parity-ethereum 2.2.1",
+ "parity-ethereum 2.2.2",
 ]
 
 [[package]]
@@ -2168,7 +2178,7 @@ dependencies = [
 
 [[package]]
 name = "parity-ethereum"
-version = "2.2.1"
+version = "2.2.2"
 dependencies = [
  "ansi_term 0.10.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2217,7 +2227,7 @@ dependencies = [
  "parity-rpc-client 1.4.0",
  "parity-runtime 0.1.0",
  "parity-updater 1.12.0",
- "parity-version 2.2.1",
+ "parity-version 2.2.2",
  "parity-whisper 0.1.0",
  "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2372,7 +2382,7 @@ dependencies = [
  "parity-crypto 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-runtime 0.1.0",
  "parity-updater 1.12.0",
- "parity-version 2.2.1",
+ "parity-version 2.2.2",
  "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "patricia-trie 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2469,7 +2479,7 @@ dependencies = [
  "parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "parity-hash-fetch 1.12.0",
  "parity-path 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "parity-version 2.2.1",
+ "parity-version 2.2.2",
  "parking_lot 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)",
  "rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "semver 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2479,7 +2489,7 @@ dependencies = [
 
 [[package]]
 name = "parity-version"
-version = "2.2.1"
+version = "2.2.2"
 dependencies = [
  "parity-bytes 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3107,14 +3117,6 @@ dependencies = [
  "syn 0.15.11 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "serde_ignored"
-version = "0.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "serde_json"
 version = "1.0.32"
@@ -4083,6 +4085,7 @@ dependencies = [
 "checksum edit-distance 2.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "3bd26878c3d921f89797a4e1a1711919f999a9f6946bb6f5a4ffda126d297b7e"
 "checksum either 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3be565ca5c557d7f59e7cfcf1844f9e3033650c929c6566f511e8005f205c1d0"
 "checksum elastic-array 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "88d4851b005ef16de812ea9acdb7bece2f0a40dd86c07b85631d7dafa54537bb"
+"checksum env_logger 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "3ddf21e73e016298f5cb37d6ef8e8da8e39f91f9ec8b0df44b7deb16a9f8cd5b"
 "checksum env_logger 0.5.13 (registry+https://github.com/rust-lang/crates.io-index)" = "15b0a4d2e39f8420210be8b27eeda28029729e2fd4291019455016c348240c38"
 "checksum error-chain 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07e791d3be96241c77c43846b665ef1384606da2cd2a48730abe606a12906e02"
 "checksum eth-secp256k1 0.5.7 (git+https://github.com/paritytech/rust-secp256k1)" = "<none>"
@@ -4264,7 +4267,6 @@ dependencies = [
 "checksum semver-parser 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
 "checksum serde 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "15c141fc7027dd265a47c090bf864cf62b42c4d228bbcf4e51a0c9e2b0d3f7ef"
 "checksum serde_derive 1.0.80 (registry+https://github.com/rust-lang/crates.io-index)" = "225de307c6302bec3898c51ca302fc94a7a1697ef0845fcee6448f33c032249c"
-"checksum serde_ignored 0.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "190e9765dcedb56be63b6e0993a006c7e3b071a016a304736e4a315dc01fb142"
 "checksum serde_json 1.0.32 (registry+https://github.com/rust-lang/crates.io-index)" = "43344e7ce05d0d8280c5940cabb4964bea626aa58b1ec0e8c73fa2a8512a38ce"
 "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c"
 "checksum sha1 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "171698ce4ec7cbb93babeb3190021b4d72e96ccb98e33d277ae4ea959d6f2d9e"
@@ -2,7 +2,7 @@
 description = "Parity Ethereum client"
 name = "parity-ethereum"
 # NOTE Make sure to update util/version/Cargo.toml as well
-version = "2.2.1"
+version = "2.2.2"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 
@@ -6,4 +6,3 @@ authors = ["Marek Kotewicz <marek@parity.io>"]
 [dependencies]
 ethjson = { path = "../json" }
 serde_json = "1.0"
-serde_ignored = "0.0.4"
@@ -15,10 +15,8 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 extern crate serde_json;
-extern crate serde_ignored;
 extern crate ethjson;
 
-use std::collections::BTreeSet;
 use std::{fs, env, process};
 use ethjson::spec::Spec;
 
|
|||||||
Err(_) => quit(&format!("{} could not be opened", path)),
|
Err(_) => quit(&format!("{} could not be opened", path)),
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut unused = BTreeSet::new();
|
let spec: Result<Spec, _> = serde_json::from_reader(file);
|
||||||
let mut deserializer = serde_json::Deserializer::from_reader(file);
|
|
||||||
|
|
||||||
let spec: Result<Spec, _> = serde_ignored::deserialize(&mut deserializer, |field| {
|
|
||||||
unused.insert(field.to_string());
|
|
||||||
});
|
|
||||||
|
|
||||||
if let Err(err) = spec {
|
if let Err(err) = spec {
|
||||||
quit(&format!("{} {}", path, err.to_string()));
|
quit(&format!("{} {}", path, err.to_string()));
|
||||||
}
|
}
|
||||||
|
|
||||||
if !unused.is_empty() {
|
|
||||||
let err = unused.into_iter()
|
|
||||||
.map(|field| format!("{} unexpected field `{}`", path, field))
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.join("\n");
|
|
||||||
quit(&err);
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("{} is valid", path);
|
println!("{} is valid", path);
|
||||||
}
|
}
|
||||||
|
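The `serde_ignored` pass can be dropped because the spec types now carry serde's `deny_unknown_fields` attribute, so unknown keys fail deserialization outright. A minimal sketch of that pattern (the `SpecStub` type is hypothetical, standing in for the real ethjson definitions):

#[macro_use]
extern crate serde_derive;
extern crate serde_json;

// Hypothetical stand-in for an ethjson spec type.
#[derive(Debug, Deserialize)]
#[serde(deny_unknown_fields)]
struct SpecStub {
	name: String,
}

fn main() {
	// The unknown `extra` key is now a hard error rather than a collected warning.
	let err = serde_json::from_str::<SpecStub>(r#"{ "name": "x", "extra": 1 }"#).unwrap_err();
	println!("rejected: {}", err);
}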
@@ -76,6 +76,7 @@ hardware-wallet = { path = "../hw" }
 fake-hardware-wallet = { path = "../util/fake-hardware-wallet" }
 
 [dev-dependencies]
+env_logger = "0.4"
 tempdir = "0.3"
 trie-standardmap = "0.1"
 
@@ -28,7 +28,7 @@ use parking_lot::{Mutex, RwLock};
 use provider::Provider;
 use request::{Request, NetworkRequests as Requests, Response};
 use rlp::{RlpStream, Rlp};
-use std::collections::{HashMap, HashSet};
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::fmt;
 use std::ops::{BitOr, BitAnd, Not};
 use std::sync::Arc;
@@ -38,7 +38,7 @@ use std::time::{Duration, Instant};
 use self::request_credits::{Credits, FlowParams};
 use self::context::{Ctx, TickCtx};
 use self::error::Punishment;
-use self::load_timer::{LoadDistribution, NullStore};
+use self::load_timer::{LoadDistribution, NullStore, MOVING_SAMPLE_SIZE};
 use self::request_set::RequestSet;
 use self::id_guard::IdGuard;
 
@@ -70,6 +70,16 @@ const PROPAGATE_TIMEOUT_INTERVAL: Duration = Duration::from_secs(5);
 const RECALCULATE_COSTS_TIMEOUT: TimerToken = 3;
 const RECALCULATE_COSTS_INTERVAL: Duration = Duration::from_secs(60 * 60);
 
+const STATISTICS_TIMEOUT: TimerToken = 4;
+const STATISTICS_INTERVAL: Duration = Duration::from_secs(15);
+
+/// Maximum load share for the light server
+pub const MAX_LIGHTSERV_LOAD: f64 = 0.5;
+
+/// Factor to multiply leecher count to cater for
+/// extra sudden connections (should be >= 1.0)
+pub const LEECHER_COUNT_FACTOR: f64 = 1.25;
+
 // minimum interval between updates.
 const UPDATE_INTERVAL: Duration = Duration::from_millis(5000);
 
@@ -256,18 +266,18 @@ pub trait Handler: Send + Sync {
 pub struct Config {
 	/// How many stored seconds of credits peers should be able to accumulate.
 	pub max_stored_seconds: u64,
-	/// How much of the total load capacity each peer should be allowed to take.
-	pub load_share: f64,
+	/// The network config median peers (used as default peer count)
+	pub median_peers: f64,
 }
 
 impl Default for Config {
 	fn default() -> Self {
-		const LOAD_SHARE: f64 = 1.0 / 25.0;
+		const MEDIAN_PEERS: f64 = 25.0;
 		const MAX_ACCUMULATED: u64 = 60 * 5; // only charge for 5 minutes.
 
 		Config {
 			max_stored_seconds: MAX_ACCUMULATED,
-			load_share: LOAD_SHARE,
+			median_peers: MEDIAN_PEERS,
 		}
 	}
 }
@@ -335,6 +345,42 @@ mod id_guard {
 	}
 }
 
+/// Provides various statistics that could
+/// be used to compute costs
+pub struct Statistics {
+	/// Samples of peer count
+	peer_counts: VecDeque<usize>,
+}
+
+impl Statistics {
+	/// Create a new Statistics instance
+	pub fn new() -> Self {
+		Statistics {
+			peer_counts: VecDeque::with_capacity(MOVING_SAMPLE_SIZE),
+		}
+	}
+
+	/// Add a new peer_count sample
+	pub fn add_peer_count(&mut self, peer_count: usize) {
+		while self.peer_counts.len() >= MOVING_SAMPLE_SIZE {
+			self.peer_counts.pop_front();
+		}
+		self.peer_counts.push_back(peer_count);
+	}
+
+	/// Get the average peer count from previous samples. Is always >= 1.0
+	pub fn avg_peer_count(&self) -> f64 {
+		let len = self.peer_counts.len();
+		if len == 0 {
+			return 1.0;
+		}
+		let avg = self.peer_counts.iter()
+			.fold(0, |sum: u32, &v| sum.saturating_add(v as u32)) as f64
+			/ len as f64;
+		avg.max(1.0)
+	}
+}
+
 /// This is an implementation of the light ethereum network protocol, abstracted
 /// over a `Provider` of data and a p2p network.
 ///
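The `Statistics` type above keeps a fixed-size window of samples and floors the average at 1.0, which is what makes the later division by the average safe. A self-contained restatement of that moving-average logic (the small `WINDOW` here is an assumed stand-in for the crate's `MOVING_SAMPLE_SIZE`):

use std::collections::VecDeque;

const WINDOW: usize = 3; // stand-in for MOVING_SAMPLE_SIZE

fn avg(samples: &VecDeque<usize>) -> f64 {
	if samples.is_empty() {
		return 1.0; // empty window: assume one peer, never divide by zero
	}
	let sum: u32 = samples.iter().fold(0u32, |s, &v| s.saturating_add(v as u32));
	(sum as f64 / samples.len() as f64).max(1.0)
}

fn main() {
	let mut s: VecDeque<usize> = VecDeque::with_capacity(WINDOW);
	for v in [10usize, 20, 30, 40].iter().copied() {
		while s.len() >= WINDOW {
			s.pop_front(); // the oldest sample drops out of the window
		}
		s.push_back(v);
	}
	// Only the last WINDOW samples count: (20 + 30 + 40) / 3 = 30.
	assert_eq!(avg(&s), 30.0);
}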
@@ -359,6 +405,7 @@ pub struct LightProtocol {
 	req_id: AtomicUsize,
 	sample_store: Box<SampleStore>,
 	load_distribution: LoadDistribution,
+	statistics: RwLock<Statistics>,
 }
 
 impl LightProtocol {
@@ -369,9 +416,11 @@ impl LightProtocol {
 		let genesis_hash = provider.chain_info().genesis_hash;
 		let sample_store = params.sample_store.unwrap_or_else(|| Box::new(NullStore));
 		let load_distribution = LoadDistribution::load(&*sample_store);
+		// Default load share relative to median peers
+		let load_share = MAX_LIGHTSERV_LOAD / params.config.median_peers;
 		let flow_params = FlowParams::from_request_times(
 			|kind| load_distribution.expected_time(kind),
-			params.config.load_share,
+			load_share,
 			Duration::from_secs(params.config.max_stored_seconds),
 		);
 
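As a sanity check of the startup formula, with the constants and defaults above (illustrative arithmetic only, not part of the diff):

fn main() {
	// MAX_LIGHTSERV_LOAD = 0.5 and the default median_peers = 25.0:
	let load_share = 0.5 / 25.0;
	// each peer may take 2% of capacity, so a full median set of peers
	// saturates at most half of the light server (the old fixed
	// LOAD_SHARE was 1.0 / 25.0 = 4% per peer).
	assert_eq!(load_share, 0.02);
}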
@@ -389,6 +438,7 @@ impl LightProtocol {
 			req_id: AtomicUsize::new(0),
 			sample_store,
 			load_distribution,
+			statistics: RwLock::new(Statistics::new()),
 		}
 	}
 
@@ -408,6 +458,16 @@ impl LightProtocol {
 		)
 	}
 
+	/// Get the number of active light peers downloading from the
+	/// node
+	pub fn leecher_count(&self) -> usize {
+		let credit_limit = *self.flow_params.read().limit();
+		// Count the number of peers that used some credit
+		self.peers.read().iter()
+			.filter(|(_, p)| p.lock().local_credits.current() < credit_limit)
+			.count()
+	}
+
 	/// Make a request to a peer.
 	///
 	/// Fails on: nonexistent peer, network error, peer not server,
@@ -772,12 +832,16 @@ impl LightProtocol {
 	fn begin_new_cost_period(&self, io: &IoContext) {
 		self.load_distribution.end_period(&*self.sample_store);
 
+		let avg_peer_count = self.statistics.read().avg_peer_count();
+		// Load share relative to average peer count +LEECHER_COUNT_FACTOR%
+		let load_share = MAX_LIGHTSERV_LOAD / (avg_peer_count * LEECHER_COUNT_FACTOR);
 		let new_params = Arc::new(FlowParams::from_request_times(
 			|kind| self.load_distribution.expected_time(kind),
-			self.config.load_share,
+			load_share,
 			Duration::from_secs(self.config.max_stored_seconds),
 		));
 		*self.flow_params.write() = new_params.clone();
+		trace!(target: "pip", "New cost period: avg_peers={} ; cost_table:{:?}", avg_peer_count, new_params.cost_table());
 
 		let peers = self.peers.read();
 		let now = Instant::now();
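Worked numbers for the periodic recalculation (illustrative arithmetic only, not part of the diff):

fn main() {
	const MAX_LIGHTSERV_LOAD: f64 = 0.5;
	const LEECHER_COUNT_FACTOR: f64 = 1.25;
	// Suppose the moving average of leecher counts is 20.
	let avg_peer_count = 20.0_f64;
	// Budget as if 20 * 1.25 = 25 leechers were connected, keeping headroom
	// for sudden new connections: each gets 0.5 / 25.0 = 2% of capacity.
	let load_share = MAX_LIGHTSERV_LOAD / (avg_peer_count * LEECHER_COUNT_FACTOR);
	assert_eq!(load_share, 0.02);
}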
@@ -797,6 +861,11 @@ impl LightProtocol {
 			peer_info.awaiting_acknowledge = Some((now, new_params.clone()));
 		}
 	}
+
+	fn tick_statistics(&self) {
+		let leecher_count = self.leecher_count();
+		self.statistics.write().add_peer_count(leecher_count);
+	}
 }
 
 impl LightProtocol {
@@ -1099,6 +1168,8 @@ impl NetworkProtocolHandler for LightProtocol {
 			.expect("Error registering sync timer.");
 		io.register_timer(RECALCULATE_COSTS_TIMEOUT, RECALCULATE_COSTS_INTERVAL)
 			.expect("Error registering request timer interval token.");
+		io.register_timer(STATISTICS_TIMEOUT, STATISTICS_INTERVAL)
+			.expect("Error registering statistics timer.");
 	}
 
 	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
@@ -1119,6 +1190,7 @@ impl NetworkProtocolHandler for LightProtocol {
 			TICK_TIMEOUT => self.tick_handlers(&io),
 			PROPAGATE_TIMEOUT => self.propagate_transactions(&io),
 			RECALCULATE_COSTS_TIMEOUT => self.begin_new_cost_period(&io),
+			STATISTICS_TIMEOUT => self.tick_statistics(),
 			_ => warn!(target: "pip", "received timeout on unknown token {}", timer),
 		}
 	}
@@ -22,9 +22,10 @@ use ethcore::client::{EachBlockWith, TestBlockChainClient};
 use ethcore::encoded;
 use ethcore::ids::BlockId;
 use ethereum_types::{H256, U256, Address};
-use net::{LightProtocol, Params, packet, Peer};
+use net::{LightProtocol, Params, packet, Peer, Statistics};
 use net::context::IoContext;
 use net::status::{Capabilities, Status};
+use net::load_timer::MOVING_SAMPLE_SIZE;
 use network::{PeerId, NodeId};
 use provider::Provider;
 use request;
@@ -780,3 +781,34 @@ fn get_transaction_index() {
 	let expected = Expect::Respond(packet::RESPONSE, response);
 	proto.handle_packet(&expected, 1, packet::REQUEST, &request_body);
 }
+
+#[test]
+fn sync_statistics() {
+	let mut stats = Statistics::new();
+
+	// Empty set should return 1.0
+	assert_eq!(stats.avg_peer_count(), 1.0);
+
+	// Average < 1.0 should return 1.0
+	stats.add_peer_count(0);
+	assert_eq!(stats.avg_peer_count(), 1.0);
+
+	stats = Statistics::new();
+
+	const N: f64 = 50.0;
+
+	for i in 1..(N as usize + 1) {
+		stats.add_peer_count(i);
+	}
+
+	// Compute the average for the sum 1..N
+	assert_eq!(stats.avg_peer_count(), N * (N + 1.0) / 2.0 / N);
+
+	for _ in 1..(MOVING_SAMPLE_SIZE + 1) {
+		stats.add_peer_count(40);
+	}
+
+	// Test that it returns the average of the last
+	// `MOVING_SAMPLE_SIZE` values
+	assert_eq!(stats.avg_peer_count(), 40.0);
+}
@@ -176,8 +176,8 @@ impl<T: ProvingBlockChainClient + ?Sized> Provider for T {
 	}
 
 	fn block_receipts(&self, req: request::CompleteReceiptsRequest) -> Option<request::ReceiptsResponse> {
-		BlockChainClient::encoded_block_receipts(self, &req.hash)
-			.map(|x| ::request::ReceiptsResponse { receipts: ::rlp::decode_list(&x) })
+		BlockChainClient::block_receipts(self, &req.hash)
+			.map(|x| ::request::ReceiptsResponse { receipts: x.receipts })
 	}
 
 	fn account_proof(&self, req: request::CompleteAccountRequest) -> Option<request::AccountResponse> {
@@ -106,7 +106,13 @@ impl ClientService {
 		info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));
 
 		let pruning = config.pruning;
-		let client = Client::new(config, &spec, blockchain_db.clone(), miner.clone(), io_service.channel())?;
+		let client = Client::new(
+			config,
+			&spec,
+			blockchain_db.clone(),
+			miner.clone(),
+			io_service.channel(),
+		)?;
 		miner.set_io_channel(io_service.channel());
 		miner.set_in_chain_checker(&client.clone());
 
|
|||||||
pruning: pruning,
|
pruning: pruning,
|
||||||
channel: io_service.channel(),
|
channel: io_service.channel(),
|
||||||
snapshot_root: snapshot_path.into(),
|
snapshot_root: snapshot_path.into(),
|
||||||
db_restore: client.clone(),
|
client: client.clone(),
|
||||||
};
|
};
|
||||||
let snapshot = Arc::new(SnapshotService::new(snapshot_params)?);
|
let snapshot = Arc::new(SnapshotService::new(snapshot_params)?);
|
||||||
|
|
||||||
|
@@ -229,6 +229,7 @@ pub struct BlockChain {
 
 	cache_man: Mutex<CacheManager<CacheId>>,
 
+	pending_best_ancient_block: RwLock<Option<Option<BestAncientBlock>>>,
 	pending_best_block: RwLock<Option<BestBlock>>,
 	pending_block_hashes: RwLock<HashMap<BlockNumber, H256>>,
 	pending_block_details: RwLock<HashMap<H256, BlockDetails>>,
@@ -538,6 +539,7 @@ impl BlockChain {
 			block_receipts: RwLock::new(HashMap::new()),
 			db: db.clone(),
 			cache_man: Mutex::new(cache_man),
+			pending_best_ancient_block: RwLock::new(None),
 			pending_best_block: RwLock::new(None),
 			pending_block_hashes: RwLock::new(HashMap::new()),
 			pending_block_details: RwLock::new(HashMap::new()),
@@ -808,18 +810,7 @@ impl BlockChain {
 		}, is_best);
 
 		if is_ancient {
-			let mut best_ancient_block = self.best_ancient_block.write();
-			let ancient_number = best_ancient_block.as_ref().map_or(0, |b| b.number);
-			if self.block_hash(block_number + 1).is_some() {
-				batch.delete(db::COL_EXTRA, b"ancient");
-				*best_ancient_block = None;
-			} else if block_number > ancient_number {
-				batch.put(db::COL_EXTRA, b"ancient", &hash);
-				*best_ancient_block = Some(BestAncientBlock {
-					hash: hash,
-					number: block_number,
-				});
-			}
+			self.set_best_ancient_block(block_number, &hash, batch);
 		}
 
 		false
@@ -860,6 +851,84 @@ impl BlockChain {
 		}
 	}
 
+	/// Update the best ancient block to the given hash, after checking that
+	/// it's directly linked to the currently known best ancient block
+	pub fn update_best_ancient_block(&self, hash: &H256) {
+		// Get the block view of the next ancient block (it must
+		// be in DB at this point)
+		let block_view = match self.block(hash) {
+			Some(v) => v,
+			None => return,
+		};
+
+		// So that `best_ancient_block` gets unlocked before calling
+		// `set_best_ancient_block`
+		{
+			// Get the target hash; if there is no ancient block,
+			// the chain is already fully linked.
+			// Release the `best_ancient_block` RwLock
+			let target_hash = {
+				let best_ancient_block = self.best_ancient_block.read();
+				let cur_ancient_block = match *best_ancient_block {
+					Some(ref b) => b,
+					None => return,
+				};
+
+				// Ensure that the new best ancient block is after the current one
+				if block_view.number() <= cur_ancient_block.number {
+					return;
+				}
+
+				cur_ancient_block.hash.clone()
+			};
+
+			let mut block_hash = *hash;
+			let mut is_linked = false;
+
+			loop {
+				if block_hash == target_hash {
+					is_linked = true;
+					break;
+				}
+
+				match self.block_details(&block_hash) {
+					Some(block_details) => {
+						block_hash = block_details.parent;
+					},
+					None => break,
+				}
+			}
+
+			if !is_linked {
+				trace!(target: "blockchain", "The given block {:x} is not linked to the known ancient block {:x}", hash, target_hash);
+				return;
+			}
+		}
+
+		let mut batch = self.db.key_value().transaction();
+		self.set_best_ancient_block(block_view.number(), hash, &mut batch);
+		self.db.key_value().write(batch).expect("Low level database error.");
+	}
+
+	/// Set the best ancient block with the given value: private method;
+	/// `best_ancient_block` must not be locked, otherwise a deadlock would occur
+	fn set_best_ancient_block(&self, block_number: BlockNumber, block_hash: &H256, batch: &mut DBTransaction) {
+		let mut pending_best_ancient_block = self.pending_best_ancient_block.write();
+		let ancient_number = self.best_ancient_block.read().as_ref().map_or(0, |b| b.number);
+		if self.block_hash(block_number + 1).is_some() {
+			trace!(target: "blockchain", "The two ends of the chain have met.");
+			batch.delete(db::COL_EXTRA, b"ancient");
+			*pending_best_ancient_block = Some(None);
+		} else if block_number > ancient_number {
+			trace!(target: "blockchain", "Updating the best ancient block to {}.", block_number);
+			batch.put(db::COL_EXTRA, b"ancient", &block_hash);
+			*pending_best_ancient_block = Some(Some(BestAncientBlock {
+				hash: *block_hash,
+				number: block_number,
+			}));
+		}
+	}
+
 	/// Insert an epoch transition. Provide an epoch number being transitioned to
 	/// and epoch transition object.
 	///
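The linkage check above walks parent pointers from the candidate ancient head back toward the previously known one, and only advances the pointer if the walk reaches it. A self-contained sketch of that walk, with toy u32 hashes in place of H256 and a map in place of `block_details`:

use std::collections::HashMap;

fn is_linked(mut hash: u32, target: u32, parent_of: &HashMap<u32, u32>) -> bool {
	loop {
		if hash == target {
			return true; // reached the known ancient head: chain is linked
		}
		match parent_of.get(&hash) {
			Some(&parent) => hash = parent, // step to the parent block
			None => return false,           // fell off the known chain
		}
	}
}

fn main() {
	let mut parent_of = HashMap::new();
	parent_of.insert(3, 2); // block 3's parent is block 2
	parent_of.insert(2, 1); // block 2's parent is block 1
	assert!(is_linked(3, 1, &parent_of));
	assert!(!is_linked(3, 9, &parent_of));
}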
@@ -1112,15 +1181,21 @@ impl BlockChain {
 
 	/// Apply pending insertion updates
 	pub fn commit(&self) {
+		let mut pending_best_ancient_block = self.pending_best_ancient_block.write();
 		let mut pending_best_block = self.pending_best_block.write();
 		let mut pending_write_hashes = self.pending_block_hashes.write();
 		let mut pending_block_details = self.pending_block_details.write();
 		let mut pending_write_txs = self.pending_transaction_addresses.write();
 
 		let mut best_block = self.best_block.write();
+		let mut best_ancient_block = self.best_ancient_block.write();
 		let mut write_block_details = self.block_details.write();
 		let mut write_hashes = self.block_hashes.write();
 		let mut write_txs = self.transaction_addresses.write();
+		// update best ancient block
+		if let Some(block_option) = pending_best_ancient_block.take() {
+			*best_ancient_block = block_option;
+		}
 		// update best block
 		if let Some(block) = pending_best_block.take() {
 			*best_block = block;
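The acquisition order in `commit` is the deadlock fix from #9952: every path takes the `pending_*` locks before the live-cache locks, so no two threads can each hold one side while waiting on the other. A toy sketch of the rule, using parking_lot as the codebase does (the `Chain` type here is illustrative, not Parity's):

use parking_lot::RwLock;

struct Chain {
	pending_best: RwLock<Option<u64>>,
	best: RwLock<u64>,
}

impl Chain {
	fn commit(&self) {
		// Same global order everywhere: pending first, then the live value.
		let mut pending = self.pending_best.write();
		let mut best = self.best.write();
		if let Some(b) = pending.take() {
			*best = b;
		}
	}
}

fn main() {
	let chain = Chain { pending_best: RwLock::new(Some(42)), best: RwLock::new(0) };
	chain.commit();
	assert_eq!(*chain.best.read(), 42);
}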
@@ -15,7 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use bytes::Bytes;
-use ethereum_types::H256;
+use ethereum_types::{H256, U256};
 use transaction::UnverifiedTransaction;
 use blockchain::ImportRoute;
 use std::time::Duration;
@@ -141,7 +141,15 @@ pub trait ChainNotify : Send + Sync {
 	}
 
 	/// fires when chain broadcasts a message
-	fn broadcast(&self, _message_type: ChainMessageType) {}
+	fn broadcast(&self, _message_type: ChainMessageType) {
+		// does nothing by default
+	}
+
+	/// fires when new block is about to be imported
+	/// implementations should be light
+	fn block_pre_import(&self, _bytes: &Bytes, _hash: &H256, _difficulty: &U256) {
+		// does nothing by default
+	}
 
 	/// fires when new transactions are received from a peer
 	fn transactions_received(&self,
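A minimal subscriber using the new hook might look like the sketch below; the `EarlyGossip` type is hypothetical, and the other `ChainNotify` methods are assumed to keep their default no-op bodies:

use bytes::Bytes;
use ethereum_types::{H256, U256};

struct EarlyGossip;

impl ChainNotify for EarlyGossip {
	fn block_pre_import(&self, _bytes: &Bytes, hash: &H256, difficulty: &U256) {
		// Runs before verification, so keep it light.
		println!("about to import {:?} (difficulty {})", hash, difficulty);
	}
}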
@@ -32,7 +32,7 @@ use kvdb::{DBValue, KeyValueDB, DBTransaction};
 // other
 use ethereum_types::{H256, Address, U256};
 use block::{IsBlock, LockedBlock, Drain, ClosedBlock, OpenBlock, enact_verified, SealedBlock};
-use blockchain::{BlockChain, BlockChainDB, BlockProvider, TreeRoute, ImportRoute, TransactionAddress, ExtrasInsert};
+use blockchain::{BlockReceipts, BlockChain, BlockChainDB, BlockProvider, TreeRoute, ImportRoute, TransactionAddress, ExtrasInsert};
 use client::ancient_import::AncientVerifier;
 use client::{
 	Nonce, Balance, ChainInfo, BlockInfo, CallContract, TransactionInfo,
@@ -66,7 +66,7 @@ use ethcore_miner::pool::VerifiedTransaction;
 use parking_lot::{Mutex, RwLock};
 use rand::OsRng;
 use receipt::{Receipt, LocalizedReceipt};
-use snapshot::{self, io as snapshot_io};
+use snapshot::{self, io as snapshot_io, SnapshotClient};
 use spec::Spec;
 use state_db::StateDB;
 use state::{self, State};
@@ -881,7 +881,7 @@ impl Client {
 	/// Flush the block import queue.
 	pub fn flush_queue(&self) {
 		self.importer.block_queue.flush();
-		while !self.importer.block_queue.queue_info().is_empty() {
+		while !self.importer.block_queue.is_empty() {
 			self.import_verified_blocks();
 		}
 	}
@@ -1005,6 +1005,16 @@ impl Client {
 		self.importer.miner.clone()
 	}
 
+	#[cfg(test)]
+	pub fn state_db(&self) -> ::parking_lot::RwLockReadGuard<StateDB> {
+		self.state_db.read()
+	}
+
+	#[cfg(test)]
+	pub fn chain(&self) -> Arc<BlockChain> {
+		self.chain.read().clone()
+	}
+
 	/// Replace io channel. Useful for testing.
 	pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
 		*self.io_channel.write() = io_channel;
|
|||||||
bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
bail!(EthcoreErrorKind::Block(BlockError::UnknownParent(unverified.parent_hash())));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let raw = if self.importer.block_queue.is_empty() {
|
||||||
|
Some((
|
||||||
|
unverified.bytes.clone(),
|
||||||
|
unverified.header.hash(),
|
||||||
|
*unverified.header.difficulty(),
|
||||||
|
))
|
||||||
|
} else { None };
|
||||||
|
|
||||||
match self.importer.block_queue.import(unverified) {
|
match self.importer.block_queue.import(unverified) {
|
||||||
Ok(res) => Ok(res),
|
Ok(hash) => {
|
||||||
|
if let Some((raw, hash, difficulty)) = raw {
|
||||||
|
self.notify(move |n| n.block_pre_import(&raw, &hash, &difficulty));
|
||||||
|
}
|
||||||
|
Ok(hash)
|
||||||
|
},
|
||||||
// we only care about block errors (not import errors)
|
// we only care about block errors (not import errors)
|
||||||
Err((block, EthcoreError(EthcoreErrorKind::Block(err), _))) => {
|
Err((block, EthcoreError(EthcoreErrorKind::Block(err), _))) => {
|
||||||
self.importer.bad_blocks.report(block.bytes, format!("{:?}", err));
|
self.importer.bad_blocks.report(block.bytes, format!("{:?}", err));
|
||||||
@@ -1817,7 +1840,7 @@ impl BlockChainClient for Client {
 		Some(receipt)
 	}
 
-	fn block_receipts(&self, id: BlockId) -> Option<Vec<LocalizedReceipt>> {
+	fn localized_block_receipts(&self, id: BlockId) -> Option<Vec<LocalizedReceipt>> {
 		let hash = self.block_hash(id)?;
 
 		let chain = self.chain.read();
@ -1860,14 +1883,18 @@ impl BlockChainClient for Client {
|
|||||||
self.state_db.read().journal_db().state(hash)
|
self.state_db.read().journal_db().state(hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn encoded_block_receipts(&self, hash: &H256) -> Option<Bytes> {
|
fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
|
||||||
self.chain.read().block_receipts(hash).map(|receipts| ::rlp::encode(&receipts))
|
self.chain.read().block_receipts(hash)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn queue_info(&self) -> BlockQueueInfo {
|
fn queue_info(&self) -> BlockQueueInfo {
|
||||||
self.importer.block_queue.queue_info()
|
self.importer.block_queue.queue_info()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn is_queue_empty(&self) -> bool {
|
||||||
|
self.importer.block_queue.is_empty()
|
||||||
|
}
|
||||||
|
|
||||||
fn clear_queue(&self) {
|
fn clear_queue(&self) {
|
||||||
self.importer.block_queue.clear();
|
self.importer.block_queue.clear();
|
||||||
}
|
}
|
||||||
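Note: `encoded_block_receipts` used to RLP-encode inside the client; the typed `block_receipts` hands the raw value to the caller and leaves encoding to the call site. A call-site fragment (not new API surface) that mirrors the removed `.map` exactly:

// Fragment: recover the old encoded form from the typed receipts.
let encoded: Option<Bytes> = client.block_receipts(&hash)
	.map(|receipts| ::rlp::encode(&receipts));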
@@ -2277,26 +2304,37 @@ impl ScheduleInfo for Client {
 
 impl ImportSealedBlock for Client {
 	fn import_sealed_block(&self, block: SealedBlock) -> EthcoreResult<H256> {
-		let h = block.header().hash();
 		let start = Instant::now();
+		let raw = block.rlp_bytes();
+		let header = block.header().clone();
+		let hash = header.hash();
+		self.notify(|n| n.block_pre_import(&raw, &hash, header.difficulty()));
+
 		let route = {
+			// Do a super duper basic verification to detect potential bugs
+			if let Err(e) = self.engine.verify_block_basic(&header) {
+				self.importer.bad_blocks.report(
+					block.rlp_bytes(),
+					format!("Detected an issue with locally sealed block: {}", e),
+				);
+				return Err(e.into());
+			}
+
 			// scope for self.import_lock
 			let _import_lock = self.importer.import_lock.lock();
 			trace_time!("import_sealed_block");
 
-			let number = block.header().number();
 			let block_data = block.rlp_bytes();
-			let header = block.header().clone();
 
 			let route = self.importer.commit_block(block, &header, encoded::Block::new(block_data), self);
-			trace!(target: "client", "Imported sealed block #{} ({})", number, h);
+			trace!(target: "client", "Imported sealed block #{} ({})", header.number(), hash);
 			self.state_db.write().sync_cache(&route.enacted, &route.retracted, false);
 			route
 		};
 		let route = ChainRoute::from([route].as_ref());
 		self.importer.miner.chain_new_blocks(
 			self,
-			&[h.clone()],
+			&[hash],
 			&[],
 			route.enacted(),
 			route.retracted(),
@@ -2304,16 +2342,16 @@ impl ImportSealedBlock for Client {
 		);
 		self.notify(|notify| {
 			notify.new_blocks(
-				vec![h.clone()],
+				vec![hash],
 				vec![],
 				route.clone(),
-				vec![h.clone()],
+				vec![hash],
 				vec![],
 				start.elapsed(),
 			);
 		});
 		self.db.read().key_value().flush().expect("DB flush failed.");
-		Ok(h)
+		Ok(hash)
 	}
 }
 
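Note: the control flow of `import_sealed_block` above, summarized as a sketch (no new code):

// 1. notify block_pre_import(raw, hash, difficulty) -- listeners may start
//    propagating the still-unverified block early;
// 2. engine.verify_block_basic(&header)             -- catches local sealing
//    bugs; on Err the block goes to bad_blocks and the error is returned;
// 3. commit_block(..) under import_lock, then the miner and new_blocks
//    notifications fire, all keyed on the single cloned `header`/`hash`.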
@@ -2406,6 +2444,8 @@ impl ProvingBlockChainClient for Client {
 	}
 }
 
+impl SnapshotClient for Client {}
+
 impl Drop for Client {
 	fn drop(&mut self) {
 		self.engine.stop();
@@ -2504,7 +2544,7 @@ mod tests {
 		use test_helpers::{generate_dummy_client_with_data};
 
 		let client = generate_dummy_client_with_data(2, 2, &[1.into(), 1.into()]);
-		let receipts = client.block_receipts(BlockId::Latest).unwrap();
+		let receipts = client.localized_block_receipts(BlockId::Latest).unwrap();
 
 		assert_eq!(receipts.len(), 2);
 		assert_eq!(receipts[0].transaction_index, 0);
@@ -686,7 +686,7 @@ impl BlockChainClient for TestBlockChainClient {
 		self.receipts.read().get(&id).cloned()
 	}
 
-	fn block_receipts(&self, _id: BlockId) -> Option<Vec<LocalizedReceipt>> {
+	fn localized_block_receipts(&self, _id: BlockId) -> Option<Vec<LocalizedReceipt>> {
 		Some(self.receipts.read().values().cloned().collect())
 	}
 
@@ -789,16 +789,14 @@ impl BlockChainClient for TestBlockChainClient {
 		None
 	}
 
-	fn encoded_block_receipts(&self, hash: &H256) -> Option<Bytes> {
+	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts> {
 		// starts with 'f' ?
 		if *hash > H256::from("f000000000000000000000000000000000000000000000000000000000000000") {
 			let receipt = BlockReceipts::new(vec![Receipt::new(
 				TransactionOutcome::StateRoot(H256::zero()),
 				U256::zero(),
 				vec![])]);
-			let mut rlp = RlpStream::new();
-			rlp.append(&receipt);
-			return Some(rlp.out());
+			return Some(receipt);
 		}
 		None
 	}
@@ -20,7 +20,7 @@ use std::sync::Arc;
 use itertools::Itertools;
 
 use block::{OpenBlock, SealedBlock, ClosedBlock};
-use blockchain::TreeRoute;
+use blockchain::{BlockReceipts, TreeRoute};
 use client::Mode;
 use encoded;
 use vm::LastHashes;
@@ -282,7 +282,7 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra
 	fn transaction_receipt(&self, id: TransactionId) -> Option<LocalizedReceipt>;
 
 	/// Get localized receipts for all transaction in given block.
-	fn block_receipts(&self, id: BlockId) -> Option<Vec<LocalizedReceipt>>;
+	fn localized_block_receipts(&self, id: BlockId) -> Option<Vec<LocalizedReceipt>>;
 
 	/// Get a tree route between `from` and `to`.
 	/// See `BlockChain::tree_route`.
@@ -294,12 +294,17 @@ pub trait BlockChainClient : Sync + Send + AccountData + BlockChain + CallContra
 	/// Get latest state node
 	fn state_data(&self, hash: &H256) -> Option<Bytes>;
 
-	/// Get raw block receipts data by block header hash.
-	fn encoded_block_receipts(&self, hash: &H256) -> Option<Bytes>;
+	/// Get block receipts data by block header hash.
+	fn block_receipts(&self, hash: &H256) -> Option<BlockReceipts>;
 
 	/// Get block queue information.
 	fn queue_info(&self) -> BlockQueueInfo;
 
+	/// Returns true if block queue is empty.
+	fn is_queue_empty(&self) -> bool {
+		self.queue_info().is_empty()
+	}
+
 	/// Clear block queue and abort all import activity.
 	fn clear_queue(&self);
 
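Note: `is_queue_empty` ships with a default body on the trait, so every `BlockChainClient` implementor inherits it, while `Client` (see the `-1860` hunk earlier) overrides it to ask the queue directly instead of materializing a full `BlockQueueInfo`. A self-contained sketch of the pattern with stand-in types:

// Stand-in trait: default method derived from the heavier query...
trait QueueInfoSketch {
	fn queued(&self) -> usize;
	fn is_queue_empty(&self) -> bool {
		self.queued() == 0
	}
}

struct CheapClient;

impl QueueInfoSketch for CheapClient {
	fn queued(&self) -> usize { 0 }
	// ...overridden where a cheaper direct check exists.
	fn is_queue_empty(&self) -> bool { true }
}

fn main() {
	assert!(CheapClient.is_queue_empty());
}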
@@ -16,8 +16,8 @@
 
 //! A blockchain engine that supports a non-instant BFT proof-of-authority.
 
-use std::collections::{BTreeMap, HashSet};
-use std::fmt;
+use std::collections::{BTreeMap, BTreeSet, HashSet};
+use std::{cmp, fmt};
 use std::iter::FromIterator;
 use std::ops::Deref;
 use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering as AtomicOrdering};
@@ -123,10 +123,10 @@ struct Step {
 }
 
 impl Step {
-	fn load(&self) -> usize { self.inner.load(AtomicOrdering::SeqCst) }
+	fn load(&self) -> u64 { self.inner.load(AtomicOrdering::SeqCst) as u64 }
 	fn duration_remaining(&self) -> Duration {
 		let now = unix_now();
-		let expected_seconds = (self.load() as u64)
+		let expected_seconds = self.load()
 			.checked_add(1)
 			.and_then(|ctr| ctr.checked_mul(self.duration as u64))
 			.map(Duration::from_secs);
@@ -162,8 +162,8 @@ impl Step {
 		}
 	}
 
-	fn check_future(&self, given: usize) -> Result<(), Option<OutOfBounds<u64>>> {
-		const REJECTED_STEP_DRIFT: usize = 4;
+	fn check_future(&self, given: u64) -> Result<(), Option<OutOfBounds<u64>>> {
+		const REJECTED_STEP_DRIFT: u64 = 4;
 
 		// Verify if the step is correct.
 		if given <= self.load() {
@@ -182,8 +182,8 @@ impl Step {
 			let d = self.duration as u64;
 			Err(Some(OutOfBounds {
 				min: None,
-				max: Some(d * current as u64),
-				found: d * given as u64,
+				max: Some(d * current),
+				found: d * given,
 			}))
 		} else {
 			Ok(())
@@ -192,8 +192,8 @@ impl Step {
 }
 
 // Chain scoring: total weight is sqrt(U256::max_value())*height - step
-fn calculate_score(parent_step: U256, current_step: U256, current_empty_steps: U256) -> U256 {
-	U256::from(U128::max_value()) + parent_step - current_step + current_empty_steps
+fn calculate_score(parent_step: u64, current_step: u64, current_empty_steps: usize) -> U256 {
+	U256::from(U128::max_value()) + U256::from(parent_step) - U256::from(current_step) + U256::from(current_empty_steps)
}
 
 struct EpochManager {
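Note: the scoring formula itself is unchanged; only the widening to `U256` moved inside `calculate_score`, so callers pass plain integers. A worked example of the ordering the formula produces, using `u128` stand-ins for `U256` and hypothetical step numbers:

// Sketch: base + parent_step - current_step + empty_steps, with u128
// standing in for U256 and an arbitrary large base.
fn score(parent_step: u64, current_step: u64, empty_steps: usize) -> u128 {
	u128::max_value() / 2
		+ parent_step as u128 - current_step as u128 + empty_steps as u128
}

fn main() {
	assert!(score(4, 5, 0) > score(4, 7, 0)); // fewer skipped steps => heavier chain
	assert!(score(0, 4, 2) > score(0, 4, 0)); // included empty steps add weight back
}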
@@ -284,13 +284,26 @@ impl EpochManager {
 /// A message broadcast by authorities when it's their turn to seal a block but there are no
 /// transactions. Other authorities accumulate these messages and later include them in the seal as
 /// proof.
-#[derive(Clone, Debug)]
+#[derive(Clone, Debug, PartialEq, Eq)]
 struct EmptyStep {
 	signature: H520,
-	step: usize,
+	step: u64,
 	parent_hash: H256,
 }
 
+impl PartialOrd for EmptyStep {
+	fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+		Some(self.cmp(other))
+	}
+}
+impl Ord for EmptyStep {
+	fn cmp(&self, other: &Self) -> cmp::Ordering {
+		self.step.cmp(&other.step)
+			.then_with(|| self.parent_hash.cmp(&other.parent_hash))
+			.then_with(|| self.signature.cmp(&other.signature))
+	}
+}
+
 impl EmptyStep {
 	fn from_sealed(sealed_empty_step: SealedEmptyStep, parent_hash: &H256) -> EmptyStep {
 		let signature = sealed_empty_step.signature;
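Note: the manual `Ord` orders primarily by `step`, which is exactly what the `BTreeSet` storage introduced below needs: step-based range queries become possible and duplicate messages collapse on insert. A self-contained sketch using `(step, parent_hash, signature)` tuples as stand-ins, which order the same way:

use std::collections::BTreeSet;

fn main() {
	let mut set: BTreeSet<(u64, u8, u8)> = BTreeSet::new();
	set.insert((3, 1, 9));
	set.insert((1, 1, 7));
	set.insert((1, 1, 7)); // duplicate message: collapses on insert
	// Half-open range over the step component selects whole step buckets.
	let in_range: Vec<_> = set.range((2, 0, 0)..(4, 0, 0)).cloned().collect();
	assert_eq!(in_range, vec![(3, 1, 9)]);
	assert_eq!(set.len(), 2);
}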
@@ -353,7 +366,7 @@ pub fn empty_step_full_rlp(signature: &H520, empty_step_rlp: &[u8]) -> Vec<u8> {
 	s.out()
 }
 
-pub fn empty_step_rlp(step: usize, parent_hash: &H256) -> Vec<u8> {
+pub fn empty_step_rlp(step: u64, parent_hash: &H256) -> Vec<u8> {
 	let mut s = RlpStream::new_list(2);
 	s.append(&step).append(parent_hash);
 	s.out()
@@ -365,7 +378,7 @@ pub fn empty_step_rlp(step: usize, parent_hash: &H256) -> Vec<u8> {
 /// empty message is included.
 struct SealedEmptyStep {
 	signature: H520,
-	step: usize,
+	step: u64,
 }
 
 impl Encodable for SealedEmptyStep {
@@ -399,7 +412,7 @@ pub struct AuthorityRound {
 	validators: Box<ValidatorSet>,
 	validate_score_transition: u64,
 	validate_step_transition: u64,
-	empty_steps: Mutex<Vec<EmptyStep>>,
+	empty_steps: Mutex<BTreeSet<EmptyStep>>,
 	epoch_manager: Mutex<EpochManager>,
 	immediate_transitions: bool,
 	block_reward: U256,
@@ -494,7 +507,7 @@ fn header_expected_seal_fields(header: &Header, empty_steps_transition: u64) ->
 	}
 }
 
-fn header_step(header: &Header, empty_steps_transition: u64) -> Result<usize, ::rlp::DecoderError> {
+fn header_step(header: &Header, empty_steps_transition: u64) -> Result<u64, ::rlp::DecoderError> {
 	let expected_seal_fields = header_expected_seal_fields(header, empty_steps_transition);
 	Rlp::new(&header.seal().get(0).expect(
 		&format!("was either checked with verify_block_basic or is genesis; has {} fields; qed (Make sure the spec file has a correct genesis seal)", expected_seal_fields))).as_val()
@@ -533,17 +546,17 @@ fn header_empty_steps_signers(header: &Header, empty_steps_transition: u64) -> R
 	}
 }
 
-fn step_proposer(validators: &ValidatorSet, bh: &H256, step: usize) -> Address {
-	let proposer = validators.get(bh, step);
+fn step_proposer(validators: &ValidatorSet, bh: &H256, step: u64) -> Address {
+	let proposer = validators.get(bh, step as usize);
 	trace!(target: "engine", "Fetched proposer for step {}: {}", step, proposer);
 	proposer
 }
 
-fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: usize, address: &Address) -> bool {
+fn is_step_proposer(validators: &ValidatorSet, bh: &H256, step: u64, address: &Address) -> bool {
 	step_proposer(validators, bh, step) == *address
 }
 
-fn verify_timestamp(step: &Step, header_step: usize) -> Result<(), BlockError> {
+fn verify_timestamp(step: &Step, header_step: u64) -> Result<(), BlockError> {
 	match step.check_future(header_step) {
 		Err(None) => {
 			trace!(target: "engine", "verify_timestamp: block from the future");
@@ -564,7 +577,7 @@ fn verify_external(header: &Header, validators: &ValidatorSet, empty_steps_trans
 	let header_step = header_step(header, empty_steps_transition)?;
 
 	let proposer_signature = header_signature(header, empty_steps_transition)?;
-	let correct_proposer = validators.get(header.parent_hash(), header_step);
+	let correct_proposer = validators.get(header.parent_hash(), header_step as usize);
 	let is_invalid_proposer = *header.author() != correct_proposer || {
 		let empty_steps_rlp = if header.number() >= empty_steps_transition {
 			Some(header_empty_steps_raw(header))
@@ -634,13 +647,13 @@ impl AuthorityRound {
 			panic!("authority_round: step duration can't be zero")
 		}
 		let should_timeout = our_params.start_step.is_none();
-		let initial_step = our_params.start_step.unwrap_or_else(|| (unix_now().as_secs() / (our_params.step_duration as u64))) as usize;
+		let initial_step = our_params.start_step.unwrap_or_else(|| (unix_now().as_secs() / (our_params.step_duration as u64)));
 		let engine = Arc::new(
 			AuthorityRound {
 				transition_service: IoService::<()>::start()?,
 				step: Arc::new(PermissionedStep {
 					inner: Step {
-						inner: AtomicUsize::new(initial_step),
+						inner: AtomicUsize::new(initial_step as usize),
 						calibrate: our_params.start_step.is_none(),
 						duration: our_params.step_duration,
 					},
@@ -651,7 +664,7 @@ impl AuthorityRound {
 				validators: our_params.validators,
 				validate_score_transition: our_params.validate_score_transition,
 				validate_step_transition: our_params.validate_step_transition,
-				empty_steps: Mutex::new(Vec::new()),
+				empty_steps: Default::default(),
 				epoch_manager: Mutex::new(EpochManager::blank()),
 				immediate_transitions: our_params.immediate_transitions,
 				block_reward: our_params.block_reward,
@@ -699,22 +712,41 @@ impl AuthorityRound {
 		})
 	}
 
-	fn empty_steps(&self, from_step: U256, to_step: U256, parent_hash: H256) -> Vec<EmptyStep> {
-		self.empty_steps.lock().iter().filter(|e| {
-			U256::from(e.step) > from_step &&
-			U256::from(e.step) < to_step &&
-			e.parent_hash == parent_hash
-		}).cloned().collect()
+	fn empty_steps(&self, from_step: u64, to_step: u64, parent_hash: H256) -> Vec<EmptyStep> {
+		let from = EmptyStep {
+			step: from_step + 1,
+			parent_hash,
+			signature: Default::default(),
+		};
+		let to = EmptyStep {
+			step: to_step,
+			parent_hash: Default::default(),
+			signature: Default::default(),
+		};
+
+		if from >= to {
+			return vec![];
+		}
+
+		self.empty_steps.lock()
+			.range(from..to)
+			.filter(|e| e.parent_hash == parent_hash)
+			.cloned()
+			.collect()
 	}
 
-	fn clear_empty_steps(&self, step: U256) {
+	fn clear_empty_steps(&self, step: u64) {
 		// clear old `empty_steps` messages
-		self.empty_steps.lock().retain(|e| U256::from(e.step) > step);
+		let mut empty_steps = self.empty_steps.lock();
+		*empty_steps = empty_steps.split_off(&EmptyStep {
+			step: step + 1,
+			parent_hash: Default::default(),
+			signature: Default::default(),
+		});
 	}
 
 	fn handle_empty_step_message(&self, empty_step: EmptyStep) {
-		let mut empty_steps = self.empty_steps.lock();
-		empty_steps.push(empty_step);
+		self.empty_steps.lock().insert(empty_step);
 	}
 
 	fn generate_empty_step(&self, parent_hash: &H256) {
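Note: the sentinel bounds make `range(from..to)` select exactly the steps in `(from_step, to_step)`: `from` pairs the real `parent_hash` with a zeroed signature (the minimum for that `(step, parent_hash)` prefix), `to` zeroes everything at `to_step`, and the `from >= to` guard avoids the panic `BTreeSet::range` raises on inverted bounds; the trailing `filter` only weeds out entries of other parents sharing a step. `clear_empty_steps` leans on the same ordering via `split_off`; a self-contained sketch of that trick:

use std::collections::BTreeSet;

fn main() {
	let mut steps: BTreeSet<u64> = (1..=5).collect();
	let step: u64 = 2;
	// split_off keeps everything >= step + 1 and leaves the head behind,
	// i.e. drops all empty steps at or below `step`.
	let kept = steps.split_off(&(step + 1));
	assert_eq!(steps.into_iter().collect::<Vec<_>>(), vec![1, 2]); // discarded head
	assert_eq!(kept.into_iter().collect::<Vec<_>>(), vec![3, 4, 5]);
}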
@@ -744,7 +776,7 @@ impl AuthorityRound {
 		}
 	}
 
-	fn report_skipped(&self, header: &Header, current_step: usize, parent_step: usize, validators: &ValidatorSet, set_number: u64) {
+	fn report_skipped(&self, header: &Header, current_step: u64, parent_step: u64, validators: &ValidatorSet, set_number: u64) {
 		// we're building on top of the genesis block so don't report any skipped steps
 		if header.number() == 1 {
 			return;
@@ -937,12 +969,12 @@ impl Engine<EthereumMachine> for AuthorityRound {
 		let current_step = self.step.inner.load();
 
 		let current_empty_steps_len = if header.number() >= self.empty_steps_transition {
-			self.empty_steps(parent_step.into(), current_step.into(), parent.hash()).len()
+			self.empty_steps(parent_step, current_step, parent.hash()).len()
 		} else {
 			0
 		};
 
-		let score = calculate_score(parent_step.into(), current_step.into(), current_empty_steps_len.into());
+		let score = calculate_score(parent_step, current_step, current_empty_steps_len);
 		header.set_difficulty(score);
 	}
 
@@ -986,8 +1018,8 @@ impl Engine<EthereumMachine> for AuthorityRound {
 		}
 
 		let header = block.header();
-		let parent_step: U256 = header_step(parent, self.empty_steps_transition)
-			.expect("Header has been verified; qed").into();
+		let parent_step = header_step(parent, self.empty_steps_transition)
+			.expect("Header has been verified; qed");
 
 		let step = self.step.inner.load();
 
@@ -1022,7 +1054,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 		if is_step_proposer(&*validators, header.parent_hash(), step, header.author()) {
 			// this is guarded against by `can_propose` unless the block was signed
 			// on the same step (implies same key) and on a different node.
-			if parent_step == step.into() {
+			if parent_step == step {
 				warn!("Attempted to seal block on the same step as parent. Is this authority sealing with more than one node?");
 				return Seal::None;
 			}
@@ -1034,7 +1066,10 @@ impl Engine<EthereumMachine> for AuthorityRound {
 			block.transactions().is_empty() &&
 			empty_steps.len() < self.maximum_empty_steps {
 
-			self.generate_empty_step(header.parent_hash());
+			if self.step.can_propose.compare_and_swap(true, false, AtomicOrdering::SeqCst) {
+				self.generate_empty_step(header.parent_hash());
+			}
+
 			return Seal::None;
 		}
 
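Note: the `compare_and_swap` on `can_propose` is what stops a node broadcasting the same empty step twice: only the first seal attempt within a step wins the swap, which the updated test below ("make sure that we don't generate empty step for the second time") asserts. Self-contained sketch of the primitive, kept as `compare_and_swap` to match the code of this era:

use std::sync::atomic::{AtomicBool, Ordering};

fn main() {
	let can_propose = AtomicBool::new(true);
	// compare_and_swap returns the previous value: true means we won.
	let first = can_propose.compare_and_swap(true, false, Ordering::SeqCst);
	let second = can_propose.compare_and_swap(true, false, Ordering::SeqCst);
	assert!(first);   // first attempt generates the empty step
	assert!(!second); // later attempts in the same step stay silent
}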
@@ -1058,7 +1093,7 @@ impl Engine<EthereumMachine> for AuthorityRound {
 		// report any skipped primaries between the parent block and
 		// the block we're sealing, unless we have empty steps enabled
 		if header.number() < self.empty_steps_transition {
-			self.report_skipped(header, step, u64::from(parent_step) as usize, &*validators, set_number);
+			self.report_skipped(header, step, parent_step, &*validators, set_number);
 		}
 
 		let mut fields = vec![
@@ -1593,12 +1628,12 @@ mod tests {
 
 		// Two validators.
 		// Spec starts with step 2.
-		header.set_difficulty(calculate_score(U256::from(0), U256::from(2), U256::zero()));
+		header.set_difficulty(calculate_score(0, 2, 0));
 		let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap();
 		header.set_seal(vec![encode(&2usize), encode(&(&*signature as &[u8]))]);
 		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
 		assert!(engine.verify_block_external(&header).is_err());
-		header.set_difficulty(calculate_score(U256::from(0), U256::from(1), U256::zero()));
+		header.set_difficulty(calculate_score(0, 1, 0));
 		let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap();
 		header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]);
 		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
@@ -1622,7 +1657,7 @@ mod tests {
 
 		// Two validators.
 		// Spec starts with step 2.
-		header.set_difficulty(calculate_score(U256::from(0), U256::from(1), U256::zero()));
+		header.set_difficulty(calculate_score(0, 1, 0));
 		let signature = tap.sign(addr, Some("0".into()), header.bare_hash()).unwrap();
 		header.set_seal(vec![encode(&1usize), encode(&(&*signature as &[u8]))]);
 		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
@@ -1650,10 +1685,10 @@ mod tests {
 		// Two validators.
 		// Spec starts with step 2.
 		header.set_seal(vec![encode(&5usize), encode(&(&*signature as &[u8]))]);
-		header.set_difficulty(calculate_score(U256::from(4), U256::from(5), U256::zero()));
+		header.set_difficulty(calculate_score(4, 5, 0));
 		assert!(engine.verify_block_family(&header, &parent_header).is_ok());
 		header.set_seal(vec![encode(&3usize), encode(&(&*signature as &[u8]))]);
-		header.set_difficulty(calculate_score(U256::from(4), U256::from(3), U256::zero()));
+		header.set_difficulty(calculate_score(4, 3, 0));
 		assert!(engine.verify_block_family(&header, &parent_header).is_err());
 	}
 
@@ -1687,7 +1722,7 @@ mod tests {
 		parent_header.set_seal(vec![encode(&1usize)]);
 		parent_header.set_gas_limit("222222".parse::<U256>().unwrap());
 		let mut header: Header = Header::default();
-		header.set_difficulty(calculate_score(U256::from(1), U256::from(3), U256::zero()));
+		header.set_difficulty(calculate_score(1, 3, 0));
 		header.set_gas_limit("222222".parse::<U256>().unwrap());
 		header.set_seal(vec![encode(&3usize)]);
 
@@ -1801,14 +1836,14 @@ mod tests {
 		(spec, tap, accounts)
 	}
 
-	fn empty_step(engine: &EthEngine, step: usize, parent_hash: &H256) -> EmptyStep {
+	fn empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> EmptyStep {
 		let empty_step_rlp = super::empty_step_rlp(step, parent_hash);
 		let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into();
 		let parent_hash = parent_hash.clone();
 		EmptyStep { step, signature, parent_hash }
 	}
 
-	fn sealed_empty_step(engine: &EthEngine, step: usize, parent_hash: &H256) -> SealedEmptyStep {
+	fn sealed_empty_step(engine: &EthEngine, step: u64, parent_hash: &H256) -> SealedEmptyStep {
 		let empty_step_rlp = super::empty_step_rlp(step, parent_hash);
 		let signature = engine.sign(keccak(&empty_step_rlp)).unwrap().into();
 		SealedEmptyStep { signature, step }
@@ -1844,6 +1879,11 @@ mod tests {
 
 		// we've received the message
 		assert!(notify.messages.read().contains(&empty_step_rlp));
+		let len = notify.messages.read().len();
+
+		// make sure that we don't generate empty step for the second time
+		assert_eq!(engine.generate_seal(b1.block(), &genesis_header), Seal::None);
+		assert_eq!(len, notify.messages.read().len());
 	}
 
 	#[test]
@@ -2058,7 +2098,7 @@ mod tests {
 		let empty_step3 = sealed_empty_step(engine, 3, &parent_header.hash());
 
 		let empty_steps = vec![empty_step2, empty_step3];
-		header.set_difficulty(calculate_score(U256::from(0), U256::from(4), U256::from(2)));
+		header.set_difficulty(calculate_score(0, 4, 2));
 		let signature = tap.sign(addr1, Some("1".into()), header.bare_hash()).unwrap();
 		header.set_seal(vec![
 			encode(&4usize),
@@ -2173,4 +2213,52 @@ mod tests {
 			BTreeMap::default(),
 		);
 	}
+
+	#[test]
+	fn test_empty_steps() {
+		let last_benign = Arc::new(AtomicUsize::new(0));
+		let params = AuthorityRoundParams {
+			step_duration: 4,
+			start_step: Some(1),
+			validators: Box::new(TestSet::new(Default::default(), last_benign.clone())),
+			validate_score_transition: 0,
+			validate_step_transition: 0,
+			immediate_transitions: true,
+			maximum_uncle_count_transition: 0,
+			maximum_uncle_count: 0,
+			empty_steps_transition: 0,
+			maximum_empty_steps: 10,
+			block_reward: Default::default(),
+			block_reward_contract_transition: 0,
+			block_reward_contract: Default::default(),
+		};
+
+		let mut c_params = ::spec::CommonParams::default();
+		c_params.gas_limit_bound_divisor = 5.into();
+		let machine = ::machine::EthereumMachine::regular(c_params, Default::default());
+		let engine = AuthorityRound::new(params, machine).unwrap();
+
+		let parent_hash: H256 = 1.into();
+		let signature = H520::default();
+		let step = |step: u64| EmptyStep {
+			step,
+			parent_hash,
+			signature,
+		};
+
+		engine.handle_empty_step_message(step(1));
+		engine.handle_empty_step_message(step(3));
+		engine.handle_empty_step_message(step(2));
+		engine.handle_empty_step_message(step(1));
+
+		assert_eq!(engine.empty_steps(0, 4, parent_hash), vec![step(1), step(2), step(3)]);
+		assert_eq!(engine.empty_steps(2, 3, parent_hash), vec![]);
+		assert_eq!(engine.empty_steps(2, 4, parent_hash), vec![step(3)]);
+
+		engine.clear_empty_steps(2);
+
+		assert_eq!(engine.empty_steps(0, 3, parent_hash), vec![]);
+		assert_eq!(engine.empty_steps(0, 4, parent_hash), vec![step(3)]);
+	}
 }
@@ -139,6 +139,9 @@ extern crate trace_time;
 #[cfg_attr(test, macro_use)]
 extern crate evm;
 
+#[cfg(test)]
+extern crate env_logger;
+
 pub extern crate ethstore;
 
 #[macro_use]
@@ -576,7 +576,7 @@ impl Miner {
 		trace!(target: "miner", "requires_reseal: sealing enabled");
 
 		// Disable sealing if there were no requests for SEALING_TIMEOUT_IN_BLOCKS
 		let had_requests = sealing.last_request.map(|last_request|
 			best_block.saturating_sub(last_request) <= SEALING_TIMEOUT_IN_BLOCKS
 		).unwrap_or(false);
 
@@ -65,6 +65,8 @@ pub enum Error {
 	BadEpochProof(u64),
 	/// Wrong chunk format.
 	WrongChunkFormat(String),
+	/// Unlinked ancient block chain
+	UnlinkedAncientBlockChain,
 }
 
 impl fmt::Display for Error {
@@ -91,6 +93,7 @@ impl fmt::Display for Error {
 			Error::SnapshotsUnsupported => write!(f, "Snapshots unsupported by consensus engine."),
 			Error::BadEpochProof(i) => write!(f, "Bad epoch proof for transition to epoch {}", i),
 			Error::WrongChunkFormat(ref msg) => write!(f, "Wrong chunk format: {}", msg),
+			Error::UnlinkedAncientBlockChain => write!(f, "Unlinked ancient blocks chain"),
 		}
 	}
 }
@@ -56,7 +56,7 @@ use rand::{Rng, OsRng};
 pub use self::error::Error;
 
 pub use self::consensus::*;
-pub use self::service::{Service, DatabaseRestore};
+pub use self::service::{SnapshotClient, Service, DatabaseRestore};
 pub use self::traits::SnapshotService;
 pub use self::watcher::Watcher;
 pub use types::snapshot_manifest::ManifestData;
@@ -22,12 +22,13 @@ use std::fs::{self, File};
 use std::path::PathBuf;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use std::cmp;
 
 use super::{ManifestData, StateRebuilder, Rebuilder, RestorationStatus, SnapshotService, MAX_CHUNK_SIZE};
 use super::io::{SnapshotReader, LooseReader, SnapshotWriter, LooseWriter};
 
 use blockchain::{BlockChain, BlockChainDB, BlockChainDBHandler};
-use client::{Client, ChainInfo, ClientIoMessage};
+use client::{BlockInfo, BlockChainClient, Client, ChainInfo, ClientIoMessage};
 use engines::EthEngine;
 use error::{Error, ErrorKind as SnapshotErrorKind};
 use snapshot::{Error as SnapshotError};
@@ -40,6 +41,7 @@ use ethereum_types::H256;
 use parking_lot::{Mutex, RwLock, RwLockReadGuard};
 use bytes::Bytes;
 use journaldb::Algorithm;
+use kvdb::DBTransaction;
 use snappy;
 
 /// Helper for removing directories in case of error.
@@ -203,6 +205,9 @@ impl Restoration {
 /// Type alias for client io channel.
 pub type Channel = IoChannel<ClientIoMessage>;
 
+/// Trait alias for the Client Service used
+pub trait SnapshotClient: BlockChainClient + BlockInfo + DatabaseRestore {}
+
 /// Snapshot service parameters.
 pub struct ServiceParams {
 	/// The consensus engine this is built on.
@@ -219,7 +224,7 @@ pub struct ServiceParams {
 	/// Usually "<chain hash>/snapshot"
 	pub snapshot_root: PathBuf,
 	/// A handle for database restoration.
-	pub db_restore: Arc<DatabaseRestore>,
+	pub client: Arc<SnapshotClient>,
 }
 
 /// `SnapshotService` implementation.
@@ -236,7 +241,7 @@ pub struct Service {
 	genesis_block: Bytes,
 	state_chunks: AtomicUsize,
 	block_chunks: AtomicUsize,
-	db_restore: Arc<DatabaseRestore>,
+	client: Arc<SnapshotClient>,
 	progress: super::Progress,
 	taking_snapshot: AtomicBool,
 	restoring_snapshot: AtomicBool,
@@ -257,7 +262,7 @@ impl Service {
 			genesis_block: params.genesis_block,
 			state_chunks: AtomicUsize::new(0),
 			block_chunks: AtomicUsize::new(0),
-			db_restore: params.db_restore,
+			client: params.client,
 			progress: Default::default(),
 			taking_snapshot: AtomicBool::new(false),
 			restoring_snapshot: AtomicBool::new(false),
@@ -334,12 +339,110 @@ impl Service {
 
 	// replace the client's database with our own.
 	fn replace_client_db(&self) -> Result<(), Error> {
-		let our_db = self.restoration_db();
+		let migrated_blocks = self.migrate_blocks()?;
+		trace!(target: "snapshot", "Migrated {} ancient blocks", migrated_blocks);
 
-		self.db_restore.restore_db(&*our_db.to_string_lossy())?;
+		let rest_db = self.restoration_db();
+		self.client.restore_db(&*rest_db.to_string_lossy())?;
 		Ok(())
 	}
 
+	// Migrate the blocks in the current DB into the new chain
+	fn migrate_blocks(&self) -> Result<usize, Error> {
+		// Count the number of migrated blocks
+		let mut count = 0;
+		let rest_db = self.restoration_db();
+
+		let cur_chain_info = self.client.chain_info();
+
+		let next_db = self.restoration_db_handler.open(&rest_db)?;
+		let next_chain = BlockChain::new(Default::default(), &[], next_db.clone());
+		let next_chain_info = next_chain.chain_info();
+
+		// The old database looks like this:
+		// [genesis, best_ancient_block] ... [first_block, best_block]
+		// If we are fully synced neither `best_ancient_block` nor `first_block` is set, and we can assume that the whole range from [genesis, best_block] is imported.
+		// The new database only contains the tip of the chain ([first_block, best_block]),
+		// so the useful set of blocks is defined as:
+		// [0 ... min(new.first_block, best_ancient_block or best_block)]
+		let find_range = || -> Option<(H256, H256)> {
+			let next_available_from = next_chain_info.first_block_number?;
+			let cur_available_to = cur_chain_info.ancient_block_number.unwrap_or(cur_chain_info.best_block_number);
+
+			let highest_block_num = cmp::min(next_available_from.saturating_sub(1), cur_available_to);
+
+			if highest_block_num == 0 {
+				return None;
+			}
+
+			trace!(target: "snapshot", "Trying to import ancient blocks until {}", highest_block_num);
+
+			// Here we start from the highest block number and go backward to 0,
+			// thus starting at `highest_block_num` and targeting `0`.
+			let target_hash = self.client.block_hash(BlockId::Number(0))?;
+			let start_hash = self.client.block_hash(BlockId::Number(highest_block_num))?;
+
+			Some((start_hash, target_hash))
+		};
+
+		let (start_hash, target_hash) = match find_range() {
+			Some(x) => x,
+			None => return Ok(0),
+		};
+
+		let mut batch = DBTransaction::new();
+		let mut parent_hash = start_hash;
+		while parent_hash != target_hash {
+			// Early return if restoration is aborted
+			if !self.restoring_snapshot.load(Ordering::SeqCst) {
+				return Ok(count);
+			}
+
+			let block = self.client.block(BlockId::Hash(parent_hash)).ok_or(::snapshot::error::Error::UnlinkedAncientBlockChain)?;
+			parent_hash = block.parent_hash();
+
+			let block_number = block.number();
+			let block_receipts = self.client.block_receipts(&block.hash());
+			let parent_total_difficulty = self.client.block_total_difficulty(BlockId::Hash(parent_hash));
+
+			match (block_receipts, parent_total_difficulty) {
+				(Some(block_receipts), Some(parent_total_difficulty)) => {
+					let block_receipts = block_receipts.receipts;
+
+					next_chain.insert_unordered_block(&mut batch, block, block_receipts, Some(parent_total_difficulty), false, true);
+					count += 1;
+				},
+				_ => break,
+			}
+
+			// Writing changes to the DB and logging every now and then
+			if block_number % 1_000 == 0 {
+				next_db.key_value().write_buffered(batch);
+				next_chain.commit();
+				next_db.key_value().flush().expect("DB flush failed.");
+				batch = DBTransaction::new();
+			}
+
+			if block_number % 10_000 == 0 {
+				trace!(target: "snapshot", "Block restoration at #{}", block_number);
+			}
+		}
+
+		// Final commit to the DB
+		next_db.key_value().write_buffered(batch);
+		next_chain.commit();
+		next_db.key_value().flush().expect("DB flush failed.");
+
+		// We couldn't reach the targeted hash
+		if parent_hash != target_hash {
+			return Err(::snapshot::error::Error::UnlinkedAncientBlockChain.into());
+		}
+
+		// Update best ancient block in the Next Chain
+		next_chain.update_best_ancient_block(&start_hash);
+		Ok(count)
+	}
+
 	/// Get a reference to the snapshot reader.
 	pub fn reader(&self) -> RwLockReadGuard<Option<LooseReader>> {
 		self.reader.read()
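Note: `find_range` bounds the migration at `min(new.first_block - 1, old best_ancient_block or best_block)` and then walks backward by parent hash until the genesis hash. A self-contained sketch of the arithmetic with hypothetical numbers (`highest_to_migrate` is a local stand-in for the closure above):

fn highest_to_migrate(next_first_block: u64, cur_ancient: Option<u64>, cur_best: u64) -> Option<u64> {
	// Mirror of find_range's numeric part: the old DB can serve blocks up to
	// its ancient tip (or best block when fully synced)...
	let cur_available_to = cur_ancient.unwrap_or(cur_best);
	// ...and only blocks strictly below the new DB's first block are useful.
	let highest = std::cmp::min(next_first_block.saturating_sub(1), cur_available_to);
	if highest == 0 { None } else { Some(highest) }
}

fn main() {
	// Restored DB starts at 301; old DB fully synced to 450: migrate 1..=300.
	assert_eq!(highest_to_migrate(301, None, 450), Some(300));
	// Fresh chain with nothing before the snapshot: nothing to migrate.
	assert_eq!(highest_to_migrate(1, None, 0), None);
}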
@@ -480,12 +583,16 @@ impl Service {
 		// Import previous chunks, continue if it fails
 		self.import_prev_chunks(&mut res, manifest).ok();
 
-		*self.status.lock() = RestorationStatus::Ongoing {
-			state_chunks: state_chunks as u32,
-			block_chunks: block_chunks as u32,
-			state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32,
-			block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32,
-		};
+		// It could be that the restoration failed or completed in the meanwhile
+		let mut restoration_status = self.status.lock();
+		if let RestorationStatus::Initializing { .. } = *restoration_status {
+			*restoration_status = RestorationStatus::Ongoing {
+				state_chunks: state_chunks as u32,
+				block_chunks: block_chunks as u32,
+				state_chunks_done: self.state_chunks.load(Ordering::SeqCst) as u32,
+				block_chunks_done: self.block_chunks.load(Ordering::SeqCst) as u32,
+			};
+		}
 
 		Ok(())
 	}
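Note: taking the status lock once and upgrading only from `Initializing` closes a race where a restoration that failed or completed while previous chunks were being re-imported would be stomped back to `Ongoing`. Self-contained sketch with a stand-in enum:

#[derive(Debug, PartialEq)]
enum Status { Initializing, Ongoing, Failed }

fn upgrade(status: &mut Status) {
	// Only a still-initializing restoration may move to Ongoing.
	if let Status::Initializing = *status {
		*status = Status::Ongoing;
	}
}

fn main() {
	let mut s = Status::Failed; // failed while importing previous chunks
	upgrade(&mut s);
	assert_eq!(s, Status::Failed); // not clobbered back to Ongoing

	let mut s = Status::Initializing;
	upgrade(&mut s);
	assert_eq!(s, Status::Ongoing);
}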
@@ -752,26 +859,19 @@ impl Drop for Service {
 
 #[cfg(test)]
 mod tests {
-	use std::sync::Arc;
 	use client::ClientIoMessage;
 	use io::{IoService};
 	use spec::Spec;
 	use journaldb::Algorithm;
-	use error::Error;
 	use snapshot::{ManifestData, RestorationStatus, SnapshotService};
 	use super::*;
 	use tempdir::TempDir;
-	use test_helpers::restoration_db_handler;
+	use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler};
 
-	struct NoopDBRestore;
-	impl DatabaseRestore for NoopDBRestore {
-		fn restore_db(&self, _new_db: &str) -> Result<(), Error> {
-			Ok(())
-		}
-	}
-
 	#[test]
 	fn sends_async_messages() {
+		let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
+		let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices);
 		let service = IoService::<ClientIoMessage>::start().unwrap();
 		let spec = Spec::new_test();
 
@@ -785,7 +885,7 @@ mod tests {
 			pruning: Algorithm::Archive,
 			channel: service.channel(),
 			snapshot_root: dir,
-			db_restore: Arc::new(NoopDBRestore),
+			client: client,
 		};
 
 		let service = Service::new(snapshot_params).unwrap();
@@ -16,26 +16,23 @@
 
 //! Tests for the snapshot service.
 
+use std::fs;
 use std::sync::Arc;
 
 use tempdir::TempDir;
-use client::{Client, BlockInfo};
+use blockchain::BlockProvider;
+use client::{Client, ClientConfig, ImportBlock, BlockInfo};
 use ids::BlockId;
+use snapshot::io::{PackedReader, PackedWriter, SnapshotReader, SnapshotWriter};
 use snapshot::service::{Service, ServiceParams};
-use snapshot::{self, ManifestData, SnapshotService};
+use snapshot::{chunk_state, chunk_secondary, ManifestData, Progress, SnapshotService, RestorationStatus};
 use spec::Spec;
-use test_helpers::{generate_dummy_client_with_spec_and_data, restoration_db_handler};
+use test_helpers::{new_db, new_temp_db, generate_dummy_client_with_spec_and_data, restoration_db_handler};
 
+use parking_lot::Mutex;
 use io::IoChannel;
 use kvdb_rocksdb::DatabaseConfig;
-
-struct NoopDBRestore;
-
-impl snapshot::DatabaseRestore for NoopDBRestore {
-	fn restore_db(&self, _new_db: &str) -> Result<(), ::error::Error> {
-		Ok(())
-	}
-}
+use verification::queue::kind::blocks::Unverified;
 
 #[test]
 fn restored_is_equivalent() {
@@ -46,7 +43,6 @@ fn restored_is_equivalent() {
 	const TX_PER: usize = 5;
 
 	let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
-
 	let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, TX_PER, &gas_prices);
 
 	let tempdir = TempDir::new("").unwrap();
@@ -73,7 +69,7 @@ fn restored_is_equivalent() {
 		pruning: ::journaldb::Algorithm::Archive,
 		channel: IoChannel::disconnected(),
 		snapshot_root: path,
-		db_restore: client2.clone(),
+		client: client2.clone(),
 	};
 
 	let service = Service::new(service_params).unwrap();
@@ -94,7 +90,7 @@ fn restored_is_equivalent() {
 		service.feed_block_chunk(hash, &chunk);
 	}
 
-	assert_eq!(service.status(), ::snapshot::RestorationStatus::Inactive);
+	assert_eq!(service.status(), RestorationStatus::Inactive);
 
 	for x in 0..NUM_BLOCKS {
 		let block1 = client.block(BlockId::Number(x as u64)).unwrap();
@@ -106,6 +102,9 @@ fn restored_is_equivalent() {
 
 #[test]
 fn guards_delete_folders() {
+	let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
+	let client = generate_dummy_client_with_spec_and_data(Spec::new_null, 400, 5, &gas_prices);
+
 	let spec = Spec::new_null();
 	let tempdir = TempDir::new("").unwrap();
 	let service_params = ServiceParams {
@@ -115,7 +114,7 @@ fn guards_delete_folders() {
 		pruning: ::journaldb::Algorithm::Archive,
 		channel: IoChannel::disconnected(),
 		snapshot_root: tempdir.path().to_owned(),
-		db_restore: Arc::new(NoopDBRestore),
+		client: client,
 	};
 
 	let service = Service::new(service_params).unwrap();
@@ -146,3 +145,201 @@ fn guards_delete_folders() {
 	assert!(!path.join("db").exists());
 	assert!(path.join("temp").exists());
 }
+
+#[test]
+fn keep_ancient_blocks() {
+	::env_logger::init().ok();
+
+	// Test variables
+	const NUM_BLOCKS: u64 = 500;
+	const NUM_SNAPSHOT_BLOCKS: u64 = 300;
+	const SNAPSHOT_MODE: ::snapshot::PowSnapshot = ::snapshot::PowSnapshot { blocks: NUM_SNAPSHOT_BLOCKS, max_restore_blocks: NUM_SNAPSHOT_BLOCKS };
+
+	// Temporary folders
+	let tempdir = TempDir::new("").unwrap();
+	let snapshot_path = tempdir.path().join("SNAP");
+
+	// Generate blocks
+	let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
+	let spec_f = Spec::new_null;
+	let spec = spec_f();
+	let client = generate_dummy_client_with_spec_and_data(spec_f, NUM_BLOCKS as u32, 5, &gas_prices);
+
+	let bc = client.chain();
+
+	// Create the Snapshot
+	let best_hash = bc.best_block_hash();
+	let writer = Mutex::new(PackedWriter::new(&snapshot_path).unwrap());
+	let block_hashes = chunk_secondary(
+		Box::new(SNAPSHOT_MODE),
+		&bc,
+		best_hash,
+		&writer,
+		&Progress::default()
+	).unwrap();
+	let state_db = client.state_db().journal_db().boxed_clone();
+	let start_header = bc.block_header_data(&best_hash).unwrap();
+	let state_root = start_header.state_root();
+	let state_hashes = chunk_state(
+		state_db.as_hashdb(),
+		&state_root,
+		&writer,
+		&Progress::default(),
+		None
+	).unwrap();
+
+	let manifest = ::snapshot::ManifestData {
+		version: 2,
+		state_hashes: state_hashes,
+		state_root: state_root,
+		block_hashes: block_hashes,
+		block_number: NUM_BLOCKS,
+		block_hash: best_hash,
+	};
+
+	writer.into_inner().finish(manifest.clone()).unwrap();
+
+	// Initialize the Client
+	let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
+	let client_db = new_temp_db(&tempdir.path());
+	let client2 = Client::new(
+		ClientConfig::default(),
+		&spec,
+		client_db,
+		Arc::new(::miner::Miner::new_for_tests(&spec, None)),
+		IoChannel::disconnected(),
+	).unwrap();
+
+	// Add some ancient blocks
+	for block_number in 1..50 {
+		let block_hash = bc.block_hash(block_number).unwrap();
+		let block = bc.block(&block_hash).unwrap();
+		client2.import_block(Unverified::from_rlp(block.into_inner()).unwrap()).unwrap();
+	}
+
+	client2.import_verified_blocks();
+	client2.flush_queue();
+
+	// Restore the Snapshot
+	let reader = PackedReader::new(&snapshot_path).unwrap().unwrap();
+	let service_params = ServiceParams {
+		engine: spec.engine.clone(),
+		genesis_block: spec.genesis_block(),
+		restoration_db_handler: restoration_db_handler(db_config),
+		pruning: ::journaldb::Algorithm::Archive,
+		channel: IoChannel::disconnected(),
+		snapshot_root: tempdir.path().to_owned(),
+		client: client2.clone(),
+	};
+	let service = Service::new(service_params).unwrap();
+	service.init_restore(manifest.clone(), false).unwrap();
+
+	for hash in &manifest.block_hashes {
+		let chunk = reader.chunk(*hash).unwrap();
+		service.feed_block_chunk(*hash, &chunk);
+	}
+
+	for hash in &manifest.state_hashes {
+		let chunk = reader.chunk(*hash).unwrap();
+		service.feed_state_chunk(*hash, &chunk);
+	}
+
+	match service.status() {
+		RestorationStatus::Inactive => (),
+		RestorationStatus::Failed => panic!("Snapshot Restoration has failed."),
+		RestorationStatus::Ongoing { .. } => panic!("Snapshot Restoration should be done."),
+		_ => panic!("Invalid Snapshot Service status."),
+	}
+
+	// Check that the latest block number is the right one
+	assert_eq!(client2.block(BlockId::Latest).unwrap().number(), NUM_BLOCKS as u64);
+
+	// Check that we have blocks in [NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1 ; NUM_BLOCKS]
+	// but none before
+	assert!(client2.block(BlockId::Number(NUM_BLOCKS - NUM_SNAPSHOT_BLOCKS + 1)).is_some());
+	assert!(client2.block(BlockId::Number(100)).is_none());
+
+	// Check that the first 50 blocks have been migrated
+	for block_number in 1..49 {
+		assert!(client2.block(BlockId::Number(block_number)).is_some());
+	}
+}
+
+#[test]
+fn recover_aborted_recovery() {
+	::env_logger::init().ok();
+
+	const NUM_BLOCKS: u32 = 400;
+	let gas_prices = vec![1.into(), 2.into(), 3.into(), 999.into()];
+	let client = generate_dummy_client_with_spec_and_data(Spec::new_null, NUM_BLOCKS, 5, &gas_prices);
+
+	let spec = Spec::new_null();
+	let tempdir = TempDir::new("").unwrap();
|
||||||
|
let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
|
||||||
|
let client_db = new_db();
|
||||||
|
let client2 = Client::new(
|
||||||
|
Default::default(),
|
||||||
|
&spec,
|
||||||
|
client_db,
|
||||||
|
Arc::new(::miner::Miner::new_for_tests(&spec, None)),
|
||||||
|
IoChannel::disconnected(),
|
||||||
|
).unwrap();
|
||||||
|
let service_params = ServiceParams {
|
||||||
|
engine: spec.engine.clone(),
|
||||||
|
genesis_block: spec.genesis_block(),
|
||||||
|
restoration_db_handler: restoration_db_handler(db_config),
|
||||||
|
pruning: ::journaldb::Algorithm::Archive,
|
||||||
|
channel: IoChannel::disconnected(),
|
||||||
|
snapshot_root: tempdir.path().to_owned(),
|
||||||
|
client: client2.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let service = Service::new(service_params).unwrap();
|
||||||
|
service.take_snapshot(&client, NUM_BLOCKS as u64).unwrap();
|
||||||
|
|
||||||
|
let manifest = service.manifest().unwrap();
|
||||||
|
service.init_restore(manifest.clone(), true).unwrap();
|
||||||
|
|
||||||
|
// Restore only the state chunks
|
||||||
|
for hash in &manifest.state_hashes {
|
||||||
|
let chunk = service.chunk(*hash).unwrap();
|
||||||
|
service.feed_state_chunk(*hash, &chunk);
|
||||||
|
}
|
||||||
|
|
||||||
|
match service.status() {
|
||||||
|
RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => {
|
||||||
|
assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32);
|
||||||
|
assert_eq!(block_chunks_done, 0);
|
||||||
|
},
|
||||||
|
e => panic!("Snapshot restoration must be ongoing ; {:?}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Abort the restore...
|
||||||
|
service.abort_restore();
|
||||||
|
|
||||||
|
// And try again!
|
||||||
|
service.init_restore(manifest.clone(), true).unwrap();
|
||||||
|
|
||||||
|
match service.status() {
|
||||||
|
RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => {
|
||||||
|
assert_eq!(state_chunks_done, manifest.state_hashes.len() as u32);
|
||||||
|
assert_eq!(block_chunks_done, 0);
|
||||||
|
},
|
||||||
|
e => panic!("Snapshot restoration must be ongoing ; {:?}", e),
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove the snapshot directory, and restart the restoration
|
||||||
|
// It shouldn't have restored any previous blocks
|
||||||
|
fs::remove_dir_all(tempdir.path()).unwrap();
|
||||||
|
|
||||||
|
// And try again!
|
||||||
|
service.init_restore(manifest.clone(), true).unwrap();
|
||||||
|
|
||||||
|
match service.status() {
|
||||||
|
RestorationStatus::Ongoing { block_chunks_done, state_chunks_done, .. } => {
|
||||||
|
assert_eq!(block_chunks_done, 0);
|
||||||
|
assert_eq!(state_chunks_done, 0);
|
||||||
|
},
|
||||||
|
_ => panic!("Snapshot restoration must be ongoing"),
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -41,7 +41,7 @@ use transaction::{Action, Transaction, SignedTransaction};
 use views::BlockView;
 use blooms_db;
 use kvdb::KeyValueDB;
-use kvdb_rocksdb;
+use kvdb_rocksdb::{self, Database, DatabaseConfig};
 use tempdir::TempDir;
 use verification::queue::kind::blocks::Unverified;
 use encoded;
@@ -263,30 +263,30 @@ pub fn get_test_client_with_blocks(blocks: Vec<Bytes>) -> Arc<Client> {
 	client
 }

+struct TestBlockChainDB {
+	_blooms_dir: TempDir,
+	_trace_blooms_dir: TempDir,
+	blooms: blooms_db::Database,
+	trace_blooms: blooms_db::Database,
+	key_value: Arc<KeyValueDB>,
+}
+
+impl BlockChainDB for TestBlockChainDB {
+	fn key_value(&self) -> &Arc<KeyValueDB> {
+		&self.key_value
+	}
+
+	fn blooms(&self) -> &blooms_db::Database {
+		&self.blooms
+	}
+
+	fn trace_blooms(&self) -> &blooms_db::Database {
+		&self.trace_blooms
+	}
+}
+
 /// Creates new test instance of `BlockChainDB`
 pub fn new_db() -> Arc<BlockChainDB> {
-	struct TestBlockChainDB {
-		_blooms_dir: TempDir,
-		_trace_blooms_dir: TempDir,
-		blooms: blooms_db::Database,
-		trace_blooms: blooms_db::Database,
-		key_value: Arc<KeyValueDB>,
-	}
-
-	impl BlockChainDB for TestBlockChainDB {
-		fn key_value(&self) -> &Arc<KeyValueDB> {
-			&self.key_value
-		}
-
-		fn blooms(&self) -> &blooms_db::Database {
-			&self.blooms
-		}
-
-		fn trace_blooms(&self) -> &blooms_db::Database {
-			&self.trace_blooms
-		}
-	}
-
 	let blooms_dir = TempDir::new("").unwrap();
 	let trace_blooms_dir = TempDir::new("").unwrap();

@@ -301,6 +301,26 @@ pub fn new_db() -> Arc<BlockChainDB> {
 	Arc::new(db)
 }

+/// Creates a new temporary `BlockChainDB` on FS
+pub fn new_temp_db(tempdir: &Path) -> Arc<BlockChainDB> {
+	let blooms_dir = TempDir::new("").unwrap();
+	let trace_blooms_dir = TempDir::new("").unwrap();
+	let key_value_dir = tempdir.join("key_value");
+
+	let db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
+	let key_value_db = Database::open(&db_config, key_value_dir.to_str().unwrap()).unwrap();
+
+	let db = TestBlockChainDB {
+		blooms: blooms_db::Database::open(blooms_dir.path()).unwrap(),
+		trace_blooms: blooms_db::Database::open(trace_blooms_dir.path()).unwrap(),
+		_blooms_dir: blooms_dir,
+		_trace_blooms_dir: trace_blooms_dir,
+		key_value: Arc::new(key_value_db)
+	};
+
+	Arc::new(db)
+}
+
 /// Creates new instance of KeyValueDBHandler
 pub fn restoration_db_handler(config: kvdb_rocksdb::DatabaseConfig) -> Box<BlockChainDBHandler> {
 	struct RestorationDBHandler {
@@ -583,6 +583,13 @@ impl<K: Kind> VerificationQueue<K> {
 		result
 	}

+	/// Returns true if there is nothing currently in the queue.
+	/// TODO [ToDr] Optimize to avoid locking
+	pub fn is_empty(&self) -> bool {
+		let v = &self.verification;
+		v.unverified.lock().is_empty() && v.verifying.lock().is_empty() && v.verified.lock().is_empty()
+	}
+
 	/// Get queue status.
 	pub fn queue_info(&self) -> QueueInfo {
 		use std::mem::size_of;
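
A note on the `is_empty` helper added above: each stage of the queue sits behind its own lock, so the three reads are not atomic as a group; the result is advisory, which is fine for its use as an "is a major import still running?" signal. A minimal standalone sketch of the same shape, using std locks and a hypothetical `Queue` type rather than the real `VerificationQueue`:

```rust
use std::sync::Mutex;

struct Queue {
    unverified: Mutex<Vec<u32>>,
    verifying: Mutex<Vec<u32>>,
    verified: Mutex<Vec<u32>>,
}

impl Queue {
    // Advisory: another thread may push between the three reads.
    fn is_empty(&self) -> bool {
        self.unverified.lock().unwrap().is_empty()
            && self.verifying.lock().unwrap().is_empty()
            && self.verified.lock().unwrap().is_empty()
    }
}

fn main() {
    let q = Queue {
        unverified: Mutex::new(vec![]),
        verifying: Mutex::new(vec![]),
        verified: Mutex::new(vec![1]),
    };
    assert!(!q.is_empty());
}
```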
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

-use std::sync::Arc;
+use std::sync::{Arc, mpsc, atomic};
 use std::collections::{HashMap, BTreeMap};
 use std::io;
 use std::ops::Range;
@@ -33,10 +33,10 @@ use ethcore::client::{BlockChainClient, ChainNotify, ChainRoute, ChainMessageTyp
 use ethcore::snapshot::SnapshotService;
 use ethcore::header::BlockNumber;
 use sync_io::NetSyncIo;
-use chain::{ChainSync, SyncStatus as EthSyncStatus};
+use chain::{ChainSyncApi, SyncStatus as EthSyncStatus};
 use std::net::{SocketAddr, AddrParseError};
 use std::str::FromStr;
-use parking_lot::RwLock;
+use parking_lot::{RwLock, Mutex};
 use chain::{ETH_PROTOCOL_VERSION_63, ETH_PROTOCOL_VERSION_62,
 	PAR_PROTOCOL_VERSION_1, PAR_PROTOCOL_VERSION_2, PAR_PROTOCOL_VERSION_3,
 	PRIVATE_TRANSACTION_PACKET, SIGNED_PRIVATE_TRANSACTION_PACKET};
@@ -228,6 +228,37 @@ impl AttachedProtocol {
 	}
 }

+/// A prioritized tasks run in a specialised timer.
+/// Every task should be completed within a hard deadline,
+/// if it's not it's either cancelled or split into multiple tasks.
+/// NOTE These tasks might not complete at all, so anything
+/// that happens here should work even if the task is cancelled.
+#[derive(Debug)]
+pub enum PriorityTask {
+	/// Propagate given block
+	PropagateBlock {
+		/// When the task was initiated
+		started: ::std::time::Instant,
+		/// Raw block RLP to propagate
+		block: Bytes,
+		/// Block hash
+		hash: H256,
+		/// Blocks difficulty
+		difficulty: U256,
+	},
+	/// Propagate a list of transactions
+	PropagateTransactions(::std::time::Instant, Arc<atomic::AtomicBool>),
+}
+
+impl PriorityTask {
+	/// Mark the task as being processed, right after it's retrieved from the queue.
+	pub fn starting(&self) {
+		match *self {
+			PriorityTask::PropagateTransactions(_, ref is_ready) => is_ready.store(true, atomic::Ordering::SeqCst),
+			_ => {},
+		}
+	}
+}
+
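The `PropagateTransactions` variant carries an `Arc<atomic::AtomicBool>` so the producer can later observe whether the task was ever picked up; `starting()` flips the flag as the first thing the consumer does. A minimal sketch of that hand-off pattern with plain std types (names here are illustrative, not the sync API):

```rust
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{mpsc, Arc};
use std::time::Instant;

fn main() {
    let (tx, rx) = mpsc::channel();
    let is_ready = Arc::new(AtomicBool::new(false));

    // Producer side: queue a task and keep a handle to the flag.
    tx.send((Instant::now(), is_ready.clone())).unwrap();

    // Consumer side: mark the task as started before doing the work,
    // so a concurrent regular round can skip redundant propagation.
    if let Ok((started, flag)) = rx.try_recv() {
        flag.store(true, Ordering::SeqCst);
        println!("task picked up {:?} after creation", started.elapsed());
    }

    assert!(is_ready.load(Ordering::SeqCst));
}
```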
 /// EthSync initialization parameters.
 pub struct Params {
 	/// Configuration.
@@ -260,16 +291,16 @@ pub struct EthSync {
 	subprotocol_name: [u8; 3],
 	/// Light subprotocol name.
 	light_subprotocol_name: [u8; 3],
+	/// Priority tasks notification channel
+	priority_tasks: Mutex<mpsc::Sender<PriorityTask>>,
 }

 fn light_params(
 	network_id: u64,
-	max_peers: u32,
+	median_peers: f64,
 	pruning_info: PruningInfo,
 	sample_store: Option<Box<SampleStore>>,
 ) -> LightParams {
-	const MAX_LIGHTSERV_LOAD: f64 = 0.5;
-
 	let mut light_params = LightParams {
 		network_id: network_id,
 		config: Default::default(),
@@ -282,9 +313,7 @@ fn light_params(
 		sample_store: sample_store,
 	};

-	let max_peers = ::std::cmp::max(max_peers, 1);
-	light_params.config.load_share = MAX_LIGHTSERV_LOAD / max_peers as f64;
+	light_params.config.median_peers = median_peers;

 	light_params
 }

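The effect of this hunk is to replace the old `load_share / max_peers` cost scaling with a median-peers figure supplied by the caller, so nodes configured with a high peer limit are no longer penalised. A small, hedged restatement of the arithmetic (the real computation happens in `EthSync::new`, shown further down in this diff):

```rust
// Mirrors the backport's formula: the expected serving load is scaled
// by the midpoint of the configured peer range, not the maximum.
fn median_peers(min_peers: u32, max_peers: u32) -> f64 {
    (min_peers + max_peers) as f64 / 2.0
}

fn main() {
    assert_eq!(median_peers(25, 50), 37.5);
}
```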
@@ -301,9 +330,10 @@ impl EthSync {
 			.map(|mut p| { p.push("request_timings"); light_net::FileStore(p) })
 			.map(|store| Box::new(store) as Box<_>);

+		let median_peers = (params.network_config.min_peers + params.network_config.max_peers) as f64 / 2.0;
 		let light_params = light_params(
 			params.config.network_id,
-			params.network_config.max_peers,
+			median_peers,
 			pruning_info,
 			sample_store,
 		);
@@ -315,13 +345,19 @@ impl EthSync {
 			})
 		};

-		let chain_sync = ChainSync::new(params.config, &*params.chain, params.private_tx_handler.clone());
+		let (priority_tasks_tx, priority_tasks_rx) = mpsc::channel();
+		let sync = ChainSyncApi::new(
+			params.config,
+			&*params.chain,
+			params.private_tx_handler.clone(),
+			priority_tasks_rx,
+		);
 		let service = NetworkService::new(params.network_config.clone().into_basic()?, connection_filter)?;

 		let sync = Arc::new(EthSync {
 			network: service,
 			eth_handler: Arc::new(SyncProtocolHandler {
-				sync: RwLock::new(chain_sync),
+				sync,
 				chain: params.chain,
 				snapshot_service: params.snapshot_service,
 				overlay: RwLock::new(HashMap::new()),
@@ -330,26 +366,32 @@ impl EthSync {
 			subprotocol_name: params.config.subprotocol_name,
 			light_subprotocol_name: params.config.light_subprotocol_name,
 			attached_protos: params.attached_protos,
+			priority_tasks: Mutex::new(priority_tasks_tx),
 		});

 		Ok(sync)
 	}
+
+	/// Priority tasks producer
+	pub fn priority_tasks(&self) -> mpsc::Sender<PriorityTask> {
+		self.priority_tasks.lock().clone()
+	}
 }

 impl SyncProvider for EthSync {
 	/// Get sync status
 	fn status(&self) -> EthSyncStatus {
-		self.eth_handler.sync.read().status()
+		self.eth_handler.sync.status()
 	}

 	/// Get sync peers
 	fn peers(&self) -> Vec<PeerInfo> {
 		self.network.with_context_eval(self.subprotocol_name, |ctx| {
 			let peer_ids = self.network.connected_peers();
-			let eth_sync = self.eth_handler.sync.read();
 			let light_proto = self.light_proto.as_ref();

-			peer_ids.into_iter().filter_map(|peer_id| {
+			let peer_info = self.eth_handler.sync.peer_info(&peer_ids);
+			peer_ids.into_iter().zip(peer_info).filter_map(|(peer_id, peer_info)| {
 				let session_info = match ctx.session_info(peer_id) {
 					None => return None,
 					Some(info) => info,
@@ -361,7 +403,7 @@ impl SyncProvider for EthSync {
 					capabilities: session_info.peer_capabilities.into_iter().map(|c| c.to_string()).collect(),
 					remote_address: session_info.remote_address,
 					local_address: session_info.local_address,
-					eth_info: eth_sync.peer_info(&peer_id),
+					eth_info: peer_info,
 					pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(peer_id)).map(Into::into),
 				})
 			}).collect()
@@ -373,17 +415,17 @@ impl SyncProvider for EthSync {
 	}

 	fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats> {
-		let sync = self.eth_handler.sync.read();
-		sync.transactions_stats()
-			.iter()
-			.map(|(hash, stats)| (*hash, stats.into()))
-			.collect()
+		self.eth_handler.sync.transactions_stats()
 	}
 }

 const PEERS_TIMER: TimerToken = 0;
-const SYNC_TIMER: TimerToken = 1;
-const TX_TIMER: TimerToken = 2;
+const MAINTAIN_SYNC_TIMER: TimerToken = 1;
+const CONTINUE_SYNC_TIMER: TimerToken = 2;
+const TX_TIMER: TimerToken = 3;
+const PRIORITY_TIMER: TimerToken = 4;
+
+pub(crate) const PRIORITY_TIMER_INTERVAL: Duration = Duration::from_millis(250);

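For quick reference, a plain-constant restatement of the timer layout after this change, with the intervals that `initialize` (later in this diff) registers for each token; `u64` stands in here for the real `TimerToken` alias:

```rust
const PEERS_TIMER: u64 = 0;          // 700ms  -> maintain_peers
const MAINTAIN_SYNC_TIMER: u64 = 1;  // 1100ms -> maintain_sync
const CONTINUE_SYNC_TIMER: u64 = 2;  // 2500ms -> continue_sync
const TX_TIMER: u64 = 3;             // 1300ms -> propagate_new_transactions
const PRIORITY_TIMER: u64 = 4;       // 250ms  -> process_priority_queue

fn main() {
    // Tokens must stay distinct, or the timeout dispatch would misroute.
    let tokens = [PEERS_TIMER, MAINTAIN_SYNC_TIMER, CONTINUE_SYNC_TIMER, TX_TIMER, PRIORITY_TIMER];
    for w in tokens.windows(2) {
        assert!(w[0] < w[1]);
    }
}
```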
 struct SyncProtocolHandler {
 	/// Shared blockchain client.
@@ -391,7 +433,7 @@ struct SyncProtocolHandler {
 	/// Shared snapshot service.
 	snapshot_service: Arc<SnapshotService>,
 	/// Sync strategy
-	sync: RwLock<ChainSync>,
+	sync: ChainSyncApi,
 	/// Chain overlay used to cache data such as fork block.
 	overlay: RwLock<HashMap<BlockNumber, Bytes>>,
 }
@@ -400,13 +442,16 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
 	fn initialize(&self, io: &NetworkContext) {
 		if io.subprotocol_name() != WARP_SYNC_PROTOCOL_ID {
 			io.register_timer(PEERS_TIMER, Duration::from_millis(700)).expect("Error registering peers timer");
-			io.register_timer(SYNC_TIMER, Duration::from_millis(1100)).expect("Error registering sync timer");
+			io.register_timer(MAINTAIN_SYNC_TIMER, Duration::from_millis(1100)).expect("Error registering sync timer");
+			io.register_timer(CONTINUE_SYNC_TIMER, Duration::from_millis(2500)).expect("Error registering sync timer");
 			io.register_timer(TX_TIMER, Duration::from_millis(1300)).expect("Error registering transactions timer");
+
+			io.register_timer(PRIORITY_TIMER, PRIORITY_TIMER_INTERVAL).expect("Error registering peers timer");
 		}
 	}

 	fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) {
-		ChainSync::dispatch_packet(&self.sync, &mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data);
+		self.sync.dispatch_packet(&mut NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay), *peer, packet_id, data);
 	}

 	fn connected(&self, io: &NetworkContext, peer: &PeerId) {
@@ -431,16 +476,28 @@ impl NetworkProtocolHandler for SyncProtocolHandler {
 		let mut io = NetSyncIo::new(io, &*self.chain, &*self.snapshot_service, &self.overlay);
 		match timer {
 			PEERS_TIMER => self.sync.write().maintain_peers(&mut io),
-			SYNC_TIMER => self.sync.write().maintain_sync(&mut io),
-			TX_TIMER => {
-				self.sync.write().propagate_new_transactions(&mut io);
-			},
+			MAINTAIN_SYNC_TIMER => self.sync.write().maintain_sync(&mut io),
+			CONTINUE_SYNC_TIMER => self.sync.write().continue_sync(&mut io),
+			TX_TIMER => self.sync.write().propagate_new_transactions(&mut io),
+			PRIORITY_TIMER => self.sync.process_priority_queue(&mut io),
 			_ => warn!("Unknown timer {} triggered.", timer),
 		}
 	}
 }

 impl ChainNotify for EthSync {
+	fn block_pre_import(&self, bytes: &Bytes, hash: &H256, difficulty: &U256) {
+		let task = PriorityTask::PropagateBlock {
+			started: ::std::time::Instant::now(),
+			block: bytes.clone(),
+			hash: *hash,
+			difficulty: *difficulty,
+		};
+		if let Err(e) = self.priority_tasks.lock().send(task) {
+			warn!(target: "sync", "Unexpected error during priority block propagation: {:?}", e);
+		}
+	}
+
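`block_pre_import` must never block the import pipeline, so the task is pushed through an unbounded channel and a send failure is only logged. The same fire-and-forget shape in miniature (std types and illustrative names, not the parity API):

```rust
use std::sync::mpsc;

fn notify(tx: &mpsc::Sender<String>, task: String) {
    // Sending on an unbounded channel never blocks; a closed receiver
    // is the only failure mode, and it is merely reported.
    if let Err(e) = tx.send(task) {
        eprintln!("priority propagation dropped: {:?}", e); // stand-in for warn!
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    notify(&tx, "new block".to_string());
    assert_eq!(rx.recv().unwrap(), "new block");
}
```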
 	fn new_blocks(&self,
 		imported: Vec<H256>,
 		invalid: Vec<H256>,
@@ -940,19 +997,3 @@ impl LightSyncProvider for LightSync {
 		Default::default() // TODO
 	}
 }
-
-#[cfg(test)]
-mod tests {
-	use super::*;
-
-	#[test]
-	fn light_params_load_share_depends_on_max_peers() {
-		let pruning_info = PruningInfo {
-			earliest_chain: 0,
-			earliest_state: 0,
-		};
-		let params1 = light_params(0, 10, pruning_info.clone(), None);
-		let params2 = light_params(0, 20, pruning_info, None);
-		assert!(params1.config.load_share > params2.config.load_share)
-	}
-}
@@ -29,7 +29,6 @@ use rlp::Rlp;
 use snapshot::ChunkType;
 use std::cmp;
 use std::mem;
-use std::collections::HashSet;
 use std::time::Instant;
 use sync_io::SyncIo;

@@ -58,7 +57,6 @@ use super::{
 	SNAPSHOT_DATA_PACKET,
 	SNAPSHOT_MANIFEST_PACKET,
 	STATUS_PACKET,
-	TRANSACTIONS_PACKET,
 };

 /// The Chain Sync Handler: handles responses from peers
@@ -67,14 +65,9 @@ pub struct SyncHandler;
 impl SyncHandler {
 	/// Handle incoming packet from peer
 	pub fn on_packet(sync: &mut ChainSync, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
-		if packet_id != STATUS_PACKET && !sync.peers.contains_key(&peer) {
-			debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_info(peer));
-			return;
-		}
 		let rlp = Rlp::new(data);
 		let result = match packet_id {
 			STATUS_PACKET => SyncHandler::on_peer_status(sync, io, peer, &rlp),
-			TRANSACTIONS_PACKET => SyncHandler::on_peer_transactions(sync, io, peer, &rlp),
 			BLOCK_HEADERS_PACKET => SyncHandler::on_peer_block_headers(sync, io, peer, &rlp),
 			BLOCK_BODIES_PACKET => SyncHandler::on_peer_block_bodies(sync, io, peer, &rlp),
 			RECEIPTS_PACKET => SyncHandler::on_peer_block_receipts(sync, io, peer, &rlp),
@@ -104,15 +97,12 @@ impl SyncHandler {
 				sync.sync_peer(io, peer, false);
 			},
 		}
-		// give tasks to other peers
-		sync.continue_sync(io);
 	}

 	/// Called when peer sends us new consensus packet
-	pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
+	pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) {
 		trace!(target: "sync", "Received consensus packet from {:?}", peer_id);
 		io.chain().queue_consensus_message(r.as_raw().to_vec());
-		Ok(())
 	}

 	/// Called by peer when it is disconnecting
@@ -578,8 +568,8 @@ impl SyncHandler {
 			asking_blocks: Vec::new(),
 			asking_hash: None,
 			ask_time: Instant::now(),
-			last_sent_transactions: HashSet::new(),
-			last_sent_private_transactions: HashSet::new(),
+			last_sent_transactions: Default::default(),
+			last_sent_private_transactions: Default::default(),
 			expired: false,
 			confirmation: if sync.fork_block.is_none() { ForkConfirmation::Confirmed } else { ForkConfirmation::Unconfirmed },
 			asking_snapshot_data: None,
@@ -635,7 +625,7 @@ impl SyncHandler {
 	}

 	/// Called when peer sends us new transactions
-	fn on_peer_transactions(sync: &mut ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), DownloaderImportError> {
+	pub fn on_peer_transactions(sync: &ChainSync, io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
 		// Accept transactions only when fully synced
 		if !io.is_chain_queue_empty() || (sync.state != SyncState::Idle && sync.state != SyncState::NewBlocks) {
 			trace!(target: "sync", "{} Ignoring transactions while syncing", peer_id);
@@ -92,17 +92,17 @@ mod propagator;
 mod requester;
 mod supplier;

-use std::sync::Arc;
-use std::collections::{HashSet, HashMap};
+use std::sync::{Arc, mpsc};
+use std::collections::{HashSet, HashMap, BTreeMap};
 use std::cmp;
 use std::time::{Duration, Instant};
 use hash::keccak;
 use heapsize::HeapSizeOf;
 use ethereum_types::{H256, U256};
-use fastmap::H256FastMap;
-use parking_lot::RwLock;
+use fastmap::{H256FastMap, H256FastSet};
+use parking_lot::{Mutex, RwLock, RwLockWriteGuard};
 use bytes::Bytes;
-use rlp::{Rlp, RlpStream, DecoderError};
+use rlp::{RlpStream, DecoderError};
 use network::{self, PeerId, PacketId};
 use ethcore::header::{BlockNumber};
 use ethcore::client::{BlockChainClient, BlockStatus, BlockId, BlockChainInfo, BlockQueueInfo};
@@ -112,7 +112,7 @@ use super::{WarpSync, SyncConfig};
 use block_sync::{BlockDownloader, DownloadAction};
 use rand::Rng;
 use snapshot::{Snapshot};
-use api::{EthProtocolInfo as PeerInfoDigest, WARP_SYNC_PROTOCOL_ID};
+use api::{EthProtocolInfo as PeerInfoDigest, WARP_SYNC_PROTOCOL_ID, PriorityTask};
 use private_tx::PrivateTxHandler;
 use transactions_stats::{TransactionsStats, Stats as TransactionStats};
 use transaction::UnverifiedTransaction;
@@ -120,7 +120,7 @@ use transaction::UnverifiedTransaction;
 use self::handler::SyncHandler;
 use self::propagator::SyncPropagator;
 use self::requester::SyncRequester;
-use self::supplier::SyncSupplier;
+pub(crate) use self::supplier::SyncSupplier;

 known_heap_size!(0, PeerInfo);

@@ -187,6 +187,11 @@ const FORK_HEADER_TIMEOUT: Duration = Duration::from_secs(3);
 const SNAPSHOT_MANIFEST_TIMEOUT: Duration = Duration::from_secs(5);
 const SNAPSHOT_DATA_TIMEOUT: Duration = Duration::from_secs(120);

+/// Defines how much time we have to complete priority transaction or block propagation.
+/// after the deadline is reached the task is considered finished
+/// (so we might sent only to some part of the peers we originally intended to send to)
+const PRIORITY_TASK_DEADLINE: Duration = Duration::from_millis(100);
+
 #[derive(Copy, Clone, Eq, PartialEq, Debug)]
 /// Sync state
 pub enum SyncState {
@@ -323,9 +328,9 @@ pub struct PeerInfo {
 	/// Request timestamp
 	ask_time: Instant,
 	/// Holds a set of transactions recently sent to this peer to avoid spamming.
-	last_sent_transactions: HashSet<H256>,
+	last_sent_transactions: H256FastSet,
 	/// Holds a set of private transactions and their signatures recently sent to this peer to avoid spamming.
-	last_sent_private_transactions: HashSet<H256>,
+	last_sent_private_transactions: H256FastSet,
 	/// Pending request is expired and result should be ignored
 	expired: bool,
 	/// Peer fork confirmation status
@@ -375,6 +380,217 @@ pub mod random {
 pub type RlpResponseResult = Result<Option<(PacketId, RlpStream)>, PacketDecodeError>;
 pub type Peers = HashMap<PeerId, PeerInfo>;

+/// Thread-safe wrapper for `ChainSync`.
+///
+/// NOTE always lock in order of fields declaration
+pub struct ChainSyncApi {
+	/// Priority tasks queue
+	priority_tasks: Mutex<mpsc::Receiver<PriorityTask>>,
+	/// The rest of sync data
+	sync: RwLock<ChainSync>,
+}
+
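The struct doc above pins a lock order: `priority_tasks` before `sync`. A tiny std-only reduction of why that matters; if every call site takes the locks in field order, the classic two-lock deadlock cannot occur (the types below are illustrative, not the sync module's):

```rust
use std::sync::{Mutex, RwLock};

struct Api {
    priority_tasks: Mutex<Vec<u32>>, // always locked first
    sync: RwLock<u32>,               // always locked second
}

fn drain(api: &Api) {
    // Field-declaration order: tasks lock, then sync lock.
    let mut tasks = api.priority_tasks.lock().unwrap();
    let mut sync = api.sync.write().unwrap();
    *sync += tasks.drain(..).count() as u32;
}

fn main() {
    let api = Api { priority_tasks: Mutex::new(vec![1, 2]), sync: RwLock::new(0) };
    drain(&api);
    assert_eq!(*api.sync.read().unwrap(), 2);
}
```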
+impl ChainSyncApi {
+	/// Creates new `ChainSyncApi`
+	pub fn new(
+		config: SyncConfig,
+		chain: &BlockChainClient,
+		private_tx_handler: Arc<PrivateTxHandler>,
+		priority_tasks: mpsc::Receiver<PriorityTask>,
+	) -> Self {
+		ChainSyncApi {
+			sync: RwLock::new(ChainSync::new(config, chain, private_tx_handler)),
+			priority_tasks: Mutex::new(priority_tasks),
+		}
+	}
+
+	/// Gives `write` access to underlying `ChainSync`
+	pub fn write(&self) -> RwLockWriteGuard<ChainSync> {
+		self.sync.write()
+	}
+
+	/// Returns info about given list of peers
+	pub fn peer_info(&self, ids: &[PeerId]) -> Vec<Option<PeerInfoDigest>> {
+		let sync = self.sync.read();
+		ids.iter().map(|id| sync.peer_info(id)).collect()
+	}
+
+	/// Returns synchonization status
+	pub fn status(&self) -> SyncStatus {
+		self.sync.read().status()
+	}
+
+	/// Returns transactions propagation statistics
+	pub fn transactions_stats(&self) -> BTreeMap<H256, ::TransactionStats> {
+		self.sync.read().transactions_stats()
+			.iter()
+			.map(|(hash, stats)| (*hash, stats.into()))
+			.collect()
+	}
+
+	/// Dispatch incoming requests and responses
+	pub fn dispatch_packet(&self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
+		SyncSupplier::dispatch_packet(&self.sync, io, peer, packet_id, data)
+	}
+
+	/// Process a priority propagation queue.
+	/// This task is run from a timer and should be time constrained.
+	/// Hence we set up a deadline for the execution and cancel the task if the deadline is exceeded.
+	///
+	/// NOTE This method should only handle stuff that can be canceled and would reach other peers
+	/// by other means.
+	pub fn process_priority_queue(&self, io: &mut SyncIo) {
+		fn check_deadline(deadline: Instant) -> Option<Duration> {
+			let now = Instant::now();
+			if now > deadline {
+				None
+			} else {
+				Some(deadline - now)
+			}
+		}
+
+		// deadline to get the task from the queue
+		let deadline = Instant::now() + ::api::PRIORITY_TIMER_INTERVAL;
+		let mut work = || {
+			let task = {
+				let tasks = self.priority_tasks.try_lock_until(deadline)?;
+				let left = check_deadline(deadline)?;
+				tasks.recv_timeout(left).ok()?
+			};
+			task.starting();
+			// wait for the sync lock until deadline,
+			// note we might drop the task here if we won't manage to acquire the lock.
+			let mut sync = self.sync.try_write_until(deadline)?;
+			// since we already have everything let's use a different deadline
+			// to do the rest of the job now, so that previous work is not wasted.
+			let deadline = Instant::now() + PRIORITY_TASK_DEADLINE;
+			let as_ms = move |prev| {
+				let dur: Duration = Instant::now() - prev;
+				dur.as_secs() * 1_000 + dur.subsec_millis() as u64
+			};
+			match task {
+				// NOTE We can't simply use existing methods,
+				// cause the block is not in the DB yet.
+				PriorityTask::PropagateBlock { started, block, hash, difficulty } => {
+					// try to send to peers that are on the same block as us
+					// (they will most likely accept the new block).
+					let chain_info = io.chain().chain_info();
+					let total_difficulty = chain_info.total_difficulty + difficulty;
+					let rlp = ChainSync::create_block_rlp(&block, total_difficulty);
+					for peers in sync.get_peers(&chain_info, PeerState::SameBlock).chunks(10) {
+						check_deadline(deadline)?;
+						for peer in peers {
+							SyncPropagator::send_packet(io, *peer, NEW_BLOCK_PACKET, rlp.clone());
+							if let Some(ref mut peer) = sync.peers.get_mut(peer) {
+								peer.latest_hash = hash;
+							}
+						}
+					}
+					debug!(target: "sync", "Finished block propagation, took {}ms", as_ms(started));
+				},
+				PriorityTask::PropagateTransactions(time, _) => {
+					SyncPropagator::propagate_new_transactions(&mut sync, io, || {
+						check_deadline(deadline).is_some()
+					});
+					debug!(target: "sync", "Finished transaction propagation, took {}ms", as_ms(time));
+				},
+			}
+
+			Some(())
+		};
+
+		// Process as many items as we can until the deadline is reached.
+		loop {
+			if work().is_none() {
+				return;
+			}
+		}
+	}
+}
+
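`process_priority_queue` uses two deadlines: one bounds how long we may wait to obtain a task and the locks, and a fresh `PRIORITY_TASK_DEADLINE` then bounds the work itself so queueing time is not charged against propagation. The core helper, restated as a runnable sketch:

```rust
use std::time::{Duration, Instant};

// Returns the remaining budget, or None once the deadline has passed.
fn check_deadline(deadline: Instant) -> Option<Duration> {
    let now = Instant::now();
    if now > deadline { None } else { Some(deadline - now) }
}

fn main() {
    // Phase 1: budget for fetching a task from the queue.
    let fetch_deadline = Instant::now() + Duration::from_millis(250);
    if let Some(left) = check_deadline(fetch_deadline) {
        println!("{}ms left to fetch a task", left.as_millis());
        // Phase 2: once the task is in hand, restart the clock so the
        // actual propagation gets its full (shorter) budget.
        let _work_deadline = Instant::now() + Duration::from_millis(100);
    }
}
```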
+// Static methods
+impl ChainSync {
+	/// creates rlp to send for the tree defined by 'from' and 'to' hashes
+	fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
+		match chain.tree_route(from, to) {
+			Some(route) => {
+				let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
+				match route.blocks.len() {
+					0 => None,
+					_ => {
+						let mut blocks = route.blocks;
+						blocks.extend(uncles);
+						let mut rlp_stream = RlpStream::new_list(blocks.len());
+						for block_hash in blocks {
+							let mut hash_rlp = RlpStream::new_list(2);
+							let number = chain.block_header(BlockId::Hash(block_hash.clone()))
+								.expect("chain.tree_route and chain.find_uncles only return hahses of blocks that are in the blockchain. qed.").number();
+							hash_rlp.append(&block_hash);
+							hash_rlp.append(&number);
+							rlp_stream.append_raw(hash_rlp.as_raw(), 1);
+						}
+						Some(rlp_stream.out())
+					}
+				}
+			},
+			None => None
+		}
+	}
+
+	/// creates rlp from block bytes and total difficulty
+	fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
+		let mut rlp_stream = RlpStream::new_list(2);
+		rlp_stream.append_raw(bytes, 1);
+		rlp_stream.append(&total_difficulty);
+		rlp_stream.out()
+	}
+
+	/// creates latest block rlp for the given client
+	fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
+		Self::create_block_rlp(
+			&chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
+				.expect("Best block always exists").into_inner(),
+			chain.chain_info().total_difficulty
+		)
+	}
+
+	/// creates given hash block rlp for the given client
+	fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
+		Self::create_block_rlp(
+			&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
+			chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
+		)
+	}
+
+	fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
+		// take sqrt(x) peers
+		let mut peers = peers.to_vec();
+		let mut count = (peers.len() as f64).powf(0.5).round() as usize;
+		count = cmp::min(count, MAX_PEERS_PROPAGATION);
+		count = cmp::max(count, MIN_PEERS_PROPAGATION);
+		random::new().shuffle(&mut peers);
+		peers.truncate(count);
+		peers
+	}
+
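`select_random_peers` keeps propagation fan-out at roughly the square root of the peer count, clamped between fixed bounds. The same rule as a standalone function (the literal bounds below are illustrative stand-ins for `MIN_PEERS_PROPAGATION`/`MAX_PEERS_PROPAGATION`):

```rust
// sqrt(n) fan-out, clamped to [min, max].
fn propagation_count(total_peers: usize, min: usize, max: usize) -> usize {
    let count = (total_peers as f64).sqrt().round() as usize;
    count.max(min).min(max)
}

fn main() {
    assert_eq!(propagation_count(100, 4, 128), 10); // sqrt(100) = 10
    assert_eq!(propagation_count(4, 4, 128), 4);    // clamped up to the minimum
}
```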
+	fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState {
+		let best_block = chain.chain_info().best_block_number;
+		match warp_sync {
+			WarpSync::Enabled => SyncState::WaitingPeers,
+			WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
+			_ => SyncState::Idle,
+		}
+	}
+}
+
+/// A peer query method for getting a list of peers
+enum PeerState {
+	/// Peer is on different hash than us
+	Lagging,
+	/// Peer is on the same block as us
+	SameBlock
+}
+
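`get_init_state` (moved here unchanged) decides whether sync starts by waiting for warp-sync peers. A self-contained restatement with simplified types:

```rust
#[derive(Debug, PartialEq)]
enum SyncState { WaitingPeers, Idle }

enum WarpSync { Enabled, OnlyAndAfter(u64), Disabled }

// Warp sync waits for peers unless it is pinned to a snapshot block we
// have already passed, in which case plain syncing proceeds.
fn init_state(warp: WarpSync, best_block: u64) -> SyncState {
    match warp {
        WarpSync::Enabled => SyncState::WaitingPeers,
        WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
        _ => SyncState::Idle,
    }
}

fn main() {
    assert_eq!(init_state(WarpSync::OnlyAndAfter(100), 50), SyncState::WaitingPeers);
    assert_eq!(init_state(WarpSync::OnlyAndAfter(100), 150), SyncState::Idle);
    assert_eq!(init_state(WarpSync::Disabled, 0), SyncState::Idle);
}
```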
 /// Blockchain sync handler.
 /// See module documentation for more details.
 pub struct ChainSync {
@@ -417,10 +633,14 @@ pub struct ChainSync {

 impl ChainSync {
 	/// Create a new instance of syncing strategy.
-	pub fn new(config: SyncConfig, chain: &BlockChainClient, private_tx_handler: Arc<PrivateTxHandler>) -> ChainSync {
+	pub fn new(
+		config: SyncConfig,
+		chain: &BlockChainClient,
+		private_tx_handler: Arc<PrivateTxHandler>,
+	) -> Self {
 		let chain_info = chain.chain_info();
 		let best_block = chain.chain_info().best_block_number;
-		let state = ChainSync::get_init_state(config.warp_sync, chain);
+		let state = Self::get_init_state(config.warp_sync, chain);

 		let mut sync = ChainSync {
 			state,
@@ -445,15 +665,6 @@ impl ChainSync {
 		sync
 	}

-	fn get_init_state(warp_sync: WarpSync, chain: &BlockChainClient) -> SyncState {
-		let best_block = chain.chain_info().best_block_number;
-		match warp_sync {
-			WarpSync::Enabled => SyncState::WaitingPeers,
-			WarpSync::OnlyAndAfter(block) if block > best_block => SyncState::WaitingPeers,
-			_ => SyncState::Idle,
-		}
-	}
-
 	/// Returns synchonization status
 	pub fn status(&self) -> SyncStatus {
 		let last_imported_number = self.new_blocks.last_imported_block_number();
@@ -521,7 +732,7 @@ impl ChainSync {
 				}
 			}
 		}
-		self.state = state.unwrap_or_else(|| ChainSync::get_init_state(self.warp_sync, io.chain()));
+		self.state = state.unwrap_or_else(|| Self::get_init_state(self.warp_sync, io.chain()));
 		// Reactivate peers only if some progress has been made
 		// since the last sync round of if starting fresh.
 		self.active_peers = self.peers.keys().cloned().collect();
@@ -655,37 +866,35 @@ impl ChainSync {
 	}

 	/// Resume downloading
-	fn continue_sync(&mut self, io: &mut SyncIo) {
-		// Collect active peers that can sync
-		let confirmed_peers: Vec<(PeerId, u8)> = self.peers.iter().filter_map(|(peer_id, peer)|
-			if peer.can_sync() {
-				Some((*peer_id, peer.protocol_version))
-			} else {
-				None
-			}
-		).collect();
-
-		trace!(
-			target: "sync",
-			"Syncing with peers: {} active, {} confirmed, {} total",
-			self.active_peers.len(), confirmed_peers.len(), self.peers.len()
-		);
-
+	pub fn continue_sync(&mut self, io: &mut SyncIo) {
 		if self.state == SyncState::Waiting {
 			trace!(target: "sync", "Waiting for the block queue");
 		} else if self.state == SyncState::SnapshotWaiting {
 			trace!(target: "sync", "Waiting for the snapshot restoration");
 		} else {
-			let mut peers: Vec<(PeerId, u8)> = confirmed_peers.iter().filter(|&&(peer_id, _)|
-				self.active_peers.contains(&peer_id)
-			).map(|v| *v).collect();
+			// Collect active peers that can sync
+			let mut peers: Vec<(PeerId, u8)> = self.peers.iter().filter_map(|(peer_id, peer)|
+				if peer.can_sync() && peer.asking == PeerAsking::Nothing && self.active_peers.contains(&peer_id) {
+					Some((*peer_id, peer.protocol_version))
+				} else {
+					None
+				}
+			).collect();

-			random::new().shuffle(&mut peers); //TODO: sort by rating
-			// prefer peers with higher protocol version
-			peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2));
+			if peers.len() > 0 {
+				trace!(
+					target: "sync",
+					"Syncing with peers: {} active, {} available, {} total",
+					self.active_peers.len(), peers.len(), self.peers.len()
+				);

-			for (peer_id, _) in peers {
-				self.sync_peer(io, peer_id, false);
+				random::new().shuffle(&mut peers); // TODO (#646): sort by rating
+				// prefer peers with higher protocol version
+				peers.sort_by(|&(_, ref v1), &(_, ref v2)| v1.cmp(v2));
+
+				for (peer_id, _) in peers {
+					self.sync_peer(io, peer_id, false);
+				}
 			}
 		}
 	}

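The reworked `continue_sync` above no longer re-triggers every confirmed peer after each response; it only polls peers that are confirmed, active, and currently idle (`asking == Nothing`). A condensed model of that filter (the types are illustrative, not the sync module's):

```rust
#[derive(Clone, Copy, PartialEq)]
enum Asking { Nothing, BlockHeaders }

struct Peer { id: usize, asking: Asking, confirmed: bool, protocol: u8 }

// Keep only idle, confirmed, active peers; prefer higher protocol versions.
fn syncable(peers: &[Peer], active: &[usize]) -> Vec<(usize, u8)> {
    let mut out: Vec<_> = peers.iter()
        .filter(|p| p.confirmed && p.asking == Asking::Nothing && active.contains(&p.id))
        .map(|p| (p.id, p.protocol))
        .collect();
    out.sort_by(|a, b| a.1.cmp(&b.1));
    out
}

fn main() {
    let peers = vec![
        Peer { id: 0, asking: Asking::Nothing, confirmed: true, protocol: 63 },
        Peer { id: 1, asking: Asking::BlockHeaders, confirmed: true, protocol: 63 },
    ];
    // The busy peer (id 1) is skipped instead of being polled again.
    assert_eq!(syncable(&peers, &[0, 1]), vec![(0, 63)]);
}
```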
@ -970,6 +1179,12 @@ impl ChainSync {
|
|||||||
self.state = SyncState::Blocks;
|
self.state = SyncState::Blocks;
|
||||||
self.continue_sync(io);
|
self.continue_sync(io);
|
||||||
},
|
},
|
||||||
|
SyncState::SnapshotData => match io.snapshot_service().status() {
|
||||||
|
RestorationStatus::Inactive | RestorationStatus::Failed => {
|
||||||
|
self.state = SyncState::SnapshotWaiting;
|
||||||
|
},
|
||||||
|
RestorationStatus::Initializing { .. } | RestorationStatus::Ongoing { .. } => (),
|
||||||
|
},
|
||||||
SyncState::SnapshotWaiting => {
|
SyncState::SnapshotWaiting => {
|
||||||
match io.snapshot_service().status() {
|
match io.snapshot_service().status() {
|
||||||
RestorationStatus::Inactive => {
|
RestorationStatus::Inactive => {
|
||||||
@ -998,67 +1213,24 @@ impl ChainSync {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// creates rlp to send for the tree defined by 'from' and 'to' hashes
|
/// returns peer ids that have different block than our chain
|
||||||
fn create_new_hashes_rlp(chain: &BlockChainClient, from: &H256, to: &H256) -> Option<Bytes> {
|
fn get_lagging_peers(&self, chain_info: &BlockChainInfo) -> Vec<PeerId> {
|
||||||
match chain.tree_route(from, to) {
|
self.get_peers(chain_info, PeerState::Lagging)
|
||||||
Some(route) => {
|
|
||||||
let uncles = chain.find_uncles(from).unwrap_or_else(Vec::new);
|
|
||||||
match route.blocks.len() {
|
|
||||||
0 => None,
|
|
||||||
_ => {
|
|
||||||
let mut blocks = route.blocks;
|
|
||||||
blocks.extend(uncles);
|
|
||||||
let mut rlp_stream = RlpStream::new_list(blocks.len());
|
|
||||||
for block_hash in blocks {
|
|
||||||
let mut hash_rlp = RlpStream::new_list(2);
|
|
||||||
let number = chain.block_header(BlockId::Hash(block_hash.clone()))
|
|
||||||
.expect("chain.tree_route and chain.find_uncles only return hahses of blocks that are in the blockchain. qed.").number();
|
|
||||||
hash_rlp.append(&block_hash);
|
|
||||||
hash_rlp.append(&number);
|
|
||||||
rlp_stream.append_raw(hash_rlp.as_raw(), 1);
|
|
||||||
}
|
|
||||||
Some(rlp_stream.out())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
},
|
|
||||||
None => None
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// creates rlp from block bytes and total difficulty
|
/// returns peer ids that have different or the same blocks than our chain
|
||||||
fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
|
fn get_peers(&self, chain_info: &BlockChainInfo, peers: PeerState) -> Vec<PeerId> {
|
||||||
let mut rlp_stream = RlpStream::new_list(2);
|
|
||||||
rlp_stream.append_raw(bytes, 1);
|
|
||||||
rlp_stream.append(&total_difficulty);
|
|
||||||
rlp_stream.out()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// creates latest block rlp for the given client
|
|
||||||
fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
|
|
||||||
ChainSync::create_block_rlp(
|
|
||||||
&chain.block(BlockId::Hash(chain.chain_info().best_block_hash))
|
|
||||||
.expect("Best block always exists").into_inner(),
|
|
||||||
chain.chain_info().total_difficulty
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// creates given hash block rlp for the given client
|
|
||||||
fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
|
|
||||||
ChainSync::create_block_rlp(
|
|
||||||
&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed").into_inner(),
|
|
||||||
chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// returns peer ids that have different blocks than our chain
|
|
||||||
fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo) -> Vec<PeerId> {
|
|
||||||
let latest_hash = chain_info.best_block_hash;
|
let latest_hash = chain_info.best_block_hash;
|
||||||
self
|
self
|
||||||
.peers
|
.peers
|
||||||
.iter_mut()
|
.iter()
|
||||||
.filter_map(|(&id, ref mut peer_info)| {
|
.filter_map(|(&id, ref mut peer_info)| {
|
||||||
trace!(target: "sync", "Checking peer our best {} their best {}", latest_hash, peer_info.latest_hash);
|
trace!(target: "sync", "Checking peer our best {} their best {}", latest_hash, peer_info.latest_hash);
|
||||||
if peer_info.latest_hash != latest_hash {
|
let matches = match peers {
|
||||||
|
PeerState::Lagging => peer_info.latest_hash != latest_hash,
|
||||||
|
PeerState::SameBlock => peer_info.latest_hash == latest_hash,
|
||||||
|
};
|
||||||
|
if matches {
|
||||||
Some(id)
|
Some(id)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
@ -1067,17 +1239,6 @@ impl ChainSync {
|
|||||||
.collect::<Vec<_>>()
|
.collect::<Vec<_>>()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
|
|
||||||
// take sqrt(x) peers
|
|
||||||
let mut peers = peers.to_vec();
|
|
||||||
let mut count = (peers.len() as f64).powf(0.5).round() as usize;
|
|
||||||
count = cmp::min(count, MAX_PEERS_PROPAGATION);
|
|
||||||
count = cmp::max(count, MIN_PEERS_PROPAGATION);
|
|
||||||
random::new().shuffle(&mut peers);
|
|
||||||
peers.truncate(count);
|
|
||||||
peers
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_consensus_peers(&self) -> Vec<PeerId> {
|
fn get_consensus_peers(&self) -> Vec<PeerId> {
|
||||||
self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_2.0 { Some(*id) } else { None }).collect()
|
self.peers.iter().filter_map(|(id, p)| if p.protocol_version >= PAR_PROTOCOL_VERSION_2.0 { Some(*id) } else { None }).collect()
|
||||||
}
|
}
|
||||||
@ -1126,21 +1287,10 @@ impl ChainSync {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Dispatch incoming requests and responses
|
|
||||||
pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
|
|
||||||
SyncSupplier::dispatch_packet(sync, io, peer, packet_id, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
|
pub fn on_packet(&mut self, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
|
||||||
debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
|
|
||||||
SyncHandler::on_packet(self, io, peer, packet_id, data);
|
SyncHandler::on_packet(self, io, peer, packet_id, data);
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Called when peer sends us new consensus packet
|
|
||||||
pub fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &Rlp) -> Result<(), PacketDecodeError> {
|
|
||||||
SyncHandler::on_consensus_packet(io, peer_id, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Called by peer when it is disconnecting
|
/// Called by peer when it is disconnecting
|
||||||
pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) {
|
pub fn on_peer_aborting(&mut self, io: &mut SyncIo, peer: PeerId) {
|
||||||
SyncHandler::on_peer_aborting(self, io, peer);
|
SyncHandler::on_peer_aborting(self, io, peer);
|
||||||
@ -1152,8 +1302,16 @@ impl ChainSync {
 	}

 	/// propagates new transactions to all peers
-	pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) -> usize {
-		SyncPropagator::propagate_new_transactions(self, io)
+	pub fn propagate_new_transactions(&mut self, io: &mut SyncIo) {
+		let deadline = Instant::now() + Duration::from_millis(500);
+		SyncPropagator::propagate_new_transactions(self, io, || {
+			if deadline > Instant::now() {
+				true
+			} else {
+				debug!(target: "sync", "Wasn't able to finish transaction propagation within a deadline.");
+				false
+			}
+		});
 	}
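Note: the wrapper now imposes a wall-clock budget; the closure is polled before each peer, so propagation stops within roughly 500ms instead of holding the sync lock for an unbounded stretch. A reduced, self-contained sketch of the same deadline pattern (names illustrative):

    use std::time::{Duration, Instant};

    // Propagate in chunks, polling `should_continue` between peers.
    fn propagate<F: FnMut() -> bool>(peers: &[u32], mut should_continue: F) -> usize {
        let mut sent = 0;
        for _peer in peers {
            if !should_continue() {
                break; // deadline hit: stop early, report what was sent
            }
            sent += 1; // stand-in for building and sending a packet
        }
        sent
    }

    fn main() {
        let deadline = Instant::now() + Duration::from_millis(500);
        let sent = propagate(&[1, 2, 3], || Instant::now() < deadline);
        println!("sent to {} peers", sent);
    }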

 	/// Broadcast consensus message to peers.
@ -1169,7 +1327,7 @@ impl ChainSync {

 #[cfg(test)]
 pub mod tests {
-	use std::collections::{HashSet, VecDeque};
+	use std::collections::{VecDeque};
 	use ethkey;
 	use network::PeerId;
 	use tests::helpers::{TestIo};
@ -1285,8 +1443,8 @@ pub mod tests {
 			asking_blocks: Vec::new(),
 			asking_hash: None,
 			ask_time: Instant::now(),
-			last_sent_transactions: HashSet::new(),
-			last_sent_private_transactions: HashSet::new(),
+			last_sent_transactions: Default::default(),
+			last_sent_private_transactions: Default::default(),
 			expired: false,
 			confirmation: super::ForkConfirmation::Confirmed,
 			snapshot_number: None,
@ -1301,7 +1459,7 @@ pub mod tests {
 	fn finds_lagging_peers() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(100, EachBlockWith::Uncle);
-		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client);
+		let sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client);
 		let chain_info = client.chain_info();

 		let lagging_peers = sync.get_lagging_peers(&chain_info);
@ -1441,3 +1599,4 @@ pub mod tests {
 		assert_eq!(status.status.transaction_count, 0);
 	}
 }

@ -18,6 +18,7 @@ use bytes::Bytes;
 use ethereum_types::H256;
 use ethcore::client::BlockChainInfo;
 use ethcore::header::BlockNumber;
+use fastmap::H256FastSet;
 use network::{PeerId, PacketId};
 use rand::Rng;
 use rlp::{Encodable, RlpStream};
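Note: `H256FastSet` is a `HashSet` keyed by H256 but built with a cheap hasher; transaction hashes are already uniformly distributed, so re-hashing them through the default DoS-resistant SipHash is wasted work on this hot path. A toy illustration of the idea (the real `fastmap` internals may differ; this hasher is an assumption for the sketch):

    use std::collections::HashSet;
    use std::hash::{BuildHasherDefault, Hasher};

    /// Toy hasher: takes the first 8 bytes of the (already uniform) key.
    #[derive(Default)]
    struct PlainHasher(u64);

    impl Hasher for PlainHasher {
        fn finish(&self) -> u64 { self.0 }
        fn write(&mut self, bytes: &[u8]) {
            let mut buf = [0u8; 8];
            let n = bytes.len().min(8);
            buf[..n].copy_from_slice(&bytes[..n]);
            self.0 = u64::from_le_bytes(buf);
        }
    }

    type H256 = [u8; 32];
    type H256FastSet = HashSet<H256, BuildHasherDefault<PlainHasher>>;

    fn main() {
        let mut set: H256FastSet = Default::default();
        set.insert([1u8; 32]);
        assert!(set.contains(&[1u8; 32]));
    }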
@ -69,49 +70,51 @@ impl SyncPropagator {
 	/// propagates latest block to a set of peers
 	pub fn propagate_blocks(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize {
 		trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
-		let mut sent = 0;
-		for peer_id in peers {
-			if blocks.is_empty() {
-				let rlp = ChainSync::create_latest_block_rlp(io.chain());
-				SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
-			} else {
-				for h in blocks {
-					let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
-					SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
-				}
-			}
-			if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
-				peer.latest_hash = chain_info.best_block_hash.clone();
-			}
-			sent += 1;
-		}
+		let sent = peers.len();
+		let mut send_packet = |io: &mut SyncIo, rlp: Bytes| {
+			for peer_id in peers {
+				SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp.clone());
+				if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
+					peer.latest_hash = chain_info.best_block_hash.clone();
+				}
+			}
+		};
+
+		if blocks.is_empty() {
+			let rlp = ChainSync::create_latest_block_rlp(io.chain());
+			send_packet(io, rlp);
+		} else {
+			for h in blocks {
+				let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
+				send_packet(io, rlp);
+			}
+		}

 		sent
 	}

 	/// propagates new known hashes to all peers
 	pub fn propagate_new_hashes(sync: &mut ChainSync, chain_info: &BlockChainInfo, io: &mut SyncIo, peers: &[PeerId]) -> usize {
 		trace!(target: "sync", "Sending NewHashes to {:?}", peers);
-		let mut sent = 0;
 		let last_parent = *io.chain().best_block_header().parent_hash();
+		let best_block_hash = chain_info.best_block_hash;
+		let rlp = match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &best_block_hash) {
+			Some(rlp) => rlp,
+			None => return 0
+		};
+
+		let sent = peers.len();
 		for peer_id in peers {
-			sent += match ChainSync::create_new_hashes_rlp(io.chain(), &last_parent, &chain_info.best_block_hash) {
-				Some(rlp) => {
-					{
-						if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
-							peer.latest_hash = chain_info.best_block_hash.clone();
-						}
-					}
-					SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp);
-					1
-				},
-				None => 0
-			}
+			if let Some(ref mut peer) = sync.peers.get_mut(peer_id) {
+				peer.latest_hash = best_block_hash;
+			}
+			SyncPropagator::send_packet(io, *peer_id, NEW_BLOCK_HASHES_PACKET, rlp.clone());
 		}
 		sent
 	}
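Note: both rewrites above share one shape: build the payload a single time before the loop and `clone()` the bytes per peer, where the old code re-ran `create_new_hashes_rlp` (or rebuilt the block RLP) inside the per-peer loop. A reduced sketch of the hoisting:

    type Bytes = Vec<u8>;

    fn create_payload() -> Option<Bytes> {
        Some(vec![0xc0]) // stand-in for ChainSync::create_new_hashes_rlp
    }

    fn propagate(peers: &[u32]) -> usize {
        // Hoisted: one RLP build, N cheap clones.
        let rlp = match create_payload() {
            Some(rlp) => rlp,
            None => return 0,
        };
        let sent = peers.len();
        for _peer in peers {
            let _packet = rlp.clone(); // stand-in for send_packet(io, peer, ..., rlp.clone())
        }
        sent
    }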

 	/// propagates new transactions to all peers
-	pub fn propagate_new_transactions(sync: &mut ChainSync, io: &mut SyncIo) -> usize {
+	pub fn propagate_new_transactions<F: FnMut() -> bool>(sync: &mut ChainSync, io: &mut SyncIo, mut should_continue: F) -> usize {
 		// Early out if nobody to send to.
 		if sync.peers.is_empty() {
 			return 0;
@ -122,6 +125,10 @@ impl SyncPropagator {
 			return 0;
 		}

+		if !should_continue() {
+			return 0;
+		}
+
 		let (transactions, service_transactions): (Vec<_>, Vec<_>) = transactions.iter()
 			.map(|tx| tx.signed())
 			.partition(|tx| !tx.gas_price.is_zero());
@ -130,24 +137,34 @@ impl SyncPropagator {
 		let mut affected_peers = HashSet::new();
 		if !transactions.is_empty() {
 			let peers = SyncPropagator::select_peers_for_transactions(sync, |_| true);
-			affected_peers = SyncPropagator::propagate_transactions_to_peers(sync, io, peers, transactions);
+			affected_peers = SyncPropagator::propagate_transactions_to_peers(
+				sync, io, peers, transactions, &mut should_continue,
+			);
 		}

 		// most of times service_transactions will be empty
 		// => there's no need to merge packets
 		if !service_transactions.is_empty() {
 			let service_transactions_peers = SyncPropagator::select_peers_for_transactions(sync, |peer_id| accepts_service_transaction(&io.peer_info(*peer_id)));
-			let service_transactions_affected_peers = SyncPropagator::propagate_transactions_to_peers(sync, io, service_transactions_peers, service_transactions);
+			let service_transactions_affected_peers = SyncPropagator::propagate_transactions_to_peers(
+				sync, io, service_transactions_peers, service_transactions, &mut should_continue
+			);
 			affected_peers.extend(&service_transactions_affected_peers);
 		}

 		affected_peers.len()
 	}

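Note: one `should_continue` closure is shared by the normal-transactions pass and the service-transactions pass via `&mut should_continue`; `&mut F` itself implements `FnMut()`, so both calls poll the same deadline state. A compact, self-contained sketch of that re-borrowing:

    fn run_pass<F: FnMut() -> bool>(mut should_continue: F) -> usize {
        let mut done = 0;
        while should_continue() && done < 3 {
            done += 1;
        }
        done
    }

    fn main() {
        let mut budget = 4;
        let mut should_continue = || { budget -= 1; budget > 0 };
        // Both passes poll the same closure via &mut re-borrows.
        let a = run_pass(&mut should_continue);
        let b = run_pass(&mut should_continue);
        println!("pass1: {}, pass2: {}", a, b);
    }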
-	fn propagate_transactions_to_peers(sync: &mut ChainSync, io: &mut SyncIo, peers: Vec<PeerId>, transactions: Vec<&SignedTransaction>) -> HashSet<PeerId> {
+	fn propagate_transactions_to_peers<F: FnMut() -> bool>(
+		sync: &mut ChainSync,
+		io: &mut SyncIo,
+		peers: Vec<PeerId>,
+		transactions: Vec<&SignedTransaction>,
+		mut should_continue: F,
+	) -> HashSet<PeerId> {
 		let all_transactions_hashes = transactions.iter()
 			.map(|tx| tx.hash())
-			.collect::<HashSet<H256>>();
+			.collect::<H256FastSet>();
 		let all_transactions_rlp = {
 			let mut packet = RlpStream::new_list(transactions.len());
 			for tx in &transactions { packet.append(&**tx); }
@ -157,102 +174,104 @@ impl SyncPropagator {
 		// Clear old transactions from stats
 		sync.transactions_stats.retain(&all_transactions_hashes);

-		// sqrt(x)/x scaled to max u32
-		let block_number = io.chain().chain_info().best_block_number;
-
-		let lucky_peers = {
-			peers.into_iter()
-				.filter_map(|peer_id| {
-					let stats = &mut sync.transactions_stats;
-					let peer_info = sync.peers.get_mut(&peer_id)
-						.expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed");
-
-					// Send all transactions
-					if peer_info.last_sent_transactions.is_empty() {
-						// update stats
-						for hash in &all_transactions_hashes {
-							let id = io.peer_session_info(peer_id).and_then(|info| info.id);
-							stats.propagated(hash, id, block_number);
-						}
-						peer_info.last_sent_transactions = all_transactions_hashes.clone();
-						return Some((peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone()));
-					}
-
-					// Get hashes of all transactions to send to this peer
-					let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions)
-						.cloned()
-						.collect::<HashSet<_>>();
-					if to_send.is_empty() {
-						return None;
-					}
-
-					// Construct RLP
-					let (packet, to_send) = {
-						let mut to_send = to_send;
-						let mut packet = RlpStream::new();
-						packet.begin_unbounded_list();
-						let mut pushed = 0;
-						for tx in &transactions {
-							let hash = tx.hash();
-							if to_send.contains(&hash) {
-								let mut transaction = RlpStream::new();
-								tx.rlp_append(&mut transaction);
-								let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE);
-								if !appended {
-									// Maximal packet size reached just proceed with sending
-									debug!(target: "sync", "Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len());
-									to_send = to_send.into_iter().take(pushed).collect();
-									break;
-								}
-								pushed += 1;
-							}
-						}
-						packet.complete_unbounded_list();
-						(packet, to_send)
-					};
-
-					// Update stats
-					let id = io.peer_session_info(peer_id).and_then(|info| info.id);
-					for hash in &to_send {
-						// update stats
-						stats.propagated(hash, id, block_number);
-					}
-
-					peer_info.last_sent_transactions = all_transactions_hashes
-						.intersection(&peer_info.last_sent_transactions)
-						.chain(&to_send)
-						.cloned()
-						.collect();
-					Some((peer_id, to_send.len(), packet.out()))
-				})
-				.collect::<Vec<_>>()
-		};
-
-		// Send RLPs
-		let mut peers = HashSet::new();
-		if lucky_peers.len() > 0 {
-			let mut max_sent = 0;
-			let lucky_peers_len = lucky_peers.len();
-			for (peer_id, sent, rlp) in lucky_peers {
-				peers.insert(peer_id);
-				let size = rlp.len();
-				SyncPropagator::send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
-				trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size);
-				max_sent = cmp::max(max_sent, sent);
-			}
-			debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, lucky_peers_len);
-		}
-
-		peers
+		let send_packet = |io: &mut SyncIo, peer_id: PeerId, sent: usize, rlp: Bytes| {
+			let size = rlp.len();
+			SyncPropagator::send_packet(io, peer_id, TRANSACTIONS_PACKET, rlp);
+			trace!(target: "sync", "{:02} <- Transactions ({} entries; {} bytes)", peer_id, sent, size);
+		};
+
+		let block_number = io.chain().chain_info().best_block_number;
+		let mut sent_to_peers = HashSet::new();
+		let mut max_sent = 0;
+
+		// for every peer construct and send transactions packet
+		for peer_id in peers {
+			if !should_continue() {
+				debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len());
+				return sent_to_peers;
+			}
+
+			let stats = &mut sync.transactions_stats;
+			let peer_info = sync.peers.get_mut(&peer_id)
+				.expect("peer_id is form peers; peers is result of select_peers_for_transactions; select_peers_for_transactions selects peers from self.peers; qed");
+
+			// Send all transactions, if the peer doesn't know about anything
+			if peer_info.last_sent_transactions.is_empty() {
+				// update stats
+				for hash in &all_transactions_hashes {
+					let id = io.peer_session_info(peer_id).and_then(|info| info.id);
+					stats.propagated(hash, id, block_number);
+				}
+				peer_info.last_sent_transactions = all_transactions_hashes.clone();
+
+				send_packet(io, peer_id, all_transactions_hashes.len(), all_transactions_rlp.clone());
+				sent_to_peers.insert(peer_id);
+				max_sent = cmp::max(max_sent, all_transactions_hashes.len());
+				continue;
+			}
+
+			// Get hashes of all transactions to send to this peer
+			let to_send = all_transactions_hashes.difference(&peer_info.last_sent_transactions)
+				.cloned()
+				.collect::<HashSet<_>>();
+			if to_send.is_empty() {
+				continue;
+			}
+
+			// Construct RLP
+			let (packet, to_send) = {
+				let mut to_send = to_send;
+				let mut packet = RlpStream::new();
+				packet.begin_unbounded_list();
+				let mut pushed = 0;
+				for tx in &transactions {
+					let hash = tx.hash();
+					if to_send.contains(&hash) {
+						let mut transaction = RlpStream::new();
+						tx.rlp_append(&mut transaction);
+						let appended = packet.append_raw_checked(&transaction.drain(), 1, MAX_TRANSACTION_PACKET_SIZE);
+						if !appended {
+							// Maximal packet size reached just proceed with sending
+							debug!(target: "sync", "Transaction packet size limit reached. Sending incomplete set of {}/{} transactions.", pushed, to_send.len());
+							to_send = to_send.into_iter().take(pushed).collect();
+							break;
+						}
+						pushed += 1;
+					}
+				}
+				packet.complete_unbounded_list();
+				(packet, to_send)
+			};
+
+			// Update stats
+			let id = io.peer_session_info(peer_id).and_then(|info| info.id);
+			for hash in &to_send {
+				// update stats
+				stats.propagated(hash, id, block_number);
+			}
+
+			peer_info.last_sent_transactions = all_transactions_hashes
+				.intersection(&peer_info.last_sent_transactions)
+				.chain(&to_send)
+				.cloned()
+				.collect();
+			send_packet(io, peer_id, to_send.len(), packet.out());
+			sent_to_peers.insert(peer_id);
+			max_sent = cmp::max(max_sent, to_send.len());
+		}
+
+		debug!(target: "sync", "Sent up to {} transactions to {} peers.", max_sent, sent_to_peers.len());
+		sent_to_peers
 	}
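Note: the per-peer packet construction is size-capped: each transaction goes through `append_raw_checked` against `MAX_TRANSACTION_PACKET_SIZE`, and once the cap is hit `to_send` is truncated to the transactions actually pushed so the stats stay honest. A standalone sketch of the capping loop, with a plain byte budget standing in for the parity-rlp call:

    const MAX_PACKET_SIZE: usize = 8; // stand-in for MAX_TRANSACTION_PACKET_SIZE

    /// Append encoded items until the size budget is exhausted;
    /// return the packet and how many items made it in.
    fn build_capped_packet(items: &[Vec<u8>]) -> (Vec<u8>, usize) {
        let mut packet = Vec::new();
        let mut pushed = 0;
        for item in items {
            if packet.len() + item.len() > MAX_PACKET_SIZE {
                // cap reached: send what fits, the rest stays "unsent"
                break;
            }
            packet.extend_from_slice(item);
            pushed += 1;
        }
        (packet, pushed)
    }

    fn main() {
        let txs = vec![vec![1u8; 3], vec![2u8; 3], vec![3u8; 3]];
        let (packet, pushed) = build_capped_packet(&txs);
        assert_eq!(pushed, 2);
        assert_eq!(packet.len(), 6);
    }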

 	pub fn propagate_latest_blocks(sync: &mut ChainSync, io: &mut SyncIo, sealed: &[H256]) {
 		let chain_info = io.chain().chain_info();
 		if (((chain_info.best_block_number as i64) - (sync.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
-			let mut peers = sync.get_lagging_peers(&chain_info);
+			let peers = sync.get_lagging_peers(&chain_info);
 			if sealed.is_empty() {
 				let hashes = SyncPropagator::propagate_new_hashes(sync, &chain_info, io, &peers);
-				peers = ChainSync::select_random_peers(&peers);
+				let peers = ChainSync::select_random_peers(&peers);
 				let blocks = SyncPropagator::propagate_blocks(sync, &chain_info, io, sealed, &peers);
 				if blocks != 0 || hashes != 0 {
 					trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
@ -318,7 +337,7 @@ impl SyncPropagator {
 	}

 	/// Generic packet sender
-	fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
+	pub fn send_packet(sync: &mut SyncIo, peer_id: PeerId, packet_id: PacketId, packet: Bytes) {
 		if let Err(e) = sync.send(peer_id, packet_id, packet) {
 			debug!(target:"sync", "Error sending packet: {:?}", e);
 			sync.disconnect_peer(peer_id);
@ -419,8 +438,8 @@ mod tests {
 			asking_blocks: Vec::new(),
 			asking_hash: None,
 			ask_time: Instant::now(),
-			last_sent_transactions: HashSet::new(),
-			last_sent_private_transactions: HashSet::new(),
+			last_sent_transactions: Default::default(),
+			last_sent_private_transactions: Default::default(),
 			expired: false,
 			confirmation: ForkConfirmation::Confirmed,
 			snapshot_number: None,
@ -447,13 +466,13 @@ mod tests {
 		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
 		let mut io = TestIo::new(&mut client, &ss, &queue, None);
-		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 		// Try to propagate same transactions for the second time
-		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 		// Even after new block transactions should not be propagated twice
 		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
 		// Try to propagate same transactions for the third time
-		let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);

 		// 1 message should be send
 		assert_eq!(1, io.packets.len());
@ -474,7 +493,7 @@ mod tests {
 		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
 		let mut io = TestIo::new(&mut client, &ss, &queue, None);
-		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 		io.chain.insert_transaction_to_queue();
 		// New block import should not trigger propagation.
 		// (we only propagate on timeout)
@ -498,10 +517,10 @@ mod tests {
 		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
 		let mut io = TestIo::new(&mut client, &ss, &queue, None);
-		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
 		// Try to propagate same transactions for the second time
-		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);

 		assert_eq!(0, io.packets.len());
 		assert_eq!(0, peer_count);
@ -519,7 +538,7 @@ mod tests {
 		// should sent some
 		{
 			let mut io = TestIo::new(&mut client, &ss, &queue, None);
-			let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			let peer_count = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 			assert_eq!(1, io.packets.len());
 			assert_eq!(1, peer_count);
 		}
@ -528,9 +547,9 @@ mod tests {
 		let (peer_count2, peer_count3) = {
 			let mut io = TestIo::new(&mut client, &ss, &queue, None);
 			// Propagate new transactions
-			let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			let peer_count2 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 			// And now the peer should have all transactions
-			let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+			let peer_count3 = SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);
 			(peer_count2, peer_count3)
 		};

@ -553,7 +572,7 @@ mod tests {
 		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
 		let mut io = TestIo::new(&mut client, &ss, &queue, None);
-		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);

 		let stats = sync.transactions_stats();
 		assert_eq!(stats.len(), 1, "Should maintain stats for single transaction.")
@ -583,7 +602,7 @@ mod tests {
 		io.peers_info.insert(4, "Parity-Ethereum/v2.7.3-ABCDEFGH".to_owned());

 		// and new service transaction is propagated to peers
-		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);

 		// peer#2 && peer#4 are receiving service transaction
 		assert!(io.packets.iter().any(|p| p.packet_id == 0x02 && p.recipient == 2)); // TRANSACTIONS_PACKET
@ -607,7 +626,7 @@ mod tests {
 		io.peers_info.insert(1, "Parity-Ethereum/v2.6".to_owned());

 		// and service + non-service transactions are propagated to peers
-		SyncPropagator::propagate_new_transactions(&mut sync, &mut io);
+		SyncPropagator::propagate_new_transactions(&mut sync, &mut io, || true);

 		// two separate packets for peer are queued:
 		// 1) with non-service-transaction
@ -27,6 +27,7 @@ use sync_io::SyncIo;

 use super::{
 	ChainSync,
+	SyncHandler,
 	RlpResponseResult,
 	PacketDecodeError,
 	BLOCK_BODIES_PACKET,
@ -47,6 +48,8 @@ use super::{
 	RECEIPTS_PACKET,
 	SNAPSHOT_DATA_PACKET,
 	SNAPSHOT_MANIFEST_PACKET,
+	STATUS_PACKET,
+	TRANSACTIONS_PACKET,
 };

 /// The Chain Sync Supplier: answers requests from peers with available data
@ -56,6 +59,7 @@ impl SyncSupplier {
 	/// Dispatch incoming requests and responses
 	pub fn dispatch_packet(sync: &RwLock<ChainSync>, io: &mut SyncIo, peer: PeerId, packet_id: u8, data: &[u8]) {
 		let rlp = Rlp::new(data);

 		let result = match packet_id {
 			GET_BLOCK_BODIES_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
 				SyncSupplier::return_block_bodies,
@ -80,9 +84,39 @@ impl SyncSupplier {
 			GET_SNAPSHOT_DATA_PACKET => SyncSupplier::return_rlp(io, &rlp, peer,
 				SyncSupplier::return_snapshot_data,
 				|e| format!("Error sending snapshot data: {:?}", e)),
-			CONSENSUS_DATA_PACKET => ChainSync::on_consensus_packet(io, peer, &rlp),
-			_ => {
+			STATUS_PACKET => {
 				sync.write().on_packet(io, peer, packet_id, data);
+				Ok(())
+			},
+			// Packets that require the peer to be confirmed
+			_ => {
+				if !sync.read().peers.contains_key(&peer) {
+					debug!(target:"sync", "Unexpected packet {} from unregistered peer: {}:{}", packet_id, peer, io.peer_info(peer));
+					return;
+				}
+				debug!(target: "sync", "{} -> Dispatching packet: {}", peer, packet_id);
+
+				match packet_id {
+					CONSENSUS_DATA_PACKET => {
+						SyncHandler::on_consensus_packet(io, peer, &rlp)
+					},
+					TRANSACTIONS_PACKET => {
+						let res = {
+							let sync_ro = sync.read();
+							SyncHandler::on_peer_transactions(&*sync_ro, io, peer, &rlp)
+						};
+						if res.is_err() {
+							// peer sent invalid data, disconnect.
+							io.disable_peer(peer);
+							sync.write().deactivate_peer(io, peer);
+						}
+					},
+					_ => {
+						sync.write().on_packet(io, peer, packet_id, data);
+					}
+				}
+
 				Ok(())
 			}
 		};
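Note: the dispatch rewrite splits packets into two classes: `STATUS_PACKET` (the handshake that registers a peer) is always handled, while every other packet is dropped unless the sender is already registered in `peers`. A reduced sketch of that gate (types simplified):

    use std::collections::HashSet;

    const STATUS_PACKET: u8 = 0x00;

    struct SyncStub { peers: HashSet<usize> }

    fn dispatch_packet(sync: &mut SyncStub, peer: usize, packet_id: u8) {
        match packet_id {
            // Handshake packet: always handled, registers the peer.
            STATUS_PACKET => { sync.peers.insert(peer); }
            // Everything else requires a confirmed (registered) peer.
            _ => {
                if !sync.peers.contains(&peer) {
                    eprintln!("unexpected packet {} from unregistered peer {}", packet_id, peer);
                    return;
                }
                // ... dispatch to the concrete handler here
            }
        }
    }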
@ -226,7 +260,8 @@ impl SyncSupplier {
 		let mut added_receipts = 0usize;
 		let mut data = Bytes::new();
 		for i in 0..count {
-			if let Some(mut receipts_bytes) = io.chain().encoded_block_receipts(&rlp.val_at::<H256>(i)?) {
+			if let Some(receipts) = io.chain().block_receipts(&rlp.val_at::<H256>(i)?) {
+				let mut receipts_bytes = ::rlp::encode(&receipts);
 				data.append(&mut receipts_bytes);
 				added_receipts += receipts_bytes.len();
 				added_headers += 1;
|
|||||||
|
|
||||||
io.sender = Some(2usize);
|
io.sender = Some(2usize);
|
||||||
|
|
||||||
ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_NODE_DATA_PACKET, &node_request);
|
SyncSupplier::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_NODE_DATA_PACKET, &node_request);
|
||||||
assert_eq!(1, io.packets.len());
|
assert_eq!(1, io.packets.len());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -445,7 +480,7 @@ mod test {
|
|||||||
assert_eq!(603, rlp_result.unwrap().1.out().len());
|
assert_eq!(603, rlp_result.unwrap().1.out().len());
|
||||||
|
|
||||||
io.sender = Some(2usize);
|
io.sender = Some(2usize);
|
||||||
ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_RECEIPTS_PACKET, &receipts_request);
|
SyncSupplier::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, GET_RECEIPTS_PACKET, &receipts_request);
|
||||||
assert_eq!(1, io.packets.len());
|
assert_eq!(1, io.packets.len());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -34,6 +34,7 @@

 use std::collections::{HashMap, HashSet};
 use std::mem;
+use std::ops::Deref;
 use std::sync::Arc;
 use std::time::{Instant, Duration};

@ -213,6 +214,44 @@ enum SyncState {
 	Rounds(SyncRound),
 }

+/// A wrapper around the SyncState that makes sure to
+/// update the giving reference to `is_idle`
+#[derive(Debug)]
+struct SyncStateWrapper {
+	state: SyncState,
+}
+
+impl SyncStateWrapper {
+	/// Create a new wrapper for SyncState::Idle
+	pub fn idle() -> Self {
+		SyncStateWrapper {
+			state: SyncState::Idle,
+		}
+	}
+
+	/// Set the new state's value, making sure `is_idle` gets updated
+	pub fn set(&mut self, state: SyncState, is_idle_handle: &mut bool) {
+		*is_idle_handle = match state {
+			SyncState::Idle => true,
+			_ => false,
+		};
+		self.state = state;
+	}
+
+	/// Returns the internal state's value
+	pub fn into_inner(self) -> SyncState {
+		self.state
+	}
+}
+
+impl Deref for SyncStateWrapper {
+	type Target = SyncState;
+
+	fn deref(&self) -> &SyncState {
+		&self.state
+	}
+}
+
 struct ResponseCtx<'a> {
 	peer: PeerId,
 	req_id: ReqId,
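Note: the wrapper's whole point is that every transition to or from `Idle` also refreshes a plain `bool` readable without taking the state mutex. A self-contained sketch of using such a wrapper (std types in place of parking_lot, names mirroring the hunk above):

    use std::ops::Deref;

    #[derive(Debug)]
    enum SyncState { Idle, Busy }

    #[derive(Debug)]
    struct SyncStateWrapper { state: SyncState }

    impl SyncStateWrapper {
        fn idle() -> Self { SyncStateWrapper { state: SyncState::Idle } }

        /// Every transition keeps the shadow `is_idle` flag in sync.
        fn set(&mut self, state: SyncState, is_idle_handle: &mut bool) {
            *is_idle_handle = match state {
                SyncState::Idle => true,
                _ => false,
            };
            self.state = state;
        }
    }

    impl Deref for SyncStateWrapper {
        type Target = SyncState;
        fn deref(&self) -> &SyncState { &self.state }
    }

    fn main() {
        let mut is_idle = true;
        let mut state = SyncStateWrapper::idle();
        state.set(SyncState::Busy, &mut is_idle);
        assert!(!is_idle);
        println!("state is now {:?}", *state);
    }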
@ -235,7 +274,9 @@ pub struct LightSync<L: AsLightClient> {
 	pending_reqs: Mutex<HashMap<ReqId, PendingReq>>, // requests from this handler
 	client: Arc<L>,
 	rng: Mutex<OsRng>,
-	state: Mutex<SyncState>,
+	state: Mutex<SyncStateWrapper>,
+	// We duplicate this state tracking to avoid deadlocks in `is_major_importing`.
+	is_idle: Mutex<bool>,
 }

 #[derive(Debug, Clone)]
@ -309,16 +350,17 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {

 		if new_best.is_none() {
 			debug!(target: "sync", "No peers remain. Reverting to idle");
-			*self.state.lock() = SyncState::Idle;
+			self.set_state(&mut self.state.lock(), SyncState::Idle);
 		} else {
 			let mut state = self.state.lock();

-			*state = match mem::replace(&mut *state, SyncState::Idle) {
+			let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 				SyncState::Idle => SyncState::Idle,
 				SyncState::AncestorSearch(search) =>
 					SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)),
 				SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)),
 			};
+			self.set_state(&mut state, next_state);
 		}

 		self.maintain_sync(ctx.as_basic());
@ -390,12 +432,13 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {
 				data: headers,
 			};

-			*state = match mem::replace(&mut *state, SyncState::Idle) {
+			let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 				SyncState::Idle => SyncState::Idle,
 				SyncState::AncestorSearch(search) =>
 					SyncState::AncestorSearch(search.process_response(&ctx, &*self.client)),
 				SyncState::Rounds(round) => SyncState::Rounds(round.process_response(&ctx)),
 			};
+			self.set_state(&mut state, next_state);
 		}

 		self.maintain_sync(ctx.as_basic());
@ -408,12 +451,18 @@ impl<L: AsLightClient + Send + Sync> Handler for LightSync<L> {

 // private helpers
 impl<L: AsLightClient> LightSync<L> {
+	/// Sets the LightSync's state, and update
+	/// `is_idle`
+	fn set_state(&self, state: &mut SyncStateWrapper, next_state: SyncState) {
+		state.set(next_state, &mut self.is_idle.lock());
+	}
+
 	// Begins a search for the common ancestor and our best block.
 	// does not lock state, instead has a mutable reference to it passed.
-	fn begin_search(&self, state: &mut SyncState) {
+	fn begin_search(&self, state: &mut SyncStateWrapper) {
 		if let None = *self.best_seen.lock() {
 			// no peers.
-			*state = SyncState::Idle;
+			self.set_state(state, SyncState::Idle);
 			return;
 		}

@ -422,7 +471,8 @@ impl<L: AsLightClient> LightSync<L> {

 		trace!(target: "sync", "Beginning search for common ancestor from {:?}",
 			(chain_info.best_block_number, chain_info.best_block_hash));
-		*state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number));
+		let next_state = SyncState::AncestorSearch(AncestorSearch::begin(chain_info.best_block_number));
+		self.set_state(state, next_state);
 	}

 	// handles request dispatch, block import, state machine transitions, and timeouts.
@ -435,7 +485,7 @@ impl<L: AsLightClient> LightSync<L> {
 		let chain_info = client.chain_info();

 		let mut state = self.state.lock();
-		debug!(target: "sync", "Maintaining sync ({:?})", &*state);
+		debug!(target: "sync", "Maintaining sync ({:?})", **state);

 		// drain any pending blocks into the queue.
 		{
@ -445,11 +495,12 @@ impl<L: AsLightClient> LightSync<L> {
 			loop {
 				if client.queue_info().is_full() { break }

-				*state = match mem::replace(&mut *state, SyncState::Idle) {
+				let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 					SyncState::Rounds(round)
 						=> SyncState::Rounds(round.drain(&mut sink, Some(DRAIN_AMOUNT))),
 					other => other,
 				};
+				self.set_state(&mut state, next_state);

 				if sink.is_empty() { break }
 				trace!(target: "sync", "Drained {} headers to import", sink.len());
@ -483,15 +534,15 @@ impl<L: AsLightClient> LightSync<L> {
 					let network_score = other.as_ref().map(|target| target.head_td);
 					trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}",
 						network_score, best_td);
-					*state = SyncState::Idle;
+					self.set_state(&mut state, SyncState::Idle);
 					return;
 				}
 			};

-			match mem::replace(&mut *state, SyncState::Idle) {
+			match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 				SyncState::Rounds(SyncRound::Abort(reason, remaining)) => {
 					if remaining.len() > 0 {
-						*state = SyncState::Rounds(SyncRound::Abort(reason, remaining));
+						self.set_state(&mut state, SyncState::Rounds(SyncRound::Abort(reason, remaining)));
 						return;
 					}

@ -505,7 +556,7 @@ impl<L: AsLightClient> LightSync<L> {
 						AbortReason::NoResponses => {}
 						AbortReason::TargetReached => {
 							debug!(target: "sync", "Sync target reached. Going idle");
-							*state = SyncState::Idle;
+							self.set_state(&mut state, SyncState::Idle);
 							return;
 						}
 					}
@ -514,15 +565,15 @@ impl<L: AsLightClient> LightSync<L> {
 					self.begin_search(&mut state);
 				}
 				SyncState::AncestorSearch(AncestorSearch::FoundCommon(num, hash)) => {
-					*state = SyncState::Rounds(SyncRound::begin((num, hash), sync_target));
+					self.set_state(&mut state, SyncState::Rounds(SyncRound::begin((num, hash), sync_target)));
 				}
 				SyncState::AncestorSearch(AncestorSearch::Genesis) => {
 					// Same here.
 					let g_hash = chain_info.genesis_hash;
-					*state = SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target));
+					self.set_state(&mut state, SyncState::Rounds(SyncRound::begin((0, g_hash), sync_target)));
 				}
 				SyncState::Idle => self.begin_search(&mut state),
-				other => *state = other, // restore displaced state.
+				other => self.set_state(&mut state, other), // restore displaced state.
 			}
 		}

@ -543,12 +594,13 @@ impl<L: AsLightClient> LightSync<L> {
 			}
 			drop(pending_reqs);

-			*state = match mem::replace(&mut *state, SyncState::Idle) {
+			let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 				SyncState::Idle => SyncState::Idle,
 				SyncState::AncestorSearch(search) =>
 					SyncState::AncestorSearch(search.requests_abandoned(&unfulfilled)),
 				SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(&unfulfilled)),
 			};
+			self.set_state(&mut state, next_state);
 		}
 	}

@ -605,34 +657,14 @@ impl<L: AsLightClient> LightSync<L> {
 				None
 			};

-			*state = match mem::replace(&mut *state, SyncState::Idle) {
+			let next_state = match mem::replace(&mut *state, SyncStateWrapper::idle()).into_inner() {
 				SyncState::Rounds(round) =>
 					SyncState::Rounds(round.dispatch_requests(dispatcher)),
 				SyncState::AncestorSearch(search) =>
 					SyncState::AncestorSearch(search.dispatch_request(dispatcher)),
 				other => other,
 			};
+			self.set_state(&mut state, next_state);
 		}
 	}
-
-	fn is_major_importing_do_wait(&self, wait: bool) -> bool {
-		const EMPTY_QUEUE: usize = 3;
-
-		if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE {
-			return true;
-		}
-		let mg_state = if wait {
-			self.state.lock()
-		} else {
-			if let Some(mg_state) = self.state.try_lock() {
-				mg_state
-			} else {
-				return false;
-			}
-		};
-		match *mg_state {
-			SyncState::Idle => false,
-			_ => true,
-		}
-	}
 }
@ -651,7 +683,8 @@ impl<L: AsLightClient> LightSync<L> {
 			pending_reqs: Mutex::new(HashMap::new()),
 			client: client,
 			rng: Mutex::new(OsRng::new()?),
-			state: Mutex::new(SyncState::Idle),
+			state: Mutex::new(SyncStateWrapper::idle()),
+			is_idle: Mutex::new(true),
 		})
 	}
 }
@ -666,9 +699,6 @@ pub trait SyncInfo {

 	/// Whether major sync is underway.
 	fn is_major_importing(&self) -> bool;
-
-	/// Whether major sync is underway, skipping some synchronization.
-	fn is_major_importing_no_sync(&self) -> bool;
 }

 impl<L: AsLightClient> SyncInfo for LightSync<L> {
@ -681,11 +711,13 @@ impl<L: AsLightClient> SyncInfo for LightSync<L> {
 	}

 	fn is_major_importing(&self) -> bool {
-		self.is_major_importing_do_wait(true)
-	}
-
-	fn is_major_importing_no_sync(&self) -> bool {
-		self.is_major_importing_do_wait(false)
+		const EMPTY_QUEUE: usize = 3;
+
+		let queue_info = self.client.as_light_client().queue_info();
+		let is_verifying = queue_info.unverified_queue_size + queue_info.verified_queue_size > EMPTY_QUEUE;
+		let is_syncing = !*self.is_idle.lock();
+
+		is_verifying || is_syncing
 	}

 }
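Note: with the mirrored flag in place, `is_major_importing` no longer needs the old blocking and `try_lock` variants: it reads queue sizes plus the `is_idle` mirror, so an RPC thread can answer even while the sync thread holds the state lock. A reduced sketch (std `Mutex` standing in for parking_lot):

    use std::sync::Mutex;

    const EMPTY_QUEUE: usize = 3;

    struct QueueInfo { unverified_queue_size: usize, verified_queue_size: usize }

    struct LightSyncStub {
        is_idle: Mutex<bool>, // mirror of the state, updated on every transition
        queue: QueueInfo,
    }

    impl LightSyncStub {
        fn is_major_importing(&self) -> bool {
            let is_verifying =
                self.queue.unverified_queue_size + self.queue.verified_queue_size > EMPTY_QUEUE;
            let is_syncing = !*self.is_idle.lock().unwrap();
            is_verifying || is_syncing
        }
    }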

@ -52,7 +52,7 @@ pub trait SyncIo {
 	fn protocol_version(&self, protocol: &ProtocolId, peer_id: PeerId) -> u8;
 	/// Returns if the chain block queue empty
 	fn is_chain_queue_empty(&self) -> bool {
-		self.chain().queue_info().is_empty()
+		self.chain().is_queue_empty()
 	}
 	/// Check if the session is expired
 	fn is_expired(&self) -> bool;
@ -33,7 +33,7 @@ use ethcore::test_helpers;
 use sync_io::SyncIo;
 use io::{IoChannel, IoContext, IoHandler};
 use api::WARP_SYNC_PROTOCOL_ID;
-use chain::{ChainSync, ETH_PROTOCOL_VERSION_63, PAR_PROTOCOL_VERSION_3, PRIVATE_TRANSACTION_PACKET, SIGNED_PRIVATE_TRANSACTION_PACKET};
+use chain::{ChainSync, ETH_PROTOCOL_VERSION_63, PAR_PROTOCOL_VERSION_3, PRIVATE_TRANSACTION_PACKET, SIGNED_PRIVATE_TRANSACTION_PACKET, SyncSupplier};
 use SyncConfig;
 use private_tx::SimplePrivateTxHandler;

@ -271,7 +271,7 @@ impl<C: FlushingBlockChainClient> Peer for EthPeer<C> {

 	fn receive_message(&self, from: PeerId, msg: TestPacket) -> HashSet<PeerId> {
 		let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, Some(from));
-		ChainSync::dispatch_packet(&self.sync, &mut io, from, msg.packet_id, &msg.data);
+		SyncSupplier::dispatch_packet(&self.sync, &mut io, from, msg.packet_id, &msg.data);
 		self.chain.flush();
 		io.to_disconnect.clone()
 	}
@ -286,10 +286,12 @@ impl<C: FlushingBlockChainClient> Peer for EthPeer<C> {
 	}

 	fn sync_step(&self) {
+		let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None);
 		self.chain.flush();
-		self.sync.write().maintain_peers(&mut TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None));
-		self.sync.write().maintain_sync(&mut TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None));
-		self.sync.write().propagate_new_transactions(&mut TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None));
+		self.sync.write().maintain_peers(&mut io);
+		self.sync.write().maintain_sync(&mut io);
+		self.sync.write().continue_sync(&mut io);
+		self.sync.write().propagate_new_transactions(&mut io);
 	}

 	fn restart_sync(&self) {
@ -15,6 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.

 use api::TransactionStats;
+use std::hash::BuildHasher;
 use std::collections::{HashSet, HashMap};
 use ethereum_types::{H256, H512};
 use fastmap::H256FastMap;
@ -74,7 +75,7 @@ impl TransactionsStats {
 	}

 	/// Retains only transactions present in given `HashSet`.
-	pub fn retain(&mut self, hashes: &HashSet<H256>) {
+	pub fn retain<S: BuildHasher>(&mut self, hashes: &HashSet<H256, S>) {
 		let to_remove = self.pending_transactions.keys()
 			.filter(|hash| !hashes.contains(hash))
 			.cloned()
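Note: generalizing `retain` over `S: BuildHasher` lets it accept both a plain `HashSet<H256>` and the new `H256FastSet`, which is the same set type with a non-default hasher. A minimal sketch of the generalized signature (u64 keys standing in for H256):

    use std::collections::{HashMap, HashSet};
    use std::hash::BuildHasher;

    struct Stats { pending: HashMap<u64, u32> }

    impl Stats {
        /// Keep only entries whose key is in `hashes`; works with any hasher.
        fn retain<S: BuildHasher>(&mut self, hashes: &HashSet<u64, S>) {
            let to_remove: Vec<u64> = self.pending.keys()
                .filter(|k| !hashes.contains(k))
                .cloned()
                .collect();
            for k in to_remove {
                self.pending.remove(&k);
            }
        }
    }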
@ -23,6 +23,7 @@ use spec::builtin::Builtin;

 /// Spec account.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Account {
 	/// Builtin contract.
 	pub builtin: Option<Builtin>,
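Note: each chainspec type in these hunks gains `#[serde(deny_unknown_fields)]`, which turns a silently ignored typo in a spec file into a hard deserialization error. A minimal illustration (assumes serde and serde_json with derive; not the actual chainspec structs):

    use serde::Deserialize;

    #[derive(Debug, PartialEq, Deserialize)]
    #[serde(deny_unknown_fields)]
    struct Account {
        balance: Option<String>,
        nonce: Option<String>,
    }

    fn main() {
        // Misspelled "balence": rejected instead of being silently dropped.
        let bad = r#"{ "balence": "0x1" }"#;
        assert!(serde_json::from_str::<Account>(bad).is_err());

        let good = r#"{ "balance": "0x1" }"#;
        assert!(serde_json::from_str::<Account>(good).is_ok());
    }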
@ -23,6 +23,7 @@ use super::ValidatorSet;

 /// Authority params deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct AuthorityRoundParams {
 	/// Block duration, in seconds.
 	#[serde(rename="stepDuration")]
@ -71,6 +72,7 @@ pub struct AuthorityRoundParams {

 /// Authority engine deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct AuthorityRound {
 	/// Ethash params.
 	pub params: AuthorityRoundParams,
@ -21,6 +21,7 @@ use super::ValidatorSet;

 /// Authority params deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct BasicAuthorityParams {
 	/// Block duration.
 	#[serde(rename="durationLimit")]
@ -31,6 +32,7 @@ pub struct BasicAuthorityParams {

 /// Authority engine deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct BasicAuthority {
 	/// Ethash params.
 	pub params: BasicAuthorityParams,
@ -20,6 +20,7 @@ use uint::Uint;

 /// Linear pricing.
 #[derive(Debug, PartialEq, Deserialize, Clone)]
+#[serde(deny_unknown_fields)]
 pub struct Linear {
 	/// Base price.
 	pub base: usize,
@ -29,6 +30,7 @@ pub struct Linear {

 /// Pricing for modular exponentiation.
 #[derive(Debug, PartialEq, Deserialize, Clone)]
+#[serde(deny_unknown_fields)]
 pub struct Modexp {
 	/// Price divisor.
 	pub divisor: usize,
@ -36,6 +38,7 @@ pub struct Modexp {

 /// Pricing for alt_bn128_pairing.
 #[derive(Debug, PartialEq, Deserialize, Clone)]
+#[serde(deny_unknown_fields)]
 pub struct AltBn128Pairing {
 	/// Base price.
 	pub base: usize,
@ -45,6 +48,7 @@ pub struct AltBn128Pairing {

 /// Pricing variants.
 #[derive(Debug, PartialEq, Deserialize, Clone)]
+#[serde(deny_unknown_fields)]
 pub enum Pricing {
 	/// Linear pricing.
 	#[serde(rename="linear")]
@ -59,6 +63,7 @@ pub enum Pricing {

 /// Spec builtin.
 #[derive(Debug, PartialEq, Deserialize, Clone)]
+#[serde(deny_unknown_fields)]
 pub struct Builtin {
 	/// Builtin name.
 	pub name: String,
@@ -20,6 +20,7 @@ use super::{Ethash, BasicAuthority, AuthorityRound, Tendermint, NullEngine, Inst
 
 /// Engine deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub enum Engine {
     /// Null engine.
     #[serde(rename="null")]
@@ -28,6 +29,7 @@ pub enum Engine {
     #[serde(rename="instantSeal")]
     InstantSeal(Option<InstantSeal>),
     /// Ethash engine.
+    #[serde(rename = "Ethash")]
     Ethash(Ethash),
     /// BasicAuthority engine.
     #[serde(rename="basicAuthority")]
@@ -88,7 +90,6 @@ mod tests {
     "minimumDifficulty": "0x020000",
     "difficultyBoundDivisor": "0x0800",
     "durationLimit": "0x0d",
-    "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
     "homesteadTransition" : "0x",
     "daoHardforkTransition": "0xffffffffffffffff",
     "daoHardforkBeneficiary": "0x0000000000000000000000000000000000000000",
@@ -23,6 +23,7 @@ use hash::Address;
 
 /// Deserializable doppelganger of block rewards for EthashParams
 #[derive(Clone, Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 #[serde(untagged)]
 pub enum BlockReward {
     Single(Uint),
@@ -31,6 +32,7 @@ pub enum BlockReward {
 
 /// Deserializable doppelganger of EthashParams.
 #[derive(Clone, Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct EthashParams {
     /// See main EthashParams docs.
     #[serde(rename="minimumDifficulty")]
@@ -119,6 +121,7 @@ pub struct EthashParams {
 
 /// Ethash engine deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Ethash {
     /// Ethash params.
     pub params: EthashParams,
@@ -23,6 +23,7 @@ use spec::Seal;
 
 /// Spec genesis.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Genesis {
     /// Seal.
     pub seal: Seal,
@@ -70,7 +71,6 @@ mod tests {
     #[test]
     fn genesis_deserialization() {
         let s = r#"{
-            "nonce": "0x0000000000000042",
             "difficulty": "0x400000000",
             "seal": {
                 "ethereum": {
@@ -21,6 +21,7 @@ use uint::Uint;
 
 /// Spec hardcoded sync.
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct HardcodedSync {
     /// Hexadecimal of the RLP encoding of the header of the block to start synchronization from.
     pub header: String,
@@ -28,7 +29,7 @@ pub struct HardcodedSync {
     #[serde(rename="totalDifficulty")]
     pub total_difficulty: Uint,
     /// Ordered trie roots of blocks before and including `header`.
-    #[serde(rename="CHTs")]
+    #[serde(rename = "CHTs")]
     pub chts: Vec<H256>,
 }
 
@@ -18,6 +18,7 @@
 
 /// Instant seal engine params deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct InstantSealParams {
     /// Whether to enable millisecond timestamp.
     #[serde(rename="millisecondTimestamp")]
@@ -27,6 +28,7 @@ pub struct InstantSealParams {
 
 /// Instant seal engine descriptor.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct InstantSeal {
     /// Instant seal parameters.
     pub params: InstantSealParams,
@@ -20,6 +20,7 @@ use uint::Uint;
 
 /// Authority params deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct NullEngineParams {
     /// Block reward.
     #[serde(rename="blockReward")]
@@ -28,6 +29,7 @@ pub struct NullEngineParams {
 
 /// Null engine descriptor
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct NullEngine {
     /// Ethash params.
     pub params: NullEngineParams,
@@ -22,6 +22,7 @@ use bytes::Bytes;
 
 /// Spec params.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Params {
     /// Account start nonce, defaults to 0.
     #[serde(rename="accountStartNonce")]
@@ -22,6 +22,7 @@ use bytes::Bytes;
 
 /// Ethereum seal.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Ethereum {
     /// Seal nonce.
     pub nonce: H64,
@@ -32,6 +33,7 @@ pub struct Ethereum {
 
 /// AuthorityRound seal.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct AuthorityRoundSeal {
     /// Seal step.
     pub step: Uint,
@@ -41,6 +43,7 @@ pub struct AuthorityRoundSeal {
 
 /// Tendermint seal.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct TendermintSeal {
     /// Seal round.
     pub round: Uint,
@@ -52,6 +55,7 @@ pub struct TendermintSeal {
 
 /// Seal variants.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub enum Seal {
     /// Ethereum seal.
     #[serde(rename="ethereum")]
@@ -38,6 +38,7 @@ pub enum ForkSpec {
 
 /// Spec deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Spec {
     /// Spec name.
     pub name: String,
@@ -71,6 +72,71 @@ mod tests {
     use serde_json;
     use spec::spec::Spec;
 
+    #[test]
+    fn should_error_on_unknown_fields() {
+        let s = r#"{
+            "name": "Morden",
+            "dataDir": "morden",
+            "engine": {
+                "Ethash": {
+                    "params": {
+                        "minimumDifficulty": "0x020000",
+                        "difficultyBoundDivisor": "0x0800",
+                        "durationLimit": "0x0d",
+                        "homesteadTransition" : "0x",
+                        "daoHardforkTransition": "0xffffffffffffffff",
+                        "daoHardforkBeneficiary": "0x0000000000000000000000000000000000000000",
+                        "daoHardforkAccounts": []
+                    }
+                }
+            },
+            "params": {
+                "accountStartNonce": "0x0100000",
+                "maximumExtraDataSize": "0x20",
+                "minGasLimit": "0x1388",
+                "networkID" : "0x2",
+                "forkBlock": "0xffffffffffffffff",
+                "forkCanonHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+                "gasLimitBoundDivisor": "0x20",
+                "unknownField": "0x0"
+            },
+            "genesis": {
+                "seal": {
+                    "ethereum": {
+                        "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+                        "nonce": "0x00006d6f7264656e"
+                    }
+                },
+                "difficulty": "0x20000",
+                "author": "0x0000000000000000000000000000000000000000",
+                "timestamp": "0x00",
+                "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+                "extraData": "0x",
+                "gasLimit": "0x2fefd8"
+            },
+            "nodes": [
+                "enode://b1217cbaa440e35ed471157123fe468e19e8b5ad5bedb4b1fdbcbdab6fb2f5ed3e95dd9c24a22a79fdb2352204cea207df27d92bfd21bfd41545e8b16f637499@104.44.138.37:30303"
+            ],
+            "accounts": {
+                "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
+                "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
+                "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
+                "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
+                "102e61f5d8f9bc71d0ad4a084df4e65e05ce0e1c": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" }
+            },
+            "hardcodedSync": {
+                "header": "f901f9a0d405da4e66f1445d455195229624e133f5baafe72b5cf7b3c36c12c8146e98b7a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347948888f1f195afa192cfee860698584c030f4c9db1a05fb2b4bfdef7b314451cb138a534d225c922fc0e5fbe25e451142732c3e25c25a088d2ec6b9860aae1a2c3b299f72b6a5d70d7f7ba4722c78f2c49ba96273c2158a007c6fdfa8eea7e86b81f5b0fc0f78f90cc19f4aa60d323151e0cac660199e9a1b90100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008302008003832fefba82524d84568e932a80a0a0349d8c3df71f1a48a9df7d03fd5f14aeee7d91332c009ecaff0a71ead405bd88ab4e252a7e8c2a23",
+                "totalDifficulty": "0x400000000",
+                "CHTs": [
+                    "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa",
+                    "0xd7f8974fb5ac78d9ac099b9ad5018bedc2ce0a72dad1827a1709da30580f0544"
+                ]
+            }
+        }"#;
+        let result: Result<Spec, _> = serde_json::from_str(s);
+        assert!(result.is_err());
+    }
+
     #[test]
     fn spec_deserialization() {
         let s = r#"{
@@ -91,7 +157,6 @@ mod tests {
             },
             "params": {
                 "accountStartNonce": "0x0100000",
-                "homesteadTransition": "0x789b0",
                 "maximumExtraDataSize": "0x20",
                 "minGasLimit": "0x1388",
                 "networkID" : "0x2",
@@ -23,6 +23,7 @@ use spec::{Account, Builtin};
 
 /// Blockchain test state deserializer.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct State(BTreeMap<Address, Account>);
 
 impl State {
@@ -21,6 +21,7 @@ use super::ValidatorSet;
 
 /// Tendermint params deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct TendermintParams {
     /// Valid validators.
     pub validators: ValidatorSet,
@@ -43,6 +44,7 @@ pub struct TendermintParams {
 
 /// Tendermint engine deserialization.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub struct Tendermint {
     /// Ethash params.
     pub params: TendermintParams,
@@ -22,6 +22,7 @@ use hash::Address;
 
 /// Different ways of specifying validators.
 #[derive(Debug, PartialEq, Deserialize)]
+#[serde(deny_unknown_fields)]
 pub enum ValidatorSet {
     /// A simple list of authorities.
     #[serde(rename="list")]
@@ -50,6 +50,10 @@ impl Notifier {
 
     /// Notify listeners about all currently pending transactions.
     pub fn notify(&mut self) {
+        if self.pending.is_empty() {
+            return;
+        }
+
         for l in &self.listeners {
             (l)(&self.pending);
         }
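The guard added above means listeners are no longer woken for an empty batch. The same guard-clause pattern in isolation (names are illustrative stand-ins, not the PR's actual types):

```rust
/// Minimal sketch: boxed callbacks are only invoked when there is
/// actually something to deliver.
struct Notifier {
    pending: Vec<u64>,                    // stand-in for pending tx hashes
    listeners: Vec<Box<dyn Fn(&[u64])>>,  // registered callbacks
}

impl Notifier {
    fn notify(&mut self) {
        // Without this check every listener would fire on an empty batch.
        if self.pending.is_empty() {
            return;
        }
        for l in &self.listeners {
            (l)(&self.pending);
        }
    }
}

fn main() {
    let mut n = Notifier {
        pending: vec![],
        listeners: vec![Box::new(|p| println!("{} pending", p.len()))],
    };
    n.notify(); // guard short-circuits: nothing printed
    n.pending.push(1);
    n.notify(); // prints "1 pending"
}
```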
@@ -467,6 +467,10 @@ usage! {
     "--no-jsonrpc",
     "Disable the HTTP JSON-RPC API server.",
 
+    FLAG flag_jsonrpc_experimental: (bool) = false, or |c: &Config| c.rpc.as_ref()?.experimental_rpcs.clone(),
+    "--jsonrpc-experimental",
+    "Enable experimental RPCs. Enable to have access to methods from unfinalised EIPs in all namespaces",
+
     ARG arg_jsonrpc_port: (u16) = 8545u16, or |c: &Config| c.rpc.as_ref()?.port.clone(),
     "--jsonrpc-port=[PORT]",
     "Specify the port portion of the HTTP JSON-RPC API server.",
|
|||||||
no_persistent_txqueue: Option<bool>,
|
no_persistent_txqueue: Option<bool>,
|
||||||
no_hardcoded_sync: Option<bool>,
|
no_hardcoded_sync: Option<bool>,
|
||||||
|
|
||||||
#[serde(rename="public_node")]
|
#[serde(rename = "public_node")]
|
||||||
_legacy_public_node: Option<bool>,
|
_legacy_public_node: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1173,15 +1177,15 @@ struct PrivateTransactions {
|
|||||||
struct Ui {
|
struct Ui {
|
||||||
path: Option<String>,
|
path: Option<String>,
|
||||||
|
|
||||||
#[serde(rename="force")]
|
#[serde(rename = "force")]
|
||||||
_legacy_force: Option<bool>,
|
_legacy_force: Option<bool>,
|
||||||
#[serde(rename="disable")]
|
#[serde(rename = "disable")]
|
||||||
_legacy_disable: Option<bool>,
|
_legacy_disable: Option<bool>,
|
||||||
#[serde(rename="port")]
|
#[serde(rename = "port")]
|
||||||
_legacy_port: Option<u16>,
|
_legacy_port: Option<u16>,
|
||||||
#[serde(rename="interface")]
|
#[serde(rename = "interface")]
|
||||||
_legacy_interface: Option<String>,
|
_legacy_interface: Option<String>,
|
||||||
#[serde(rename="hosts")]
|
#[serde(rename = "hosts")]
|
||||||
_legacy_hosts: Option<Vec<String>>,
|
_legacy_hosts: Option<Vec<String>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1219,6 +1223,7 @@ struct Rpc {
|
|||||||
server_threads: Option<usize>,
|
server_threads: Option<usize>,
|
||||||
processing_threads: Option<usize>,
|
processing_threads: Option<usize>,
|
||||||
max_payload: Option<usize>,
|
max_payload: Option<usize>,
|
||||||
|
experimental_rpcs: Option<bool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||||
@ -1244,21 +1249,21 @@ struct Ipc {
|
|||||||
#[derive(Default, Debug, PartialEq, Deserialize)]
|
#[derive(Default, Debug, PartialEq, Deserialize)]
|
||||||
#[serde(deny_unknown_fields)]
|
#[serde(deny_unknown_fields)]
|
||||||
struct Dapps {
|
struct Dapps {
|
||||||
#[serde(rename="disable")]
|
#[serde(rename = "disable")]
|
||||||
_legacy_disable: Option<bool>,
|
_legacy_disable: Option<bool>,
|
||||||
#[serde(rename="port")]
|
#[serde(rename = "port")]
|
||||||
_legacy_port: Option<u16>,
|
_legacy_port: Option<u16>,
|
||||||
#[serde(rename="interface")]
|
#[serde(rename = "interface")]
|
||||||
_legacy_interface: Option<String>,
|
_legacy_interface: Option<String>,
|
||||||
#[serde(rename="hosts")]
|
#[serde(rename = "hosts")]
|
||||||
_legacy_hosts: Option<Vec<String>>,
|
_legacy_hosts: Option<Vec<String>>,
|
||||||
#[serde(rename="cors")]
|
#[serde(rename = "cors")]
|
||||||
_legacy_cors: Option<String>,
|
_legacy_cors: Option<String>,
|
||||||
#[serde(rename="path")]
|
#[serde(rename = "path")]
|
||||||
_legacy_path: Option<String>,
|
_legacy_path: Option<String>,
|
||||||
#[serde(rename="user")]
|
#[serde(rename = "user")]
|
||||||
_legacy_user: Option<String>,
|
_legacy_user: Option<String>,
|
||||||
#[serde(rename="pass")]
|
#[serde(rename = "pass")]
|
||||||
_legacy_pass: Option<String>,
|
_legacy_pass: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1676,6 +1681,7 @@ mod tests {
|
|||||||
// -- API and Console Options
|
// -- API and Console Options
|
||||||
// RPC
|
// RPC
|
||||||
flag_no_jsonrpc: false,
|
flag_no_jsonrpc: false,
|
||||||
|
flag_jsonrpc_experimental: false,
|
||||||
arg_jsonrpc_port: 8545u16,
|
arg_jsonrpc_port: 8545u16,
|
||||||
arg_jsonrpc_interface: "local".into(),
|
arg_jsonrpc_interface: "local".into(),
|
||||||
arg_jsonrpc_cors: "null".into(),
|
arg_jsonrpc_cors: "null".into(),
|
||||||
@ -1958,6 +1964,7 @@ mod tests {
|
|||||||
server_threads: None,
|
server_threads: None,
|
||||||
processing_threads: None,
|
processing_threads: None,
|
||||||
max_payload: None,
|
max_payload: None,
|
||||||
|
experimental_rpcs: None,
|
||||||
}),
|
}),
|
||||||
ipc: Some(Ipc {
|
ipc: Some(Ipc {
|
||||||
disable: None,
|
disable: None,
|
||||||
|
@@ -138,6 +138,7 @@ impl Configuration {
     let compaction = self.args.arg_db_compaction.parse()?;
     let warp_sync = !self.args.flag_no_warp;
     let geth_compatibility = self.args.flag_geth;
+    let experimental_rpcs = self.args.flag_jsonrpc_experimental;
     let ipfs_conf = self.ipfs_config();
     let secretstore_conf = self.secretstore_config()?;
     let format = self.format()?;
|
|||||||
warp_sync: warp_sync,
|
warp_sync: warp_sync,
|
||||||
warp_barrier: self.args.arg_warp_barrier,
|
warp_barrier: self.args.arg_warp_barrier,
|
||||||
geth_compatibility: geth_compatibility,
|
geth_compatibility: geth_compatibility,
|
||||||
|
experimental_rpcs,
|
||||||
net_settings: self.network_settings()?,
|
net_settings: self.network_settings()?,
|
||||||
ipfs_conf: ipfs_conf,
|
ipfs_conf: ipfs_conf,
|
||||||
secretstore_conf: secretstore_conf,
|
secretstore_conf: secretstore_conf,
|
||||||
@ -1418,6 +1420,7 @@ mod tests {
|
|||||||
compaction: Default::default(),
|
compaction: Default::default(),
|
||||||
vm_type: Default::default(),
|
vm_type: Default::default(),
|
||||||
geth_compatibility: false,
|
geth_compatibility: false,
|
||||||
|
experimental_rpcs: false,
|
||||||
net_settings: Default::default(),
|
net_settings: Default::default(),
|
||||||
ipfs_conf: Default::default(),
|
ipfs_conf: Default::default(),
|
||||||
secretstore_conf: Default::default(),
|
secretstore_conf: Default::default(),
|
||||||
|
@@ -184,7 +184,7 @@ impl InformantData for LightNodeInformantData {
     fn executes_transactions(&self) -> bool { false }
 
     fn is_major_importing(&self) -> bool {
-        self.sync.is_major_importing_no_sync()
+        self.sync.is_major_importing()
     }
 
     fn report(&self) -> Report {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn tick(&self) {
|
pub fn tick(&self) {
|
||||||
let elapsed = self.last_tick.read().elapsed();
|
let now = Instant::now();
|
||||||
if elapsed < Duration::from_secs(5) {
|
let elapsed = now.duration_since(*self.last_tick.read());
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let (client_report, full_report) = {
|
let (client_report, full_report) = {
|
||||||
let mut last_report = self.last_report.lock();
|
let mut last_report = self.last_report.lock();
|
||||||
let full_report = self.target.report();
|
let full_report = self.target.report();
|
||||||
let diffed = full_report.client_report.clone() - &*last_report;
|
let diffed = full_report.client_report.clone() - &*last_report;
|
||||||
*last_report = full_report.client_report.clone();
|
|
||||||
(diffed, full_report)
|
(diffed, full_report)
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -289,7 +286,8 @@ impl<T: InformantData> Informant<T> {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
*self.last_tick.write() = Instant::now();
|
*self.last_tick.write() = now;
|
||||||
|
*self.last_report.lock() = full_report.client_report.clone();
|
||||||
|
|
||||||
let paint = |c: Style, t: String| match self.with_color && atty::is(atty::Stream::Stdout) {
|
let paint = |c: Style, t: String| match self.with_color && atty::is(atty::Stream::Stdout) {
|
||||||
true => format!("{}", c.paint(t)),
|
true => format!("{}", c.paint(t)),
|
||||||
@ -306,7 +304,7 @@ impl<T: InformantData> Informant<T> {
|
|||||||
format!("{} blk/s {} tx/s {} Mgas/s",
|
format!("{} blk/s {} tx/s {} Mgas/s",
|
||||||
paint(Yellow.bold(), format!("{:7.2}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
paint(Yellow.bold(), format!("{:7.2}", (client_report.blocks_imported * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
||||||
paint(Yellow.bold(), format!("{:6.1}", (client_report.transactions_applied * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
paint(Yellow.bold(), format!("{:6.1}", (client_report.transactions_applied * 1000) as f64 / elapsed.as_milliseconds() as f64)),
|
||||||
paint(Yellow.bold(), format!("{:4}", (client_report.gas_processed / (elapsed.as_milliseconds() * 1000)).low_u64()))
|
paint(Yellow.bold(), format!("{:6.1}", (client_report.gas_processed / 1000).low_u64() as f64 / elapsed.as_milliseconds() as f64))
|
||||||
)
|
)
|
||||||
} else {
|
} else {
|
||||||
format!("{} hdr/s",
|
format!("{} hdr/s",
|
||||||
|
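This is the "decimal in Mgas/s" fix from the commit message: the old expression divided entirely in integer space, so any throughput under 1 Mgas/s truncated to `0`; the new one converts to `f64` first and keeps one decimal. A self-contained illustration with plain `u64` arithmetic (the real code uses a U256 `gas_processed`):

```rust
fn main() {
    let gas_processed: u64 = 4_200_000; // gas since the last tick
    let elapsed_ms: u64 = 5_000;        // 5 seconds

    // Old style: integer division truncates sub-unit throughput to 0 Mgas/s.
    let old = gas_processed / (elapsed_ms * 1000);
    // New style: divide as floats, keep one decimal.
    let new = (gas_processed / 1000) as f64 / elapsed_ms as f64;

    assert_eq!(old, 0);
    println!("{:4} vs {:6.1} Mgas/s", old, new); // "   0 vs    0.8 Mgas/s"
}
```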
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
-use std::sync::Arc;
+use std::sync::{Arc, mpsc};
 
 use ethcore::client::BlockChainClient;
 use sync::{self, AttachedProtocol, SyncConfig, NetworkConfiguration, Params, ConnectionFilter};
|
|||||||
pub use ethcore::client::ChainNotify;
|
pub use ethcore::client::ChainNotify;
|
||||||
use ethcore_logger::Config as LogConfig;
|
use ethcore_logger::Config as LogConfig;
|
||||||
|
|
||||||
pub type SyncModules = (Arc<SyncProvider>, Arc<ManageNetwork>, Arc<ChainNotify>);
|
pub type SyncModules = (
|
||||||
|
Arc<SyncProvider>,
|
||||||
|
Arc<ManageNetwork>,
|
||||||
|
Arc<ChainNotify>,
|
||||||
|
mpsc::Sender<sync::PriorityTask>,
|
||||||
|
);
|
||||||
|
|
||||||
pub fn sync(
|
pub fn sync(
|
||||||
sync_cfg: SyncConfig,
|
config: SyncConfig,
|
||||||
net_cfg: NetworkConfiguration,
|
network_config: NetworkConfiguration,
|
||||||
client: Arc<BlockChainClient>,
|
chain: Arc<BlockChainClient>,
|
||||||
snapshot_service: Arc<SnapshotService>,
|
snapshot_service: Arc<SnapshotService>,
|
||||||
private_tx_handler: Arc<PrivateTxHandler>,
|
private_tx_handler: Arc<PrivateTxHandler>,
|
||||||
provider: Arc<Provider>,
|
provider: Arc<Provider>,
|
||||||
@ -39,15 +44,20 @@ pub fn sync(
|
|||||||
connection_filter: Option<Arc<ConnectionFilter>>,
|
connection_filter: Option<Arc<ConnectionFilter>>,
|
||||||
) -> Result<SyncModules, sync::Error> {
|
) -> Result<SyncModules, sync::Error> {
|
||||||
let eth_sync = EthSync::new(Params {
|
let eth_sync = EthSync::new(Params {
|
||||||
config: sync_cfg,
|
config,
|
||||||
chain: client,
|
chain,
|
||||||
provider: provider,
|
provider,
|
||||||
snapshot_service: snapshot_service,
|
snapshot_service,
|
||||||
private_tx_handler,
|
private_tx_handler,
|
||||||
network_config: net_cfg,
|
network_config,
|
||||||
attached_protos: attached_protos,
|
attached_protos,
|
||||||
},
|
},
|
||||||
connection_filter)?;
|
connection_filter)?;
|
||||||
|
|
||||||
Ok((eth_sync.clone() as Arc<SyncProvider>, eth_sync.clone() as Arc<ManageNetwork>, eth_sync.clone() as Arc<ChainNotify>))
|
Ok((
|
||||||
|
eth_sync.clone() as Arc<SyncProvider>,
|
||||||
|
eth_sync.clone() as Arc<ManageNetwork>,
|
||||||
|
eth_sync.clone() as Arc<ChainNotify>,
|
||||||
|
eth_sync.priority_tasks()
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
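`SyncModules` grows a fourth element: the sender half of the priority-task channel, which lets the miner hand freshly imported transactions straight to the sync layer. A sketch of the shape of that tuple with std types only (the `PriorityTask` struct and `build` function here are illustrative stand-ins, not the PR's actual sync types):

```rust
use std::sync::{Arc, mpsc};

struct PriorityTask;
// Three shared service handles would sit alongside the sender in the real
// tuple; one Arc is enough to show the shape.
type Modules = (Arc<String>, mpsc::Sender<PriorityTask>);

fn build() -> Modules {
    let (tx, _rx) = mpsc::channel();
    (Arc::new("sync".to_owned()), tx)
}

fn main() {
    let (sync_handle, priority_tasks) = build();
    // The sender can be cloned and moved into callbacks (e.g. the miner's
    // transaction listener). Send errors only occur when the receiver is
    // gone, i.e. on shutdown, so they are ignored — as in run.rs below.
    let _ = priority_tasks.send(PriorityTask);
    println!("{}", sync_handle);
}
```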
@@ -228,6 +228,7 @@ pub struct FullDependencies {
     pub net_service: Arc<ManageNetwork>,
     pub updater: Arc<Updater>,
     pub geth_compatibility: bool,
+    pub experimental_rpcs: bool,
     pub ws_address: Option<Host>,
     pub fetch: FetchClient,
     pub executor: Executor,
@@ -317,7 +318,7 @@ impl FullDependencies {
                 }
             },
             Api::Personal => {
-                handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
+                handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility, self.experimental_rpcs).to_delegate());
             },
             Api::Signer => {
                 handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
@@ -438,6 +439,7 @@ pub struct LightDependencies<T> {
     pub ws_address: Option<Host>,
     pub fetch: FetchClient,
     pub geth_compatibility: bool,
+    pub experimental_rpcs: bool,
     pub executor: Executor,
     pub whisper_rpc: Option<::whisper::RpcFactory>,
     pub private_tx_service: Option<Arc<PrivateTransactionManager>>,
@@ -531,7 +533,7 @@ impl<C: LightChainClient + 'static> LightDependencies<C> {
                 handler.extend_with(EthPubSub::to_delegate(client));
             },
             Api::Personal => {
-                handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate());
+                handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility, self.experimental_rpcs).to_delegate());
             },
             Api::Signer => {
                 handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service, self.executor.clone()).to_delegate());
@@ -15,7 +15,7 @@
 // along with Parity. If not, see <http://www.gnu.org/licenses/>.
 
 use std::any::Any;
-use std::sync::{Arc, Weak};
+use std::sync::{Arc, Weak, atomic};
 use std::time::{Duration, Instant};
 use std::thread;
 
@@ -115,6 +115,7 @@ pub struct RunCmd {
     pub compaction: DatabaseCompactionProfile,
     pub vm_type: VMType,
     pub geth_compatibility: bool,
+    pub experimental_rpcs: bool,
     pub net_settings: NetworkSettings,
     pub ipfs_conf: ipfs::Configuration,
     pub secretstore_conf: secretstore::Configuration,
|
|||||||
ws_address: cmd.ws_conf.address(),
|
ws_address: cmd.ws_conf.address(),
|
||||||
fetch: fetch,
|
fetch: fetch,
|
||||||
geth_compatibility: cmd.geth_compatibility,
|
geth_compatibility: cmd.geth_compatibility,
|
||||||
|
experimental_rpcs: cmd.experimental_rpcs,
|
||||||
executor: runtime.executor(),
|
executor: runtime.executor(),
|
||||||
whisper_rpc: whisper_factory,
|
whisper_rpc: whisper_factory,
|
||||||
private_tx_service: None, //TODO: add this to client.
|
private_tx_service: None, //TODO: add this to client.
|
||||||
@ -402,11 +404,6 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
// create dirs used by parity
|
// create dirs used by parity
|
||||||
cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?;
|
cmd.dirs.create_dirs(cmd.acc_conf.unlocked_accounts.len() == 0, cmd.secretstore_conf.enabled)?;
|
||||||
|
|
||||||
// run in daemon mode
|
|
||||||
if let Some(pid_file) = cmd.daemon {
|
|
||||||
daemonize(pid_file)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
//print out running parity environment
|
//print out running parity environment
|
||||||
print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);
|
print_running_environment(&spec.data_dir, &cmd.dirs, &db_dirs);
|
||||||
|
|
||||||
@ -482,7 +479,6 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()),
|
cmd.gas_pricer_conf.to_gas_pricer(fetch.clone(), runtime.executor()),
|
||||||
&spec,
|
&spec,
|
||||||
Some(account_provider.clone()),
|
Some(account_provider.clone()),
|
||||||
|
|
||||||
));
|
));
|
||||||
miner.set_author(cmd.miner_extras.author, None).expect("Fails only if password is Some; password is None; qed");
|
miner.set_author(cmd.miner_extras.author, None).expect("Fails only if password is Some; password is None; qed");
|
||||||
miner.set_gas_range_target(cmd.miner_extras.gas_range_target);
|
miner.set_gas_range_target(cmd.miner_extras.gas_range_target);
|
||||||
@ -639,7 +635,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
};
|
};
|
||||||
|
|
||||||
// create sync object
|
// create sync object
|
||||||
let (sync_provider, manage_network, chain_notify) = modules::sync(
|
let (sync_provider, manage_network, chain_notify, priority_tasks) = modules::sync(
|
||||||
sync_config,
|
sync_config,
|
||||||
net_conf.clone().into(),
|
net_conf.clone().into(),
|
||||||
client.clone(),
|
client.clone(),
|
||||||
@ -653,6 +649,18 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
|
|
||||||
service.add_notify(chain_notify.clone());
|
service.add_notify(chain_notify.clone());
|
||||||
|
|
||||||
|
// Propagate transactions as soon as they are imported.
|
||||||
|
let tx = ::parking_lot::Mutex::new(priority_tasks);
|
||||||
|
let is_ready = Arc::new(atomic::AtomicBool::new(true));
|
||||||
|
miner.add_transactions_listener(Box::new(move |_hashes| {
|
||||||
|
// we want to have only one PendingTransactions task in the queue.
|
||||||
|
if is_ready.compare_and_swap(true, false, atomic::Ordering::SeqCst) {
|
||||||
|
let task = ::sync::PriorityTask::PropagateTransactions(Instant::now(), is_ready.clone());
|
||||||
|
// we ignore error cause it means that we are closing
|
||||||
|
let _ = tx.lock().send(task);
|
||||||
|
}
|
||||||
|
}));
|
||||||
|
|
||||||
// provider not added to a notification center is effectively disabled
|
// provider not added to a notification center is effectively disabled
|
||||||
// TODO [debris] refactor it later on
|
// TODO [debris] refactor it later on
|
||||||
if cmd.private_tx_enabled {
|
if cmd.private_tx_enabled {
|
||||||
@ -712,6 +720,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
net_service: manage_network.clone(),
|
net_service: manage_network.clone(),
|
||||||
updater: updater.clone(),
|
updater: updater.clone(),
|
||||||
geth_compatibility: cmd.geth_compatibility,
|
geth_compatibility: cmd.geth_compatibility,
|
||||||
|
experimental_rpcs: cmd.experimental_rpcs,
|
||||||
ws_address: cmd.ws_conf.address(),
|
ws_address: cmd.ws_conf.address(),
|
||||||
fetch: fetch.clone(),
|
fetch: fetch.clone(),
|
||||||
executor: runtime.executor(),
|
executor: runtime.executor(),
|
||||||
@ -737,7 +746,7 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
let secretstore_deps = secretstore::Dependencies {
|
let secretstore_deps = secretstore::Dependencies {
|
||||||
client: client.clone(),
|
client: client.clone(),
|
||||||
sync: sync_provider.clone(),
|
sync: sync_provider.clone(),
|
||||||
miner: miner,
|
miner: miner.clone(),
|
||||||
account_provider: account_provider,
|
account_provider: account_provider,
|
||||||
accounts_passwords: &passwords,
|
accounts_passwords: &passwords,
|
||||||
};
|
};
|
||||||
@ -798,6 +807,12 @@ fn execute_impl<Cr, Rr>(cmd: RunCmd, logger: Arc<RotatingLogger>, on_client_rq:
|
|||||||
client.set_exit_handler(on_client_rq);
|
client.set_exit_handler(on_client_rq);
|
||||||
updater.set_exit_handler(on_updater_rq);
|
updater.set_exit_handler(on_updater_rq);
|
||||||
|
|
||||||
|
// run in daemon mode
|
||||||
|
if let Some(pid_file) = cmd.daemon {
|
||||||
|
info!("Running as a daemon process!");
|
||||||
|
daemonize(pid_file)?;
|
||||||
|
}
|
||||||
|
|
||||||
Ok(RunningClient {
|
Ok(RunningClient {
|
||||||
inner: RunningClientInner::Full {
|
inner: RunningClientInner::Full {
|
||||||
rpc: rpc_direct,
|
rpc: rpc_direct,
|
||||||
|
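Daemonizing is moved from before client setup (removed earlier in this file) to just before returning the running client, so any error raised during initialisation is still printed to the launching terminal instead of vanishing with the detached process — the silent-error fix referenced in the commit message (#9367/#9946). A sketch of the ordering, assuming hypothetical `init` and `daemonize` helpers standing in for the real setup code:

```rust
// Hypothetical helpers; only the ordering matters here.
fn init() -> Result<(), String> { Ok(()) }
fn daemonize(pid_file: String) -> Result<(), String> {
    println!("detaching, pid file {}", pid_file);
    Ok(())
}

fn start(pid_file: Option<String>) -> Result<(), String> {
    // Do all fallible setup first: failures still reach the terminal.
    init()?;
    // Detach only once the node is actually ready to run.
    if let Some(pid) = pid_file {
        daemonize(pid)?;
    }
    Ok(())
}

fn main() {
    start(Some("/tmp/parity.pid".into())).expect("setup failed");
}
```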
@@ -218,9 +218,10 @@ impl<M: core::Middleware<Metadata>> WsDispatcher<M> {
 
 impl<M: core::Middleware<Metadata>> core::Middleware<Metadata> for WsDispatcher<M> {
     type Future = Either<
-        core::FutureRpcResult<M::Future>,
+        core::FutureRpcResult<M::Future, M::CallFuture>,
         core::FutureResponse,
     >;
+    type CallFuture = core::middleware::NoopCallFuture;
 
     fn on_request<F, X>(&self, request: core::Request, meta: Metadata, process: F)
         -> Either<Self::Future, X>
@@ -53,6 +53,7 @@ mod codes {
     pub const FETCH_ERROR: i64 = -32060;
     pub const NO_LIGHT_PEERS: i64 = -32065;
     pub const DEPRECATED: i64 = -32070;
+    pub const EXPERIMENTAL_RPC: i64 = -32071;
 }
 
 pub fn unimplemented(details: Option<String>) -> Error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns a descriptive error in case experimental RPCs are not enabled.
|
||||||
|
pub fn require_experimental(allow_experimental_rpcs: bool, eip: &str) -> Result<(), Error> {
|
||||||
|
if allow_experimental_rpcs {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(Error {
|
||||||
|
code: ErrorCode::ServerError(codes::EXPERIMENTAL_RPC),
|
||||||
|
message: format!("This method is not part of the official RPC API yet (EIP-{}). Run with `--jsonrpc-experimental` to enable it.", eip),
|
||||||
|
data: Some(Value::String(format!("See EIP: https://eips.ethereum.org/EIPS/eip-{}", eip))),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
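Call sites guard an experimental method with a single line, bailing out with error `-32071` unless the node was started with `--jsonrpc-experimental`. A minimal standalone version of the same gate (the `Error` type is simplified; the real one comes from jsonrpc-core):

```rust
// Simplified stand-in for jsonrpc-core's Error; only what the gate needs.
#[derive(Debug)]
struct Error {
    code: i64,
    message: String,
}

const EXPERIMENTAL_RPC: i64 = -32071;

fn require_experimental(allowed: bool, eip: &str) -> Result<(), Error> {
    if allowed {
        Ok(())
    } else {
        Err(Error {
            code: EXPERIMENTAL_RPC,
            message: format!("This method is not part of the official RPC API yet (EIP-{}).", eip),
        })
    }
}

fn sign_typed_data(allow_experimental: bool) -> Result<&'static str, Error> {
    // The guard is the first statement of the handler, as in sign_191 below.
    require_experimental(allow_experimental, "712")?;
    Ok("0xsignature")
}

fn main() {
    assert!(sign_typed_data(false).is_err());
    assert!(sign_typed_data(true).is_ok());
}
```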
@@ -434,7 +434,7 @@ impl<C, M, U, S> Parity for ParityClient<C, M, U> where
         BlockNumber::Earliest => BlockId::Earliest,
         BlockNumber::Latest => BlockId::Latest,
     };
-    let receipts = try_bf!(self.client.block_receipts(id).ok_or_else(errors::unknown_block));
+    let receipts = try_bf!(self.client.localized_block_receipts(id).ok_or_else(errors::unknown_block));
     Box::new(future::ok(receipts.into_iter().map(Into::into).collect()))
 }
 
@@ -47,15 +47,22 @@ pub struct PersonalClient<D: Dispatcher> {
     accounts: Arc<AccountProvider>,
     dispatcher: D,
     allow_perm_unlock: bool,
+    allow_experimental_rpcs: bool,
 }
 
 impl<D: Dispatcher> PersonalClient<D> {
     /// Creates new PersonalClient
-    pub fn new(accounts: &Arc<AccountProvider>, dispatcher: D, allow_perm_unlock: bool) -> Self {
+    pub fn new(
+        accounts: &Arc<AccountProvider>,
+        dispatcher: D,
+        allow_perm_unlock: bool,
+        allow_experimental_rpcs: bool,
+    ) -> Self {
         PersonalClient {
             accounts: accounts.clone(),
             dispatcher,
             allow_perm_unlock,
+            allow_experimental_rpcs,
         }
     }
 }
@@ -154,6 +161,8 @@ impl<D: Dispatcher + 'static> Personal for PersonalClient<D> {
     }
 
     fn sign_191(&self, version: EIP191Version, data: Value, account: RpcH160, password: String) -> BoxFuture<RpcH520> {
+        try_bf!(errors::require_experimental(self.allow_experimental_rpcs, "191"));
+
         let data = try_bf!(eip191::hash_message(version, data));
         let dispatcher = self.dispatcher.clone();
         let accounts = self.accounts.clone();
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn sign_typed_data(&self, typed_data: EIP712, account: RpcH160, password: String) -> BoxFuture<RpcH520> {
|
fn sign_typed_data(&self, typed_data: EIP712, account: RpcH160, password: String) -> BoxFuture<RpcH520> {
|
||||||
|
try_bf!(errors::require_experimental(self.allow_experimental_rpcs, "712"));
|
||||||
|
|
||||||
let data = match hash_structured_data(typed_data) {
|
let data = match hash_structured_data(typed_data) {
|
||||||
Ok(d) => d,
|
Ok(d) => d,
|
||||||
Err(err) => return Box::new(future::err(errors::invalid_call_data(err.kind()))),
|
Err(err) => return Box::new(future::err(errors::invalid_call_data(err.kind()))),
|
||||||
|
@@ -205,6 +205,7 @@ impl<T: ActivityNotifier> Middleware<T> {
 
 impl<M: core::Metadata, T: ActivityNotifier> core::Middleware<M> for Middleware<T> {
     type Future = core::FutureResponse;
+    type CallFuture = core::middleware::NoopCallFuture;
 
     fn on_request<F, X>(&self, request: core::Request, meta: M, process: F) -> Either<Self::Future, X> where
         F: FnOnce(core::Request, M) -> X,
@@ -243,6 +243,7 @@ const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
     "params": {
         "minimumDifficulty": "0x020000",
         "difficultyBoundDivisor": "0x0800",
+        "blockReward": "0x4563918244F40000",
         "durationLimit": "0x0d",
         "homesteadTransition": "0xffffffffffffffff",
         "daoHardforkTransition": "0xffffffffffffffff",
@@ -253,7 +254,6 @@ const TRANSACTION_COUNT_SPEC: &'static [u8] = br#"{
     },
     "params": {
         "gasLimitBoundDivisor": "0x0400",
-        "blockReward": "0x4563918244F40000",
         "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
         "accountStartNonce": "0x00",
         "maximumExtraDataSize": "0x20",
@@ -292,6 +292,7 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{
     "minimumDifficulty": "0x020000",
     "difficultyBoundDivisor": "0x0800",
     "durationLimit": "0x0d",
+    "blockReward": "0x4563918244F40000",
     "homesteadTransition": "0xffffffffffffffff",
     "daoHardforkTransition": "0xffffffffffffffff",
     "daoHardforkBeneficiary": "0x0000000000000000000000000000000000000000",
@@ -301,7 +302,6 @@ const POSITIVE_NONCE_SPEC: &'static [u8] = br#"{
     },
     "params": {
         "gasLimitBoundDivisor": "0x0400",
-        "blockReward": "0x4563918244F40000",
         "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b",
         "accountStartNonce": "0x0100",
         "maximumExtraDataSize": "0x20",
@@ -57,6 +57,16 @@ fn miner_service() -> Arc<TestMinerService> {
 }
 
 fn setup() -> PersonalTester {
+    setup_with(Config {
+        allow_experimental_rpcs: true
+    })
+}
+
+struct Config {
+    pub allow_experimental_rpcs: bool,
+}
+
+fn setup_with(c: Config) -> PersonalTester {
     let runtime = Runtime::with_thread_count(1);
     let accounts = accounts_provider();
     let client = blockchain_client();
@@ -64,7 +74,7 @@ fn setup() -> PersonalTester {
     let reservations = Arc::new(Mutex::new(nonce::Reservations::new(runtime.executor())));
 
     let dispatcher = FullDispatcher::new(client, miner.clone(), reservations, 50);
-    let personal = PersonalClient::new(&accounts, dispatcher, false);
+    let personal = PersonalClient::new(&accounts, dispatcher, false, c.allow_experimental_rpcs);
 
     let mut io = IoHandler::default();
     io.extend_with(personal.to_delegate());
@@ -418,3 +428,109 @@ fn sign_eip191_structured_data() {
     let response = tester.io.handle_request_sync(&request).unwrap();
     assert_eq!(response, expected)
 }
+
+#[test]
+fn sign_structured_data() {
+    let tester = setup();
+    let secret: Secret = keccak("cow").into();
+    let address = tester.accounts.insert_account(secret, &"lol".into()).unwrap();
+    let request = r#"{
+        "jsonrpc": "2.0",
+        "method": "personal_signTypedData",
+        "params": [
+            {
+                "primaryType": "Mail",
+                "domain": {
+                    "name": "Ether Mail",
+                    "version": "1",
+                    "chainId": "0x1",
+                    "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
+                },
+                "message": {
+                    "from": {
+                        "name": "Cow",
+                        "wallet": "0xCD2a3d9F938E13CD947Ec05AbC7FE734Df8DD826"
+                    },
+                    "to": {
+                        "name": "Bob",
+                        "wallet": "0xbBbBBBBbbBBBbbbBbbBbbbbBBbBbbbbBbBbbBBbB"
+                    },
+                    "contents": "Hello, Bob!"
+                },
+                "types": {
+                    "EIP712Domain": [
+                        { "name": "name", "type": "string" },
+                        { "name": "version", "type": "string" },
+                        { "name": "chainId", "type": "uint256" },
+                        { "name": "verifyingContract", "type": "address" }
+                    ],
+                    "Person": [
+                        { "name": "name", "type": "string" },
+                        { "name": "wallet", "type": "address" }
+                    ],
+                    "Mail": [
+                        { "name": "from", "type": "Person" },
+                        { "name": "to", "type": "Person" },
+                        { "name": "contents", "type": "string" }
+                    ]
+                }
+            },
+            ""#.to_owned() + &format!("0x{:x}", address) + r#"",
+            "lol"
+        ],
+        "id": 1
+    }"#;
+    let expected = r#"{"jsonrpc":"2.0","result":"0x4355c47d63924e8a72e509b65029052eb6c299d53a04e167c5775fd466751c9d07299936d304c153f6443dfa05f40ff007d72911b6f72307f996231605b915621c","id":1}"#;
+    let response = tester.io.handle_request_sync(&request).unwrap();
+    assert_eq!(response, expected)
+}
+
+#[test]
+fn should_disable_experimental_apis() {
+    // given
+    let tester = setup_with(Config {
+        allow_experimental_rpcs: false,
+    });
+
+    // when
+    let request = r#"{
+        "jsonrpc": "2.0",
+        "method": "personal_sign191",
+        "params": [
+            "0x01",
+            {},
+            "0x1234567891234567891234567891234567891234",
+            "lol"
+        ],
+        "id": 1
+    }"#;
+    let r1 = tester.io.handle_request_sync(&request).unwrap();
+    let request = r#"{
+        "jsonrpc": "2.0",
+        "method": "personal_signTypedData",
+        "params": [
+            {
+                "types": {},
+                "message": {},
+                "domain": {
+                    "name": "",
+                    "version": "1",
+                    "chainId": "0x1",
+                    "verifyingContract": "0xCcCCccccCCCCcCCCCCCcCcCccCcCCCcCcccccccC"
+                },
+                "primaryType": ""
+            },
+            "0x1234567891234567891234567891234678912344",
+            "lol"
+        ],
+        "id": 1
+    }"#;
+    let r2 = tester.io.handle_request_sync(&request).unwrap();
+
+    // then
+    let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-191). Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-191"},"id":1}"#;
+    assert_eq!(r1, expected);
+
+    let expected = r#"{"jsonrpc":"2.0","error":{"code":-32071,"message":"This method is not part of the official RPC API yet (EIP-712). Run with `--jsonrpc-experimental` to enable it.","data":"See EIP: https://eips.ethereum.org/EIPS/eip-712"},"id":1}"#;
+    assert_eq!(r2, expected);
+}
@@ -18,7 +18,6 @@ RUN rm -rf /tmp/* /var/tmp/* /var/lib/apt/lists/*
 RUN groupadd -g 1000 parity \
     && useradd -m -u 1000 -g parity -s /bin/sh parity
 
-USER parity
 
 WORKDIR /home/parity
 
@@ -33,6 +32,9 @@ RUN chmod +x ./entrypoint.sh
 
 COPY scripts/docker/hub/check_sync.sh /check_sync.sh
 
+# switch to user parity here
+USER parity
+
 # setup ENTRYPOINT
 EXPOSE 5001 8080 8082 8083 8545 8546 8180 30303/tcp 30303/udp
 ENTRYPOINT ["./entrypoint.sh"]
@@ -21,11 +21,13 @@ extern crate plain_hasher;
 
 use ethereum_types::H256;
 use std::hash;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 use plain_hasher::PlainHasher;
 
 /// Specialized version of `HashMap` with H256 keys and fast hashing function.
 pub type H256FastMap<T> = HashMap<H256, T, hash::BuildHasherDefault<PlainHasher>>;
+/// Specialized version of HashSet with H256 values and fast hashing function.
+pub type H256FastSet = HashSet<H256, hash::BuildHasherDefault<PlainHasher>>;
 
 #[cfg(test)]
 mod tests {
|
|||||||
let mut h = H256FastMap::default();
|
let mut h = H256FastMap::default();
|
||||||
h.insert(H256::from(123), "abc");
|
h.insert(H256::from(123), "abc");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -3,7 +3,7 @@
 [package]
 name = "parity-version"
 # NOTE: this value is used for Parity Ethereum version string (via env CARGO_PKG_VERSION)
-version = "2.2.1"
+version = "2.2.2"
 authors = ["Parity Technologies <admin@parity.io>"]
 build = "build.rs"
 