Merge branch 'master' into cli-rpc

Gav Wood, 2016-12-15 14:45:15 +01:00 (committed via GitHub)
commit 0da564a4c2
100 changed files with 4156 additions and 813 deletions

View File

@ -231,10 +231,10 @@ linux-armv6:
stage: build
image: ethcore/rust-armv6:latest
only:
# - beta
- beta
# - tags
# - stable
- triggers
# - triggers
script:
- export CC=arm-linux-gnueabi-gcc
- export CXX=arm-linux-gnueabi-g++
@ -312,8 +312,8 @@ darwin:
- stable
- triggers
script:
- cargo build -j 8 --release -p ethstore #$CARGOFLAGS
- cargo build -j 8 --release #$CARGOFLAGS
- cargo build -j 8 --release -p ethstore #$CARGOFLAGS
- rm -rf parity.md5
- md5sum target/release/parity > parity.md5
- packagesbuild -v mac/Parity.pkgproj
@ -350,7 +350,7 @@ windows:
- set RUST_BACKTRACE=1
- set RUSTFLAGS=%RUSTFLAGS%
- rustup default stable-x86_64-pc-windows-msvc
- cargo build -j 8 --release #%CARGOFLAGS%
- cargo build --release #%CARGOFLAGS%
- curl -sL --url "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -o nsis\SimpleFC.dll
- curl -sL --url "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -o nsis\vc_redist.x64.exe
- signtool sign /f %keyfile% /p %certpass% target\release\parity.exe
@ -408,7 +408,7 @@ test-darwin:
test-windows:
stage: test
only:
- triggers
# - triggers
before_script:
- git submodule update --init --recursive
script:

Cargo.lock generated
View File

@ -3,6 +3,7 @@ name = "parity"
version = "1.5.0"
dependencies = [
"ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
"app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
"ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
"daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -44,7 +45,7 @@ dependencies = [
"serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -60,6 +61,17 @@ name = "ansi_term"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "app_dirs"
version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "arrayvec"
version = "0.3.16"
@ -198,7 +210,7 @@ source = "git+https://github.com/ethcore/rust-ctrlc.git#f4927770f89eca80ec250911
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -279,7 +291,7 @@ name = "ethash"
version = "1.5.0"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"sha3 0.1.0",
]
@ -397,7 +409,7 @@ dependencies = [
"crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/ethcore/mio)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -502,7 +514,7 @@ dependencies = [
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"mio 0.6.1 (git+https://github.com/ethcore/mio)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
@ -599,7 +611,7 @@ dependencies = [
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
@ -661,7 +673,7 @@ dependencies = [
"itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)",
"rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
@ -690,7 +702,7 @@ dependencies = [
"ethkey 0.2.0",
"heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"rlp 0.1.0",
"semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@ -855,7 +867,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -886,7 +898,7 @@ version = "4.0.0"
source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a"
dependencies = [
"log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
"serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_codegen 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)",
"serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
@ -947,7 +959,7 @@ name = "kernel32-sys"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1048,7 +1060,7 @@ dependencies = [
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1064,7 +1076,7 @@ dependencies = [
"nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1079,7 +1091,7 @@ dependencies = [
"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1095,7 +1107,7 @@ dependencies = [
"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
"slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1105,7 +1117,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1116,7 +1128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1145,7 +1157,7 @@ dependencies = [
"cfg-if 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -1284,6 +1296,15 @@ name = "odds"
version = "0.2.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "ole32-sys"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "owning_ref"
version = "0.2.2"
@ -1342,25 +1363,14 @@ dependencies = [
[[package]]
name = "parity-ui-precompiled"
version = "1.4.0"
source = "git+https://github.com/ethcore/js-precompiled.git#175003ae159b126302fd1a90dd875dc86d7adba0"
source = "git+https://github.com/ethcore/js-precompiled.git#a80a0c512cf569985ec1fffc5ea5ae70eb6c1e1f"
dependencies = [
"parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "parking_lot"
version = "0.3.5"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@ -1376,7 +1386,7 @@ dependencies = [
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)",
"smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1599,7 +1609,7 @@ dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1726,6 +1736,15 @@ dependencies = [
"gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "shell32-sys"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "siphasher"
version = "0.1.1"
@ -1869,7 +1888,7 @@ version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1878,7 +1897,7 @@ version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -1913,7 +1932,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
"libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
@ -2009,7 +2028,7 @@ name = "vecio"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
@ -2040,7 +2059,7 @@ dependencies = [
[[package]]
name = "winapi"
version = "0.2.6"
version = "0.2.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
@ -2068,10 +2087,15 @@ name = "ws2_32-sys"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
dependencies = [
"winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
"winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
]
[[package]]
name = "xdg"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
name = "xml-rs"
version = "0.3.4"
@ -2102,6 +2126,7 @@ dependencies = [
[metadata]
"checksum aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67077478f0a03952bed2e6786338d400d40c25e9836e08ad50af96607317fd03"
"checksum ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1f46cd5b1d660c938e3f92dfe7a73d832b3281479363dd0cd9c1c2fbf60f7962"
"checksum app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d1c0d48a81bbb13043847f957971f4d87c81542d80ece5e84ba3cba4058fd4"
"checksum arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "16e3bdb2f54b3ace0285975d59a97cf8ed3855294b2b6bc651fcf22a9c352975"
"checksum aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07d344974f0a155f091948aa389fb1b912d3a58414fbdb9c8d446d193ee3496a"
"checksum aster 0.25.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4df293303e8a52e1df7984ac1415e195f5fcbf51e4bb7bda54557861a3954a08"
@ -2190,11 +2215,11 @@ dependencies = [
"checksum num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "51fedae97a05f7353612fe017ab705a37e6db8f4d67c5c6fe739a9e70d6eed09"
"checksum number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "084d05f4bf60621a9ac9bde941a410df548f4de9545f06e5ee9d3aef4b97cd77"
"checksum odds 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "b28c06e81b0f789122d415d6394b5fe849bde8067469f4c2980d3cdc10c78ec1"
"checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c"
"checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7"
"checksum parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98378dec0a185da2b7180308752f0bad73aaa949c3e0a3b0528d0e067945f7ab"
"checksum parity-ui-precompiled 1.4.0 (git+https://github.com/ethcore/js-precompiled.git)" = "<none>"
"checksum parking_lot 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "968f685642555d2f7e202c48b8b11de80569e9bfea817f7f12d7c61aac62d4e6"
"checksum parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dbc5847584161f273e69edc63c1a86254a22f570a0b5dd87aa6f9773f6f7d125"
"checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621"
"checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068"
"checksum phf 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "447d9d45f2e0b4a9b532e808365abf18fc211be6ca217202fcd45236ef12f026"
"checksum phf_codegen 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "8af7ae7c3f75a502292b491e5cc0a1f69e3407744abe6e57e2a3b712bb82f01d"
@ -2233,6 +2258,7 @@ dependencies = [
"checksum serde_codegen_internals 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "f877e2781ed0a323295d1c9f0e26556117b5a11489fc47b1848dfb98b3173d21"
"checksum serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e10f8a9d94b06cf5d3bef66475f04c8ff90950f1be7004c357ff9472ccbaebc"
"checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c"
"checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d"
"checksum siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c44e42fa187b5a8782489cf7740cc27c3125806be2bf33563cf5e02e9533fcd"
"checksum slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d807fd58c4181bbabed77cb3b891ba9748241a552bcc5be698faaebefc54f46e"
"checksum slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)" = "<none>"
@ -2274,10 +2300,11 @@ dependencies = [
"checksum vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56b639f935488eb40f06d17c3e3bcc3054f6f75d264e187b1107c8d1cba8d31c"
"checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
"checksum webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "813503a5985585e0812d430cd1328ee322f47f66629c8ed4ecab939cf9e92f91"
"checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4"
"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
"checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
"checksum ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "<none>"
"checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e"
"checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453"
"checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef"
"checksum xmltree 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "472a9d37c7c53ab2391161df5b89b1f3bf76dab6ab150d7941ecbdd832282082"
"checksum zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "3ceb33a75b3d0608942302eed325b59d2c3ed777cc6c01627ae14e5697c6a31c"

View File

@ -28,6 +28,7 @@ isatty = "0.1"
toml = "0.2"
serde = "0.8.0"
serde_json = "0.8.0"
app_dirs = "1.1.1"
hyper = { version = "0.9", default-features = false }
ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" }
fdlimit = "0.1"
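The newly added app_dirs dependency (mirrored by the app_dirs, ole32-sys, shell32-sys and xdg entries in the Cargo.lock diff above) is a crate for resolving per-platform application directories. A minimal, illustrative sketch of the kind of lookup it enables; the AppInfo values and the helper name are placeholders, not taken from this commit:

// Illustrative only: resolving a per-user data directory with app_dirs 1.x.
// The name/author values and the helper name are placeholders, not Parity's actual ones.
extern crate app_dirs;

use app_dirs::{get_app_root, AppDataType, AppInfo};
use std::path::PathBuf;

const APP_INFO: AppInfo = AppInfo { name: "parity", author: "parity" };

fn default_data_path() -> PathBuf {
    // Fall back to the current directory if the platform lookup fails.
    get_app_root(AppDataType::UserData, &APP_INFO).unwrap_or_else(|_| PathBuf::from("."))
}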

View File

@ -21,8 +21,9 @@
},
"genesis": {
"seal": {
"generic": {
"rlp": "0xc28080"
"authority_round": {
"step": "0x0",
"signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
},
"difficulty": "0x20000",

View File

@ -17,10 +17,7 @@
},
"genesis": {
"seal": {
"generic": {
"fields": 1,
"rlp": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa"
}
"generic": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa"
},
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",

View File

@ -1,6 +1,6 @@
{
"name": "Ethereum Classic",
"forkName": "classic",
"dataDir": "classic",
"engine": {
"Ethash": {
"params": {

View File

@ -1,6 +1,6 @@
{
"name": "Expanse",
"forkName": "expanse",
"dataDir": "expanse",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Frontier/Homestead",
"dataDir": "ethereum",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Morden",
"dataDir": "test",
"engine": {
"Ethash": {
"params": {

View File

@ -1,5 +1,6 @@
{
"name": "Ropsten",
"dataDir": "test",
"engine": {
"Ethash": {
"params": {

View File

@ -4,29 +4,27 @@
"InstantSeal": null
},
"params": {
"accountStartNonce": "0x0100000",
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x2"
"networkID" : "0x11"
},
"genesis": {
"seal": {
"generic": {
"rlp": "0x0"
}
"generic": "0x0"
},
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x",
"gasLimit": "0x2fefd8"
"gasLimit": "0x5B8D80"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0x00a329c0648769a73afac7f9381e08fb43dbea72": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" }
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"0x00a329c0648769a73afac7f9381e08fb43dbea72": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
}
}

View File

@ -0,0 +1,44 @@
{
"name": "TestBFT",
"engine": {
"Tendermint": {
"params": {
"gasLimitBoundDivisor": "0x0400",
"authorities" : [
"0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1",
"0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e"
]
}
}
},
"params": {
"accountStartNonce": "0x0",
"maximumExtraDataSize": "0x20",
"minGasLimit": "0x1388",
"networkID" : "0x2323"
},
"genesis": {
"seal": {
"tendermint": {
"round": "0x0",
"proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"precommits": [
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
}
},
"difficulty": "0x20000",
"author": "0x0000000000000000000000000000000000000000",
"timestamp": "0x00",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"extraData": "0x",
"gasLimit": "0x2fefd8"
},
"accounts": {
"0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } },
"0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } },
"0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } },
"0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } },
"9cce34f7ab185c7aba1b7c8140d620b4bda941d6": { "balance": "1606938044258990275541962092341162602522202993782792835301376" }
}
}

View File

@ -23,9 +23,9 @@ use self::stores::{AddressBook, DappsSettingsStore};
use std::fmt;
use std::collections::HashMap;
use std::time::{Instant, Duration};
use util::{Mutex, RwLock};
use ethstore::{SecretStore, Error as SSError, SafeAccount, EthStore};
use ethstore::dir::{KeyDirectory};
use util::RwLock;
use ethstore::{SimpleSecretStore, SecretStore, Error as SSError, EthStore, EthMultiStore, random_string};
use ethstore::dir::MemoryDirectory;
use ethstore::ethkey::{Address, Message, Public, Secret, Random, Generator};
use ethjson::misc::AccountMeta;
pub use ethstore::ethkey::Signature;
@ -73,58 +73,47 @@ impl From<SSError> for Error {
}
}
#[derive(Default)]
struct NullDir {
accounts: RwLock<HashMap<Address, SafeAccount>>,
}
impl KeyDirectory for NullDir {
fn load(&self) -> Result<Vec<SafeAccount>, SSError> {
Ok(self.accounts.read().values().cloned().collect())
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, SSError> {
self.accounts.write().insert(account.address.clone(), account.clone());
Ok(account)
}
fn remove(&self, address: &Address) -> Result<(), SSError> {
self.accounts.write().remove(address);
Ok(())
}
}
/// Dapp identifier
pub type DappId = String;
fn transient_sstore() -> EthMultiStore {
EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed")
}
type AccountToken = String;
/// Account management.
/// Responsible for unlocking accounts.
pub struct AccountProvider {
unlocked: Mutex<HashMap<Address, AccountData>>,
sstore: Box<SecretStore>,
unlocked: RwLock<HashMap<Address, AccountData>>,
address_book: RwLock<AddressBook>,
dapps_settings: RwLock<DappsSettingsStore>,
/// Accounts on disk
sstore: Box<SecretStore>,
/// Accounts unlocked with rolling tokens
transient_sstore: EthMultiStore,
}
impl AccountProvider {
/// Creates new account provider.
pub fn new(sstore: Box<SecretStore>) -> Self {
AccountProvider {
unlocked: Mutex::new(HashMap::new()),
unlocked: RwLock::new(HashMap::new()),
address_book: RwLock::new(AddressBook::new(sstore.local_path().into())),
dapps_settings: RwLock::new(DappsSettingsStore::new(sstore.local_path().into())),
sstore: sstore,
transient_sstore: transient_sstore(),
}
}
/// Creates a provider that is not backed by disk.
pub fn transient_provider() -> Self {
AccountProvider {
unlocked: Mutex::new(HashMap::new()),
unlocked: RwLock::new(HashMap::new()),
address_book: RwLock::new(AddressBook::transient()),
dapps_settings: RwLock::new(DappsSettingsStore::transient()),
sstore: Box::new(EthStore::open(Box::new(NullDir::default()))
.expect("NullDir load always succeeds; qed"))
sstore: Box::new(EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed")),
transient_sstore: transient_sstore(),
}
}
@ -231,11 +220,8 @@ impl AccountProvider {
/// Returns `true` if the password for `account` is `password`. `false` if not.
pub fn test_password(&self, account: &Address, password: &str) -> Result<bool, Error> {
match self.sstore.sign(account, password, &Default::default()) {
Ok(_) => Ok(true),
Err(SSError::InvalidPassword) => Ok(false),
Err(e) => Err(Error::SStore(e)),
}
self.sstore.test_password(account, password)
.map_err(Into::into)
}
/// Permanently removes an account.
@ -256,7 +242,7 @@ impl AccountProvider {
let _ = try!(self.sstore.sign(&account, &password, &Default::default()));
// check if account is already unlocked permanently, if it is, do nothing
let mut unlocked = self.unlocked.lock();
let mut unlocked = self.unlocked.write();
if let Some(data) = unlocked.get(&account) {
if let Unlock::Perm = data.unlock {
return Ok(())
@ -273,7 +259,7 @@ impl AccountProvider {
}
fn password(&self, account: &Address) -> Result<String, Error> {
let mut unlocked = self.unlocked.lock();
let mut unlocked = self.unlocked.write();
let data = try!(unlocked.get(account).ok_or(Error::NotUnlocked)).clone();
if let Unlock::Temp = data.unlock {
unlocked.remove(account).expect("data exists: so key must exist: qed");
@ -304,7 +290,7 @@ impl AccountProvider {
/// Checks if given account is unlocked
pub fn is_unlocked(&self, account: Address) -> bool {
let unlocked = self.unlocked.lock();
let unlocked = self.unlocked.read();
unlocked.get(&account).is_some()
}
@ -314,6 +300,48 @@ impl AccountProvider {
Ok(try!(self.sstore.sign(&account, &password, &message)))
}
/// Signs given message with supplied token. Returns a token to use in next signing within this session.
pub fn sign_with_token(&self, account: Address, token: AccountToken, message: Message) -> Result<(Signature, AccountToken), Error> {
let is_std_password = try!(self.sstore.test_password(&account, &token));
let new_token = random_string(16);
let signature = if is_std_password {
// Insert to transient store
try!(self.sstore.copy_account(&self.transient_sstore, &account, &token, &new_token));
// sign
try!(self.sstore.sign(&account, &token, &message))
} else {
// check transient store
try!(self.transient_sstore.change_password(&account, &token, &new_token));
// and sign
try!(self.transient_sstore.sign(&account, &new_token, &message))
};
Ok((signature, new_token))
}
/// Decrypts a message with given token. Returns a token to use in next operation for this account.
pub fn decrypt_with_token(&self, account: Address, token: AccountToken, shared_mac: &[u8], message: &[u8])
-> Result<(Vec<u8>, AccountToken), Error>
{
let is_std_password = try!(self.sstore.test_password(&account, &token));
let new_token = random_string(16);
let message = if is_std_password {
// Insert to transient store
try!(self.sstore.copy_account(&self.transient_sstore, &account, &token, &new_token));
// decrypt
try!(self.sstore.decrypt(&account, &token, shared_mac, message))
} else {
// check transient store
try!(self.transient_sstore.change_password(&account, &token, &new_token));
// and decrypt
try!(self.transient_sstore.decrypt(&account, &token, shared_mac, message))
};
Ok((message, new_token))
}
/// Decrypts a message. If password is not provided the account must be unlocked.
pub fn decrypt(&self, account: Address, password: Option<String>, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let password = try!(password.map(Ok).unwrap_or_else(|| self.password(&account)));
@ -370,10 +398,26 @@ mod tests {
assert!(ap.unlock_account_timed(kp.address(), "test1".into(), 60000).is_err());
assert!(ap.unlock_account_timed(kp.address(), "test".into(), 60000).is_ok());
assert!(ap.sign(kp.address(), None, Default::default()).is_ok());
ap.unlocked.lock().get_mut(&kp.address()).unwrap().unlock = Unlock::Timed(Instant::now());
ap.unlocked.write().get_mut(&kp.address()).unwrap().unlock = Unlock::Timed(Instant::now());
assert!(ap.sign(kp.address(), None, Default::default()).is_err());
}
#[test]
fn should_sign_and_return_token() {
// given
let kp = Random.generate().unwrap();
let ap = AccountProvider::transient_provider();
assert!(ap.insert_account(kp.secret().clone(), "test").is_ok());
// when
let (_signature, token) = ap.sign_with_token(kp.address(), "test".into(), Default::default()).unwrap();
// then
ap.sign_with_token(kp.address(), token.clone(), Default::default())
.expect("First usage of token should be correct.");
assert!(ap.sign_with_token(kp.address(), token, Default::default()).is_err(), "Second usage of the same token should fail.");
}
#[test]
fn should_set_dapps_addresses() {
// given

View File

@ -15,7 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ipc::IpcConfig;
use util::H256;
use util::{H256, Bytes};
/// Represents what has to be handled by actor listening to chain events
#[ipc]
@ -27,6 +27,8 @@ pub trait ChainNotify : Send + Sync {
_enacted: Vec<H256>,
_retracted: Vec<H256>,
_sealed: Vec<H256>,
// Block bytes.
_proposed: Vec<Bytes>,
_duration: u64) {
// does nothing by default
}
@ -41,6 +43,9 @@ pub trait ChainNotify : Send + Sync {
// does nothing by default
}
/// fires when chain broadcasts a message
fn broadcast(&self, _data: Vec<u8>) {}
/// fires when new transactions are received from a peer
fn transactions_received(&self,
_hashes: Vec<H256>,

View File

@ -24,8 +24,8 @@ use time::precise_time_ns;
// util
use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, Hashable};
use util::{journaldb, TrieFactory, Trie};
use util::trie::TrieSpec;
use util::{U256, H256, Address, H2048, Uint, FixedHash};
use util::trie::TrieSpec;
use util::kvdb::*;
// other
@ -396,9 +396,10 @@ impl Client {
/// This is triggered by a message coming from a block queue when the block is ready for insertion
pub fn import_verified_blocks(&self) -> usize {
let max_blocks_to_import = 4;
let (imported_blocks, import_results, invalid_blocks, imported, duration, is_empty) = {
let (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration, is_empty) = {
let mut imported_blocks = Vec::with_capacity(max_blocks_to_import);
let mut invalid_blocks = HashSet::new();
let mut proposed_blocks = Vec::with_capacity(max_blocks_to_import);
let mut import_results = Vec::with_capacity(max_blocks_to_import);
let _import_lock = self.import_lock.lock();
@ -417,12 +418,17 @@ impl Client {
continue;
}
if let Ok(closed_block) = self.check_and_close_block(&block) {
imported_blocks.push(header.hash());
if self.engine.is_proposal(&block.header) {
self.block_queue.mark_as_good(&[header.hash()]);
proposed_blocks.push(block.bytes);
} else {
imported_blocks.push(header.hash());
let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
import_results.push(route);
let route = self.commit_block(closed_block, &header.hash(), &block.bytes);
import_results.push(route);
self.report.write().accrue_block(&block);
self.report.write().accrue_block(&block);
}
} else {
invalid_blocks.insert(header.hash());
}
@ -436,7 +442,7 @@ impl Client {
}
let is_empty = self.block_queue.mark_as_good(&imported_blocks);
let duration_ns = precise_time_ns() - start;
(imported_blocks, import_results, invalid_blocks, imported, duration_ns, is_empty)
(imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty)
};
{
@ -454,6 +460,7 @@ impl Client {
enacted.clone(),
retracted.clone(),
Vec::new(),
proposed_blocks.clone(),
duration,
);
});
@ -577,9 +584,10 @@ impl Client {
self.miner.clone()
}
/// Used by PoA to try sealing on period change.
pub fn update_sealing(&self) {
self.miner.update_sealing(self)
/// Replace io channel. Useful for testing.
pub fn set_io_channel(&self, io_channel: IoChannel<ClientIoMessage>) {
*self.io_channel.lock() = io_channel;
}
/// Attempt to get a copy of a specific block's final state.
@ -1290,6 +1298,18 @@ impl BlockChainClient for Client {
self.miner.pending_transactions(self.chain.read().best_block_number())
}
fn queue_consensus_message(&self, message: Bytes) {
let channel = self.io_channel.lock().clone();
if let Err(e) = channel.send(ClientIoMessage::NewMessage(message)) {
debug!("Ignoring the message, error queueing: {}", e);
}
}
fn broadcast_consensus_message(&self, message: Bytes) {
self.notify(|notify| notify.broadcast(message.clone()));
}
fn signing_network_id(&self) -> Option<u64> {
self.engine.signing_network_id(&self.latest_env_info())
}
@ -1314,7 +1334,6 @@ impl BlockChainClient for Client {
}
impl MiningBlockChainClient for Client {
fn latest_schedule(&self) -> Schedule {
self.engine.schedule(&self.latest_env_info())
}
@ -1357,6 +1376,30 @@ impl MiningBlockChainClient for Client {
&self.factories.vm
}
fn update_sealing(&self) {
self.miner.update_sealing(self)
}
fn submit_seal(&self, block_hash: H256, seal: Vec<Bytes>) {
if self.miner.submit_seal(self, block_hash, seal).is_err() {
warn!(target: "poa", "Wrong internal seal submission!")
}
}
fn broadcast_proposal_block(&self, block: SealedBlock) {
self.notify(|notify| {
notify.new_blocks(
vec![],
vec![],
vec![],
vec![],
vec![],
vec![block.rlp_bytes()],
0,
);
});
}
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult {
let h = block.header().hash();
let start = precise_time_ns();
@ -1381,6 +1424,7 @@ impl MiningBlockChainClient for Client {
enacted.clone(),
retracted.clone(),
vec![h.clone()],
vec![],
precise_time_ns() - start,
);
});
@ -1416,6 +1460,12 @@ impl ::client::ProvingBlockChainClient for Client {
}
}
impl Drop for Client {
fn drop(&mut self) {
self.engine.stop();
}
}
#[cfg(test)]
mod tests {

View File

@ -360,6 +360,18 @@ impl MiningBlockChainClient for TestBlockChainClient {
fn import_sealed_block(&self, _block: SealedBlock) -> ImportResult {
Ok(H256::default())
}
fn broadcast_proposal_block(&self, _block: SealedBlock) {}
fn update_sealing(&self) {
self.miner.update_sealing(self)
}
fn submit_seal(&self, block_hash: H256, seal: Vec<Bytes>) {
if self.miner.submit_seal(self, block_hash, seal).is_err() {
warn!(target: "poa", "Wrong internal seal submission!")
}
}
}
impl BlockChainClient for TestBlockChainClient {
@ -663,6 +675,12 @@ impl BlockChainClient for TestBlockChainClient {
self.miner.import_external_transactions(self, txs);
}
fn queue_consensus_message(&self, message: Bytes) {
self.spec.engine.handle_message(&message).unwrap();
}
fn broadcast_consensus_message(&self, _message: Bytes) {}
fn pending_transactions(&self) -> Vec<SignedTransaction> {
self.miner.pending_transactions(self.chain_info().best_block_number)
}

View File

@ -202,6 +202,12 @@ pub trait BlockChainClient : Sync + Send {
/// Queue transactions for importing.
fn queue_transactions(&self, transactions: Vec<Bytes>, peer_id: usize);
/// Queue consensus engine message.
fn queue_consensus_message(&self, message: Bytes);
/// Used by PoA to communicate with peers.
fn broadcast_consensus_message(&self, message: Bytes);
/// list all transactions
fn pending_transactions(&self) -> Vec<SignedTransaction>;
@ -273,6 +279,15 @@ pub trait MiningBlockChainClient: BlockChainClient {
/// Returns EvmFactory.
fn vm_factory(&self) -> &EvmFactory;
/// Used by PoA to try sealing on period change.
fn update_sealing(&self);
/// Used by PoA to submit gathered signatures.
fn submit_seal(&self, block_hash: H256, seal: Vec<Bytes>);
/// Broadcast a block proposal.
fn broadcast_proposal_block(&self, block: SealedBlock);
/// Import sealed block. Skips all verifications.
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult;
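Together with the queue_consensus_message/broadcast_consensus_message additions just above, these new trait methods give a consensus engine a way to drive sealing through the client. A rough, hypothetical sketch of the intended call flow (engine-side code invented for illustration, not part of this commit):

// Hypothetical sketch: how a PoA engine might use the new client hooks.
fn on_consensus_event(client: &MiningBlockChainClient, message: Bytes, block_hash: H256, seal: Vec<Bytes>) {
    // Relay an engine-level message to peers (BlockChainClient method).
    client.broadcast_consensus_message(message);
    // Ask the miner to (re)start sealing, e.g. on a step or period change.
    client.update_sealing();
    // Once enough signatures are gathered, submit the complete seal for the prepared block.
    client.submit_seal(block_hash, seal);
}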

View File

@ -25,7 +25,7 @@ use rlp::{UntrustedRlp, Rlp, View, encode};
use account_provider::AccountProvider;
use block::*;
use spec::CommonParams;
use engines::Engine;
use engines::{Engine, Seal, EngineError};
use header::Header;
use error::{Error, BlockError};
use blockchain::extras::BlockDetails;
@ -225,8 +225,8 @@ impl Engine for AuthorityRound {
///
/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will
/// be returned.
fn generate_seal(&self, block: &ExecutedBlock) -> Option<Vec<Bytes>> {
if self.proposed.load(AtomicOrdering::SeqCst) { return None; }
fn generate_seal(&self, block: &ExecutedBlock) -> Seal {
if self.proposed.load(AtomicOrdering::SeqCst) { return Seal::None; }
let header = block.header();
let step = self.step();
if self.is_step_proposer(step, header.author()) {
@ -235,7 +235,8 @@ impl Engine for AuthorityRound {
if let Ok(signature) = ap.sign(*header.author(), self.password.read().clone(), header.bare_hash()) {
trace!(target: "poa", "generate_seal: Issuing a block for step {}.", step);
self.proposed.store(true, AtomicOrdering::SeqCst);
return Some(vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]);
let rlps = vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()];
return Seal::Regular(rlps);
} else {
warn!(target: "poa", "generate_seal: FAIL: Accounts secret key unavailable.");
}
@ -245,7 +246,7 @@ impl Engine for AuthorityRound {
} else {
trace!(target: "poa", "generate_seal: Not a proposer for step {}.", step);
}
None
Seal::None
}
/// Check the number of seal fields.
@ -288,7 +289,7 @@ impl Engine for AuthorityRound {
// Check if parent is from a previous step.
if step == try!(header_step(parent)) {
trace!(target: "poa", "Multiple blocks proposed for step {}.", step);
try!(Err(BlockError::DoubleVote(header.author().clone())));
try!(Err(EngineError::DoubleVote(header.author().clone())));
}
let gas_limit_divisor = self.our_params.gas_limit_bound_divisor;
@ -347,6 +348,7 @@ mod tests {
use tests::helpers::*;
use account_provider::AccountProvider;
use spec::Spec;
use engines::Seal;
#[test]
fn has_valid_metadata() {
@ -416,17 +418,17 @@ mod tests {
let b2 = b2.close_and_lock();
engine.set_signer(addr1, "1".into());
if let Some(seal) = engine.generate_seal(b1.block()) {
if let Seal::Regular(seal) = engine.generate_seal(b1.block()) {
assert!(b1.clone().try_seal(engine, seal).is_ok());
// Second proposal is forbidden.
assert!(engine.generate_seal(b1.block()).is_none());
assert!(engine.generate_seal(b1.block()) == Seal::None);
}
engine.set_signer(addr2, "2".into());
if let Some(seal) = engine.generate_seal(b2.block()) {
if let Seal::Regular(seal) = engine.generate_seal(b2.block()) {
assert!(b2.clone().try_seal(engine, seal).is_ok());
// Second proposal is forbidden.
assert!(engine.generate_seal(b2.block()).is_none());
assert!(engine.generate_seal(b2.block()) == Seal::None);
}
}

View File

@ -21,7 +21,7 @@ use account_provider::AccountProvider;
use block::*;
use builtin::Builtin;
use spec::CommonParams;
use engines::Engine;
use engines::{Engine, Seal};
use env_info::EnvInfo;
use error::{BlockError, Error};
use evm::Schedule;
@ -112,20 +112,20 @@ impl Engine for BasicAuthority {
///
/// This operation is synchronous and may (quite reasonably) not be available, in which `false` will
/// be returned.
fn generate_seal(&self, block: &ExecutedBlock) -> Option<Vec<Bytes>> {
fn generate_seal(&self, block: &ExecutedBlock) -> Seal {
if let Some(ref ap) = *self.account_provider.lock() {
let header = block.header();
let message = header.bare_hash();
// account should be permanently unlocked, otherwise sealing will fail
if let Ok(signature) = ap.sign(*block.header().author(), self.password.read().clone(), message) {
return Some(vec![::rlp::encode(&(&*signature as &[u8])).to_vec()]);
return Seal::Regular(vec![::rlp::encode(&(&*signature as &[u8])).to_vec()]);
} else {
trace!(target: "basicauthority", "generate_seal: FAIL: accounts secret key unavailable");
}
} else {
trace!(target: "basicauthority", "generate_seal: FAIL: accounts not provided");
}
None
Seal::None
}
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> {
@ -199,6 +199,7 @@ mod tests {
use account_provider::AccountProvider;
use header::Header;
use spec::Spec;
use engines::Seal;
/// Create a new test chain spec with `BasicAuthority` consensus engine.
fn new_test_authority() -> Spec {
@ -269,8 +270,9 @@ mod tests {
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block()).unwrap();
assert!(b.try_seal(engine, seal).is_ok());
if let Seal::Regular(seal) = engine.generate_seal(b.block()) {
assert!(b.try_seal(engine, seal).is_ok());
}
}
#[test]

View File

@ -17,12 +17,11 @@
use std::collections::BTreeMap;
use util::Address;
use builtin::Builtin;
use engines::Engine;
use engines::{Engine, Seal};
use env_info::EnvInfo;
use spec::CommonParams;
use evm::Schedule;
use block::ExecutedBlock;
use util::Bytes;
/// An engine which does not provide any consensus mechanism, just seals blocks internally.
pub struct InstantSeal {
@ -54,13 +53,13 @@ impl Engine for InstantSeal {
}
fn schedule(&self, _env_info: &EnvInfo) -> Schedule {
Schedule::new_post_eip150(usize::max_value(), false, false, false)
Schedule::new_post_eip150(usize::max_value(), true, true, true)
}
fn is_sealer(&self, _author: &Address) -> Option<bool> { Some(true) }
fn generate_seal(&self, _block: &ExecutedBlock) -> Option<Vec<Bytes>> {
Some(Vec::new())
fn generate_seal(&self, _block: &ExecutedBlock) -> Seal {
Seal::Regular(Vec::new())
}
}
@ -72,6 +71,7 @@ mod tests {
use spec::Spec;
use header::Header;
use block::*;
use engines::Seal;
#[test]
fn instant_can_seal() {
@ -84,8 +84,9 @@ mod tests {
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();
let seal = engine.generate_seal(b.block()).unwrap();
assert!(b.try_seal(engine, seal).is_ok());
if let Seal::Regular(seal) = engine.generate_seal(b.block()) {
assert!(b.try_seal(engine, seal).is_ok());
}
}
#[test]

View File

@ -20,11 +20,13 @@ mod null_engine;
mod instant_seal;
mod basic_authority;
mod authority_round;
mod tendermint;
pub use self::null_engine::NullEngine;
pub use self::instant_seal::InstantSeal;
pub use self::basic_authority::BasicAuthority;
pub use self::authority_round::AuthorityRound;
pub use self::tendermint::Tendermint;
use util::*;
use account_provider::AccountProvider;
@ -42,6 +44,47 @@ use ethereum::ethash;
use blockchain::extras::BlockDetails;
use views::HeaderView;
/// Voting errors.
#[derive(Debug)]
pub enum EngineError {
/// Signature does not belong to an authority.
NotAuthorized(Address),
/// The same author issued different votes at the same step.
DoubleVote(Address),
/// The received block is from an incorrect proposer.
NotProposer(Mismatch<Address>),
/// Message was not expected.
UnexpectedMessage,
/// Seal field has an unexpected size.
BadSealFieldSize(OutOfBounds<usize>),
}
impl fmt::Display for EngineError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::EngineError::*;
let msg = match *self {
DoubleVote(ref address) => format!("Author {} issued too many blocks.", address),
NotProposer(ref mis) => format!("Author is not a current proposer: {}", mis),
NotAuthorized(ref address) => format!("Signer {} is not authorized.", address),
UnexpectedMessage => "This Engine should not be fed messages.".into(),
BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob),
};
f.write_fmt(format_args!("Engine error ({})", msg))
}
}
/// Seal type.
#[derive(Debug, PartialEq, Eq)]
pub enum Seal {
/// Proposal seal; should be broadcasted, but not inserted into blockchain.
Proposal(Vec<Bytes>),
/// Regular block seal; should be part of the blockchain.
Regular(Vec<Bytes>),
/// Engine does not generate a seal for this block right now.
None,
}
/// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based.
/// Provides hooks into each of the major parts of block import.
pub trait Engine : Sync + Send {
@ -94,7 +137,7 @@ pub trait Engine : Sync + Send {
///
/// This operation is synchronous and may (quite reasonably) not be available, in which None will
/// be returned.
fn generate_seal(&self, _block: &ExecutedBlock) -> Option<Vec<Bytes>> { None }
fn generate_seal(&self, _block: &ExecutedBlock) -> Seal { Seal::None }
/// Phase 1 quick block verification. Only does checks that are cheap. `block` (the header's full block)
/// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import.
@ -133,6 +176,10 @@ pub trait Engine : Sync + Send {
header.set_gas_limit(parent.gas_limit().clone());
}
/// Handle any potential consensus messages;
/// updating consensus state and potentially issuing a new one.
fn handle_message(&self, _message: &[u8]) -> Result<(), Error> { Err(EngineError::UnexpectedMessage.into()) }
// TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic
// from Spec into here and removing the Spec::builtins field.
/// Determine whether a particular address is a builtin contract.
@ -153,9 +200,16 @@ pub trait Engine : Sync + Send {
ethash::is_new_best_block(best_total_difficulty, parent_details, new_header)
}
/// Find out if the block is a proposal block and should not be inserted into the DB.
/// Takes a header of a fully verified block.
fn is_proposal(&self, _verified_header: &Header) -> bool { false }
/// Register an account which signs consensus messages.
fn set_signer(&self, _address: Address, _password: String) {}
/// Stops any services that may hold the Engine and makes it safe to drop.
fn stop(&self) {}
/// Add a channel for communication with Client which can be used for sealing.
fn register_message_channel(&self, _message_channel: IoChannel<ClientIoMessage>) {}
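The new Seal enum replaces the Option<Vec<Bytes>> previously returned by generate_seal, which is why the engines and tests earlier in this diff switch from `if let Some(seal)` to `if let Seal::Regular(seal)`. A minimal, hypothetical sketch of how a caller might branch on it (the helper functions are invented for illustration):

// Illustrative only: branching on the new Seal enum.
// try_broadcast_proposal and try_import_sealed are hypothetical helpers.
match engine.generate_seal(block.block()) {
    // Proposal seals are broadcast to peers but not inserted into the chain.
    Seal::Proposal(seal) => try_broadcast_proposal(client, block, seal),
    // Regular seals produce a canonical sealed block as before.
    Seal::Regular(seal) => try_import_sealed(client, block, seal),
    // The engine does not generate a seal for this block right now.
    Seal::None => {},
}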

View File

@ -0,0 +1,279 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint message handling.
use util::*;
use super::{Height, Round, BlockHash, Step};
use error::Error;
use header::Header;
use rlp::*;
use ethkey::{recover, public_to_address};
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct ConsensusMessage {
pub signature: H520,
pub height: Height,
pub round: Round,
pub step: Step,
pub block_hash: Option<BlockHash>,
}
fn consensus_round(header: &Header) -> Result<Round, ::rlp::DecoderError> {
let round_rlp = header.seal().get(0).expect("seal passed basic verification; seal has 3 fields; qed");
UntrustedRlp::new(round_rlp.as_slice()).as_val()
}
impl ConsensusMessage {
pub fn new(signature: H520, height: Height, round: Round, step: Step, block_hash: Option<BlockHash>) -> Self {
ConsensusMessage {
signature: signature,
height: height,
round: round,
step: step,
block_hash: block_hash,
}
}
pub fn new_proposal(header: &Header) -> Result<Self, ::rlp::DecoderError> {
Ok(ConsensusMessage {
signature: try!(UntrustedRlp::new(header.seal().get(1).expect("seal passed basic verification; seal has 3 fields; qed").as_slice()).as_val()),
height: header.number() as Height,
round: try!(consensus_round(header)),
step: Step::Propose,
block_hash: Some(header.bare_hash()),
})
}
pub fn new_commit(proposal: &ConsensusMessage, signature: H520) -> Self {
ConsensusMessage {
signature: signature,
height: proposal.height,
round: proposal.round,
step: Step::Precommit,
block_hash: proposal.block_hash,
}
}
pub fn is_height(&self, height: Height) -> bool {
self.height == height
}
pub fn is_round(&self, height: Height, round: Round) -> bool {
self.height == height && self.round == round
}
pub fn is_step(&self, height: Height, round: Round, step: Step) -> bool {
self.height == height && self.round == round && self.step == step
}
pub fn is_block_hash(&self, h: Height, r: Round, s: Step, block_hash: Option<BlockHash>) -> bool {
self.height == h && self.round == r && self.step == s && self.block_hash == block_hash
}
pub fn is_aligned(&self, m: &ConsensusMessage) -> bool {
self.is_block_hash(m.height, m.round, m.step, m.block_hash)
}
pub fn verify(&self) -> Result<Address, Error> {
let full_rlp = ::rlp::encode(self);
let block_info = Rlp::new(&full_rlp).at(1);
let public_key = try!(recover(&self.signature.into(), &block_info.as_raw().sha3()));
Ok(public_to_address(&public_key))
}
pub fn precommit_hash(&self) -> H256 {
message_info_rlp(self.height, self.round, Step::Precommit, self.block_hash).sha3()
}
}
impl PartialOrd for ConsensusMessage {
fn partial_cmp(&self, m: &ConsensusMessage) -> Option<Ordering> {
Some(self.cmp(m))
}
}
impl Step {
fn number(&self) -> u8 {
match *self {
Step::Propose => 0,
Step::Prevote => 1,
Step::Precommit => 2,
Step::Commit => 3,
}
}
}
impl Ord for ConsensusMessage {
fn cmp(&self, m: &ConsensusMessage) -> Ordering {
if self.height != m.height {
self.height.cmp(&m.height)
} else if self.round != m.round {
self.round.cmp(&m.round)
} else if self.step != m.step {
self.step.number().cmp(&m.step.number())
} else {
self.signature.cmp(&m.signature)
}
}
}
impl Decodable for Step {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
match try!(decoder.as_rlp().as_val()) {
0u8 => Ok(Step::Propose),
1 => Ok(Step::Prevote),
2 => Ok(Step::Precommit),
_ => Err(DecoderError::Custom("Invalid step.")),
}
}
}
impl Encodable for Step {
fn rlp_append(&self, s: &mut RlpStream) {
s.append(&self.number());
}
}
/// (signature, height, round, step, block_hash)
impl Decodable for ConsensusMessage {
fn decode<D>(decoder: &D) -> Result<Self, DecoderError> where D: Decoder {
let rlp = decoder.as_rlp();
let m = try!(rlp.at(1));
let block_message: H256 = try!(m.val_at(3));
Ok(ConsensusMessage {
signature: try!(rlp.val_at(0)),
height: try!(m.val_at(0)),
round: try!(m.val_at(1)),
step: try!(m.val_at(2)),
block_hash: match block_message.is_zero() {
true => None,
false => Some(block_message),
}
})
}
}
impl Encodable for ConsensusMessage {
fn rlp_append(&self, s: &mut RlpStream) {
let info = message_info_rlp(self.height, self.round, self.step, self.block_hash);
s.begin_list(2)
.append(&self.signature)
.append_raw(&info, 1);
}
}
pub fn message_info_rlp(height: Height, round: Round, step: Step, block_hash: Option<BlockHash>) -> Bytes {
// TODO: figure out whats wrong with nested list encoding
let mut s = RlpStream::new_list(5);
s.append(&height).append(&round).append(&step).append(&block_hash.unwrap_or_else(H256::zero));
s.out()
}
pub fn message_full_rlp(signature: &H520, vote_info: &Bytes) -> Bytes {
let mut s = RlpStream::new_list(2);
s.append(signature).append_raw(vote_info, 1);
s.out()
}
#[cfg(test)]
mod tests {
use util::*;
use rlp::*;
use super::super::Step;
use super::*;
use account_provider::AccountProvider;
use header::Header;
#[test]
fn encode_decode() {
let message = ConsensusMessage {
signature: H520::default(),
height: 10,
round: 123,
step: Step::Precommit,
block_hash: Some("1".sha3())
};
let raw_rlp = ::rlp::encode(&message).to_vec();
let rlp = Rlp::new(&raw_rlp);
assert_eq!(message, rlp.as_val());
let message = ConsensusMessage {
signature: H520::default(),
height: 1314,
round: 0,
step: Step::Prevote,
block_hash: None
};
let raw_rlp = ::rlp::encode(&message);
let rlp = Rlp::new(&raw_rlp);
assert_eq!(message, rlp.as_val());
}
#[test]
fn generate_and_verify() {
let tap = Arc::new(AccountProvider::transient_provider());
let addr = tap.insert_account("0".sha3(), "0").unwrap();
tap.unlock_account_permanently(addr, "0".into()).unwrap();
let mi = message_info_rlp(123, 2, Step::Precommit, Some(H256::default()));
let raw_rlp = message_full_rlp(&tap.sign(addr, None, mi.sha3()).unwrap().into(), &mi);
let rlp = UntrustedRlp::new(&raw_rlp);
let message: ConsensusMessage = rlp.as_val().unwrap();
match message.verify() { Ok(a) if a == addr => {}, _ => panic!(), };
}
#[test]
fn proposal_message() {
let mut header = Header::default();
let seal = vec![
::rlp::encode(&0u8).to_vec(),
::rlp::encode(&H520::default()).to_vec(),
Vec::new()
];
header.set_seal(seal);
let message = ConsensusMessage::new_proposal(&header).unwrap();
assert_eq!(
message,
ConsensusMessage {
signature: Default::default(),
height: 0,
round: 0,
step: Step::Propose,
block_hash: Some(header.bare_hash())
}
);
}
#[test]
fn message_info_from_header() {
let header = Header::default();
let pro = ConsensusMessage {
signature: Default::default(),
height: 0,
round: 0,
step: Step::Propose,
block_hash: Some(header.bare_hash())
};
let pre = message_info_rlp(0, 0, Step::Precommit, Some(header.bare_hash()));
assert_eq!(pro.precommit_hash(), pre.sha3());
}
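// Illustrative sketch, not part of the original change set: messages sort by
// height, then round, then step number and finally signature, mirroring the
// `Ord` impl above.
#[test]
fn message_ordering_sketch() {
	let base = ConsensusMessage {
		signature: H520::default(),
		height: 10,
		round: 3,
		step: Step::Precommit,
		block_hash: None
	};
	let higher_height = ConsensusMessage {
		signature: H520::default(),
		height: 11,
		round: 0,
		step: Step::Propose,
		block_hash: None
	};
	let later_step = ConsensusMessage {
		signature: H520::default(),
		height: 10,
		round: 3,
		step: Step::Commit,
		block_hash: None
	};
	assert!(base < higher_height);
	assert!(base < later_step);
}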
}

View File

@ -0,0 +1,966 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
/// Tendermint BFT consensus engine with round robin proof-of-authority.
/// At each blockchain `Height` there can be multiple `Round`s of voting.
/// Signatures always cover `Height`, `Round`, `Step` and `BlockHash`, where `BlockHash` is the hash of the block without its seal.
/// First a block with `Seal::Proposal` is issued by the designated proposer.
/// Next the `Round` proceeds through `Prevote` and `Precommit` `Step`s.
/// A block is issued when enough `Precommit` votes for it have been collected at the end of a `Round`.
/// Once enough votes have been gathered the proposer issues that block in the `Commit` step.
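///
/// An illustrative sketch of the resulting state machine (not part of the
/// original change set; derived from `to_step` and `handle_valid_message` below):
///
///   Propose --timeout--> Prevote --2/3 aligned prevotes--> Precommit
///   Precommit --2/3 aligned precommits for a block--> Commit --> next `Height`
///   Precommit --2/3 precommits for `None` or timeout--> Propose at `Round` + 1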
mod message;
mod transition;
mod params;
mod vote_collector;
use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use util::*;
use error::{Error, BlockError};
use header::Header;
use builtin::Builtin;
use env_info::EnvInfo;
use transaction::SignedTransaction;
use rlp::{UntrustedRlp, View};
use ethkey::{recover, public_to_address};
use account_provider::AccountProvider;
use block::*;
use spec::CommonParams;
use engines::{Engine, Seal, EngineError};
use blockchain::extras::BlockDetails;
use views::HeaderView;
use evm::Schedule;
use io::{IoService, IoChannel};
use service::ClientIoMessage;
use self::message::*;
use self::transition::TransitionHandler;
use self::params::TendermintParams;
use self::vote_collector::VoteCollector;
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum Step {
Propose,
Prevote,
Precommit,
Commit
}
impl Step {
pub fn is_pre(self) -> bool {
match self {
Step::Prevote | Step::Precommit => true,
_ => false,
}
}
}
pub type Height = usize;
pub type Round = usize;
pub type BlockHash = H256;
/// Engine using `Tendermint` consensus algorithm, suitable for EVM chain.
pub struct Tendermint {
params: CommonParams,
our_params: TendermintParams,
builtins: BTreeMap<Address, Builtin>,
step_service: IoService<Step>,
/// Address to be used as authority.
authority: RwLock<Address>,
/// Password used for signing messages.
password: RwLock<Option<String>>,
/// Blockchain height.
height: AtomicUsize,
/// Consensus round.
round: AtomicUsize,
/// Consensus step.
step: RwLock<Step>,
/// Vote accumulator.
votes: VoteCollector,
/// Channel for updating the sealing.
message_channel: Mutex<Option<IoChannel<ClientIoMessage>>>,
/// Used to sign messages and proposals.
account_provider: Mutex<Option<Arc<AccountProvider>>>,
/// Message for the last PoLC (proof of lock change).
lock_change: RwLock<Option<ConsensusMessage>>,
/// Last lock round.
last_lock: AtomicUsize,
/// Bare hash of the proposed block, used for seal submission.
proposal: RwLock<Option<H256>>,
}
impl Tendermint {
/// Create a new instance of Tendermint engine
pub fn new(params: CommonParams, our_params: TendermintParams, builtins: BTreeMap<Address, Builtin>) -> Result<Arc<Self>, Error> {
let engine = Arc::new(
Tendermint {
params: params,
our_params: our_params,
builtins: builtins,
step_service: try!(IoService::<Step>::start()),
authority: RwLock::new(Address::default()),
password: RwLock::new(None),
height: AtomicUsize::new(1),
round: AtomicUsize::new(0),
step: RwLock::new(Step::Propose),
votes: VoteCollector::new(),
message_channel: Mutex::new(None),
account_provider: Mutex::new(None),
lock_change: RwLock::new(None),
last_lock: AtomicUsize::new(0),
proposal: RwLock::new(None),
});
let handler = TransitionHandler { engine: Arc::downgrade(&engine) };
try!(engine.step_service.register_handler(Arc::new(handler)));
Ok(engine)
}
fn update_sealing(&self) {
if let Some(ref channel) = *self.message_channel.lock() {
match channel.send(ClientIoMessage::UpdateSealing) {
Ok(_) => trace!(target: "poa", "UpdateSealing message sent."),
Err(err) => warn!(target: "poa", "Could not send a sealing message {}.", err),
}
}
}
fn submit_seal(&self, block_hash: H256, seal: Vec<Bytes>) {
if let Some(ref channel) = *self.message_channel.lock() {
match channel.send(ClientIoMessage::SubmitSeal(block_hash, seal)) {
Ok(_) => trace!(target: "poa", "SubmitSeal message sent."),
Err(err) => warn!(target: "poa", "Could not send a sealing message {}.", err),
}
}
}
fn broadcast_message(&self, message: Bytes) {
let channel = self.message_channel.lock().clone();
if let Some(ref channel) = channel {
match channel.send(ClientIoMessage::BroadcastMessage(message)) {
Ok(_) => trace!(target: "poa", "BroadcastMessage message sent."),
Err(err) => warn!(target: "poa", "broadcast_message: Could not send a sealing message {}.", err),
}
} else {
warn!(target: "poa", "broadcast_message: No IoChannel available.");
}
}
fn generate_message(&self, block_hash: Option<BlockHash>) -> Option<Bytes> {
if let Some(ref ap) = *self.account_provider.lock() {
let h = self.height.load(AtomicOrdering::SeqCst);
let r = self.round.load(AtomicOrdering::SeqCst);
let s = self.step.read();
let vote_info = message_info_rlp(h, r, *s, block_hash);
let authority = self.authority.read();
match ap.sign(*authority, self.password.read().clone(), vote_info.sha3()).map(Into::into) {
Ok(signature) => {
let message_rlp = message_full_rlp(&signature, &vote_info);
let message = ConsensusMessage::new(signature, h, r, *s, block_hash);
self.votes.vote(message.clone(), *authority);
debug!(target: "poa", "Generated {:?} as {}.", message, *authority);
self.handle_valid_message(&message);
Some(message_rlp)
},
Err(e) => {
trace!(target: "poa", "Could not sign the message {}", e);
None
},
}
} else {
warn!(target: "poa", "No AccountProvider available.");
None
}
}
fn generate_and_broadcast_message(&self, block_hash: Option<BlockHash>) {
if let Some(message) = self.generate_message(block_hash) {
self.broadcast_message(message);
}
}
/// Broadcast all messages since the last issued block to get the peers up to speed.
fn broadcast_old_messages(&self) {
for m in self.votes.get_up_to(self.height.load(AtomicOrdering::SeqCst)).into_iter() {
self.broadcast_message(m);
}
}
fn to_next_height(&self, height: Height) {
let new_height = height + 1;
debug!(target: "poa", "Received a Commit, transitioning to height {}.", new_height);
self.last_lock.store(0, AtomicOrdering::SeqCst);
self.height.store(new_height, AtomicOrdering::SeqCst);
self.round.store(0, AtomicOrdering::SeqCst);
*self.lock_change.write() = None;
}
/// Use `step_service` to transition between steps.
fn to_step(&self, step: Step) {
if let Err(io_err) = self.step_service.send_message(step) {
warn!(target: "poa", "Could not proceed to step {}.", io_err)
}
*self.step.write() = step;
match step {
Step::Propose => {
*self.proposal.write() = None;
self.update_sealing()
},
Step::Prevote => {
let block_hash = match *self.lock_change.read() {
Some(ref m) if !self.should_unlock(m.round) => m.block_hash,
_ => self.proposal.read().clone(),
};
self.generate_and_broadcast_message(block_hash);
},
Step::Precommit => {
trace!(target: "poa", "to_step: Precommit.");
let block_hash = match *self.lock_change.read() {
Some(ref m) if self.is_round(m) && m.block_hash.is_some() => {
trace!(target: "poa", "Setting last lock: {}", m.round);
self.last_lock.store(m.round, AtomicOrdering::SeqCst);
m.block_hash
},
_ => None,
};
self.generate_and_broadcast_message(block_hash);
},
Step::Commit => {
trace!(target: "poa", "to_step: Commit.");
// Commit the block using a complete signature set.
let round = self.round.load(AtomicOrdering::SeqCst);
let height = self.height.load(AtomicOrdering::SeqCst);
if let Some(block_hash) = *self.proposal.read() {
// Generate seal and remove old votes.
if self.is_proposer(&*self.authority.read()).is_ok() {
if let Some(seal) = self.votes.seal_signatures(height, round, block_hash) {
trace!(target: "poa", "Collected seal: {:?}", seal);
let seal = vec![
::rlp::encode(&round).to_vec(),
::rlp::encode(&seal.proposal).to_vec(),
::rlp::encode(&seal.votes).to_vec()
];
self.submit_seal(block_hash, seal);
self.to_next_height(height);
} else {
warn!(target: "poa", "Not enough votes found!");
}
}
}
},
}
}
fn is_authority(&self, address: &Address) -> bool {
self.our_params.authorities.contains(address)
}
fn is_above_threshold(&self, n: usize) -> bool {
n > self.our_params.authority_n * 2/3
}
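// Illustrative arithmetic, not part of the original change set: the check uses
// integer division, so with 4 authorities 4 * 2 / 3 == 2 and at least 3 votes
// are required; with 3 authorities 3 * 2 / 3 == 2 as well, so all 3 must vote.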
/// Check if the address is the designated proposer for the given height and round.
fn is_round_proposer(&self, height: Height, round: Round, address: &Address) -> Result<(), EngineError> {
let ref p = self.our_params;
let proposer_nonce = height + round;
trace!(target: "poa", "is_proposer: Proposer nonce: {}", proposer_nonce);
let proposer = p.authorities.get(proposer_nonce % p.authority_n).expect("There are authority_n authorities; taking number modulo authority_n gives number in authority_n range; qed");
if proposer == address {
Ok(())
} else {
Err(EngineError::NotProposer(Mismatch { expected: proposer.clone(), found: address.clone() }))
}
}
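// Illustrative example, not part of the original change set: with
// authority_n == 2, height == 1 and round == 0 the nonce is 1, so
// authorities[1] proposes; bumping the round to 1 makes the nonce 2 and
// rotates the proposal back to authorities[0].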
/// Check if address is the current proposer.
fn is_proposer(&self, address: &Address) -> Result<(), EngineError> {
self.is_round_proposer(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst), address)
}
fn is_height(&self, message: &ConsensusMessage) -> bool {
message.is_height(self.height.load(AtomicOrdering::SeqCst))
}
fn is_round(&self, message: &ConsensusMessage) -> bool {
message.is_round(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst))
}
fn increment_round(&self, n: Round) {
trace!(target: "poa", "increment_round: New round.");
self.round.fetch_add(n, AtomicOrdering::SeqCst);
}
fn should_unlock(&self, lock_change_round: Round) -> bool {
self.last_lock.load(AtomicOrdering::SeqCst) < lock_change_round
&& lock_change_round < self.round.load(AtomicOrdering::SeqCst)
}
fn has_enough_any_votes(&self) -> bool {
let step_votes = self.votes.count_step_votes(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst), *self.step.read());
self.is_above_threshold(step_votes)
}
fn has_enough_future_step_votes(&self, message: &ConsensusMessage) -> bool {
if message.round > self.round.load(AtomicOrdering::SeqCst) {
let step_votes = self.votes.count_step_votes(message.height, message.round, message.step);
self.is_above_threshold(step_votes)
} else {
false
}
}
fn has_enough_aligned_votes(&self, message: &ConsensusMessage) -> bool {
let aligned_count = self.votes.count_aligned_votes(&message);
self.is_above_threshold(aligned_count)
}
fn handle_valid_message(&self, message: &ConsensusMessage) {
let is_newer_than_lock = match *self.lock_change.read() {
Some(ref lock) => message > lock,
None => true,
};
let lock_change = is_newer_than_lock
&& message.step == Step::Prevote
&& message.block_hash.is_some()
&& self.has_enough_aligned_votes(message);
if lock_change {
trace!(target: "poa", "handle_valid_message: Lock change.");
*self.lock_change.write() = Some(message.clone());
}
// Check if it can affect the step transition.
if self.is_height(message) {
let next_step = match *self.step.read() {
Step::Precommit if self.has_enough_aligned_votes(message) => {
if message.block_hash.is_none() {
self.increment_round(1);
Some(Step::Propose)
} else {
Some(Step::Commit)
}
},
Step::Precommit if self.has_enough_future_step_votes(message) => {
self.increment_round(message.round - self.round.load(AtomicOrdering::SeqCst));
Some(Step::Precommit)
},
// Avoid counting twice.
Step::Prevote if lock_change => Some(Step::Precommit),
Step::Prevote if self.has_enough_aligned_votes(message) => Some(Step::Precommit),
Step::Prevote if self.has_enough_future_step_votes(message) => {
self.increment_round(message.round - self.round.load(AtomicOrdering::SeqCst));
Some(Step::Prevote)
},
_ => None,
};
if let Some(step) = next_step {
trace!(target: "poa", "Transition to {:?} triggered.", step);
self.to_step(step);
}
}
}
}
impl Engine for Tendermint {
fn name(&self) -> &str { "Tendermint" }
fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) }
/// (consensus round, proposal signature, authority signatures)
fn seal_fields(&self) -> usize { 3 }
fn params(&self) -> &CommonParams { &self.params }
fn builtins(&self) -> &BTreeMap<Address, Builtin> { &self.builtins }
fn maximum_uncle_count(&self) -> usize { 0 }
fn maximum_uncle_age(&self) -> usize { 0 }
/// Additional engine-specific information for the user/developer concerning `header`.
fn extra_info(&self, header: &Header) -> BTreeMap<String, String> {
let message = ConsensusMessage::new_proposal(header).expect("Invalid header.");
map![
"signature".into() => message.signature.to_string(),
"height".into() => message.height.to_string(),
"round".into() => message.round.to_string(),
"block_hash".into() => message.block_hash.as_ref().map(ToString::to_string).unwrap_or("".into())
]
}
fn schedule(&self, _env_info: &EnvInfo) -> Schedule {
Schedule::new_post_eip150(usize::max_value(), true, true, true)
}
fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) {
header.set_difficulty(parent.difficulty().clone());
header.set_gas_limit({
let gas_limit = parent.gas_limit().clone();
let bound_divisor = self.our_params.gas_limit_bound_divisor;
if gas_limit < gas_floor_target {
min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into())
} else {
max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into())
}
});
}
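// Illustrative arithmetic, not part of the original change set: with the default
// bound divisor of 1024 and a parent gas limit of 4,000,000, the child limit may
// move by at most 4_000_000 / 1024 - 1 = 3905 towards gas_floor_target per block.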
/// Whether this node should participate in sealing.
fn is_sealer(&self, address: &Address) -> Option<bool> {
Some(self.is_authority(address))
}
/// Attempt to seal the block internally by generating a proposal seal.
fn generate_seal(&self, block: &ExecutedBlock) -> Seal {
if let Some(ref ap) = *self.account_provider.lock() {
let header = block.header();
let author = header.author();
// Only the proposer may generate a seal, and only if no proposal has been generated yet.
if self.is_proposer(author).is_err() || self.proposal.read().is_some() {
return Seal::None;
}
let height = header.number() as Height;
let round = self.round.load(AtomicOrdering::SeqCst);
let bh = Some(header.bare_hash());
let vote_info = message_info_rlp(height, round, Step::Propose, bh.clone());
if let Ok(signature) = ap.sign(*author, self.password.read().clone(), vote_info.sha3()).map(H520::from) {
// Insert Propose vote.
debug!(target: "poa", "Submitting proposal {} at height {} round {}.", header.bare_hash(), height, round);
self.votes.vote(ConsensusMessage::new(signature, height, round, Step::Propose, bh), *author);
// Remember proposal for later seal submission.
*self.proposal.write() = bh;
Seal::Proposal(vec![
::rlp::encode(&round).to_vec(),
::rlp::encode(&signature).to_vec(),
::rlp::EMPTY_LIST_RLP.to_vec()
])
} else {
warn!(target: "poa", "generate_seal: FAIL: accounts secret key unavailable");
Seal::None
}
} else {
warn!(target: "poa", "generate_seal: FAIL: accounts not provided");
Seal::None
}
}
fn handle_message(&self, rlp: &[u8]) -> Result<(), Error> {
let rlp = UntrustedRlp::new(rlp);
let message: ConsensusMessage = try!(rlp.as_val());
if !self.votes.is_old_or_known(&message) {
let sender = public_to_address(&try!(recover(&message.signature.into(), &try!(rlp.at(1)).as_raw().sha3())));
if !self.is_authority(&sender) {
try!(Err(EngineError::NotAuthorized(sender)));
}
self.broadcast_message(rlp.as_raw().to_vec());
trace!(target: "poa", "Handling a valid {:?} from {}.", message, sender);
self.votes.vote(message.clone(), sender);
self.handle_valid_message(&message);
}
Ok(())
}
fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
let seal_length = header.seal().len();
if seal_length == self.seal_fields() {
let signatures_len = header.seal()[2].len();
if signatures_len >= 1 {
Ok(())
} else {
Err(From::from(EngineError::BadSealFieldSize(OutOfBounds {
min: Some(1),
max: None,
found: signatures_len
})))
}
} else {
Err(From::from(BlockError::InvalidSealArity(
Mismatch { expected: self.seal_fields(), found: seal_length }
)))
}
}
fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
let proposal = try!(ConsensusMessage::new_proposal(header));
let proposer = try!(proposal.verify());
if !self.is_authority(&proposer) {
try!(Err(EngineError::NotAuthorized(proposer)))
}
let precommit_hash = proposal.precommit_hash();
let ref signatures_field = header.seal()[2];
let mut signature_count = 0;
let mut origins = HashSet::new();
for rlp in UntrustedRlp::new(signatures_field).iter() {
let precommit: ConsensusMessage = ConsensusMessage::new_commit(&proposal, try!(rlp.as_val()));
let address = match self.votes.get(&precommit) {
Some(a) => a,
None => public_to_address(&try!(recover(&precommit.signature.into(), &precommit_hash))),
};
if !self.our_params.authorities.contains(&address) {
try!(Err(EngineError::NotAuthorized(address.to_owned())))
}
if origins.insert(address) {
signature_count += 1;
} else {
warn!(target: "poa", "verify_block_unordered: Duplicate signature from {} on the seal.", address);
try!(Err(BlockError::InvalidSeal));
}
}
// Check whether this is a proposal when there are not enough precommits.
if !self.is_above_threshold(signature_count) {
let signatures_len = signatures_field.len();
// Proposal has to have an empty signature list.
if signatures_len != 1 {
try!(Err(EngineError::BadSealFieldSize(OutOfBounds {
min: Some(1),
max: Some(1),
found: signatures_len
})));
}
try!(self.is_round_proposer(proposal.height, proposal.round, &proposer));
}
Ok(())
}
fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> {
if header.number() == 0 {
try!(Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() })));
}
let gas_limit_divisor = self.our_params.gas_limit_bound_divisor;
let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor;
let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor;
if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas {
try!(Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() })));
}
Ok(())
}
fn verify_transaction_basic(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> {
try!(t.check_low_s());
Ok(())
}
fn verify_transaction(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> {
t.sender().map(|_|()) // Perform EC recovery and cache sender
}
fn set_signer(&self, address: Address, password: String) {
*self.authority.write() = address;
*self.password.write() = Some(password);
self.to_step(Step::Propose);
}
fn stop(&self) {
self.step_service.stop()
}
fn is_new_best_block(&self, _best_total_difficulty: U256, best_header: HeaderView, _parent_details: &BlockDetails, new_header: &HeaderView) -> bool {
let new_number = new_header.number();
let best_number = best_header.number();
trace!(target: "poa", "new_header: {}, best_header: {}", new_number, best_number);
if new_number != best_number {
new_number > best_number
} else {
let new_seal = new_header.seal();
let best_seal = best_header.seal();
let new_signatures = new_seal.get(2).expect("Tendermint seal should have three elements.").len();
let best_signatures = best_seal.get(2).expect("Tendermint seal should have three elements.").len();
if new_signatures > best_signatures {
true
} else {
let new_round: Round = ::rlp::Rlp::new(&new_seal.get(0).expect("Tendermint seal should have three elements.")).as_val();
let best_round: Round = ::rlp::Rlp::new(&best_seal.get(0).expect("Tendermint seal should have three elements.")).as_val();
new_round > best_round
}
}
}
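// Illustrative tie-breaking, not part of the original change set: two blocks at
// the same height are first compared by the number of precommit signatures in
// seal field 2; only if those are equal does the higher consensus round win.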
fn is_proposal(&self, header: &Header) -> bool {
let signatures_len = header.seal()[2].len();
// For a proposal, the signatures seal field has to be the RLP of an empty list (a single byte), hence the length check below.
let proposal = ConsensusMessage::new_proposal(header).expect("block went through full verification; this Engine verifies new_proposal creation; qed");
if signatures_len != 1 {
// New Commit received, skip to next height.
trace!(target: "poa", "Received a commit for height {}, round {}.", proposal.height, proposal.round);
self.to_next_height(proposal.height);
return false;
}
let proposer = proposal.verify().expect("block went through full verification; this Engine tries verify; qed");
debug!(target: "poa", "Received a new proposal for height {}, round {} from {}.", proposal.height, proposal.round, proposer);
if self.is_round(&proposal) {
*self.proposal.write() = proposal.block_hash.clone();
}
self.votes.vote(proposal, proposer);
true
}
/// Equivalent to a timeout: to be used for tests.
fn step(&self) {
let next_step = match *self.step.read() {
Step::Propose => {
trace!(target: "poa", "Propose timeout.");
Step::Prevote
},
Step::Prevote if self.has_enough_any_votes() => {
trace!(target: "poa", "Prevote timeout.");
Step::Precommit
},
Step::Prevote => {
trace!(target: "poa", "Prevote timeout without enough votes.");
self.broadcast_old_messages();
Step::Prevote
},
Step::Precommit if self.has_enough_any_votes() => {
trace!(target: "poa", "Precommit timeout.");
self.increment_round(1);
Step::Propose
},
Step::Precommit => {
trace!(target: "poa", "Precommit timeout without enough votes.");
self.broadcast_old_messages();
Step::Precommit
},
Step::Commit => {
trace!(target: "poa", "Commit timeout.");
Step::Propose
},
};
self.to_step(next_step);
}
fn register_message_channel(&self, message_channel: IoChannel<ClientIoMessage>) {
trace!(target: "poa", "Register the IoChannel.");
*self.message_channel.lock() = Some(message_channel);
}
fn register_account_provider(&self, account_provider: Arc<AccountProvider>) {
*self.account_provider.lock() = Some(account_provider);
}
}
#[cfg(test)]
mod tests {
use util::*;
use util::trie::TrieSpec;
use io::{IoContext, IoHandler};
use block::*;
use error::{Error, BlockError};
use header::Header;
use env_info::EnvInfo;
use tests::helpers::*;
use account_provider::AccountProvider;
use io::IoService;
use service::ClientIoMessage;
use spec::Spec;
use engines::{Engine, EngineError, Seal};
use super::*;
use super::message::*;
/// Accounts inserted with "0" and "1" are authorities. First proposer is "0".
fn setup() -> (Spec, Arc<AccountProvider>) {
let tap = Arc::new(AccountProvider::transient_provider());
let spec = Spec::new_test_tendermint();
spec.engine.register_account_provider(tap.clone());
(spec, tap)
}
fn propose_default(spec: &Spec, proposer: Address) -> (LockedBlock, Vec<Bytes>) {
let mut db_result = get_temp_state_db();
let mut db = db_result.take();
spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap();
let genesis_header = spec.genesis_header();
let last_hashes = Arc::new(vec![genesis_header.hash()]);
let b = OpenBlock::new(spec.engine.as_ref(), Default::default(), false, db.boxed_clone(), &genesis_header, last_hashes, proposer, (3141562.into(), 31415620.into()), vec![]).unwrap();
let b = b.close_and_lock();
if let Seal::Proposal(seal) = spec.engine.generate_seal(b.block()) {
(b, seal)
} else {
panic!()
}
}
fn vote<F>(engine: &Arc<Engine>, signer: F, height: usize, round: usize, step: Step, block_hash: Option<H256>) -> Bytes where F: FnOnce(H256) -> Result<H520, ::account_provider::Error> {
let mi = message_info_rlp(height, round, step, block_hash);
let m = message_full_rlp(&signer(mi.sha3()).unwrap().into(), &mi);
engine.handle_message(&m).unwrap();
m
}
fn proposal_seal(tap: &Arc<AccountProvider>, header: &Header, round: Round) -> Vec<Bytes> {
let author = header.author();
let vote_info = message_info_rlp(header.number() as Height, round, Step::Propose, Some(header.bare_hash()));
let signature = tap.sign(*author, None, vote_info.sha3()).unwrap();
vec![
::rlp::encode(&round).to_vec(),
::rlp::encode(&H520::from(signature)).to_vec(),
::rlp::EMPTY_LIST_RLP.to_vec()
]
}
fn precommit_signatures(tap: &Arc<AccountProvider>, height: Height, round: Round, bare_hash: Option<H256>, v1: H160, v2: H160) -> Bytes {
let vote_info = message_info_rlp(height, round, Step::Precommit, bare_hash);
::rlp::encode(&vec![
H520::from(tap.sign(v1, None, vote_info.sha3()).unwrap()),
H520::from(tap.sign(v2, None, vote_info.sha3()).unwrap())
]).to_vec()
}
fn insert_and_unlock(tap: &Arc<AccountProvider>, acc: &str) -> Address {
let addr = tap.insert_account(acc.sha3(), acc).unwrap();
tap.unlock_account_permanently(addr, acc.into()).unwrap();
addr
}
fn insert_and_register(tap: &Arc<AccountProvider>, engine: &Arc<Engine>, acc: &str) -> Address {
let addr = insert_and_unlock(tap, acc);
engine.set_signer(addr.clone(), acc.into());
addr
}
struct TestIo {
received: RwLock<Vec<ClientIoMessage>>
}
impl TestIo {
fn new() -> Arc<Self> { Arc::new(TestIo { received: RwLock::new(Vec::new()) }) }
}
impl IoHandler<ClientIoMessage> for TestIo {
fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
self.received.write().push(net_message.clone());
}
}
#[test]
fn has_valid_metadata() {
let engine = Spec::new_test_tendermint().engine;
assert!(!engine.name().is_empty());
assert!(engine.version().major >= 1);
}
#[test]
fn can_return_schedule() {
let engine = Spec::new_test_tendermint().engine;
let schedule = engine.schedule(&EnvInfo {
number: 10000000,
author: 0.into(),
timestamp: 0,
difficulty: 0.into(),
last_hashes: Arc::new(vec![]),
gas_used: 0.into(),
gas_limit: 0.into(),
});
assert!(schedule.stack_limit > 0);
}
#[test]
fn verification_fails_on_short_seal() {
let engine = Spec::new_test_tendermint().engine;
let header = Header::default();
let verify_result = engine.verify_block_basic(&header, None);
match verify_result {
Err(Error::Block(BlockError::InvalidSealArity(_))) => {},
Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); },
_ => { panic!("Should be error, got Ok"); },
}
}
#[test]
fn allows_correct_proposer() {
let (spec, tap) = setup();
let engine = spec.engine;
let mut header = Header::default();
let validator = insert_and_unlock(&tap, "0");
header.set_author(validator);
let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal);
// Good proposer.
assert!(engine.verify_block_unordered(&header.clone(), None).is_ok());
let validator = insert_and_unlock(&tap, "1");
header.set_author(validator);
let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal);
// Bad proposer.
match engine.verify_block_unordered(&header, None) {
Err(Error::Engine(EngineError::NotProposer(_))) => {},
_ => panic!(),
}
let random = insert_and_unlock(&tap, "101");
header.set_author(random);
let seal = proposal_seal(&tap, &header, 0);
header.set_seal(seal);
// Not authority.
match engine.verify_block_unordered(&header, None) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(),
};
engine.stop();
}
#[test]
fn seal_signatures_checking() {
let (spec, tap) = setup();
let engine = spec.engine;
let mut header = Header::default();
let proposer = insert_and_unlock(&tap, "1");
header.set_author(proposer);
let mut seal = proposal_seal(&tap, &header, 0);
let vote_info = message_info_rlp(0, 0, Step::Precommit, Some(header.bare_hash()));
let signature1 = tap.sign(proposer, None, vote_info.sha3()).unwrap();
seal[2] = ::rlp::encode(&vec![H520::from(signature1.clone())]).to_vec();
header.set_seal(seal.clone());
// One good signature is not enough.
match engine.verify_block_unordered(&header, None) {
Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {},
_ => panic!(),
}
let voter = insert_and_unlock(&tap, "0");
let signature0 = tap.sign(voter, None, vote_info.sha3()).unwrap();
seal[2] = ::rlp::encode(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).to_vec();
header.set_seal(seal.clone());
assert!(engine.verify_block_unordered(&header, None).is_ok());
let bad_voter = insert_and_unlock(&tap, "101");
let bad_signature = tap.sign(bad_voter, None, vote_info.sha3()).unwrap();
seal[2] = ::rlp::encode(&vec![H520::from(signature1), H520::from(bad_signature)]).to_vec();
header.set_seal(seal);
// One good and one bad signature.
match engine.verify_block_unordered(&header, None) {
Err(Error::Engine(EngineError::NotAuthorized(_))) => {},
_ => panic!(),
};
engine.stop();
}
#[test]
fn can_generate_seal() {
let (spec, tap) = setup();
let proposer = insert_and_register(&tap, &spec.engine, "1");
let (b, seal) = propose_default(&spec, proposer);
assert!(b.try_seal(spec.engine.as_ref(), seal).is_ok());
spec.engine.stop();
}
#[test]
fn can_recognize_proposal() {
let (spec, tap) = setup();
let proposer = insert_and_register(&tap, &spec.engine, "1");
let (b, seal) = propose_default(&spec, proposer);
let sealed = b.seal(spec.engine.as_ref(), seal).unwrap();
assert!(spec.engine.is_proposal(sealed.header()));
spec.engine.stop();
}
#[test]
fn relays_messages() {
let (spec, tap) = setup();
let engine = spec.engine.clone();
let mut db_result = get_temp_state_db();
let mut db = db_result.take();
spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap();
let v0 = insert_and_register(&tap, &engine, "0");
let v1 = insert_and_register(&tap, &engine, "1");
let h = 0;
let r = 0;
// Propose
let (b, _) = propose_default(&spec, v1.clone());
let proposal = Some(b.header().bare_hash());
// Register an IoHandler that remembers received messages.
let io_service = IoService::<ClientIoMessage>::start().unwrap();
let test_io = TestIo::new();
io_service.register_handler(test_io.clone()).unwrap();
engine.register_message_channel(io_service.channel());
let prevote_current = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal);
let precommit_current = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Precommit, proposal);
let prevote_future = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h + 1, r, Step::Prevote, proposal);
// Wait a bit for async stuff.
::std::thread::sleep(::std::time::Duration::from_millis(500));
// Relays all valid present and future messages.
assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(prevote_current)));
assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(precommit_current)));
assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(prevote_future)));
engine.stop();
}
#[test]
fn seal_submission() {
let (spec, tap) = setup();
let engine = spec.engine.clone();
let mut db_result = get_temp_state_db();
let mut db = db_result.take();
spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap();
let v0 = insert_and_register(&tap, &engine, "0");
let v1 = insert_and_register(&tap, &engine, "1");
let h = 1;
let r = 0;
// Register an IoHandler that remembers received messages.
let test_io = TestIo::new();
let io_service = IoService::<ClientIoMessage>::start().unwrap();
io_service.register_handler(test_io.clone()).unwrap();
engine.register_message_channel(io_service.channel());
// Propose
let (b, mut seal) = propose_default(&spec, v1.clone());
let proposal = Some(b.header().bare_hash());
engine.step();
// Prevote.
vote(&engine, |mh| tap.sign(v1, None, mh).map(H520::from), h, r, Step::Prevote, proposal);
vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal);
vote(&engine, |mh| tap.sign(v1, None, mh).map(H520::from), h, r, Step::Precommit, proposal);
vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Precommit, proposal);
// Wait a bit for async stuff.
::std::thread::sleep(::std::time::Duration::from_millis(500));
seal[2] = precommit_signatures(&tap, h, r, Some(b.header().bare_hash()), v1, v0);
assert!(test_io.received.read().contains(&ClientIoMessage::SubmitSeal(proposal.unwrap(), seal)));
engine.stop();
}
}

View File

@ -0,0 +1,72 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint specific parameters.
use ethjson;
use super::transition::TendermintTimeouts;
use util::{Address, U256};
use time::Duration;
/// `Tendermint` params.
#[derive(Debug, Clone)]
pub struct TendermintParams {
/// Gas limit divisor.
pub gas_limit_bound_divisor: U256,
/// List of authorities.
pub authorities: Vec<Address>,
/// Number of authorities.
pub authority_n: usize,
/// Timeout durations for different steps.
pub timeouts: TendermintTimeouts,
}
impl Default for TendermintParams {
fn default() -> Self {
let authorities = vec!["0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e".into(), "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1".into()];
let val_n = authorities.len();
TendermintParams {
gas_limit_bound_divisor: 0x0400.into(),
authorities: authorities,
authority_n: val_n,
timeouts: TendermintTimeouts::default(),
}
}
}
fn to_duration(ms: ethjson::uint::Uint) -> Duration {
let ms: usize = ms.into();
Duration::milliseconds(ms as i64)
}
impl From<ethjson::spec::TendermintParams> for TendermintParams {
fn from(p: ethjson::spec::TendermintParams) -> Self {
let val: Vec<_> = p.authorities.into_iter().map(Into::into).collect();
let val_n = val.len();
let dt = TendermintTimeouts::default();
TendermintParams {
gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(),
authorities: val,
authority_n: val_n,
timeouts: TendermintTimeouts {
propose: p.timeout_propose.map_or(dt.propose, to_duration),
prevote: p.timeout_prevote.map_or(dt.prevote, to_duration),
precommit: p.timeout_precommit.map_or(dt.precommit, to_duration),
commit: p.timeout_commit.map_or(dt.commit, to_duration),
},
}
}
}

View File

@ -0,0 +1,96 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint timeout handling.
use std::sync::Weak;
use time::Duration;
use io::{IoContext, IoHandler, TimerToken};
use super::{Tendermint, Step};
use engines::Engine;
pub struct TransitionHandler {
pub engine: Weak<Tendermint>,
}
/// Base timeout for each consensus step.
#[derive(Debug, Clone)]
pub struct TendermintTimeouts {
pub propose: Duration,
pub prevote: Duration,
pub precommit: Duration,
pub commit: Duration,
}
impl TendermintTimeouts {
pub fn for_step(&self, step: Step) -> Duration {
match step {
Step::Propose => self.propose,
Step::Prevote => self.prevote,
Step::Precommit => self.precommit,
Step::Commit => self.commit,
}
}
}
impl Default for TendermintTimeouts {
fn default() -> Self {
TendermintTimeouts {
propose: Duration::milliseconds(10000),
prevote: Duration::milliseconds(10000),
precommit: Duration::milliseconds(10000),
commit: Duration::milliseconds(10000),
}
}
}
/// Timer token representing the consensus step timeouts.
pub const ENGINE_TIMEOUT_TOKEN: TimerToken = 23;
fn set_timeout(io: &IoContext<Step>, timeout: Duration) {
io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timeout.num_milliseconds() as u64)
.unwrap_or_else(|e| warn!(target: "poa", "Failed to set consensus step timeout: {}.", e))
}
impl IoHandler<Step> for TransitionHandler {
fn initialize(&self, io: &IoContext<Step>) {
if let Some(engine) = self.engine.upgrade() {
set_timeout(io, engine.our_params.timeouts.propose)
}
}
fn timeout(&self, _io: &IoContext<Step>, timer: TimerToken) {
if timer == ENGINE_TIMEOUT_TOKEN {
if let Some(engine) = self.engine.upgrade() {
engine.step();
}
}
}
fn message(&self, io: &IoContext<Step>, next_step: &Step) {
if let Some(engine) = self.engine.upgrade() {
if let Err(io_err) = io.clear_timer(ENGINE_TIMEOUT_TOKEN) {
warn!(target: "poa", "Could not remove consensus timer {}.", io_err)
}
match *next_step {
Step::Propose => set_timeout(io, engine.our_params.timeouts.propose),
Step::Prevote => set_timeout(io, engine.our_params.timeouts.prevote),
Step::Precommit => set_timeout(io, engine.our_params.timeouts.precommit),
Step::Commit => set_timeout(io, engine.our_params.timeouts.commit),
};
}
}
}
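// Illustrative flow, not part of the original change set: `Tendermint::to_step`
// sends the new `Step` over `step_service`, the `message` handler above clears
// ENGINE_TIMEOUT_TOKEN and re-arms it with that step's duration, and if the timer
// fires before enough votes arrive, `timeout` calls `engine.step()` to force the
// transition.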

View File

@ -0,0 +1,272 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Collects votes on hashes at each height and round.
use util::*;
use super::message::ConsensusMessage;
use super::{Height, Round, Step};
#[derive(Debug)]
pub struct VoteCollector {
/// Stores all Proposals, Prevotes and Precommits.
votes: RwLock<BTreeMap<ConsensusMessage, Address>>,
}
#[derive(Debug)]
pub struct SealSignatures {
pub proposal: H520,
pub votes: Vec<H520>,
}
impl PartialEq for SealSignatures {
fn eq(&self, other: &SealSignatures) -> bool {
self.proposal == other.proposal
&& self.votes.iter().collect::<HashSet<_>>() == other.votes.iter().collect::<HashSet<_>>()
}
}
impl Eq for SealSignatures {}
impl VoteCollector {
pub fn new() -> VoteCollector {
let mut collector = BTreeMap::new();
// Insert dummy message to fulfill invariant: "only messages newer than the oldest are inserted".
collector.insert(ConsensusMessage {
signature: H520::default(),
height: 0,
round: 0,
step: Step::Propose,
block_hash: None
},
Address::default());
VoteCollector { votes: RwLock::new(collector) }
}
/// Insert vote if it is newer than the oldest one.
pub fn vote(&self, message: ConsensusMessage, voter: Address) -> Option<Address> {
self.votes.write().insert(message, voter)
}
pub fn is_old_or_known(&self, message: &ConsensusMessage) -> bool {
self.votes.read().get(message).map_or(false, |a| {
trace!(target: "poa", "Known message from {}: {:?}.", a, message);
true
}) || {
let guard = self.votes.read();
let is_old = guard.keys().next().map_or(true, |oldest| message <= oldest);
if is_old { trace!(target: "poa", "Old message {:?}.", message); }
is_old
}
}
/// Throws out messages older than `message`, leaving `message` itself as the marker for the oldest retained entry.
pub fn throw_out_old(&self, message: &ConsensusMessage) {
let mut guard = self.votes.write();
let new_collector = guard.split_off(message);
*guard = new_collector;
}
pub fn seal_signatures(&self, height: Height, round: Round, block_hash: H256) -> Option<SealSignatures> {
let bh = Some(block_hash);
let (proposal, votes) = {
let guard = self.votes.read();
let mut current_signatures = guard.keys().skip_while(|m| !m.is_block_hash(height, round, Step::Propose, bh));
let proposal = current_signatures.next().cloned();
let votes = current_signatures
.skip_while(|m| !m.is_block_hash(height, round, Step::Precommit, bh))
.filter(|m| m.is_block_hash(height, round, Step::Precommit, bh))
.cloned()
.collect::<Vec<_>>();
(proposal, votes)
};
if votes.is_empty() {
return None;
}
// Remove messages that are no longer relevant.
votes.last().map(|m| self.throw_out_old(m));
let mut votes_vec: Vec<_> = votes.into_iter().map(|m| m.signature).collect();
votes_vec.sort();
proposal.map(|p| SealSignatures {
proposal: p.signature,
votes: votes_vec,
})
}
pub fn count_aligned_votes(&self, message: &ConsensusMessage) -> usize {
let guard = self.votes.read();
guard.keys()
.skip_while(|m| !m.is_aligned(message))
// Messages are sorted by signature last, so aligned votes might not be contiguous.
.filter(|m| m.is_aligned(message))
.count()
}
pub fn count_step_votes(&self, height: Height, round: Round, step: Step) -> usize {
let guard = self.votes.read();
let current = guard.iter().skip_while(|&(m, _)| !m.is_step(height, round, step));
let mut origins = HashSet::new();
let mut n = 0;
for (message, origin) in current {
if message.is_step(height, round, step) {
if origins.insert(origin) {
n += 1;
} else {
warn!("count_step_votes: Authority {} has cast multiple step votes, this indicates malicious behaviour.", origin)
}
}
}
n
}
pub fn get_up_to(&self, height: Height) -> Vec<Bytes> {
let guard = self.votes.read();
guard
.keys()
.filter(|m| m.step.is_pre())
.take_while(|m| m.height <= height)
.map(|m| ::rlp::encode(m).to_vec())
.collect()
}
pub fn get(&self, message: &ConsensusMessage) -> Option<Address> {
let guard = self.votes.read();
guard.get(message).cloned()
}
}
#[cfg(test)]
mod tests {
use util::*;
use super::*;
use super::super::{Height, Round, BlockHash, Step};
use super::super::message::ConsensusMessage;
fn random_vote(collector: &VoteCollector, signature: H520, h: Height, r: Round, step: Step, block_hash: Option<BlockHash>) -> Option<H160> {
full_vote(collector, signature, h, r, step, block_hash, H160::random())
}
fn full_vote(collector: &VoteCollector, signature: H520, h: Height, r: Round, step: Step, block_hash: Option<BlockHash>, address: Address) -> Option<H160> {
collector.vote(ConsensusMessage { signature: signature, height: h, round: r, step: step, block_hash: block_hash }, address)
}
#[test]
fn seal_retrieval() {
let collector = VoteCollector::new();
let bh = Some("1".sha3());
let h = 1;
let r = 2;
let mut signatures = Vec::new();
for _ in 0..5 {
signatures.push(H520::random());
}
// Wrong height proposal.
random_vote(&collector, signatures[4].clone(), h - 1, r, Step::Propose, bh.clone());
// Good proposal
random_vote(&collector, signatures[0].clone(), h, r, Step::Propose, bh.clone());
// Wrong block proposal.
random_vote(&collector, signatures[0].clone(), h, r, Step::Propose, Some("0".sha3()));
// Wrong block precommit.
random_vote(&collector, signatures[3].clone(), h, r, Step::Precommit, Some("0".sha3()));
// Wrong round proposal.
random_vote(&collector, signatures[0].clone(), h, r - 1, Step::Propose, bh.clone());
// Prevote.
random_vote(&collector, signatures[0].clone(), h, r, Step::Prevote, bh.clone());
// Relevant precommit.
random_vote(&collector, signatures[2].clone(), h, r, Step::Precommit, bh.clone());
// Repeated vote with the same signature.
random_vote(&collector, signatures[2].clone(), h, r, Step::Precommit, bh.clone());
// Wrong round precommit.
random_vote(&collector, signatures[4].clone(), h, r + 1, Step::Precommit, bh.clone());
// Wrong height precommit.
random_vote(&collector, signatures[3].clone(), h + 1, r, Step::Precommit, bh.clone());
// Relevant precommit.
random_vote(&collector, signatures[1].clone(), h, r, Step::Precommit, bh.clone());
// Wrong round precommit, same signature.
random_vote(&collector, signatures[1].clone(), h, r + 1, Step::Precommit, bh.clone());
// Wrong round precommit.
random_vote(&collector, signatures[4].clone(), h, r - 1, Step::Precommit, bh.clone());
let seal = SealSignatures {
proposal: signatures[0],
votes: signatures[1..3].to_vec()
};
assert_eq!(seal, collector.seal_signatures(h, r, bh.unwrap()).unwrap());
}
#[test]
fn count_votes() {
let collector = VoteCollector::new();
// good prevote
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()));
random_vote(&collector, H520::random(), 3, 1, Step::Prevote, Some("0".sha3()));
// good precommit
random_vote(&collector, H520::random(), 3, 2, Step::Precommit, Some("0".sha3()));
random_vote(&collector, H520::random(), 3, 3, Step::Precommit, Some("0".sha3()));
// good prevote
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3()));
// good prevote
let same_sig = H520::random();
random_vote(&collector, same_sig.clone(), 3, 2, Step::Prevote, Some("1".sha3()));
random_vote(&collector, same_sig, 3, 2, Step::Prevote, Some("1".sha3()));
// good precommit
random_vote(&collector, H520::random(), 3, 2, Step::Precommit, Some("1".sha3()));
// good prevote
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()));
random_vote(&collector, H520::random(), 2, 2, Step::Precommit, Some("2".sha3()));
assert_eq!(collector.count_step_votes(3, 2, Step::Prevote), 4);
assert_eq!(collector.count_step_votes(3, 2, Step::Precommit), 2);
let message = ConsensusMessage {
signature: H520::default(),
height: 3,
round: 2,
step: Step::Prevote,
block_hash: Some("1".sha3())
};
assert_eq!(collector.count_aligned_votes(&message), 2);
}
#[test]
fn remove_old() {
let collector = VoteCollector::new();
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()));
random_vote(&collector, H520::random(), 3, 1, Step::Prevote, Some("0".sha3()));
random_vote(&collector, H520::random(), 3, 3, Step::Precommit, Some("0".sha3()));
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3()));
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3()));
random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()));
random_vote(&collector, H520::random(), 2, 2, Step::Precommit, Some("2".sha3()));
let message = ConsensusMessage {
signature: H520::default(),
height: 3,
round: 2,
step: Step::Precommit,
block_hash: Some("1".sha3())
};
collector.throw_out_old(&message);
assert_eq!(collector.votes.read().len(), 1);
}
#[test]
fn malicious_authority() {
let collector = VoteCollector::new();
full_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()), Address::default());
full_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3()), Address::default());
assert_eq!(collector.count_step_votes(3, 2, Step::Prevote), 1);
}
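// Illustrative sketch, not part of the original change set: after `throw_out_old`
// has advanced the oldest marker, anything at or below it is reported as old.
#[test]
fn ignores_old_messages_sketch() {
	let collector = VoteCollector::new();
	random_vote(&collector, H520::random(), 3, 0, Step::Prevote, Some("0".sha3()));
	let marker = ConsensusMessage {
		signature: H520::default(),
		height: 2,
		round: 0,
		step: Step::Propose,
		block_hash: None
	};
	collector.throw_out_old(&marker);
	let old = ConsensusMessage {
		signature: H520::default(),
		height: 1,
		round: 0,
		step: Step::Propose,
		block_hash: None
	};
	assert!(collector.is_old_or_known(&old));
	// Only the height 3 prevote survives the cleanup.
	assert_eq!(collector.votes.read().len(), 1);
}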
}

View File

@ -24,6 +24,7 @@ use client::Error as ClientError;
use ipc::binary::{BinaryConvertError, BinaryConvertable};
use types::block_import_error::BlockImportError;
use snapshot::Error as SnapshotError;
use engines::EngineError;
use ethkey::Error as EthkeyError;
pub use types::executed::{ExecutionError, CallError};
@ -167,8 +168,6 @@ pub enum BlockError {
UnknownParent(H256),
/// Uncle parent given is unknown.
UnknownUncleParent(H256),
/// The same author issued different votes at the same step.
DoubleVote(H160),
}
impl fmt::Display for BlockError {
@ -202,7 +201,6 @@ impl fmt::Display for BlockError {
RidiculousNumber(ref oob) => format!("Implausible block number. {}", oob),
UnknownParent(ref hash) => format!("Unknown parent: {}", hash),
UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash),
DoubleVote(ref address) => format!("Author {} issued too many blocks.", address),
};
f.write_fmt(format_args!("Block error ({})", msg))
@ -263,6 +261,8 @@ pub enum Error {
Snappy(::util::snappy::InvalidInput),
/// Snapshot error.
Snapshot(SnapshotError),
/// Consensus vote error.
Engine(EngineError),
/// Ethkey error.
Ethkey(EthkeyError),
}
@ -285,6 +285,7 @@ impl fmt::Display for Error {
Error::StdIo(ref err) => err.fmt(f),
Error::Snappy(ref err) => err.fmt(f),
Error::Snapshot(ref err) => err.fmt(f),
Error::Engine(ref err) => err.fmt(f),
Error::Ethkey(ref err) => err.fmt(f),
}
}
@ -383,6 +384,12 @@ impl From<SnapshotError> for Error {
}
}
impl From<EngineError> for Error {
fn from(err: EngineError) -> Error {
Error::Engine(err)
}
}
impl From<EthkeyError> for Error {
fn from(err: EthkeyError) -> Error {
Error::Ethkey(err)

View File

@ -26,12 +26,12 @@ use state::{State, CleanupMode};
use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockId, CallAnalytics, TransactionId};
use client::TransactionImportResult;
use executive::contract_address;
use block::{ClosedBlock, SealedBlock, IsBlock, Block};
use block::{ClosedBlock, IsBlock, Block};
use error::*;
use transaction::{Action, SignedTransaction};
use receipt::{Receipt, RichReceipt};
use spec::Spec;
use engines::Engine;
use engines::{Engine, Seal};
use miner::{MinerService, MinerStatus, TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin};
use miner::banning_queue::{BanningTransactionQueue, Threshold};
use miner::work_notify::WorkPoster;
@ -466,34 +466,43 @@ impl Miner {
}
}
/// Attempts to perform internal sealing (one that does not require work) and return Ok(sealed);
/// Err(Some(block)) is returned for unsuccessful sealing, while Err(None) indicates a misspecified engine.
fn seal_block_internally(&self, block: ClosedBlock) -> Result<SealedBlock, Option<ClosedBlock>> {
trace!(target: "miner", "seal_block_internally: attempting internal seal.");
let s = self.engine.generate_seal(block.block());
if let Some(seal) = s {
trace!(target: "miner", "seal_block_internally: managed internal seal. importing...");
block.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| {
warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal: {}", e);
Err(None)
})
} else {
trace!(target: "miner", "seal_block_internally: unable to generate seal internally");
Err(Some(block))
}
}
/// Uses Engine to seal the block internally and then imports it to chain.
/// Attempts to perform internal sealing (one that does not require work) and handles the result depending on the type of Seal.
fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool {
if !block.transactions().is_empty() || self.forced_sealing() {
if let Ok(sealed) = self.seal_block_internally(block) {
if chain.import_sealed_block(sealed).is_ok() {
trace!(target: "miner", "import_block_internally: imported internally sealed block");
return true
}
trace!(target: "miner", "seal_block_internally: attempting internal seal.");
match self.engine.generate_seal(block.block()) {
// Save proposal for later seal submission and broadcast it.
Seal::Proposal(seal) => {
trace!(target: "miner", "Received a Proposal seal.");
{
let mut sealing_work = self.sealing_work.lock();
sealing_work.queue.push(block.clone());
sealing_work.queue.use_last_ref();
}
block
.lock()
.seal(&*self.engine, seal)
.map(|sealed| { chain.broadcast_proposal_block(sealed); true })
.unwrap_or_else(|e| {
warn!("ERROR: seal failed when given internally generated seal: {}", e);
false
})
},
// Directly import a regular sealed block.
Seal::Regular(seal) =>
block
.lock()
.seal(&*self.engine, seal)
.map(|sealed| chain.import_sealed_block(sealed).is_ok())
.unwrap_or_else(|e| {
warn!("ERROR: seal failed when given internally generated seal: {}", e);
false
}),
Seal::None => false,
}
} else {
false
}
false
}
/// Prepares work which has to be done to seal.
@ -1024,7 +1033,6 @@ impl MinerService for Miner {
self.transaction_queue.lock().last_nonce(address)
}
/// Update sealing if required.
/// Prepare the block and work if the Engine does not seal internally.
fn update_sealing(&self, chain: &MiningBlockChainClient) {
@ -1039,7 +1047,9 @@ impl MinerService for Miner {
let (block, original_work_hash) = self.prepare_block(chain);
if self.seals_internally {
trace!(target: "miner", "update_sealing: engine indicates internal sealing");
self.seal_and_import_block_internally(chain, block);
if self.seal_and_import_block_internally(chain, block) {
trace!(target: "miner", "update_sealing: imported internally sealed block");
}
} else {
trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work");
self.prepare_work(block, original_work_hash);

View File

@ -20,7 +20,7 @@ use util::*;
use io::*;
use spec::Spec;
use error::*;
use client::{Client, ClientConfig, ChainNotify};
use client::{Client, BlockChainClient, MiningBlockChainClient, ClientConfig, ChainNotify};
use miner::Miner;
use snapshot::ManifestData;
use snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams};
@ -28,11 +28,9 @@ use std::sync::atomic::AtomicBool;
#[cfg(feature="ipc")]
use nanoipc;
#[cfg(feature="ipc")]
use client::BlockChainClient;
/// Message type for external and internal events
#[derive(Clone)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum ClientIoMessage {
/// Best Block Hash in chain has been changed
NewChainHead,
@ -50,6 +48,12 @@ pub enum ClientIoMessage {
TakeSnapshot(u64),
/// Trigger sealing update (useful for internal sealing).
UpdateSealing,
/// Submit seal (useful for internal sealing).
SubmitSeal(H256, Vec<Bytes>),
/// Broadcast a message to the network.
BroadcastMessage(Bytes),
/// New consensus message received.
NewMessage(Bytes)
}
/// Client service setup. Creates and registers client and network services with the IO subsystem.
@ -77,9 +81,6 @@ impl ClientService {
panic_handler.forward_from(&io_service);
info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name()));
if spec.fork_name.is_some() {
warn!("Your chain is an alternative fork. {}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!"));
}
let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS);
@ -220,9 +221,11 @@ impl IoHandler<ClientIoMessage> for ClientIoHandler {
debug!(target: "snapshot", "Failed to initialize periodic snapshot thread: {:?}", e);
}
},
ClientIoMessage::UpdateSealing => {
trace!(target: "authorityround", "message: UpdateSealing");
self.client.update_sealing()
ClientIoMessage::UpdateSealing => self.client.update_sealing(),
ClientIoMessage::SubmitSeal(ref hash, ref seal) => self.client.submit_seal(*hash, seal.clone()),
ClientIoMessage::BroadcastMessage(ref message) => self.client.broadcast_consensus_message(message.clone()),
ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) {
trace!(target: "poa", "Invalid message received: {}", e);
},
_ => {} // ignore other messages
}

View File

@ -23,7 +23,7 @@ use service::ClientIoMessage;
use views::HeaderView;
use io::IoChannel;
use util::hash::H256;
use util::{H256, Bytes};
use std::sync::Arc;
@ -107,6 +107,7 @@ impl ChainNotify for Watcher {
_: Vec<H256>,
_: Vec<H256>,
_: Vec<H256>,
_: Vec<Bytes>,
_duration: u64)
{
if self.oracle.is_major_importing() { return }
@ -174,6 +175,7 @@ mod tests {
vec![],
vec![],
vec![],
vec![],
0,
);
}

View File

@ -17,7 +17,7 @@
//! Spec seal.
use rlp::*;
use util::hash::{H64, H256};
use util::hash::{H64, H256, H520};
use ethjson;
/// Classic ethereum seal.
@ -32,23 +32,55 @@ impl Into<Generic> for Ethereum {
fn into(self) -> Generic {
let mut s = RlpStream::new_list(2);
s.append(&self.mix_hash).append(&self.nonce);
Generic {
rlp: s.out()
}
Generic(s.out())
}
}
/// Generic seal.
pub struct Generic {
/// Seal rlp.
pub rlp: Vec<u8>,
/// AuthorityRound seal.
pub struct AuthorityRound {
/// Seal step.
pub step: usize,
/// Seal signature.
pub signature: H520,
}
/// Tendermint seal.
pub struct Tendermint {
/// Seal round.
pub round: usize,
/// Proposal seal signature.
pub proposal: H520,
/// Precommit seal signatures.
pub precommits: Vec<H520>,
}
impl Into<Generic> for AuthorityRound {
fn into(self) -> Generic {
let mut s = RlpStream::new_list(2);
s.append(&self.step).append(&self.signature);
Generic(s.out())
}
}
impl Into<Generic> for Tendermint {
fn into(self) -> Generic {
let mut s = RlpStream::new_list(3);
s.append(&self.round).append(&self.proposal).append(&self.precommits);
Generic(s.out())
}
}
pub struct Generic(pub Vec<u8>);
/// Genesis seal type.
pub enum Seal {
/// Classic ethereum seal.
Ethereum(Ethereum),
/// Generic seal.
/// AuthorityRound seal.
AuthorityRound(AuthorityRound),
/// Tendermint seal.
Tendermint(Tendermint),
/// Generic RLP seal.
Generic(Generic),
}
@ -59,9 +91,16 @@ impl From<ethjson::spec::Seal> for Seal {
nonce: eth.nonce.into(),
mix_hash: eth.mix_hash.into()
}),
ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic {
rlp: g.rlp.into()
})
ethjson::spec::Seal::AuthorityRound(ar) => Seal::AuthorityRound(AuthorityRound {
step: ar.step.into(),
signature: ar.signature.into()
}),
ethjson::spec::Seal::Tendermint(tender) => Seal::Tendermint(Tendermint {
round: tender.round.into(),
proposal: tender.proposal.into(),
precommits: tender.precommits.into_iter().map(Into::into).collect()
}),
ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic(g.into())),
}
}
}
@ -70,7 +109,9 @@ impl Into<Generic> for Seal {
fn into(self) -> Generic {
match self {
Seal::Generic(generic) => generic,
Seal::Ethereum(eth) => eth.into()
Seal::Ethereum(eth) => eth.into(),
Seal::AuthorityRound(ar) => ar.into(),
Seal::Tendermint(tender) => tender.into(),
}
}
}
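
Every seal variant above is funnelled into the `Generic(Vec<u8>)` newtype through `Into<Generic>`. The following self-contained sketch mirrors that shape; a toy length-prefixed `append` stands in for the real `RlpStream`, and `[u8; 65]` stands in for `H520`.

use std::mem::size_of;

/// Stand-in for the RLP-encoded seal payload.
pub struct Generic(pub Vec<u8>);

pub struct AuthorityRound {
    pub step: usize,
    pub signature: [u8; 65], // stands in for H520
}

pub struct Tendermint {
    pub round: usize,
    pub proposal: [u8; 65],
    pub precommits: Vec<[u8; 65]>,
}

// Toy length-prefixed encoder; the real code builds the payload with
// RlpStream::new_list(..).append(..).out().
fn append(out: &mut Vec<u8>, field: &[u8]) {
    out.push(field.len() as u8);
    out.extend_from_slice(field);
}

impl Into<Generic> for AuthorityRound {
    fn into(self) -> Generic {
        let mut out = Vec::new();
        append(&mut out, &self.step.to_be_bytes());
        append(&mut out, &self.signature);
        Generic(out)
    }
}

impl Into<Generic> for Tendermint {
    fn into(self) -> Generic {
        let mut out = Vec::new();
        append(&mut out, &self.round.to_be_bytes());
        append(&mut out, &self.proposal);
        for precommit in &self.precommits {
            append(&mut out, precommit);
        }
        Generic(out)
    }
}

pub enum Seal {
    AuthorityRound(AuthorityRound),
    Tendermint(Tendermint),
    Generic(Generic),
}

impl Into<Generic> for Seal {
    fn into(self) -> Generic {
        match self {
            Seal::Generic(generic) => generic,
            Seal::AuthorityRound(ar) => ar.into(),
            Seal::Tendermint(tender) => tender.into(),
        }
    }
}

fn main() {
    let seal = Seal::AuthorityRound(AuthorityRound { step: 0, signature: [0u8; 65] });
    let generic: Generic = seal.into();
    assert_eq!(generic.0.len(), 2 + size_of::<usize>() + 65);
}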

View File

@ -18,7 +18,7 @@
use util::*;
use builtin::Builtin;
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound};
use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint};
use pod_state::*;
use account_db::*;
use header::{BlockNumber, Header};
@ -66,8 +66,8 @@ pub struct Spec {
pub name: String,
/// What engine are we using for this?
pub engine: Arc<Engine>,
/// The fork identifier for this chain. Only needed to distinguish two chains sharing the same genesis.
pub fork_name: Option<String>,
/// Name of the subdir inside the main data dir to use for chain data and settings.
pub data_dir: String,
/// Known nodes on the network in enode format.
pub nodes: Vec<String>,
@ -107,13 +107,13 @@ impl From<ethjson::spec::Spec> for Spec {
fn from(s: ethjson::spec::Spec) -> Self {
let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect();
let g = Genesis::from(s.genesis);
let seal: GenericSeal = g.seal.into();
let GenericSeal(seal_rlp) = g.seal.into();
let params = CommonParams::from(s.params);
Spec {
name: s.name.into(),
name: s.name.clone().into(),
params: params.clone(),
engine: Spec::engine(s.engine, params, builtins),
fork_name: s.fork_name.map(Into::into),
data_dir: s.data_dir.unwrap_or(s.name).into(),
nodes: s.nodes.unwrap_or_else(Vec::new),
parent_hash: g.parent_hash,
transactions_root: g.transactions_root,
@ -124,7 +124,7 @@ impl From<ethjson::spec::Spec> for Spec {
gas_used: g.gas_used,
timestamp: g.timestamp,
extra_data: g.extra_data,
seal_rlp: seal.rlp,
seal_rlp: seal_rlp,
state_root_memo: RwLock::new(g.state_root),
genesis_state: From::from(s.accounts),
}
@ -146,7 +146,8 @@ impl Spec {
ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(params, builtins)),
ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)),
ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)),
ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Consensus engine could not be started."),
ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."),
ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."),
}
}
@ -208,7 +209,7 @@ impl Spec {
/// Overwrite the genesis components.
pub fn overwrite_genesis_params(&mut self, g: Genesis) {
let seal: GenericSeal = g.seal.into();
let GenericSeal(seal_rlp) = g.seal.into();
self.parent_hash = g.parent_hash;
self.transactions_root = g.transactions_root;
self.receipts_root = g.receipts_root;
@ -218,7 +219,7 @@ impl Spec {
self.gas_used = g.gas_used;
self.timestamp = g.timestamp;
self.extra_data = g.extra_data;
self.seal_rlp = seal.rlp;
self.seal_rlp = seal_rlp;
self.state_root_memo = RwLock::new(g.state_root);
}
@ -275,6 +276,10 @@ impl Spec {
/// Create a new Spec with AuthorityRound consensus which does internal sealing (not requiring work).
/// Accounts with secrets "0".sha3() and "1".sha3() are the authorities.
pub fn new_test_round() -> Self { load_bundled!("authority_round") }
/// Create a new Spec with Tendermint consensus which does internal sealing (not requiring work).
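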
/// Account "0".sha3() and "1".sha3() are a authorities.
pub fn new_test_tendermint() -> Self { load_bundled!("tendermint") }
}
#[cfg(test)]

View File

@ -457,7 +457,6 @@ impl StateDB {
#[cfg(test)]
mod tests {
use util::{U256, H256, FixedHash, Address, DBTransaction};
use tests::helpers::*;
use state::Account;
@ -531,4 +530,3 @@ mod tests {
assert!(s.get_cached_account(&address).is_none());
}
}

View File

@ -18,6 +18,7 @@ use std::ops::{Deref, DerefMut};
use std::cmp::PartialEq;
use std::{mem, fmt};
use std::str::FromStr;
use std::hash::{Hash, Hasher};
use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError};
use secp256k1::key::{SecretKey, PublicKey};
use rustc_serialize::hex::{ToHex, FromHex};
@ -116,6 +117,18 @@ impl Default for Signature {
}
}
impl Hash for Signature {
fn hash<H: Hasher>(&self, state: &mut H) {
H520::from(self.0).hash(state);
}
}
impl Clone for Signature {
fn clone(&self) -> Self {
Signature(self.0)
}
}
impl From<[u8; 65]> for Signature {
fn from(s: [u8; 65]) -> Self {
Signature(s)
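
The hand-written `Hash` and `Clone` impls are needed because `Signature` wraps a 65-byte array, and the standard library's trait impls for fixed-size arrays stopped at 32 elements at the time. A minimal sketch of the same workaround, hashing through the byte slice instead of an `H520` conversion:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// 65-byte r || s || v signature, as in the surrounding code.
pub struct Signature([u8; 65]);

// [u8; 65] is Copy, so a field copy is enough; #[derive(Clone)] was not
// available for arrays longer than 32 elements when this was written.
impl Clone for Signature {
    fn clone(&self) -> Self {
        Signature(self.0)
    }
}

// Hash the bytes as a slice; the real impl converts to H520 first.
impl Hash for Signature {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0[..].hash(state);
    }
}

fn main() {
    let sig = Signature([0u8; 65]);
    let copy = sig.clone();
    let mut hasher = DefaultHasher::new();
    copy.hash(&mut hasher);
    println!("hash = {:x}", hasher.finish());
}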

View File

@ -18,7 +18,6 @@ use std::{fs, io};
use std::path::{PathBuf, Path};
use std::collections::HashMap;
use time;
use ethkey::Address;
use {json, SafeAccount, Error};
use json::Uuid;
use super::KeyDirectory;
@ -106,6 +105,11 @@ impl KeyDirectory for DiskDirectory {
Ok(accounts)
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
// Disk store handles updates correctly iff filename is the same
self.insert(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
// transform account into key file
let keyfile: json::KeyFile = account.clone().into();
@ -138,12 +142,12 @@ impl KeyDirectory for DiskDirectory {
Ok(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
// enumerate all entries in keystore
// and find entry with given address
let to_remove = try!(self.files())
.into_iter()
.find(|&(_, ref account)| &account.address == address);
.find(|&(_, ref acc)| acc == account);
// remove it
match to_remove {

View File

@ -16,7 +16,6 @@
use std::env;
use std::path::PathBuf;
use ethkey::Address;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
@ -89,7 +88,11 @@ impl KeyDirectory for GethDirectory {
self.dir.insert(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}

View File

@ -0,0 +1,67 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::collections::HashMap;
use parking_lot::RwLock;
use itertools::Itertools;
use ethkey::Address;
use {SafeAccount, Error};
use super::KeyDirectory;
#[derive(Default)]
pub struct MemoryDirectory {
accounts: RwLock<HashMap<Address, Vec<SafeAccount>>>,
}
impl KeyDirectory for MemoryDirectory {
fn load(&self) -> Result<Vec<SafeAccount>, Error> {
Ok(self.accounts.read().values().cloned().flatten().collect())
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let mut accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
// If the filename is the same we just need to replace the entry
accounts.retain(|acc| acc.filename != account.filename);
accounts.push(account.clone());
Ok(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
let mut lock = self.accounts.write();
let mut accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new);
accounts.push(account.clone());
Ok(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
let mut accounts = self.accounts.write();
let is_empty = if let Some(mut accounts) = accounts.get_mut(&account.address) {
if let Some(position) = accounts.iter().position(|acc| acc == account) {
accounts.remove(position);
}
accounts.is_empty()
} else {
false
};
if is_empty {
accounts.remove(&account.address);
}
Ok(())
}
}

View File

@ -14,12 +14,12 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use ethkey::Address;
use std::path::{PathBuf};
use {SafeAccount, Error};
mod disk;
mod geth;
mod memory;
mod parity;
pub enum DirectoryType {
@ -30,10 +30,12 @@ pub enum DirectoryType {
pub trait KeyDirectory: Send + Sync {
fn load(&self) -> Result<Vec<SafeAccount>, Error>;
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
fn remove(&self, address: &Address) -> Result<(), Error>;
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error>;
fn remove(&self, account: &SafeAccount) -> Result<(), Error>;
fn path(&self) -> Option<&PathBuf> { None }
}
pub use self::disk::DiskDirectory;
pub use self::geth::GethDirectory;
pub use self::memory::MemoryDirectory;
pub use self::parity::ParityDirectory;
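
The trait now distinguishes `insert` (always add a new entry) from `update` (replace the entry that already represents this account), and `remove` takes the full account rather than just an address. A stripped-down sketch of that contract with stand-in types; `Account` and `Memory` are illustrative, not the real `SafeAccount` and `MemoryDirectory`.

use std::collections::HashMap;
use std::sync::RwLock;

// Simplified stand-in for SafeAccount: identified by address plus filename.
#[derive(Clone, PartialEq)]
struct Account {
    address: u64,
    filename: String,
    meta: String,
}

// Minimal shape of the KeyDirectory trait after this change.
trait Directory {
    fn load(&self) -> Vec<Account>;
    fn insert(&self, account: Account) -> Account;
    fn update(&self, account: Account) -> Account;
    fn remove(&self, account: &Account);
}

#[derive(Default)]
struct Memory(RwLock<HashMap<String, Account>>); // keyed by filename

impl Directory for Memory {
    fn load(&self) -> Vec<Account> {
        self.0.read().unwrap().values().cloned().collect()
    }
    fn insert(&self, account: Account) -> Account {
        self.0.write().unwrap().insert(account.filename.clone(), account.clone());
        account
    }
    // Same as insert here; a disk-backed store would overwrite the file
    // with the same name instead of creating a new one.
    fn update(&self, account: Account) -> Account {
        self.insert(account)
    }
    fn remove(&self, account: &Account) {
        self.0.write().unwrap().retain(|_, a| a != account);
    }
}

fn main() {
    let dir = Memory::default();
    let acc = dir.insert(Account { address: 1, filename: "k1".into(), meta: "{}".into() });
    dir.update(Account { meta: "renamed".into(), ..acc.clone() });
    assert_eq!(dir.load().len(), 1);
    dir.remove(&dir.load()[0]);
    assert!(dir.load().is_empty());
}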

View File

@ -16,7 +16,6 @@
use std::env;
use std::path::PathBuf;
use ethkey::Address;
use {SafeAccount, Error};
use super::{KeyDirectory, DiskDirectory, DirectoryType};
@ -68,7 +67,11 @@ impl KeyDirectory for ParityDirectory {
self.dir.insert(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}

View File

@ -16,23 +16,19 @@
use std::collections::BTreeMap;
use std::mem;
use ethkey::KeyPair;
use parking_lot::RwLock;
use crypto::KEY_ITERATIONS;
use random::Random;
use ethkey::{Signature, Address, Message, Secret, Public};
use ethkey::{Signature, Address, Message, Secret, Public, KeyPair};
use dir::KeyDirectory;
use account::SafeAccount;
use {Error, SecretStore};
use json;
use json::Uuid;
use parking_lot::RwLock;
use presale::PresaleWallet;
use import;
use json::{self, Uuid};
use {import, Error, SimpleSecretStore, SecretStore};
pub struct EthStore {
dir: Box<KeyDirectory>,
iterations: u32,
cache: RwLock<BTreeMap<Address, SafeAccount>>,
store: EthMultiStore,
}
impl EthStore {
@ -41,57 +37,46 @@ impl EthStore {
}
pub fn open_with_iterations(directory: Box<KeyDirectory>, iterations: u32) -> Result<Self, Error> {
let accounts = try!(directory.load());
let cache = accounts.into_iter().map(|account| (account.address.clone(), account)).collect();
let store = EthStore {
dir: directory,
iterations: iterations,
cache: RwLock::new(cache),
};
Ok(store)
}
fn save(&self, account: SafeAccount) -> Result<(), Error> {
// save to file
let account = try!(self.dir.insert(account.clone()));
// update cache
let mut cache = self.cache.write();
cache.insert(account.address.clone(), account);
Ok(())
}
fn reload_accounts(&self) -> Result<(), Error> {
let mut cache = self.cache.write();
let accounts = try!(self.dir.load());
let new_accounts: BTreeMap<_, _> = accounts.into_iter().map(|account| (account.address.clone(), account)).collect();
mem::replace(&mut *cache, new_accounts);
Ok(())
Ok(EthStore {
store: try!(EthMultiStore::open_with_iterations(directory, iterations)),
})
}
fn get(&self, address: &Address) -> Result<SafeAccount, Error> {
{
let cache = self.cache.read();
if let Some(account) = cache.get(address) {
return Ok(account.clone())
}
}
try!(self.reload_accounts());
let cache = self.cache.read();
cache.get(address).cloned().ok_or(Error::InvalidAccount)
let mut accounts = try!(self.store.get(address)).into_iter();
accounts.next().ok_or(Error::InvalidAccount)
}
}
impl SimpleSecretStore for EthStore {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error> {
self.store.insert_account(secret, password)
}
fn accounts(&self) -> Result<Vec<Address>, Error> {
self.store.accounts()
}
fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> {
self.store.change_password(address, old_password, new_password)
}
fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> {
self.store.remove_account(address, password)
}
fn sign(&self, address: &Address, password: &str, message: &Message) -> Result<Signature, Error> {
let account = try!(self.get(address));
account.sign(password, message)
}
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let account = try!(self.get(account));
account.decrypt(password, shared_mac, message)
}
}
impl SecretStore for EthStore {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error> {
let keypair = try!(KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed));
let id: [u8; 16] = Random::random();
let account = SafeAccount::create(&keypair, id, password, self.iterations, "".to_owned(), "{}".to_owned());
let address = account.address.clone();
try!(self.save(account));
Ok(address)
}
fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error> {
let json_wallet = try!(json::PresaleWallet::load(json).map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned())));
let wallet = PresaleWallet::from(json_wallet);
@ -105,48 +90,20 @@ impl SecretStore for EthStore {
let secret = try!(safe_account.crypto.secret(password).map_err(|_| Error::InvalidPassword));
safe_account.address = try!(KeyPair::from_secret(secret)).address();
let address = safe_account.address.clone();
try!(self.save(safe_account));
try!(self.store.import(safe_account));
Ok(address)
}
fn accounts(&self) -> Result<Vec<Address>, Error> {
try!(self.reload_accounts());
Ok(self.cache.read().keys().cloned().collect())
}
fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> {
// change password
fn test_password(&self, address: &Address, password: &str) -> Result<bool, Error> {
let account = try!(self.get(address));
let account = try!(account.change_password(old_password, new_password, self.iterations));
// save to file
self.save(account)
Ok(account.check_password(password))
}
fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> {
let can_remove = {
let account = try!(self.get(address));
account.check_password(password)
};
if can_remove {
try!(self.dir.remove(address));
let mut cache = self.cache.write();
cache.remove(address);
Ok(())
} else {
Err(Error::InvalidPassword)
}
}
fn sign(&self, address: &Address, password: &str, message: &Message) -> Result<Signature, Error> {
fn copy_account(&self, new_store: &SimpleSecretStore, address: &Address, password: &str, new_password: &str) -> Result<(), Error> {
let account = try!(self.get(address));
account.sign(password, message)
}
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let account = try!(self.get(account));
account.decrypt(password, shared_mac, message)
let secret = try!(account.crypto.secret(password));
try!(new_store.insert_account(secret, new_password));
Ok(())
}
fn public(&self, account: &Address, password: &str) -> Result<Public, Error> {
@ -170,23 +127,25 @@ impl SecretStore for EthStore {
}
fn set_name(&self, address: &Address, name: String) -> Result<(), Error> {
let mut account = try!(self.get(address));
let old = try!(self.get(address));
let mut account = old.clone();
account.name = name;
// save to file
self.save(account)
self.store.update(old, account)
}
fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error> {
let mut account = try!(self.get(address));
let old = try!(self.get(address));
let mut account = old.clone();
account.meta = meta;
// save to file
self.save(account)
self.store.update(old, account)
}
fn local_path(&self) -> String {
self.dir.path().map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|| String::new())
self.store.dir.path().map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|| String::new())
}
fn list_geth_accounts(&self, testnet: bool) -> Vec<Address> {
@ -194,6 +153,288 @@ impl SecretStore for EthStore {
}
fn import_geth_accounts(&self, desired: Vec<Address>, testnet: bool) -> Result<Vec<Address>, Error> {
import::import_geth_accounts(&*self.dir, desired.into_iter().collect(), testnet)
import::import_geth_accounts(&*self.store.dir, desired.into_iter().collect(), testnet)
}
}
/// Similar to `EthStore` but may store many accounts (with different passwords) for the same `Address`
pub struct EthMultiStore {
dir: Box<KeyDirectory>,
iterations: u32,
cache: RwLock<BTreeMap<Address, Vec<SafeAccount>>>,
}
impl EthMultiStore {
pub fn open(directory: Box<KeyDirectory>) -> Result<Self, Error> {
Self::open_with_iterations(directory, KEY_ITERATIONS as u32)
}
pub fn open_with_iterations(directory: Box<KeyDirectory>, iterations: u32) -> Result<Self, Error> {
let store = EthMultiStore {
dir: directory,
iterations: iterations,
cache: Default::default(),
};
try!(store.reload_accounts());
Ok(store)
}
fn reload_accounts(&self) -> Result<(), Error> {
let mut cache = self.cache.write();
let accounts = try!(self.dir.load());
let mut new_accounts = BTreeMap::new();
for account in accounts {
let mut entry = new_accounts.entry(account.address.clone()).or_insert_with(Vec::new);
entry.push(account);
}
mem::replace(&mut *cache, new_accounts);
Ok(())
}
fn get(&self, address: &Address) -> Result<Vec<SafeAccount>, Error> {
{
let cache = self.cache.read();
if let Some(accounts) = cache.get(address) {
if !accounts.is_empty() {
return Ok(accounts.clone())
}
}
}
try!(self.reload_accounts());
let cache = self.cache.read();
let accounts = try!(cache.get(address).cloned().ok_or(Error::InvalidAccount));
if accounts.is_empty() {
Err(Error::InvalidAccount)
} else {
Ok(accounts)
}
}
fn import(&self, account: SafeAccount) -> Result<(), Error> {
// save to file
let account = try!(self.dir.insert(account));
// update cache
let mut cache = self.cache.write();
let mut accounts = cache.entry(account.address.clone()).or_insert_with(Vec::new);
accounts.push(account);
Ok(())
}
fn update(&self, old: SafeAccount, new: SafeAccount) -> Result<(), Error> {
// save to file
let account = try!(self.dir.update(new));
// update cache
let mut cache = self.cache.write();
let mut accounts = cache.entry(account.address.clone()).or_insert_with(Vec::new);
// Remove old account
accounts.retain(|acc| acc != &old);
// And push updated to the end
accounts.push(account);
Ok(())
}
}
impl SimpleSecretStore for EthMultiStore {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error> {
let keypair = try!(KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed));
let id: [u8; 16] = Random::random();
let account = SafeAccount::create(&keypair, id, password, self.iterations, "".to_owned(), "{}".to_owned());
let address = account.address.clone();
try!(self.import(account));
Ok(address)
}
fn accounts(&self) -> Result<Vec<Address>, Error> {
try!(self.reload_accounts());
Ok(self.cache.read().keys().cloned().collect())
}
fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> {
let accounts = try!(self.get(address));
for account in accounts {
// Skip if password is invalid
if !account.check_password(password) {
continue;
}
// Remove from dir
try!(self.dir.remove(&account));
// Remove from cache
let mut cache = self.cache.write();
let is_empty = {
let mut accounts = cache.get_mut(address).expect("Entry exists, because it was returned by `get`; qed");
if let Some(position) = accounts.iter().position(|acc| acc == &account) {
accounts.remove(position);
}
accounts.is_empty()
};
if is_empty {
cache.remove(address);
}
return Ok(());
}
Err(Error::InvalidPassword)
}
fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> {
let accounts = try!(self.get(address));
for account in accounts {
// Change password
let new_account = try!(account.change_password(old_password, new_password, self.iterations));
try!(self.update(account, new_account));
}
Ok(())
}
fn sign(&self, address: &Address, password: &str, message: &Message) -> Result<Signature, Error> {
let accounts = try!(self.get(address));
for account in accounts {
if account.check_password(password) {
return account.sign(password, message);
}
}
Err(Error::InvalidPassword)
}
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error> {
let accounts = try!(self.get(account));
for account in accounts {
if account.check_password(password) {
return account.decrypt(password, shared_mac, message);
}
}
Err(Error::InvalidPassword)
}
}
#[cfg(test)]
mod tests {
use dir::MemoryDirectory;
use ethkey::{Random, Generator, KeyPair};
use secret_store::{SimpleSecretStore, SecretStore};
use super::{EthStore, EthMultiStore};
fn keypair() -> KeyPair {
Random.generate().unwrap()
}
fn store() -> EthStore {
EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always loads successfully; qed")
}
fn multi_store() -> EthMultiStore {
EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always loads successfully; qed")
}
#[test]
fn should_insert_account_successfully() {
// given
let store = store();
let keypair = keypair();
// when
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
// then
assert_eq!(address, keypair.address());
assert!(store.get(&address).is_ok(), "Should contain account.");
assert_eq!(store.accounts().unwrap().len(), 1, "Should have one account.");
}
#[test]
fn should_update_meta_and_name() {
// given
let store = store();
let keypair = keypair();
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
assert_eq!(&store.meta(&address).unwrap(), "{}");
assert_eq!(&store.name(&address).unwrap(), "");
// when
store.set_meta(&address, "meta".into()).unwrap();
store.set_name(&address, "name".into()).unwrap();
// then
assert_eq!(&store.meta(&address).unwrap(), "meta");
assert_eq!(&store.name(&address).unwrap(), "name");
assert_eq!(store.accounts().unwrap().len(), 1);
}
#[test]
fn should_remove_account() {
// given
let store = store();
let keypair = keypair();
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
// when
store.remove_account(&address, "test").unwrap();
// then
assert_eq!(store.accounts().unwrap().len(), 0, "Should remove account.");
}
#[test]
fn should_return_true_if_password_is_correct() {
// given
let store = store();
let keypair = keypair();
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
// when
let res1 = store.test_password(&address, "x").unwrap();
let res2 = store.test_password(&address, "test").unwrap();
assert!(!res1, "First password should be invalid.");
assert!(res2, "Second password should be correct.");
}
#[test]
fn multistore_should_be_able_to_have_the_same_account_twice() {
// given
let store = multi_store();
let keypair = keypair();
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
let address2 = store.insert_account(keypair.secret().clone(), "xyz").unwrap();
assert_eq!(address, address2);
// when
assert!(store.remove_account(&address, "test").is_ok(), "First password should work.");
assert_eq!(store.accounts().unwrap().len(), 1);
assert!(store.remove_account(&address, "xyz").is_ok(), "Second password should work too.");
assert_eq!(store.accounts().unwrap().len(), 0);
}
#[test]
fn should_copy_account() {
// given
let store = store();
let multi_store = multi_store();
let keypair = keypair();
let address = store.insert_account(keypair.secret().clone(), "test").unwrap();
assert_eq!(multi_store.accounts().unwrap().len(), 0);
// when
store.copy_account(&multi_store, &address, "test", "xyz").unwrap();
// then
assert!(store.test_password(&address, "test").unwrap(), "First password should work for store.");
assert!(multi_store.sign(&address, "xyz", &Default::default()).is_ok(), "Second password should work for second store.");
assert_eq!(multi_store.accounts().unwrap().len(), 1);
}
}
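
`EthMultiStore::get` above is a read-through cache: answer from the in-memory map when it has a non-empty entry, otherwise reload from the backing directory and retry. A reduced sketch of that pattern using only std types, with a loader closure standing in for `dir.load()`:

use std::collections::BTreeMap;
use std::sync::RwLock;

struct Cache<F: Fn() -> BTreeMap<u64, Vec<String>>> {
    entries: RwLock<BTreeMap<u64, Vec<String>>>,
    reload: F, // stands in for KeyDirectory::load()
}

impl<F: Fn() -> BTreeMap<u64, Vec<String>>> Cache<F> {
    fn get(&self, key: u64) -> Option<Vec<String>> {
        // Fast path: answer from the cache if it has a non-empty entry.
        {
            let cache = self.entries.read().unwrap();
            if let Some(values) = cache.get(&key) {
                if !values.is_empty() {
                    return Some(values.clone());
                }
            }
        }
        // Slow path: rebuild the cache from the backing store and retry.
        let fresh = (self.reload)();
        *self.entries.write().unwrap() = fresh;
        self.entries.read().unwrap().get(&key).cloned()
    }
}

fn main() {
    let cache = Cache {
        entries: RwLock::new(BTreeMap::new()),
        reload: || {
            let mut m = BTreeMap::new();
            m.insert(7u64, vec!["account".to_string()]);
            m
        },
    };
    // First call misses the cache, reloads, and finds the entry.
    assert_eq!(cache.get(7), Some(vec!["account".to_string()]));
    assert_eq!(cache.get(8), None);
}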

View File

@ -50,8 +50,8 @@ mod secret_store;
pub use self::account::SafeAccount;
pub use self::error::Error;
pub use self::ethstore::EthStore;
pub use self::ethstore::{EthStore, EthMultiStore};
pub use self::import::{import_accounts, read_geth_accounts};
pub use self::presale::PresaleWallet;
pub use self::secret_store::SecretStore;
pub use self::random::random_phrase;
pub use self::secret_store::{SimpleSecretStore, SecretStore};
pub use self::random::{random_phrase, random_string};

View File

@ -51,10 +51,16 @@ pub fn random_phrase(words: usize) -> String {
.map(|s| s.to_owned())
.collect();
}
let mut rng = OsRng::new().unwrap();
let mut rng = OsRng::new().expect("Unable to operate without a random source.");
(0..words).map(|_| rng.choose(&WORDS).unwrap()).join(" ")
}
/// Generate a random string of given length.
pub fn random_string(length: usize) -> String {
let mut rng = OsRng::new().expect("Unable to operate without a random source.");
rng.gen_ascii_chars().take(length).collect()
}
#[cfg(test)]
mod tests {
use super::random_phrase;

View File

@ -18,18 +18,25 @@ use ethkey::{Address, Message, Signature, Secret, Public};
use Error;
use json::Uuid;
pub trait SecretStore: Send + Sync {
pub trait SimpleSecretStore: Send + Sync {
fn insert_account(&self, secret: Secret, password: &str) -> Result<Address, Error>;
fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>;
fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>;
fn sign(&self, account: &Address, password: &str, message: &Message) -> Result<Signature, Error>;
fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result<Vec<u8>, Error>;
fn public(&self, account: &Address, password: &str) -> Result<Public, Error>;
fn accounts(&self) -> Result<Vec<Address>, Error>;
}
pub trait SecretStore: SimpleSecretStore {
fn import_presale(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn import_wallet(&self, json: &[u8], password: &str) -> Result<Address, Error>;
fn copy_account(&self, new_store: &SimpleSecretStore, account: &Address, password: &str, new_password: &str) -> Result<(), Error>;
fn test_password(&self, account: &Address, password: &str) -> Result<bool, Error>;
fn public(&self, account: &Address, password: &str) -> Result<Public, Error>;
fn uuid(&self, account: &Address) -> Result<Uuid, Error>;
fn name(&self, account: &Address) -> Result<String, Error>;
fn meta(&self, account: &Address) -> Result<String, Error>;

View File

@ -19,7 +19,7 @@ extern crate ethstore;
mod util;
use ethstore::{SecretStore, EthStore};
use ethstore::{EthStore, SimpleSecretStore};
use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address};
use ethstore::dir::DiskDirectory;
use util::TransientDir;

View File

@ -18,7 +18,6 @@ use std::path::PathBuf;
use std::{env, fs};
use rand::{Rng, OsRng};
use ethstore::dir::{KeyDirectory, DiskDirectory};
use ethstore::ethkey::Address;
use ethstore::{Error, SafeAccount};
pub fn random_dir() -> PathBuf {
@ -64,11 +63,15 @@ impl KeyDirectory for TransientDir {
self.dir.load()
}
fn update(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.update(account)
}
fn insert(&self, account: SafeAccount) -> Result<SafeAccount, Error> {
self.dir.insert(account)
}
fn remove(&self, address: &Address) -> Result<(), Error> {
self.dir.remove(address)
fn remove(&self, account: &SafeAccount) -> Result<(), Error> {
self.dir.remove(account)
}
}

8
js/.stylelintrc.json Normal file
View File

@ -0,0 +1,8 @@
{
"extends": "stylelint-config-standard",
"rules": {
"selector-pseudo-class-no-unknown": [
true, { "ignorePseudoClasses": ["global"] }
]
}
}

View File

@ -1,6 +1,6 @@
{
"name": "parity.js",
"version": "0.2.122",
"version": "0.2.123",
"main": "release/index.js",
"jsnext:main": "src/index.js",
"author": "Parity Team <admin@parity.io>",
@ -38,8 +38,11 @@
"start:app": "node webpack/dev.server",
"clean": "rm -rf ./build ./coverage",
"coveralls": "npm run testCoverage && coveralls < coverage/lcov.info",
"lint": "eslint --ignore-path .gitignore ./src/",
"lint:cached": "eslint --cache --ignore-path .gitignore ./src/",
"lint": "npm run lint:css && npm run lint:js",
"lint:cached": "npm run lint:css && npm run lint:js:cached",
"lint:css": "stylelint ./src/**/*.css",
"lint:js": "eslint --ignore-path .gitignore ./src/",
"lint:js:cached": "eslint --cache --ignore-path .gitignore ./src/",
"test": "NODE_ENV=test mocha 'src/**/*.spec.js'",
"test:coverage": "NODE_ENV=test istanbul cover _mocha -- 'src/**/*.spec.js'",
"test:e2e": "NODE_ENV=test mocha 'src/**/*.e2e.js'",
@ -118,6 +121,8 @@
"sinon-as-promised": "4.0.2",
"sinon-chai": "2.8.0",
"style-loader": "0.13.1",
"stylelint": "7.6.0",
"stylelint-config-standard": "15.0.0",
"url-loader": "0.5.7",
"webpack": "2.1.0-beta.27",
"webpack-dev-middleware": "1.8.4",

View File

@ -18,7 +18,8 @@ import { bytesToHex, hex2Ascii } from '~/api/util/format';
import ABI from './abi/certifier.json';
const ZERO = '0x0000000000000000000000000000000000000000000000000000000000000000';
const ZERO20 = '0x0000000000000000000000000000000000000000';
const ZERO32 = '0x0000000000000000000000000000000000000000000000000000000000000000';
export default class BadgeReg {
constructor (api, registry) {
@ -26,32 +27,57 @@ export default class BadgeReg {
this._registry = registry;
registry.getContract('badgereg');
this.certifiers = {}; // by name
this.certifiers = []; // by id
this.contracts = {}; // by name
}
fetchCertifier (name) {
if (this.certifiers[name]) {
return Promise.resolve(this.certifiers[name]);
certifierCount () {
return this._registry.getContract('badgereg')
.then((badgeReg) => {
return badgeReg.instance.badgeCount.call({}, [])
.then((count) => count.valueOf());
});
}
fetchCertifier (id) {
if (this.certifiers[id]) {
return Promise.resolve(this.certifiers[id]);
}
return this._registry.getContract('badgereg')
.then((badgeReg) => {
return badgeReg.instance.fromName.call({}, [name])
.then(([ id, address ]) => {
return Promise.all([
badgeReg.instance.meta.call({}, [id, 'TITLE']),
badgeReg.instance.meta.call({}, [id, 'IMG'])
])
.then(([ title, img ]) => {
title = bytesToHex(title);
title = title === ZERO ? null : hex2Ascii(title);
if (bytesToHex(img) === ZERO) img = null;
return badgeReg.instance.badge.call({}, [ id ]);
})
.then(([ address, name ]) => {
if (address === ZERO20) {
throw new Error(`Certifier ${id} does not exist.`);
}
const data = { address, name, title, icon: img };
this.certifiers[name] = data;
return data;
});
});
name = bytesToHex(name);
name = name === ZERO32
? null
: hex2Ascii(name);
return this.fetchMeta(id)
.then(({ title, icon }) => {
const data = { address, id, name, title, icon };
this.certifiers[id] = data;
return data;
});
});
}
fetchMeta (id) {
return this._registry.getContract('badgereg')
.then((badgeReg) => {
return Promise.all([
badgeReg.instance.meta.call({}, [id, 'TITLE']),
badgeReg.instance.meta.call({}, [id, 'IMG'])
]);
})
.then(([ title, icon ]) => {
title = bytesToHex(title);
title = title === ZERO32 ? null : hex2Ascii(title);
if (bytesToHex(icon) === ZERO32) icon = null;
return { title, icon };
});
}

18
js/src/inject.js Normal file
View File

@ -0,0 +1,18 @@
// Copyright 2015, 2016 Parity Technologies (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
import './parity';
import './web3';

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
import React, { Component, PropTypes } from 'react';
import { connect } from 'react-redux';
import ActionDone from 'material-ui/svg-icons/action/done';
import ActionDoneAll from 'material-ui/svg-icons/action/done-all';
import NavigationArrowForward from 'material-ui/svg-icons/navigation/arrow-forward';
@ -35,14 +36,15 @@ import ParityLogo from '../../../assets/images/parity-logo-black-no-text.svg';
const STAGE_NAMES = ['welcome', 'terms', 'new account', 'recovery', 'completed'];
export default class FirstRun extends Component {
class FirstRun extends Component {
static contextTypes = {
api: PropTypes.object.isRequired,
store: PropTypes.object.isRequired
}
static propTypes = {
visible: PropTypes.bool,
hasAccounts: PropTypes.bool.isRequired,
visible: PropTypes.bool.isRequired,
onClose: PropTypes.func.isRequired
}
@ -109,6 +111,7 @@ export default class FirstRun extends Component {
}
renderDialogActions () {
const { hasAccounts } = this.props;
const { canCreate, stage, hasAcceptedTnc } = this.state;
switch (stage) {
@ -130,13 +133,26 @@ export default class FirstRun extends Component {
);
case 2:
return (
const buttons = [
<Button
icon={ <ActionDone /> }
label='Create'
key='create'
disabled={ !canCreate }
onClick={ this.onCreate } />
);
onClick={ this.onCreate }
/>
];
if (hasAccounts) {
buttons.unshift(
<Button
icon={ <NavigationArrowForward /> }
label='Skip'
key='skip'
onClick={ this.skipAccountCreation }
/>
);
}
return buttons;
case 3:
return [
@ -219,6 +235,10 @@ export default class FirstRun extends Component {
});
}
skipAccountCreation = () => {
this.setState({ stage: this.state.stage + 2 });
}
newError = (error) => {
const { store } = this.context;
@ -232,3 +252,9 @@ export default class FirstRun extends Component {
print(recoveryPage({ phrase, name, identity, address, logo: ParityLogo }));
}
}
function mapStateToProps (state) {
return { hasAccounts: state.personal.hasAccounts };
}
export default connect(mapStateToProps, null)(FirstRun);

View File

@ -14,10 +14,18 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
export const fetchCertifiers = () => ({
type: 'fetchCertifiers'
});
export const fetchCertifications = (address) => ({
type: 'fetchCertifications', address
});
export const addCertification = (address, name, title, icon) => ({
type: 'addCertification', address, name, title, icon
export const addCertification = (address, id, name, title, icon) => ({
type: 'addCertification', address, id, name, title, icon
});
export const removeCertification = (address, id) => ({
type: 'removeCertification', address, id
});

View File

@ -14,38 +14,90 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
import Contracts from '~/contracts';
import { addCertification } from './actions';
import { uniq } from 'lodash';
const knownCertifiers = [ 'smsverification' ];
import ABI from '~/contracts/abi/certifier.json';
import Contract from '~/api/contract';
import Contracts from '~/contracts';
import { addCertification, removeCertification } from './actions';
export default class CertificationsMiddleware {
toMiddleware () {
return (store) => (next) => (action) => {
if (action.type !== 'fetchCertifications') {
return next(action);
}
const api = Contracts.get()._api;
const badgeReg = Contracts.get().badgeReg;
const contract = new Contract(api, ABI);
const Confirmed = contract.events.find((e) => e.name === 'Confirmed');
const Revoked = contract.events.find((e) => e.name === 'Revoked');
const { address } = action;
const badgeReg = Contracts.get().badgeReg;
let certifiers = [];
let accounts = []; // these are addresses
knownCertifiers.forEach((name) => {
badgeReg.fetchCertifier(name)
.then((cert) => {
return badgeReg.checkIfCertified(cert.address, address)
.then((isCertified) => {
if (isCertified) {
const { name, title, icon } = cert;
store.dispatch(addCertification(address, name, title, icon));
}
});
})
.catch((err) => {
if (err) {
console.error(`Failed to check if ${address} certified by ${name}:`, err);
const fetchConfirmedEvents = (dispatch) => {
if (certifiers.length === 0 || accounts.length === 0) return;
api.eth.getLogs({
fromBlock: 0,
toBlock: 'latest',
address: certifiers.map((c) => c.address),
topics: [ [ Confirmed.signature, Revoked.signature ], accounts ]
})
.then((logs) => contract.parseEventLogs(logs))
.then((logs) => {
logs.forEach((log) => {
const certifier = certifiers.find((c) => c.address === log.address);
if (!certifier) {
throw new Error(`Could not find certifier at ${log.address}.`);
}
const { id, name, title, icon } = certifier;
if (log.event === 'Revoked') {
dispatch(removeCertification(log.params.who.value, id));
} else {
dispatch(addCertification(log.params.who.value, id, name, title, icon));
}
});
});
})
.catch((err) => {
console.error('Failed to fetch Confirmed events:', err);
});
};
return (store) => (next) => (action) => {
switch (action.type) {
case 'fetchCertifiers':
badgeReg.certifierCount().then((count) => {
new Array(+count).fill(null).forEach((_, id) => {
badgeReg.fetchCertifier(id)
.then((cert) => {
if (!certifiers.some((c) => c.id === cert.id)) {
certifiers = certifiers.concat(cert);
fetchConfirmedEvents(store.dispatch);
}
})
.catch((err) => {
console.warn(`Could not fetch certifier ${id}:`, err);
});
});
});
break;
case 'fetchCertifications':
const { address } = action;
if (!accounts.includes(address)) {
accounts = accounts.concat(address);
fetchConfirmedEvents(store.dispatch);
}
break;
case 'setVisibleAccounts':
const { addresses } = action;
accounts = uniq(accounts.concat(addresses));
fetchConfirmedEvents(store.dispatch);
break;
default:
next(action);
}
};
}
}

View File

@ -17,17 +17,27 @@
const initialState = {};
export default (state = initialState, action) => {
if (action.type !== 'addCertification') {
return state;
if (action.type === 'addCertification') {
const { address, id, name, icon, title } = action;
const certifications = state[address] || [];
if (certifications.some((c) => c.id === id)) {
return state;
}
const newCertifications = certifications.concat({
id, name, icon, title
});
return { ...state, [address]: newCertifications };
}
const { address, name, icon, title } = action;
const certifications = state[address] || [];
if (action.type === 'removeCertification') {
const { address, id } = action;
const certifications = state[address] || [];
if (certifications.some((c) => c.name === name)) {
return state;
const newCertifications = certifications.filter((c) => c.id !== id);
return { ...state, [address]: newCertifications };
}
const newCertifications = certifications.concat({ name, icon, title });
return { ...state, [address]: newCertifications };
return state;
};

View File

@ -16,10 +16,8 @@
import React, { Component, PropTypes } from 'react';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import { hashToImageUrl } from '~/redux/providers/imagesReducer';
import { fetchCertifications } from '~/redux/providers/certifications/actions';
import defaultIcon from '../../../assets/images/certifications/unknown.svg';
@ -29,14 +27,7 @@ class Certifications extends Component {
static propTypes = {
account: PropTypes.string.isRequired,
certifications: PropTypes.array.isRequired,
dappsUrl: PropTypes.string.isRequired,
fetchCertifications: PropTypes.func.isRequired
}
componentWillMount () {
const { account, fetchCertifications } = this.props;
fetchCertifications(account);
dappsUrl: PropTypes.string.isRequired
}
render () {
@ -73,15 +64,13 @@ function mapStateToProps (_, initProps) {
return (state) => {
const certifications = state.certifications[account] || [];
return { certifications };
};
}
const dappsUrl = state.api.dappsUrl;
function mapDispatchToProps (dispatch) {
return bindActionCreators({ fetchCertifications }, dispatch);
return { certifications, dappsUrl };
};
}
export default connect(
mapStateToProps,
mapDispatchToProps
null
)(Certifications);

View File

@ -23,10 +23,6 @@ import Certifications from '~/ui/Certifications';
import styles from './header.css';
export default class Header extends Component {
static contextTypes = {
api: PropTypes.object
};
static propTypes = {
account: PropTypes.object,
balance: PropTypes.object,
@ -44,7 +40,6 @@ export default class Header extends Component {
};
render () {
const { api } = this.context;
const { account, balance, className, children, hideName } = this.props;
const { address, meta, uuid } = account;
@ -85,7 +80,6 @@ export default class Header extends Component {
balance={ balance } />
<Certifications
account={ account.address }
dappsUrl={ api.dappsUrl }
/>
</div>
{ children }

View File

@ -31,6 +31,7 @@ import shapeshiftBtn from '~/../assets/images/shapeshift-btn.png';
import Header from './Header';
import Transactions from './Transactions';
import { setVisibleAccounts } from '~/redux/providers/personalActions';
import { fetchCertifiers, fetchCertifications } from '~/redux/providers/certifications/actions';
import SMSVerificationStore from '~/modals/Verification/sms-store';
import EmailVerificationStore from '~/modals/Verification/email-store';
@ -44,6 +45,8 @@ class Account extends Component {
static propTypes = {
setVisibleAccounts: PropTypes.func.isRequired,
fetchCertifiers: PropTypes.func.isRequired,
fetchCertifications: PropTypes.func.isRequired,
images: PropTypes.object.isRequired,
params: PropTypes.object,
@ -63,6 +66,7 @@ class Account extends Component {
}
componentDidMount () {
this.props.fetchCertifiers();
this.setVisibleAccounts();
}
@ -80,9 +84,10 @@ class Account extends Component {
}
setVisibleAccounts (props = this.props) {
const { params, setVisibleAccounts } = props;
const { params, setVisibleAccounts, fetchCertifications } = props;
const addresses = [ params.address ];
setVisibleAccounts(addresses);
fetchCertifications(params.address);
}
render () {
@ -353,7 +358,9 @@ function mapStateToProps (state) {
function mapDispatchToProps (dispatch) {
return bindActionCreators({
setVisibleAccounts
setVisibleAccounts,
fetchCertifiers,
fetchCertifications
}, dispatch);
}

View File

@ -15,22 +15,29 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
import React, { Component, PropTypes } from 'react';
import { connect } from 'react-redux';
import { bindActionCreators } from 'redux';
import { Container } from '~/ui';
import { fetchCertifiers, fetchCertifications } from '~/redux/providers/certifications/actions';
import Summary from '../Summary';
import styles from './list.css';
export default class List extends Component {
class List extends Component {
static propTypes = {
accounts: PropTypes.object,
walletsOwners: PropTypes.object,
balances: PropTypes.object,
link: PropTypes.string,
search: PropTypes.array,
certifications: PropTypes.object.isRequired,
empty: PropTypes.bool,
link: PropTypes.string,
order: PropTypes.string,
orderFallback: PropTypes.string,
search: PropTypes.array,
walletsOwners: PropTypes.object,
fetchCertifiers: PropTypes.func.isRequired,
fetchCertifications: PropTypes.func.isRequired,
handleAddSearchToken: PropTypes.func
};
@ -42,8 +49,16 @@ export default class List extends Component {
);
}
componentWillMount () {
const { accounts, fetchCertifiers, fetchCertifications } = this.props;
fetchCertifiers();
for (let address in accounts) {
fetchCertifications(address);
}
}
renderAccounts () {
const { accounts, balances, link, empty, handleAddSearchToken, walletsOwners } = this.props;
const { accounts, balances, empty, link, walletsOwners, handleAddSearchToken } = this.props;
if (empty) {
return (
@ -72,7 +87,9 @@ export default class List extends Component {
account={ account }
balance={ balance }
owners={ owners }
handleAddSearchToken={ handleAddSearchToken } />
handleAddSearchToken={ handleAddSearchToken }
showCertifications
/>
</div>
);
});
@ -207,3 +224,20 @@ export default class List extends Component {
});
}
}
function mapStateToProps (state) {
const { certifications } = state;
return { certifications };
}
function mapDispatchToProps (dispatch) {
return bindActionCreators({
fetchCertifiers,
fetchCertifications
}, dispatch);
}
export default connect(
mapStateToProps,
mapDispatchToProps
)(List);

View File

@ -21,6 +21,7 @@ import { isEqual } from 'lodash';
import ReactTooltip from 'react-tooltip';
import { Balance, Container, ContainerTitle, IdentityIcon, IdentityName, Tags, Input } from '~/ui';
import Certifications from '~/ui/Certifications';
import { nullableProptype } from '~/util/proptypes';
import styles from '../accounts.css';
@ -36,12 +37,14 @@ export default class Summary extends Component {
link: PropTypes.string,
name: PropTypes.string,
noLink: PropTypes.bool,
showCertifications: PropTypes.bool,
handleAddSearchToken: PropTypes.func,
owners: nullableProptype(PropTypes.array)
};
static defaultProps = {
noLink: false
noLink: false,
showCertifications: false
};
shouldComponentUpdate (nextProps) {
@ -115,6 +118,7 @@ export default class Summary extends Component {
{ this.renderOwners() }
{ this.renderBalance() }
{ this.renderCertifications() }
</Container>
);
}
@ -181,4 +185,15 @@ export default class Summary extends Component {
<Balance balance={ balance } />
);
}
renderCertifications () {
const { showCertifications, account } = this.props;
if (!showCertifications) {
return null;
}
return (
<Certifications account={ account.address } />
);
}
}

View File

@ -16,14 +16,17 @@
import { action, observable } from 'mobx';
const showFirstRun = window.localStorage.getItem('showFirstRun') !== '0';
export default class Store {
@observable firstrunVisible = showFirstRun;
@observable firstrunVisible = false;
constructor (api) {
this._api = api;
const value = window.localStorage.getItem('showFirstRun');
if (value) {
this.firstrunVisible = JSON.parse(value);
}
this._checkAccounts();
}
@ -33,7 +36,7 @@ export default class Store {
@action toggleFirstrun = (visible = false) => {
this.firstrunVisible = visible;
window.localStorage.setItem('showFirstRun', visible ? '1' : '0');
window.localStorage.setItem('showFirstRun', JSON.stringify(!!visible));
}
_checkAccounts () {

View File

@ -15,6 +15,7 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
const webpack = require('webpack');
const WebpackStats = require('webpack/lib/Stats');
const webpackDevMiddleware = require('webpack-dev-middleware');
const webpackHotMiddleware = require('webpack-hot-middleware');
@ -59,12 +60,24 @@ app.use(webpackHotMiddleware(compiler, {
}));
app.use(webpackDevMiddleware(compiler, {
noInfo: false,
quiet: true,
noInfo: true,
quiet: false,
progress: true,
publicPath: webpackConfig.output.publicPath,
stats: {
colors: true
},
reporter: function (data) {
// @see https://github.com/webpack/webpack/blob/324d309107f00cfc38ec727521563d309339b2ec/lib/Stats.js#L790
// Accepted values: none, errors-only, minimal, normal, verbose
const options = WebpackStats.presetToOptions('minimal');
options.timings = true;
const output = data.stats.toString(options);
process.stdout.write('\n');
process.stdout.write(output);
process.stdout.write('\n\n');
}
}));

View File

@ -28,7 +28,7 @@ module.exports = {
context: path.join(__dirname, '../src'),
entry: {
// library
'inject': ['./web3.js'],
'inject': ['./inject.js'],
'web3': ['./web3.js'],
'parity': ['./parity.js']
},

View File

@ -20,7 +20,7 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer, Error};
use serde::de::Visitor;
use rustc_serialize::hex::ToHex;
use util::hash::{H64 as Hash64, H160 as Hash160, H256 as Hash256, H2048 as Hash2048};
use util::hash::{H64 as Hash64, H160 as Hash160, H256 as Hash256, H520 as Hash520, H2048 as Hash2048};
macro_rules! impl_hash {
@ -87,6 +87,7 @@ macro_rules! impl_hash {
impl_hash!(H64, Hash64);
impl_hash!(Address, Hash160);
impl_hash!(H256, Hash256);
impl_hash!(H520, Hash520);
impl_hash!(Bloom, Hash2048);
#[cfg(test)]

View File

@ -19,6 +19,7 @@
use spec::Ethash;
use spec::BasicAuthority;
use spec::AuthorityRound;
use spec::Tendermint;
/// Engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
@ -33,6 +34,8 @@ pub enum Engine {
BasicAuthority(BasicAuthority),
/// AuthorityRound engine.
AuthorityRound(AuthorityRound),
/// Tendermint engine.
Tendermint(Tendermint)
}
#[cfg(test)]

View File

@ -27,15 +27,17 @@ pub mod state;
pub mod ethash;
pub mod basic_authority;
pub mod authority_round;
pub mod tendermint;
pub use self::account::Account;
pub use self::builtin::{Builtin, Pricing, Linear};
pub use self::genesis::Genesis;
pub use self::params::Params;
pub use self::spec::Spec;
pub use self::seal::{Seal, Ethereum, Generic};
pub use self::seal::{Seal, Ethereum, AuthorityRoundSeal, TendermintSeal};
pub use self::engine::Engine;
pub use self::state::State;
pub use self::ethash::{Ethash, EthashParams};
pub use self::basic_authority::{BasicAuthority, BasicAuthorityParams};
pub use self::authority_round::{AuthorityRound, AuthorityRoundParams};
pub use self::tendermint::{Tendermint, TendermintParams};

View File

@ -16,7 +16,8 @@
//! Spec seal deserialization.
use hash::{H64, H256};
use hash::*;
use uint::Uint;
use bytes::Bytes;
/// Ethereum seal.
@ -29,11 +30,24 @@ pub struct Ethereum {
pub mix_hash: H256,
}
/// Generic seal.
/// AuthorityRound seal.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Generic {
/// Seal rlp.
pub rlp: Bytes,
pub struct AuthorityRoundSeal {
/// Seal step.
pub step: Uint,
/// Seal signature.
pub signature: H520,
}
/// Tendermint seal.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintSeal {
/// Seal round.
pub round: Uint,
/// Proposal seal signature.
pub proposal: H520,
/// Precommit seal signatures.
pub precommits: Vec<H520>,
}
/// Seal variants.
@ -42,9 +56,15 @@ pub enum Seal {
/// Ethereum seal.
#[serde(rename="ethereum")]
Ethereum(Ethereum),
/// AuthorityRound seal.
#[serde(rename="authority_round")]
AuthorityRound(AuthorityRoundSeal),
/// Tendermint seal.
#[serde(rename="tendermint")]
Tendermint(TendermintSeal),
/// Generic seal.
#[serde(rename="generic")]
Generic(Generic),
Generic(Bytes),
}
#[cfg(test)]
@ -53,15 +73,26 @@ mod tests {
use spec::Seal;
#[test]
fn builtin_deserialization() {
fn seal_deserialization() {
let s = r#"[{
"ethereum": {
"nonce": "0x0000000000000042",
"mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
}
},{
"generic": {
"rlp": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa"
"generic": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa"
},{
"authority_round": {
"step": "0x0",
"signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
},{
"tendermint": {
"round": "0x0",
"proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"precommits": [
"0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
]
}
}]"#;
let _deserialized: Vec<Seal> = serde_json::from_str(s).unwrap();

View File

@ -27,8 +27,8 @@ pub struct Spec {
/// Spec name.
pub name: String,
/// Special fork name.
#[serde(rename="forkName")]
pub fork_name: Option<String>,
#[serde(rename="dataDir")]
pub data_dir: Option<String>,
/// Engine.
pub engine: Engine,
/// Spec params.
@ -57,6 +57,7 @@ mod tests {
fn spec_deserialization() {
let s = r#"{
"name": "Morden",
"dataDir": "morden",
"engine": {
"Ethash": {
"params": {

View File

@ -0,0 +1,67 @@
// Copyright 2015, 2016 Ethcore (UK) Ltd.
// This file is part of Parity.
// Parity is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Parity is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
//! Tendermint params deserialization.
use uint::Uint;
use hash::Address;
/// Tendermint params deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct TendermintParams {
/// Gas limit divisor.
#[serde(rename="gasLimitBoundDivisor")]
pub gas_limit_bound_divisor: Uint,
/// Valid authorities
pub authorities: Vec<Address>,
/// Propose step timeout in milliseconds.
#[serde(rename="timeoutPropose")]
pub timeout_propose: Option<Uint>,
/// Prevote step timeout in milliseconds.
#[serde(rename="timeoutPrevote")]
pub timeout_prevote: Option<Uint>,
/// Precommit step timeout in milliseconds.
#[serde(rename="timeoutPrecommit")]
pub timeout_precommit: Option<Uint>,
/// Commit step timeout in milliseconds.
#[serde(rename="timeoutCommit")]
pub timeout_commit: Option<Uint>,
}
/// Tendermint engine deserialization.
#[derive(Debug, PartialEq, Deserialize)]
pub struct Tendermint {
/// Tendermint params.
pub params: TendermintParams,
}
#[cfg(test)]
mod tests {
use serde_json;
use spec::tendermint::Tendermint;
#[test]
fn tendermint_deserialization() {
let s = r#"{
"params": {
"gasLimitBoundDivisor": "0x0400",
"authorities" : ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"]
}
}"#;
let _deserialized: Tendermint = serde_json::from_str(s).unwrap();
}
}
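
The four timeout fields are optional in the JSON, so the engine has to substitute defaults when they are absent. A small sketch of that conversion; the default values here are placeholders for illustration, not the engine's actual constants.

use std::time::Duration;

// Optional millisecond timeouts as they come out of the JSON spec.
struct TendermintParamsJson {
    timeout_propose: Option<u64>,
    timeout_prevote: Option<u64>,
    timeout_precommit: Option<u64>,
    timeout_commit: Option<u64>,
}

// Concrete timeouts used by the engine.
struct Timeouts {
    propose: Duration,
    prevote: Duration,
    precommit: Duration,
    commit: Duration,
}

fn ms_or(value: Option<u64>, default_ms: u64) -> Duration {
    Duration::from_millis(value.unwrap_or(default_ms))
}

impl From<TendermintParamsJson> for Timeouts {
    fn from(p: TendermintParamsJson) -> Self {
        // Placeholder defaults, chosen only for the example.
        Timeouts {
            propose: ms_or(p.timeout_propose, 10_000),
            prevote: ms_or(p.timeout_prevote, 10_000),
            precommit: ms_or(p.timeout_precommit, 10_000),
            commit: ms_or(p.timeout_commit, 10_000),
        }
    }
}

fn main() {
    let json = TendermintParamsJson {
        timeout_propose: Some(3_000),
        timeout_prevote: None,
        timeout_precommit: None,
        timeout_commit: None,
    };
    let t: Timeouts = json.into();
    assert_eq!(t.propose, Duration::from_millis(3_000));
    assert_eq!(t.commit, Duration::from_millis(10_000));
}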

View File

@ -14,23 +14,32 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::path::PathBuf;
use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
use params::SpecType;
#[derive(Debug, PartialEq)]
pub enum AccountCmd {
New(NewAccount),
List(String),
List(ListAccounts),
Import(ImportAccounts),
ImportFromGeth(ImportFromGethAccounts)
}
#[derive(Debug, PartialEq)]
pub struct ListAccounts {
pub path: String,
pub spec: SpecType,
}
#[derive(Debug, PartialEq)]
pub struct NewAccount {
pub iterations: u32,
pub path: String,
pub spec: SpecType,
pub password_file: Option<String>,
}
@ -38,6 +47,7 @@ pub struct NewAccount {
pub struct ImportAccounts {
pub from: Vec<String>,
pub to: String,
pub spec: SpecType,
}
/// Parameters for geth accounts' import
@ -47,18 +57,22 @@ pub struct ImportFromGethAccounts {
pub testnet: bool,
/// directory to import accounts to
pub to: String,
pub spec: SpecType,
}
pub fn execute(cmd: AccountCmd) -> Result<String, String> {
match cmd {
AccountCmd::New(new_cmd) => new(new_cmd),
AccountCmd::List(path) => list(path),
AccountCmd::List(list_cmd) => list(list_cmd),
AccountCmd::Import(import_cmd) => import(import_cmd),
AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd)
}
}
fn keys_dir(path: String) -> Result<DiskDirectory, String> {
fn keys_dir(path: String, spec: SpecType) -> Result<DiskDirectory, String> {
let spec = try!(spec.spec());
let mut path = PathBuf::from(&path);
path.push(spec.data_dir);
DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e))
}
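With the extra spec argument, key files are now grouped per chain. A sketch of the resulting path (base directory illustrative; the data_dir "morden" matches the spec test earlier in this change-set):
// keys_dir("/home/user/.parity/keys".to_owned(), spec_for_morden)  // spec_for_morden is a hypothetical SpecType whose spec has data_dir "morden"
//   creates and returns:  /home/user/.parity/keys/morden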
@ -75,15 +89,15 @@ fn new(n: NewAccount) -> Result<String, String> {
None => try!(password_prompt()),
};
let dir = Box::new(try!(keys_dir(n.path)));
let dir = Box::new(try!(keys_dir(n.path, n.spec)));
let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations))));
let acc_provider = AccountProvider::new(secret_store);
let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e)));
Ok(format!("{:?}", new_account))
}
fn list(path: String) -> Result<String, String> {
let dir = Box::new(try!(keys_dir(path)));
fn list(list_cmd: ListAccounts) -> Result<String, String> {
let dir = Box::new(try!(keys_dir(list_cmd.path, list_cmd.spec)));
let secret_store = Box::new(try!(secret_store(dir, None)));
let acc_provider = AccountProvider::new(secret_store);
let accounts = acc_provider.accounts();
@ -96,7 +110,7 @@ fn list(path: String) -> Result<String, String> {
}
fn import(i: ImportAccounts) -> Result<String, String> {
let to = try!(keys_dir(i.to));
let to = try!(keys_dir(i.to, i.spec));
let mut imported = 0;
for path in &i.from {
let from = DiskDirectory::at(path);
@ -109,7 +123,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result<String, String> {
use std::io::ErrorKind;
use ethcore::ethstore::Error;
let dir = Box::new(try!(keys_dir(i.to)));
let dir = Box::new(try!(keys_dir(i.to, i.spec)));
let secret_store = Box::new(try!(secret_store(dir, None)));
let geth_accounts = read_geth_accounts(i.testnet);
match secret_store.import_geth_accounts(geth_accounts, i.testnet) {

View File

@ -64,11 +64,19 @@ impl FromStr for DataFormat {
#[derive(Debug, PartialEq)]
pub enum BlockchainCmd {
Kill(KillBlockchain),
Import(ImportBlockchain),
Export(ExportBlockchain),
ExportState(ExportState),
}
#[derive(Debug, PartialEq)]
pub struct KillBlockchain {
pub spec: SpecType,
pub dirs: Directories,
pub pruning: Pruning,
}
#[derive(Debug, PartialEq)]
pub struct ImportBlockchain {
pub spec: SpecType,
@ -128,6 +136,7 @@ pub struct ExportState {
pub fn execute(cmd: BlockchainCmd) -> Result<String, String> {
match cmd {
BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd),
BlockchainCmd::Import(import_cmd) => execute_import(import_cmd),
BlockchainCmd::Export(export_cmd) => execute_export(export_cmd),
BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd),
@ -140,9 +149,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
// Setup panic handler
let panic_handler = PanicHandler::new_in_arc();
// create dirs used by parity
try!(cmd.dirs.create_dirs(false, false));
// load spec file
let spec = try!(cmd.spec.spec());
@ -150,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -174,7 +180,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result<String, String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// create dirs used by parity
try!(cmd.dirs.create_dirs(false, false));
// prepare client config
let mut client_config = to_client_config(
@ -311,9 +320,6 @@ fn start_client(
wal: bool,
cache_config: CacheConfig) -> Result<ClientService, String> {
// create dirs used by parity
try!(dirs.create_dirs(false, false));
// load spec file
let spec = try!(spec.spec());
@ -321,7 +327,7 @@ fn start_client(
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -345,7 +351,10 @@ fn start_client(
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// create dirs used by parity
try!(dirs.create_dirs(false, false));
// prepare client config
let client_config = to_client_config(&cache_config, Mode::Active, tracing, fat_db, compaction, wal, VMType::default(), "".into(), algorithm, pruning_history, true);
@ -473,6 +482,18 @@ fn execute_export_state(cmd: ExportState) -> Result<String, String> {
Ok("Export completed.".into())
}
pub fn kill_db(cmd: KillBlockchain) -> Result<String, String> {
let spec = try!(cmd.spec.spec());
let genesis_hash = spec.genesis_header().hash();
let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir);
let user_defaults_path = db_dirs.user_defaults_path();
let user_defaults = try!(UserDefaults::load(&user_defaults_path));
let algorithm = cmd.pruning.to_algorithm(&user_defaults);
let dir = db_dirs.db_path(algorithm);
try!(fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e)));
Ok("Database deleted.".to_owned())
}
#[cfg(test)]
mod test {
use super::DataFormat;

View File

@ -16,6 +16,7 @@
#[macro_use]
mod usage;
use dir::default_data_path;
usage! {
{
@ -38,6 +39,8 @@ usage! {
cmd_ui: bool,
cmd_tools: bool,
cmd_hash: bool,
cmd_kill: bool,
cmd_db: bool,
// Arguments
arg_pid_file: String,
@ -83,8 +86,8 @@ usage! {
flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(),
flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(),
flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(),
flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(),
flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_db_path: String = default_data_path(), or |c: &Config| otry!(c.parity).db_path.clone(),
flag_keys_path: String = "$DATA/keys", or |c: &Config| otry!(c.parity).keys_path.clone(),
flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(),
// -- Account Options
@ -103,7 +106,7 @@ usage! {
or |c: &Config| otry!(c.ui).port.clone(),
flag_ui_interface: String = "local",
or |c: &Config| otry!(c.ui).interface.clone(),
flag_ui_path: String = "$HOME/.parity/signer",
flag_ui_path: String = "$DATA/signer",
or |c: &Config| otry!(c.ui).path.clone(),
// NOTE [todr] For security reasons don't put this to config files
flag_ui_no_validation: bool = false, or |_| None,
@ -159,7 +162,7 @@ usage! {
// IPC
flag_no_ipc: bool = false,
or |c: &Config| otry!(c.ipc).disable.clone(),
flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc",
flag_ipc_path: String = "$DATA/jsonrpc.ipc",
or |c: &Config| otry!(c.ipc).path.clone(),
flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc",
or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")),
@ -173,7 +176,7 @@ usage! {
or |c: &Config| otry!(c.dapps).interface.clone(),
flag_dapps_hosts: String = "none",
or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")),
flag_dapps_path: String = "$HOME/.parity/dapps",
flag_dapps_path: String = "$DATA/dapps",
or |c: &Config| otry!(c.dapps).path.clone(),
flag_dapps_user: Option<String> = None,
or |c: &Config| otry!(c.dapps).user.clone().map(Some),
@ -274,7 +277,7 @@ usage! {
or |c: &Config| otry!(c.vm).jit.clone(),
// -- Miscellaneous Options
flag_config: String = "$HOME/.parity/config.toml", or |_| None,
flag_config: String = "$DATA/config.toml", or |_| None,
flag_logging: Option<String> = None,
or |c: &Config| otry!(c.misc).logging.clone().map(Some),
flag_log_file: Option<String> = None,
@ -517,6 +520,8 @@ mod tests {
cmd_ui: false,
cmd_tools: false,
cmd_hash: false,
cmd_db: false,
cmd_kill: false,
// Arguments
arg_pid_file: "".into(),
@ -671,7 +676,7 @@ mod tests {
// -- Miscellaneous Options
flag_version: false,
flag_config: "$HOME/.parity/config.toml".into(),
flag_config: "$DATA/config.toml".into(),
flag_logging: Some("own_tx=trace".into()),
flag_log_file: Some("/var/log/parity.log".into()),
flag_no_color: false,

View File

@ -145,7 +145,7 @@ macro_rules! usage {
}
let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config);
let config_file = replace_home(&config_file);
let config_file = replace_home("", &config_file);
let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) {
// Load config file
(Ok(mut file), _) => {

View File

@ -18,6 +18,7 @@ Usage:
parity snapshot <file> [options]
parity restore [ <file> ] [options]
parity tools hash <file>
parity db kill [options]
Operating Options:
--mode MODE Set the operating mode. MODE can be one of:
@ -285,10 +286,8 @@ Import/Export Options:
(default: {flag_format:?} = Import: auto, Export: binary)
--no-seal-check Skip block seal check. (default: {flag_no_seal_check})
--at BLOCK Export state at the given block, which may be an
index, hash, or 'latest'. Note that taking snapshots at
non-recent blocks will only work with --pruning archive
(default: {flag_at})
--no-storage Don't export account storge. (default: {flag_no_storage})
index, hash, or 'latest'. (default: {flag_at})
--no-storage Don't export account storage. (default: {flag_no_storage})
--no-code Don't export account code. (default: {flag_no_code})
--min-balance WEI Don't export accounts with balance less than specified.
(default: {flag_min_balance:?})
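Illustratively, the new subcommand combines with the existing chain and pruning flags; the invocation below is a sketch and the output line mirrors the message returned by kill_db:
    $ parity db kill --chain morden --pruning archive
    Database deleted.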

View File

@ -38,9 +38,9 @@ use dir::Directories;
use dapps::Configuration as DappsConfiguration;
use signer::{Configuration as SignerConfiguration};
use run::RunCmd;
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState, DataFormat};
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat};
use presale::ImportWallet;
use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts};
use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts};
use snapshot::{self, SnapshotCommand};
const AUTHCODE_FILENAME: &'static str = "authcodes";
@ -152,20 +152,32 @@ impl Configuration {
}
} else if self.args.cmd_tools && self.args.cmd_hash {
Cmd::Hash(self.args.arg_file)
} else if self.args.cmd_db && self.args.cmd_kill {
Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain {
spec: spec,
dirs: dirs,
pruning: pruning,
}))
} else if self.args.cmd_account {
let account_cmd = if self.args.cmd_new {
let new_acc = NewAccount {
iterations: self.args.flag_keys_iterations,
path: dirs.keys,
spec: spec,
password_file: self.args.flag_password.first().cloned(),
};
AccountCmd::New(new_acc)
} else if self.args.cmd_list {
AccountCmd::List(dirs.keys)
let list_acc = ListAccounts {
path: dirs.keys,
spec: spec,
};
AccountCmd::List(list_acc)
} else if self.args.cmd_import {
let import_acc = ImportAccounts {
from: self.args.arg_path.clone(),
to: dirs.keys,
spec: spec,
};
AccountCmd::Import(import_acc)
} else {
@ -175,6 +187,7 @@ impl Configuration {
} else if self.args.flag_import_geth_keys {
let account_cmd = AccountCmd::ImportFromGeth(
ImportFromGethAccounts {
spec: spec,
to: dirs.keys,
testnet: self.args.flag_testnet
}
@ -184,6 +197,7 @@ impl Configuration {
let presale_cmd = ImportWallet {
iterations: self.args.flag_keys_iterations,
path: dirs.keys,
spec: spec,
wallet_path: self.args.arg_path.first().unwrap().clone(),
password_file: self.args.flag_password.first().cloned(),
};
@ -575,7 +589,7 @@ impl Configuration {
ret.snapshot_peers = self.snapshot_peers();
ret.allow_ips = try!(self.allow_ips());
ret.max_pending_peers = self.max_pending_peers();
let mut net_path = PathBuf::from(self.directories().db);
let mut net_path = PathBuf::from(self.directories().data);
net_path.push("network");
ret.config_path = Some(net_path.to_str().unwrap().to_owned());
ret.reserved_nodes = try!(self.init_reserved_nodes());
@ -669,18 +683,11 @@ impl Configuration {
fn directories(&self) -> Directories {
use util::path;
let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
let data_path = replace_home("", self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path));
let keys_path = replace_home(
if self.args.flag_testnet {
"$HOME/.parity/testnet_keys"
} else {
&self.args.flag_keys_path
}
);
let dapps_path = replace_home(&self.args.flag_dapps_path);
let ui_path = replace_home(&self.args.flag_ui_path);
let keys_path = replace_home(&data_path, &self.args.flag_keys_path);
let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path);
let ui_path = replace_home(&data_path, &self.args.flag_ui_path);
if self.args.flag_geth && !cfg!(windows) {
let geth_root = if self.args.flag_testnet { path::ethereum::test() } else { path::ethereum::default() };
@ -689,7 +696,7 @@ impl Configuration {
}
if cfg!(feature = "ipc") && !cfg!(feature = "windows") {
let mut path_buf = PathBuf::from(db_path.clone());
let mut path_buf = PathBuf::from(data_path.clone());
path_buf.push("ipc");
let ipc_path = path_buf.to_str().unwrap();
::std::fs::create_dir_all(ipc_path).unwrap_or_else(
@ -699,7 +706,7 @@ impl Configuration {
Directories {
keys: keys_path,
db: db_path,
data: data_path,
dapps: dapps_path,
signer: ui_path,
}
@ -709,7 +716,7 @@ impl Configuration {
if self.args.flag_geth {
geth_ipc_path(self.args.flag_testnet)
} else {
parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
parity_ipc_path(&self.directories().data, &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone()))
}
}
@ -777,12 +784,14 @@ mod tests {
use ethcore_rpc::NetworkSettings;
use ethcore::client::{VMType, BlockId};
use ethcore::miner::{MinerOptions, PrioritizationStrategy};
use helpers::{replace_home, default_network_config};
use helpers::{default_network_config};
use run::RunCmd;
use dir::Directories;
use signer::{Configuration as SignerConfiguration};
use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat, ExportState};
use presale::ImportWallet;
use account::{AccountCmd, NewAccount, ImportAccounts};
use params::SpecType;
use account::{AccountCmd, NewAccount, ImportAccounts, ListAccounts};
use devtools::{RandomTempPath};
use std::io::Write;
use std::fs::{File, create_dir};
@ -809,8 +818,9 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::New(NewAccount {
iterations: 10240,
path: replace_home("$HOME/.parity/keys"),
path: Directories::default().keys,
password_file: None,
spec: SpecType::default(),
})));
}
@ -819,7 +829,10 @@ mod tests {
let args = vec!["parity", "account", "list"];
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(
AccountCmd::List(replace_home("$HOME/.parity/keys")),
AccountCmd::List(ListAccounts {
path: Directories::default().keys,
spec: SpecType::default(),
})
));
}
@ -829,7 +842,8 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::Import(ImportAccounts {
from: vec!["my_dir".into(), "another_dir".into()],
to: replace_home("$HOME/.parity/keys"),
to: Directories::default().keys,
spec: SpecType::default(),
})));
}
@ -839,9 +853,10 @@ mod tests {
let conf = parse(&args);
assert_eq!(conf.into_command().unwrap().cmd, Cmd::ImportPresaleWallet(ImportWallet {
iterations: 10240,
path: replace_home("$HOME/.parity/keys"),
path: Directories::default().keys,
wallet_path: "my_wallet.json".into(),
password_file: Some("pwd".into()),
spec: SpecType::default(),
}));
}
@ -940,7 +955,7 @@ mod tests {
fn test_command_signer_new_token() {
let args = vec!["parity", "signer", "new-token"];
let conf = parse(&args);
let expected = replace_home("$HOME/.parity/signer");
let expected = Directories::default().signer;
assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(SignerConfiguration {
enabled: true,
signer_path: expected,

View File

@ -20,6 +20,7 @@ use rpc_apis;
use ethcore::client::Client;
use ethsync::SyncProvider;
use helpers::replace_home;
use dir::default_data_path;
#[derive(Debug, PartialEq, Clone)]
pub struct Configuration {
@ -34,6 +35,7 @@ pub struct Configuration {
impl Default for Configuration {
fn default() -> Self {
let data_dir = default_data_path();
Configuration {
enabled: true,
interface: "127.0.0.1".into(),
@ -41,7 +43,7 @@ impl Default for Configuration {
hosts: Some(Vec::new()),
user: None,
pass: None,
dapps_path: replace_home("$HOME/.parity/dapps"),
dapps_path: replace_home(&data_dir, "$DATA/dapps"),
}
}
}

View File

@ -19,6 +19,7 @@ use std::path::{PathBuf, Path};
use util::{H64, H256};
use util::journaldb::Algorithm;
use helpers::replace_home;
use app_dirs::{AppInfo, get_app_root, AppDataType};
// this const is irrelevant because we have migrations now,
// but we still use it for backwards compatibility
@ -26,7 +27,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3";
#[derive(Debug, PartialEq)]
pub struct Directories {
pub db: String,
pub data: String,
pub keys: String,
pub signer: String,
pub dapps: String,
@ -34,18 +35,19 @@ pub struct Directories {
impl Default for Directories {
fn default() -> Self {
let data_dir = default_data_path();
Directories {
db: replace_home("$HOME/.parity"),
keys: replace_home("$HOME/.parity/keys"),
signer: replace_home("$HOME/.parity/signer"),
dapps: replace_home("$HOME/.parity/dapps"),
data: replace_home(&data_dir, "$DATA"),
keys: replace_home(&data_dir, "$DATA/keys"),
signer: replace_home(&data_dir, "$DATA/signer"),
dapps: replace_home(&data_dir, "$DATA/dapps"),
}
}
}
impl Directories {
pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool) -> Result<(), String> {
try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.data).map_err(|e| e.to_string()));
try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string()));
if signer_enabled {
try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string()));
@ -57,20 +59,38 @@ impl Directories {
}
/// Database paths.
pub fn database(&self, genesis_hash: H256, fork_name: Option<String>) -> DatabaseDirectories {
pub fn database(&self, genesis_hash: H256, fork_name: Option<String>, spec_name: String) -> DatabaseDirectories {
DatabaseDirectories {
path: self.db.clone(),
path: self.data.clone(),
genesis_hash: genesis_hash,
fork_name: fork_name,
spec_name: spec_name,
}
}
/// Get the ipc sockets path
pub fn ipc_path(&self) -> PathBuf {
let mut dir = Path::new(&self.db).to_path_buf();
let mut dir = Path::new(&self.data).to_path_buf();
dir.push("ipc");
dir
}
// TODO: remove in 1.7
pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf {
let mut dir = Path::new(&self.data).to_path_buf();
if testnet {
dir.push("testnet_keys");
} else {
dir.push("keys");
}
dir
}
pub fn keys_path(&self, spec_name: &str) -> PathBuf {
let mut dir = PathBuf::from(&self.keys);
dir.push(spec_name);
dir
}
}
#[derive(Debug, PartialEq)]
@ -78,52 +98,103 @@ pub struct DatabaseDirectories {
pub path: String,
pub genesis_hash: H256,
pub fork_name: Option<String>,
pub spec_name: String,
}
impl DatabaseDirectories {
/// Base DB directory for the given fork.
pub fn fork_path(&self) -> PathBuf {
// TODO: remove in 1.7
pub fn legacy_fork_path(&self) -> PathBuf {
let mut dir = Path::new(&self.path).to_path_buf();
dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default()));
dir
}
/// Get the root path for database
pub fn version_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.fork_path();
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
pub fn spec_root_path(&self) -> PathBuf {
let mut dir = Path::new(&self.path).to_path_buf();
dir.push("chains");
dir.push(&self.spec_name);
dir
}
/// Get the path for the databases given the genesis_hash and information on the databases.
pub fn client_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.version_path(pruning);
let mut dir = self.db_root_path();
dir.push(pruning.as_internal_name_str());
dir.push("db");
dir
}
pub fn db_root_path(&self) -> PathBuf {
let mut dir = self.spec_root_path();
dir.push("db");
dir.push(H64::from(self.genesis_hash).hex());
dir
}
pub fn db_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.db_root_path();
dir.push(pruning.as_internal_name_str());
dir
}
/// Get the root path for database
// TODO: remove in 1.7
pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str()));
dir
}
/// Get user defaults path
// TODO: remove in 1.7
pub fn legacy_user_defaults_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("user_defaults");
dir
}
/// Get user defaults path
// TODO: remove in 1.7
pub fn legacy_snapshot_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("snapshot");
dir
}
/// Get legacy network path
// TODO: remove in 1.7
pub fn legacy_network_path(&self) -> PathBuf {
let mut dir = self.legacy_fork_path();
dir.push("network");
dir
}
pub fn user_defaults_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.spec_root_path();
dir.push("user_defaults");
dir
}
/// Get the path for the snapshot directory given the genesis hash and fork name.
pub fn snapshot_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.db_root_path();
dir.push("snapshot");
dir
}
/// Get the path for the network directory.
pub fn network_path(&self) -> PathBuf {
let mut dir = self.fork_path();
let mut dir = self.spec_root_path();
dir.push("network");
dir
}
}
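Taken together, the new helpers give the following on-disk layout. A sketch only: the base path "/data", the spec name "morden", and the assumption that Algorithm::Archive's internal name is "archive" are all illustrative:
// /data/chains/morden                                   spec_root_path()
// /data/chains/morden/user_defaults                     user_defaults_path()
// /data/chains/morden/network                           network_path()
// /data/chains/morden/db/<genesis-h64-hex>              db_root_path()
// /data/chains/morden/db/<genesis-h64-hex>/snapshot     snapshot_path()
// /data/chains/morden/db/<genesis-h64-hex>/archive      db_path(Algorithm::Archive)
// /data/chains/morden/db/<genesis-h64-hex>/archive/db   client_path(Algorithm::Archive)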
pub fn default_data_path() -> String {
let app_info = AppInfo { name: "parity", author: "parity" };
get_app_root(AppDataType::UserData, &app_info).map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|_| "$HOME/.parity".to_owned())
}
#[cfg(test)]
mod tests {
use super::Directories;
@ -131,11 +202,12 @@ mod tests {
#[test]
fn test_default_directories() {
let data_dir = super::default_data_path();
let expected = Directories {
db: replace_home("$HOME/.parity"),
keys: replace_home("$HOME/.parity/keys"),
signer: replace_home("$HOME/.parity/signer"),
dapps: replace_home("$HOME/.parity/dapps"),
data: replace_home(&data_dir, "$DATA"),
keys: replace_home(&data_dir, "$DATA/keys"),
signer: replace_home(&data_dir, "$DATA/signer"),
dapps: replace_home(&data_dir, "$DATA/dapps"),
};
assert_eq!(expected, Directories::default());
}

View File

@ -24,7 +24,7 @@ use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientCo
use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy};
use cache::CacheConfig;
use dir::DatabaseDirectories;
use upgrade::upgrade;
use upgrade::{upgrade, upgrade_data_paths};
use migration::migrate;
use ethsync::is_valid_node_url;
@ -132,9 +132,10 @@ pub fn to_price(s: &str) -> Result<f32, String> {
}
/// Replaces `$HOME` str with home directory path.
pub fn replace_home(arg: &str) -> String {
pub fn replace_home(base: &str, arg: &str) -> String {
// the $HOME directory on macOS should be `~/Library` or `~/Library/Application Support`
let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap());
let r = r.replace("$DATA", base );
r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() )
}
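A small usage sketch (paths illustrative; assumes a unix home of /home/user, where MAIN_SEPARATOR is already '/'):
// replace_home("/data/parity", "$DATA/keys")  ->  "/data/parity/keys"
// replace_home("", "$HOME/.parity")           ->  "/home/user/.parity"
// On Windows the same calls additionally rewrite '/' to the platform separator '\'.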
@ -159,13 +160,13 @@ pub fn geth_ipc_path(testnet: bool) -> String {
}
/// Formats and returns parity ipc path.
pub fn parity_ipc_path(s: &str) -> String {
pub fn parity_ipc_path(base: &str, s: &str) -> String {
// Windows path should not be hardcoded here.
if cfg!(windows) {
return r"\\.\pipe\parity.jsonrpc".to_owned();
}
replace_home(s)
replace_home(base, s)
}
/// Validates and formats bootnodes option.
@ -187,7 +188,7 @@ pub fn to_bootnodes(bootnodes: &Option<String>) -> Result<Vec<String>, String> {
pub fn default_network_config() -> ::ethsync::NetworkConfiguration {
use ethsync::{NetworkConfiguration, AllowIP};
NetworkConfiguration {
config_path: Some(replace_home("$HOME/.parity/network")),
config_path: Some(replace_home(&::dir::default_data_path(), "$DATA/network")),
net_config_path: None,
listen_address: Some("0.0.0.0:30303".into()),
public_address: None,
@ -261,6 +262,8 @@ pub fn execute_upgrades(
compaction_profile: CompactionProfile
) -> Result<(), String> {
upgrade_data_paths(dirs, pruning);
match upgrade(Some(&dirs.path)) {
Ok(upgrades_applied) if upgrades_applied > 0 => {
debug!("Executed {} upgrade scripts - ok", upgrades_applied);
@ -271,7 +274,7 @@ pub fn execute_upgrades(
_ => {},
}
let client_path = dirs.version_path(pruning);
let client_path = dirs.db_path(pruning);
migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e))
}

View File

@ -23,7 +23,7 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering};
use std::time::{Instant, Duration};
use isatty::{stdout_isatty};
use ethsync::{SyncProvider, ManageNetwork};
use util::{Uint, RwLock, Mutex, H256, Colour};
use util::{Uint, RwLock, Mutex, H256, Colour, Bytes};
use ethcore::client::*;
use ethcore::views::BlockView;
use ethcore::snapshot::service::Service as SnapshotService;
@ -176,14 +176,13 @@ impl Informant {
}
impl ChainNotify for Informant {
fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, duration: u64) {
fn new_blocks(&self, imported: Vec<H256>, _invalid: Vec<H256>, _enacted: Vec<H256>, _retracted: Vec<H256>, _sealed: Vec<H256>, _proposed: Vec<Bytes>, duration: u64) {
let mut last_import = self.last_import.lock();
let sync_state = self.sync.as_ref().map(|s| s.status().state);
let importing = is_major_importing(sync_state, self.client.queue_info());
let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing;
let txs_imported = imported.iter()
.take(imported.len() - if ripe {1} else {0})
.take(imported.len().saturating_sub(if ripe { 1 } else { 0 }))
.filter_map(|h| self.client.block(BlockId::Hash(*h)))
.map(|b| BlockView::new(&b).transactions_count())
.sum();
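The switch to saturating_sub covers the corner case where imported is empty while ripe is true: a plain `imported.len() - 1` would underflow there (panicking in debug builds, wrapping in release), whereas
// 0usize.saturating_sub(1) == 0
keeps the take() count at zero.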

View File

@ -54,6 +54,7 @@ extern crate ansi_term;
extern crate regex;
extern crate isatty;
extern crate toml;
extern crate app_dirs;
#[macro_use]
extern crate ethcore_util as util;

View File

@ -76,6 +76,14 @@ impl SpecType {
}
}
}
pub fn legacy_fork_name(&self) -> Option<String> {
match *self {
SpecType::Classic => Some("classic".to_owned()),
SpecType::Expanse => Some("expanse".to_owned()),
_ => None,
}
}
}
#[derive(Debug, PartialEq)]

View File

@ -18,11 +18,13 @@ use ethcore::ethstore::{PresaleWallet, EthStore};
use ethcore::ethstore::dir::DiskDirectory;
use ethcore::account_provider::AccountProvider;
use helpers::{password_prompt, password_from_file};
use params::SpecType;
#[derive(Debug, PartialEq)]
pub struct ImportWallet {
pub iterations: u32,
pub path: String,
pub spec: SpecType,
pub wallet_path: String,
pub password_file: Option<String>,
}

View File

@ -23,6 +23,7 @@ use ethcore_rpc::{RpcServerError, RpcServer as Server, IpcServerError};
use rpc_apis;
use rpc_apis::ApiSet;
use helpers::parity_ipc_path;
use dir::default_data_path;
pub use ethcore_rpc::{IpcServer, Server as HttpServer};
@ -58,9 +59,10 @@ pub struct IpcConfiguration {
impl Default for IpcConfiguration {
fn default() -> Self {
let data_dir = default_data_path();
IpcConfiguration {
enabled: true,
socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"),
socket_addr: parity_ipc_path(&data_dir, "$DATA/jsonrpc.ipc"),
apis: ApiSet::IpcContext,
}
}

View File

@ -41,6 +41,7 @@ use params::{
tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool
};
use helpers::{to_client_config, execute_upgrades, passwords_from_files};
use upgrade::upgrade_key_location;
use dir::Directories;
use cache::CacheConfig;
use user_defaults::UserDefaults;
@ -129,9 +130,6 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
// increase max number of open files
raise_fd_limit();
// create dirs used by parity
try!(cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled));
// load spec
let spec = try!(cmd.spec.spec());
@ -139,7 +137,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -166,7 +164,10 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// create dirs used by parity
try!(cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled));
// run in daemon mode
if let Some(pid_file) = cmd.daemon {
@ -175,7 +176,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
// display info about used pruning algorithm
info!("Starting {}", Colour::White.bold().paint(version()));
info!("State DB configuation: {}{}{}",
info!("State DB configuration: {}{}{}",
Colour::White.bold().paint(algorithm.as_str()),
match fat_db {
true => Colour::White.bold().paint(" +Fat").to_string(),
@ -217,7 +218,7 @@ pub fn execute(cmd: RunCmd, logger: Arc<RotatingLogger>) -> Result<(), String> {
let passwords = try!(passwords_from_files(&cmd.acc_conf.password_files));
// prepare account provider
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf, &passwords)));
let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)));
// let the Engine access the accounts
spec.engine.register_account_provider(account_provider.clone());
@ -449,11 +450,13 @@ fn daemonize(_pid_file: String) -> Result<(), String> {
Err("daemon is no supported on windows".into())
}
fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
fn prepare_account_provider(dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[String]) -> Result<AccountProvider, String> {
use ethcore::ethstore::EthStore;
use ethcore::ethstore::dir::DiskDirectory;
let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e))));
let path = dirs.keys_path(data_dir);
upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path);
let dir = Box::new(try!(DiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e))));
let account_service = AccountProvider::new(Box::new(
try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e)))
));

View File

@ -23,6 +23,7 @@ use util::path::restrict_permissions_owner;
use rpc_apis;
use ethcore_signer as signer;
use helpers::replace_home;
use dir::default_data_path;
pub use ethcore_signer::Server as SignerServer;
const CODES_FILENAME: &'static str = "authcodes";
@ -38,11 +39,12 @@ pub struct Configuration {
impl Default for Configuration {
fn default() -> Self {
let data_dir = default_data_path();
Configuration {
enabled: true,
port: 8180,
interface: "127.0.0.1".into(),
signer_path: replace_home("$HOME/.parity/signer"),
signer_path: replace_home(&data_dir, "$DATA/signer"),
skip_origin_validation: false,
}
}

View File

@ -143,7 +143,7 @@ impl SnapshotCommand {
let genesis_hash = spec.genesis_header().hash();
// database paths
let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone());
let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone());
// user defaults path
let user_defaults_path = db_dirs.user_defaults_path();
@ -167,7 +167,7 @@ impl SnapshotCommand {
let snapshot_path = db_dirs.snapshot_path();
// execute upgrades
try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path())));
try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path())));
// prepare client config
let client_config = to_client_config(&self.cache_config, Mode::Active, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history, true);

View File

@ -18,10 +18,14 @@
use semver::Version;
use std::collections::*;
use std::fs::{File, create_dir_all};
use std::fs::{self, File, create_dir_all};
use std::env;
use std::io;
use std::io::{Read, Write};
use std::path::PathBuf;
use std::path::{PathBuf, Path};
use dir::{DatabaseDirectories, default_data_path};
use helpers::replace_home;
use util::journaldb::Algorithm;
#[cfg_attr(feature="dev", allow(enum_variant_names))]
#[derive(Debug)]
@ -126,3 +130,84 @@ pub fn upgrade(db_path: Option<&str>) -> Result<usize, Error> {
upgrade_from_version(ver)
})
}
fn file_exists(path: &Path) -> bool {
match fs::metadata(&path) {
Err(ref e) if e.kind() == io::ErrorKind::NotFound => false,
_ => true,
}
}
pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) {
let mut parent = to.clone();
parent.pop();
match fs::create_dir_all(&parent).and_then(|()| fs::read_dir(from)) {
Ok(entries) => {
let files: Vec<_> = entries.filter_map(|f| f.ok().and_then(|f| if f.file_type().ok().map_or(false, |f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None })).collect();
let mut num: usize = 0;
for name in files {
let mut from = from.clone();
from.push(&name);
let mut to = to.clone();
to.push(&name);
if !file_exists(&to) {
if let Err(e) = fs::rename(&from, &to) {
debug!("Error upgrading key {:?}: {:?}", from, e);
} else {
num += 1;
}
} else {
debug!("Skipped upgrading key {:?}", from);
}
}
if num > 0 {
info!("Moved {} keys from {} to {}", num, from.to_string_lossy(), to.to_string_lossy());
}
},
Err(e) => {
warn!("Error moving keys from {:?} to {:?}: {:?}", from, to, e);
}
}
}
fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) {
if file_exists(&source) {
if !file_exists(&dest) {
let mut parent = dest.clone();
parent.pop();
if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(&source, &dest)) {
debug!("Skipped path {:?} -> {:?} :{:?}", source, dest, e);
} else {
info!("Moved {} to {}", source.to_string_lossy(), dest.to_string_lossy());
}
} else {
debug!("Skipped upgrading directory {:?}, Destination already exists at {:?}", source, dest);
}
}
}
fn upgrade_user_defaults(dirs: &DatabaseDirectories) {
let source = dirs.legacy_user_defaults_path();
let dest = dirs.user_defaults_path();
if file_exists(&source) {
if !file_exists(&dest) {
if let Err(e) = fs::rename(&source, &dest) {
debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e);
}
} else {
debug!("Skipped upgrading user defaults {:?}, File exists at {:?}", source, dest);
}
}
}
pub fn upgrade_data_paths(dirs: &DatabaseDirectories, pruning: Algorithm) {
let legacy_root_path = replace_home("", "$HOME/.parity");
let default_path = default_data_path();
if legacy_root_path != dirs.path && dirs.path == default_path {
upgrade_dir_location(&PathBuf::from(legacy_root_path), &PathBuf::from(&dirs.path));
}
upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning));
upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path());
upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path());
upgrade_user_defaults(&dirs);
}
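Summarising the migration above (a sketch, assuming defaults on a unix host; exact paths depend on pruning and the optional fork suffix):
// $HOME/.parity                                -> new default data path (whole tree, only when both sides are the defaults)
// <legacy>/<genesis[-fork]>/v5.3-sec-<pruning> -> <data>/chains/<spec>/db/<genesis>/<pruning>
// <legacy>/<genesis[-fork]>/snapshot           -> <data>/chains/<spec>/db/<genesis>/snapshot
// <legacy>/<genesis[-fork]>/network            -> <data>/chains/<spec>/network
// <legacy>/<genesis[-fork]>/user_defaults      -> <data>/chains/<spec>/user_defaults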

View File

@ -14,6 +14,8 @@
// You should have received a copy of the GNU General Public License
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use std::fmt::Debug;
use std::ops::Deref;
use rlp;
use util::{Address, H256, U256, Uint, Bytes};
use util::bytes::ToPretty;
@ -37,46 +39,112 @@ use v1::types::{
pub const DEFAULT_MAC: [u8; 2] = [0, 0];
pub fn execute<C, M>(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: Option<String>) -> Result<ConfirmationResponse, Error>
type AccountToken = String;
#[derive(Debug, Clone, PartialEq)]
pub enum SignWith {
Nothing,
Password(String),
Token(AccountToken),
}
#[derive(Debug)]
pub enum WithToken<T: Debug> {
No(T),
Yes(T, AccountToken),
}
impl<T: Debug> Deref for WithToken<T> {
type Target = T;
fn deref(&self) -> &Self::Target {
match *self {
WithToken::No(ref v) => v,
WithToken::Yes(ref v, _) => v,
}
}
}
impl<T: Debug> WithToken<T> {
pub fn map<S, F>(self, f: F) -> WithToken<S> where
S: Debug,
F: FnOnce(T) -> S,
{
match self {
WithToken::No(v) => WithToken::No(f(v)),
WithToken::Yes(v, token) => WithToken::Yes(f(v), token),
}
}
pub fn into_value(self) -> T {
match self {
WithToken::No(v) => v,
WithToken::Yes(v, _) => v,
}
}
}
impl<T: Debug> From<(T, AccountToken)> for WithToken<T> {
fn from(tuple: (T, AccountToken)) -> Self {
WithToken::Yes(tuple.0, tuple.1)
}
}
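A minimal sketch (not part of the change-set) of how a value travels through WithToken:
// let r: WithToken<u32> = WithToken::Yes(1, "next-token".into());
// let r = r.map(|v| v + 1);         // the token survives the map
// assert_eq!(*r, 2);                // Deref exposes the inner value
// assert_eq!(r.into_value(), 2);    // or drop the token and take the value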
pub fn execute<C, M>(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: SignWith) -> Result<WithToken<ConfirmationResponse>, Error>
where C: MiningBlockChainClient, M: MinerService
{
match payload {
ConfirmationPayload::SendTransaction(request) => {
sign_and_dispatch(client, miner, accounts, request, pass)
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
.map(|result| result
.map(RpcH256::from)
.map(ConfirmationResponse::SendTransaction)
)
},
ConfirmationPayload::SignTransaction(request) => {
sign_no_dispatch(client, miner, accounts, request, pass)
.map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction)
.map(|result| result
.map(RpcRichRawTransaction::from)
.map(ConfirmationResponse::SignTransaction)
)
},
ConfirmationPayload::Signature(address, hash) => {
signature(accounts, address, hash, pass)
.map(RpcH520::from)
.map(ConfirmationResponse::Signature)
.map(|result| result
.map(RpcH520::from)
.map(ConfirmationResponse::Signature)
)
},
ConfirmationPayload::Decrypt(address, data) => {
decrypt(accounts, address, data, pass)
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
.map(|result| result
.map(RpcBytes)
.map(ConfirmationResponse::Decrypt)
)
},
}
}
fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: Option<String>) -> Result<Signature, Error> {
accounts.sign(address, password.clone(), hash).map_err(|e| match password {
Some(_) => errors::from_password_error(e),
None => errors::from_signing_error(e),
fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result<WithToken<Signature>, Error> {
match password.clone() {
SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No),
SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No),
SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: Option<String>) -> Result<Bytes, Error> {
accounts.decrypt(address, password.clone(), &DEFAULT_MAC, &msg)
.map_err(|e| match password {
Some(_) => errors::from_password_error(e),
None => errors::from_signing_error(e),
})
fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: SignWith) -> Result<WithToken<Bytes>, Error> {
match password.clone() {
SignWith::Nothing => accounts.decrypt(address, None, &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Password(pass) => accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &msg).map(WithToken::No),
SignWith::Token(token) => accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &msg).map(Into::into),
}.map_err(|e| match password {
SignWith::Nothing => errors::from_signing_error(e),
_ => errors::from_password_error(e),
})
}
pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result<H256, Error>
@ -88,7 +156,7 @@ pub fn dispatch_transaction<C, M>(client: &C, miner: &M, signed_transaction: Sig
.map(|_| hash)
}
pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: Option<String>) -> Result<SignedTransaction, Error>
pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<SignedTransaction>, Error>
where C: MiningBlockChainClient, M: MinerService {
let network_id = client.signing_network_id();
@ -110,20 +178,32 @@ pub fn sign_no_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider,
let hash = t.hash(network_id);
let signature = try!(signature(accounts, address, hash, password));
t.with_signature(signature, network_id)
signature.map(|sig| {
t.with_signature(sig, network_id)
})
};
Ok(signed_transaction)
}
pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: Option<String>) -> Result<H256, Error>
pub fn sign_and_dispatch<C, M>(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result<WithToken<H256>, Error>
where C: MiningBlockChainClient, M: MinerService
{
let network_id = client.signing_network_id();
let signed_transaction = try!(sign_no_dispatch(client, miner, accounts, filled, password));
let (signed_transaction, token) = match signed_transaction {
WithToken::No(signed_transaction) => (signed_transaction, None),
WithToken::Yes(signed_transaction, token) => (signed_transaction, Some(token)),
};
trace!(target: "miner", "send_transaction: dispatching tx: {} for network ID {:?}", rlp::encode(&signed_transaction).to_vec().pretty(), network_id);
dispatch_transaction(&*client, &*miner, signed_transaction)
dispatch_transaction(&*client, &*miner, signed_transaction).map(|hash| {
match token {
Some(ref token) => WithToken::Yes(hash, token.clone()),
None => WithToken::No(hash),
}
})
}
pub fn fill_optional_fields<C, M>(request: TransactionRequest, client: &C, miner: &M) -> FilledTransactionRequest

View File

@ -114,7 +114,7 @@ impl<C: 'static, M: 'static> Personal for PersonalClient<C, M> where C: MiningBl
&*miner,
&*accounts,
request,
Some(password)
).map(Into::into)
dispatch::SignWith::Password(password)
).map(|v| v.into_value().into())
}
}

View File

@ -26,9 +26,9 @@ use ethcore::miner::MinerService;
use jsonrpc_core::Error;
use v1::traits::Signer;
use v1::types::{TransactionModification, ConfirmationRequest, ConfirmationResponse, U256, Bytes};
use v1::types::{TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, U256, Bytes};
use v1::helpers::{errors, SignerService, SigningQueue, ConfirmationPayload};
use v1::helpers::dispatch::{self, dispatch_transaction};
use v1::helpers::dispatch::{self, dispatch_transaction, WithToken};
/// Transactions confirmation (personal) rpc implementation.
pub struct SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
@ -60,24 +60,10 @@ impl<C: 'static, M: 'static> SignerClient<C, M> where C: MiningBlockChainClient,
take_weak!(self.client).keep_alive();
Ok(())
}
}
impl<C: 'static, M: 'static> Signer for SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
fn requests_to_confirm(&self) -> Result<Vec<ConfirmationRequest>, Error> {
try!(self.active());
let signer = take_weak!(self.signer);
Ok(signer.requests()
.into_iter()
.map(Into::into)
.collect()
)
}
// TODO [ToDr] TransactionModification is redundant for some calls
// might be better to replace it in future
fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) -> Result<ConfirmationResponse, Error> {
fn confirm_internal<F>(&self, id: U256, modification: TransactionModification, f: F) -> Result<WithToken<ConfirmationResponse>, Error> where
F: FnOnce(&C, &M, &AccountProvider, ConfirmationPayload) -> Result<WithToken<ConfirmationResponse>, Error>,
{
try!(self.active());
let id = id.into();
@ -97,14 +83,48 @@ impl<C: 'static, M: 'static> Signer for SignerClient<C, M> where C: MiningBlockC
request.gas = gas.into();
}
}
let result = f(&*client, &*miner, &*accounts, payload);
// Execute
let result = dispatch::execute(&*client, &*miner, &*accounts, payload, Some(pass));
if let Ok(ref response) = result {
signer.request_confirmed(id, Ok(response.clone()));
signer.request_confirmed(id, Ok((*response).clone()));
}
result
}).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id)))
}
}
impl<C: 'static, M: 'static> Signer for SignerClient<C, M> where C: MiningBlockChainClient, M: MinerService {
fn requests_to_confirm(&self) -> Result<Vec<ConfirmationRequest>, Error> {
try!(self.active());
let signer = take_weak!(self.signer);
Ok(signer.requests()
.into_iter()
.map(Into::into)
.collect()
)
}
// TODO [ToDr] TransactionModification is redundant for some calls
// might be better to replace it in future
fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) -> Result<ConfirmationResponse, Error> {
self.confirm_internal(id, modification, move |client, miner, accounts, payload| {
dispatch::execute(client, miner, accounts, payload, dispatch::SignWith::Password(pass))
}).map(|v| v.into_value())
}
fn confirm_request_with_token(&self, id: U256, modification: TransactionModification, token: String) -> Result<ConfirmationResponseWithToken, Error> {
self.confirm_internal(id, modification, move |client, miner, accounts, payload| {
dispatch::execute(client, miner, accounts, payload, dispatch::SignWith::Token(token))
}).and_then(|v| match v {
WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")),
WithToken::Yes(response, token) => Ok(ConfirmationResponseWithToken {
result: response,
token: token,
}),
})
}
fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result<ConfirmationResponse, Error> {
try!(self.active());

View File

@ -99,7 +99,9 @@ impl<C, M> SigningQueueClient<C, M> where
let sender = payload.sender();
if accounts.is_unlocked(sender) {
return dispatch::execute(&*client, &*miner, &*accounts, payload, None).map(DispatchResult::Value);
return dispatch::execute(&*client, &*miner, &*accounts, payload, dispatch::SignWith::Nothing)
.map(|v| v.into_value())
.map(DispatchResult::Value);
}
take_weak!(self.signer).add_request(payload)

View File

@ -76,7 +76,8 @@ impl<C, M> SigningUnsafeClient<C, M> where
let accounts = take_weak!(self.accounts);
let payload = dispatch::from_rpc(payload, &*client, &*miner);
dispatch::execute(&*client, &*miner, &*accounts, payload, None)
dispatch::execute(&*client, &*miner, &*accounts, payload, dispatch::SignWith::Nothing)
.map(|v| v.into_value())
}
}

View File

@ -209,6 +209,53 @@ fn should_confirm_transaction_and_dispatch() {
assert_eq!(tester.miner.imported_transactions.lock().len(), 1);
}
#[test]
fn should_confirm_transaction_with_token() {
// given
let tester = signer_tester();
let address = tester.accounts.new_account("test").unwrap();
let recipient = Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap();
tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest {
from: address,
to: Some(recipient),
gas_price: U256::from(10_000),
gas: U256::from(10_000_000),
value: U256::from(1),
data: vec![],
nonce: None,
})).unwrap();
let t = Transaction {
nonce: U256::zero(),
gas_price: U256::from(0x1000),
gas: U256::from(10_000_000),
action: Action::Call(recipient),
value: U256::from(0x1),
data: vec![]
};
let (signature, token) = tester.accounts.sign_with_token(address, "test".into(), t.hash(None)).unwrap();
let t = t.with_signature(signature, None);
assert_eq!(tester.signer.requests().len(), 1);
// when
let request = r#"{
"jsonrpc":"2.0",
"method":"signer_confirmRequestWithToken",
"params":["0x1", {"gasPrice":"0x1000"}, ""#.to_owned() + &token + r#""],
"id":1
}"#;
let response = r#"{"jsonrpc":"2.0","result":{"result":""#.to_owned() +
format!("0x{:?}", t.hash()).as_ref() +
r#"","token":""#;
// then
let result = tester.io.handle_request_sync(&request).unwrap();
assert!(result.starts_with(&response), "Should return correct result. Expected: {:?}, Got: {:?}", response, result);
assert_eq!(tester.signer.requests().len(), 0);
assert_eq!(tester.miner.imported_transactions.lock().len(), 1);
}
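For reference, a sketch of the wire exchange the test above drives (token values illustrative; the response carries a token to use on the follow-up request):
// --> {"jsonrpc":"2.0","method":"signer_confirmRequestWithToken",
//      "params":["0x1",{"gasPrice":"0x1000"},"<current-token>"],"id":1}
// <-- {"jsonrpc":"2.0","result":{"result":"<tx-hash>","token":"<next-token>"},"id":1}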
#[test]
fn should_confirm_transaction_with_rlp() {
// given

View File

@ -17,7 +17,7 @@
//! Parity Signer-related rpc interface.
use jsonrpc_core::Error;
use v1::types::{U256, Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse};
use v1::types::{U256, Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken};
build_rpc_trait! {
/// Signer extension for confirmations rpc interface.
@ -31,6 +31,10 @@ build_rpc_trait! {
#[rpc(name = "signer_confirmRequest")]
fn confirm_request(&self, U256, TransactionModification, String) -> Result<ConfirmationResponse, Error>;
/// Confirm specific request with token.
#[rpc(name = "signer_confirmRequestWithToken")]
fn confirm_request_with_token(&self, U256, TransactionModification, String) -> Result<ConfirmationResponseWithToken, Error>;
/// Confirm specific request with already signed data.
#[rpc(name = "signer_confirmRequestRaw")]
fn confirm_request_raw(&self, U256, Bytes) -> Result<ConfirmationResponse, Error>;

View File

@ -141,6 +141,15 @@ impl Serialize for ConfirmationResponse {
}
}
/// Confirmation response with additional token for further requests
#[derive(Debug, Clone, PartialEq, Serialize)]
pub struct ConfirmationResponseWithToken {
/// Actual response
pub result: ConfirmationResponse,
/// New token
pub token: String,
}
/// Confirmation payload, i.e. the thing to be confirmed
#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)]
pub enum ConfirmationPayload {
@ -225,7 +234,7 @@ impl<A, B> Serialize for Either<A, B> where
mod tests {
use std::str::FromStr;
use serde_json;
use v1::types::U256;
use v1::types::{U256, H256};
use v1::helpers;
use super::*;
@ -339,4 +348,20 @@ mod tests {
gas: None,
});
}
#[test]
fn should_serialize_confirmation_response_with_token() {
// given
let response = ConfirmationResponseWithToken {
result: ConfirmationResponse::SendTransaction(H256::default()),
token: "test-token".into(),
};
// when
let res = serde_json::to_string(&response);
let expected = r#"{"result":"0x0000000000000000000000000000000000000000000000000000000000000000","token":"test-token"}"#;
// then
assert_eq!(res.unwrap(), expected.to_owned());
}
}

View File

@ -39,7 +39,10 @@ pub use self::bytes::Bytes;
pub use self::block::{RichBlock, Block, BlockTransactions};
pub use self::block_number::BlockNumber;
pub use self::call_request::CallRequest;
pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, TransactionModification, SignRequest, DecryptRequest, Either};
pub use self::confirmations::{
ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken,
TransactionModification, SignRequest, DecryptRequest, Either
};
pub use self::dapp_id::DappId;
pub use self::filter::{Filter, FilterChanges};
pub use self::hash::{H64, H160, H256, H512, H520, H2048};

View File

@ -35,7 +35,12 @@ use parking_lot::RwLock;
use chain::{ETH_PACKET_COUNT, SNAPSHOT_SYNC_PACKET_COUNT};
use light::net::{LightProtocol, Params as LightParams, Capabilities, Handler as LightHandler, EventContext};
/// Parity sync protocol
pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par";
/// Ethereum sync protocol
pub const ETH_PROTOCOL: ProtocolId = *b"eth";
/// Ethereum light protocol
pub const LES_PROTOCOL: ProtocolId = *b"les";
/// Sync configuration
#[derive(Debug, Clone, Copy)]
@ -64,8 +69,8 @@ impl Default for SyncConfig {
max_download_ahead_blocks: 20000,
download_old_blocks: true,
network_id: 1,
subprotocol_name: *b"eth",
light_subprotocol_name: *b"les",
subprotocol_name: ETH_PROTOCOL,
light_subprotocol_name: LES_PROTOCOL,
fork_block: None,
warp_sync: false,
serve_light: false,
@ -143,7 +148,7 @@ pub struct EthSync {
/// Network service
network: NetworkService,
/// Main (eth/par) protocol handler
sync_handler: Arc<SyncProtocolHandler>,
eth_handler: Arc<SyncProtocolHandler>,
/// Light (les) protocol handler
light_proto: Option<Arc<LightProtocol>>,
/// The main subprotocol name
@ -182,7 +187,7 @@ impl EthSync {
let sync = Arc::new(EthSync {
network: service,
sync_handler: Arc::new(SyncProtocolHandler {
eth_handler: Arc::new(SyncProtocolHandler {
sync: RwLock::new(chain_sync),
chain: params.chain,
snapshot_service: params.snapshot_service,
@ -201,15 +206,15 @@ impl EthSync {
impl SyncProvider for EthSync {
/// Get sync status
fn status(&self) -> SyncStatus {
self.sync_handler.sync.write().status()
self.eth_handler.sync.write().status()
}
/// Get sync peers
fn peers(&self) -> Vec<PeerInfo> {
// TODO: [rob] LES peers/peer info
self.network.with_context_eval(self.subprotocol_name, |context| {
let sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay);
self.sync_handler.sync.write().peers(&sync_io)
let sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay);
self.eth_handler.sync.write().peers(&sync_io)
}).unwrap_or(Vec::new())
}
@ -218,7 +223,7 @@ impl SyncProvider for EthSync {
}
fn transactions_stats(&self) -> BTreeMap<H256, TransactionStats> {
let sync = self.sync_handler.sync.read();
let sync = self.eth_handler.sync.read();
sync.transactions_stats()
.iter()
.map(|(hash, stats)| (*hash, stats.into()))
@ -277,19 +282,21 @@ impl ChainNotify for EthSync {
enacted: Vec<H256>,
retracted: Vec<H256>,
sealed: Vec<H256>,
proposed: Vec<Bytes>,
_duration: u64)
{
use light::net::Announcement;
self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay);
self.sync_handler.sync.write().chain_new_blocks(
let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay);
self.eth_handler.sync.write().chain_new_blocks(
&mut sync_io,
&imported,
&invalid,
&enacted,
&retracted,
&sealed);
&sealed,
&proposed);
});
self.network.with_context(self.light_subprotocol_name, |context| {
@ -297,8 +304,8 @@ impl ChainNotify for EthSync {
Some(lp) => lp,
None => return,
};
let chain_info = self.sync_handler.chain.chain_info();
let chain_info = self.eth_handler.chain.chain_info();
light_proto.make_announcement(context, Announcement {
head_hash: chain_info.best_block_hash,
head_num: chain_info.best_block_number,
@ -318,10 +325,10 @@ impl ChainNotify for EthSync {
Err(err) => warn!("Error starting network: {}", err),
_ => {},
}
self.network.register_protocol(self.sync_handler.clone(), self.subprotocol_name, ETH_PACKET_COUNT, &[62u8, 63u8])
self.network.register_protocol(self.eth_handler.clone(), self.subprotocol_name, ETH_PACKET_COUNT, &[62u8, 63u8])
.unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e));
// register the warp sync subprotocol
self.network.register_protocol(self.sync_handler.clone(), WARP_SYNC_PROTOCOL_ID, SNAPSHOT_SYNC_PACKET_COUNT, &[1u8])
self.network.register_protocol(self.eth_handler.clone(), WARP_SYNC_PROTOCOL_ID, SNAPSHOT_SYNC_PACKET_COUNT, &[1u8, 2u8])
.unwrap_or_else(|e| warn!("Error registering snapshot sync protocol: {:?}", e));
// register the light protocol.
@ -332,12 +339,19 @@ impl ChainNotify for EthSync {
}
fn stop(&self) {
self.sync_handler.snapshot_service.abort_restore();
self.eth_handler.snapshot_service.abort_restore();
self.network.stop().unwrap_or_else(|e| warn!("Error stopping network: {:?}", e));
}
fn broadcast(&self, message: Vec<u8>) {
self.network.with_context(WARP_SYNC_PROTOCOL_ID, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay);
self.eth_handler.sync.write().propagate_consensus_packet(&mut sync_io, message.clone());
});
}
fn transactions_received(&self, hashes: Vec<H256>, peer_id: PeerId) {
let mut sync = self.sync_handler.sync.write();
let mut sync = self.eth_handler.sync.write();
sync.transactions_received(hashes, peer_id);
}
}
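The hunk above also gives `ChainNotify` a `broadcast` hook on `EthSync`: the client hands the sync layer an opaque consensus payload, and the sync layer forwards it to warp-capable peers via `propagate_consensus_packet` inside a `WARP_SYNC_PROTOCOL_ID` network context. A minimal std-only sketch of that delegation, with stand-in types (`DummySync`/`DummyEthSync` are illustrative names, not the real ones):

use std::sync::{Arc, Mutex};

type Bytes = Vec<u8>;

trait ChainNotify {
    fn broadcast(&self, message: Bytes);
}

#[derive(Default)]
struct DummySync {
    // stands in for packets queued to peers
    sent: Mutex<Vec<Bytes>>,
}

impl DummySync {
    fn propagate_consensus_packet(&self, packet: Bytes) {
        self.sent.lock().unwrap().push(packet);
    }
}

struct DummyEthSync {
    sync: Arc<DummySync>,
}

impl ChainNotify for DummyEthSync {
    fn broadcast(&self, message: Bytes) {
        // the real code wraps this call in `network.with_context(WARP_SYNC_PROTOCOL_ID, ..)`
        self.sync.propagate_consensus_packet(message);
    }
}

fn main() {
    let sync = Arc::new(DummySync::default());
    let eth = DummyEthSync { sync: sync.clone() };
    eth.broadcast(vec![1, 2, 3]);
    assert_eq!(sync.sent.lock().unwrap().len(), 1);
}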
@ -399,8 +413,8 @@ impl ManageNetwork for EthSync {
fn stop_network(&self) {
self.network.with_context(self.subprotocol_name, |context| {
let mut sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay);
self.sync_handler.sync.write().abort(&mut sync_io);
let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay);
self.eth_handler.sync.write().abort(&mut sync_io);
});
if let Some(light_proto) = self.light_proto.as_ref() {

View File

@ -113,6 +113,7 @@ type PacketDecodeError = DecoderError;
const PROTOCOL_VERSION_63: u8 = 63;
const PROTOCOL_VERSION_62: u8 = 62;
const PROTOCOL_VERSION_1: u8 = 1;
const PROTOCOL_VERSION_2: u8 = 2;
const MAX_BODIES_TO_SEND: usize = 256;
const MAX_HEADERS_TO_SEND: usize = 512;
const MAX_NODE_DATA_TO_SEND: usize = 1024;
@ -149,8 +150,9 @@ const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11;
const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12;
const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13;
const SNAPSHOT_DATA_PACKET: u8 = 0x14;
const CONSENSUS_DATA_PACKET: u8 = 0x15;
pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x15;
pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x16;
const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3;
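For context on the bumped count: a subprotocol's declared packet count has to cover its highest packet id, so adding `CONSENSUS_DATA_PACKET` at 0x15 moves `SNAPSHOT_SYNC_PACKET_COUNT` from 0x15 to 0x16. A tiny sketch of that invariant (values copied from the constants above; the assertion itself is illustrative, not code from the tree):

const CONSENSUS_DATA_PACKET: u8 = 0x15;
const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x16;

fn main() {
    // the count must cover every defined packet id, including the new consensus packet
    assert_eq!(SNAPSHOT_SYNC_PACKET_COUNT, CONSENSUS_DATA_PACKET + 1);
}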
@ -615,13 +617,15 @@ impl ChainSync {
trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", peer_id, self.network_id, peer.network_id);
return Ok(());
}
if (warp_protocol && peer.protocol_version != PROTOCOL_VERSION_1) || (!warp_protocol && peer.protocol_version != PROTOCOL_VERSION_63 && peer.protocol_version != PROTOCOL_VERSION_62) {
if (warp_protocol && peer.protocol_version != PROTOCOL_VERSION_1 && peer.protocol_version != PROTOCOL_VERSION_2) || (!warp_protocol && peer.protocol_version != PROTOCOL_VERSION_63 && peer.protocol_version != PROTOCOL_VERSION_62) {
io.disable_peer(peer_id);
trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version);
return Ok(());
}
self.peers.insert(peer_id.clone(), peer);
// Don't activate the peer immediately when searching for a common block.
// Let the current sync round complete first.
self.active_peers.insert(peer_id.clone());
debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id));
if let Some((fork_block, _)) = self.fork_block {
@ -1422,8 +1426,9 @@ impl ChainSync {
/// Send Status message
fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), NetworkError> {
let warp_protocol = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer) != 0;
let protocol = if warp_protocol { PROTOCOL_VERSION_1 } else { io.eth_protocol_version(peer) };
let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer);
let warp_protocol = warp_protocol_version != 0;
let protocol = if warp_protocol { warp_protocol_version } else { PROTOCOL_VERSION_63 };
trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol);
let mut packet = RlpStream::new_list(if warp_protocol { 7 } else { 5 });
let chain = io.chain().chain_info();
@ -1672,7 +1677,7 @@ impl ChainSync {
GET_SNAPSHOT_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer,
ChainSync::return_snapshot_data,
|e| format!("Error sending snapshot data: {:?}", e)),
CONSENSUS_DATA_PACKET => ChainSync::on_consensus_packet(io, peer, &rlp),
_ => {
sync.write().on_packet(io, peer, packet_id, data);
Ok(())
@ -1799,44 +1804,51 @@ impl ChainSync {
}
}
/// creates rlp from block bytes and total difficulty
fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes {
let mut rlp_stream = RlpStream::new_list(2);
rlp_stream.append_raw(bytes, 1);
rlp_stream.append(&total_difficulty);
rlp_stream.out()
}
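The refactor here extracts the shared body of `create_latest_block_rlp` and `create_new_block_rlp` into `create_block_rlp`, which emits a two-item RLP list of `[raw block, total difficulty]`. A minimal sketch of that encoding, mirroring the `rlp` calls in the diff but with a plain `u64` difficulty instead of `U256`, and assuming an `rlp` crate version whose `out()` returns `Vec<u8>`:

use rlp::RlpStream;

/// Build the `[raw block bytes, total difficulty]` pair used by NewBlock packets.
fn create_block_rlp(block_bytes: &[u8], total_difficulty: u64) -> Vec<u8> {
    let mut stream = RlpStream::new_list(2);
    // the block is already RLP-encoded, so splice it in as one raw item
    stream.append_raw(block_bytes, 1);
    stream.append(&total_difficulty);
    stream.out()
}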
/// creates latest block rlp for the given client
fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes {
let mut rlp_stream = RlpStream::new_list(2);
rlp_stream.append_raw(&chain.block(BlockId::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"), 1);
rlp_stream.append(&chain.chain_info().total_difficulty);
rlp_stream.out()
ChainSync::create_block_rlp(
&chain.block(BlockId::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"),
chain.chain_info().total_difficulty
)
}
/// creates latest block rlp for the given client
/// creates given hash block rlp for the given client
fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes {
let mut rlp_stream = RlpStream::new_list(2);
rlp_stream.append_raw(&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed"), 1);
rlp_stream.append(&chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed."));
rlp_stream.out()
ChainSync::create_block_rlp(
&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed"),
chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")
)
}
/// returns peer ids that have less blocks than our chain
fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec<PeerId> {
/// returns peer ids that have different blocks than our chain
fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo) -> Vec<PeerId> {
let latest_hash = chain_info.best_block_hash;
self.peers.iter_mut().filter_map(|(&id, ref mut peer_info)|
match io.chain().block_status(BlockId::Hash(peer_info.latest_hash.clone())) {
BlockStatus::InChain => {
if peer_info.latest_hash != latest_hash {
Some(id)
} else {
None
}
},
_ => None
self
.peers
.iter_mut()
.filter_map(|(&id, ref mut peer_info)| {
trace!(target: "sync", "Checking peer our best {} their best {}", latest_hash, peer_info.latest_hash);
if peer_info.latest_hash != latest_hash {
Some(id)
} else {
None
}
})
.collect::<Vec<_>>()
}
fn select_random_lagging_peers(&mut self, peers: &[PeerId]) -> Vec<PeerId> {
use rand::Rng;
fn select_random_peers(peers: &[PeerId]) -> Vec<PeerId> {
// take sqrt(x) peers
let mut peers = peers.to_vec();
let mut count = (self.peers.len() as f64).powf(0.5).round() as usize;
let mut count = (peers.len() as f64).powf(0.5).round() as usize;
count = min(count, MAX_PEERS_PROPAGATION);
count = max(count, MIN_PEERS_PROPAGATION);
::rand::thread_rng().shuffle(&mut peers);
@ -1844,16 +1856,20 @@ impl ChainSync {
peers
}
/// propagates latest block to lagging peers
fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[PeerId]) -> usize {
fn get_consensus_peers(&self) -> Vec<PeerId> {
self.peers.iter().filter_map(|(id, p)| if p.protocol_version == PROTOCOL_VERSION_2 { Some(*id) } else { None }).collect()
}
/// propagates latest block to a set of peers
fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize {
trace!(target: "sync", "Sending NewBlocks to {:?}", peers);
let mut sent = 0;
for peer_id in peers {
if sealed.is_empty() {
if blocks.is_empty() {
let rlp = ChainSync::create_latest_block_rlp(io.chain());
self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
} else {
for h in sealed {
for h in blocks {
let rlp = ChainSync::create_new_block_rlp(io.chain(), h);
self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp);
}
@ -1971,10 +1987,10 @@ impl ChainSync {
fn propagate_latest_blocks(&mut self, io: &mut SyncIo, sealed: &[H256]) {
let chain_info = io.chain().chain_info();
if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION {
let mut peers = self.get_lagging_peers(&chain_info, io);
let mut peers = self.get_lagging_peers(&chain_info);
if sealed.is_empty() {
let hashes = self.propagate_new_hashes(&chain_info, io, &peers);
peers = self.select_random_lagging_peers(&peers);
peers = ChainSync::select_random_peers(&peers);
let blocks = self.propagate_blocks(&chain_info, io, sealed, &peers);
if blocks != 0 || hashes != 0 {
trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes);
@ -1989,6 +2005,21 @@ impl ChainSync {
self.last_sent_block_number = chain_info.best_block_number;
}
/// Distribute valid proposed blocks to a subset of current peers.
fn propagate_proposed_blocks(&mut self, io: &mut SyncIo, proposed: &[Bytes]) {
let peers = self.get_consensus_peers();
trace!(target: "sync", "Sending proposed blocks to {:?}", peers);
for block in proposed {
let rlp = ChainSync::create_block_rlp(
block,
io.chain().chain_info().total_difficulty
);
for peer_id in &peers {
self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp.clone());
}
}
}
/// Maintain other peers. Send out any new blocks and transactions
pub fn maintain_sync(&mut self, io: &mut SyncIo) {
self.maybe_start_snapshot_sync(io);
@ -1996,15 +2027,32 @@ impl ChainSync {
}
/// called when block is imported to chain - propagates the blocks and updates transactions sent to peers
pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256], sealed: &[H256]) {
pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) {
if io.is_chain_queue_empty() {
self.propagate_latest_blocks(io, sealed);
self.propagate_proposed_blocks(io, proposed);
}
if !invalid.is_empty() {
trace!(target: "sync", "Bad blocks in the queue, restarting");
self.restart(io);
}
}
/// Called when a peer sends us a new consensus packet
fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> {
trace!(target: "sync", "Received consensus packet from {:?}", peer_id);
io.chain().queue_consensus_message(r.as_raw().to_vec());
Ok(())
}
/// Broadcast consensus message to peers.
pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) {
let lucky_peers = ChainSync::select_random_peers(&self.get_consensus_peers());
trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers);
for peer_id in lucky_peers {
self.send_packet(io, peer_id, CONSENSUS_DATA_PACKET, packet.clone());
}
}
}
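Taken together, the new consensus plumbing is: pick the peers that negotiated warp version 2, keep roughly sqrt(n) of them (clamped by the propagation limits), send each the raw payload as `CONSENSUS_DATA_PACKET`, and on receipt queue it straight into the client via `queue_consensus_message`. A self-contained sketch of the selection side using plain std types; the peer map, the limit values, and the `take()` in place of the real `rand` shuffle are all simplifications:

use std::collections::HashMap;

const MIN_PEERS_PROPAGATION: usize = 4;
const MAX_PEERS_PROPAGATION: usize = 128;

struct PeerInfo { protocol_version: u8 }

/// Peers that speak the consensus-capable warp/2 protocol.
fn consensus_peers(peers: &HashMap<usize, PeerInfo>) -> Vec<usize> {
    peers.iter()
        .filter_map(|(&id, p)| if p.protocol_version == 2 { Some(id) } else { None })
        .collect()
}

/// Take about sqrt(n) peers, clamped to [MIN, MAX]; the real code shuffles first.
fn select_random_peers(peers: &[usize]) -> Vec<usize> {
    let mut count = (peers.len() as f64).powf(0.5).round() as usize;
    count = count.min(MAX_PEERS_PROPAGATION).max(MIN_PEERS_PROPAGATION);
    peers.iter().cloned().take(count).collect()
}

fn main() {
    let mut peers = HashMap::new();
    for id in 0..10usize {
        peers.insert(id, PeerInfo { protocol_version: if id % 2 == 0 { 2 } else { 63 } });
    }
    let lucky = select_random_peers(&consensus_peers(&peers));
    // 5 eligible peers -> round(sqrt(5)) = 2, clamped up to MIN_PEERS_PROPAGATION = 4
    assert_eq!(lucky.len(), 4);
}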
#[cfg(test)]
@ -2067,9 +2115,9 @@ mod tests {
#[test]
fn return_receipts_empty() {
let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &ss, &mut queue, None);
let io = TestIo::new(&mut client, &ss, &queue, None);
let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0);
@ -2079,10 +2127,10 @@ mod tests {
#[test]
fn return_receipts() {
let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let sync = dummy_sync_with_peer(H256::new(), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let mut receipt_list = RlpStream::new_list(4);
receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
@ -2103,7 +2151,7 @@ mod tests {
io.sender = Some(2usize);
ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_RECEIPTS_PACKET, &receipts_request);
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
}
#[test]
@ -2136,9 +2184,9 @@ mod tests {
let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect();
let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect();
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &ss, &mut queue, None);
let io = TestIo::new(&mut client, &ss, &queue, None);
let unknown: H256 = H256::new();
let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0);
@ -2174,10 +2222,10 @@ mod tests {
#[test]
fn return_nodes() {
let mut client = TestBlockChainClient::new();
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let sync = dummy_sync_with_peer(H256::new(), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let mut node_list = RlpStream::new_list(3);
node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555"));
@ -2200,7 +2248,7 @@ mod tests {
io.sender = Some(2usize);
ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_NODE_DATA_PACKET, &node_request);
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
}
fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync {
@ -2231,15 +2279,12 @@ mod tests {
fn finds_lagging_peers() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let io = TestIo::new(&mut client, &ss, &mut queue, None);
let lagging_peers = sync.get_lagging_peers(&chain_info, &io);
let lagging_peers = sync.get_lagging_peers(&chain_info);
assert_eq!(1, lagging_peers.len())
assert_eq!(1, lagging_peers.len());
}
#[test]
@ -2263,62 +2308,99 @@ mod tests {
fn sends_new_hashes_to_lagging_peer() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io);
let peers = sync.get_lagging_peers(&chain_info);
let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers);
// 1 message should be sent
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
// 1 peer should be updated
assert_eq!(1, peer_count);
// NEW_BLOCK_HASHES_PACKET
assert_eq!(0x01, io.queue[0].packet_id);
assert_eq!(0x01, io.packets[0].packet_id);
}
#[test]
fn sends_latest_block_to_lagging_peer() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peers = sync.get_lagging_peers(&chain_info);
let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers);
// 1 message should be sent
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
// 1 peer should be updated
assert_eq!(1, peer_count);
// NEW_BLOCK_PACKET
assert_eq!(0x07, io.queue[0].packet_id);
assert_eq!(0x07, io.packets[0].packet_id);
}
#[test]
fn sends_sealed_block() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let hash = client.block_hash(BlockId::Number(99)).unwrap();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peers = sync.get_lagging_peers(&chain_info);
let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers);
// 1 message should be sent
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
// 1 peer should be updated
assert_eq!(1, peer_count);
// NEW_BLOCK_PACKET
assert_eq!(0x07, io.queue[0].packet_id);
assert_eq!(0x07, io.packets[0].packet_id);
}
#[test]
fn sends_proposed_block() {
let mut client = TestBlockChainClient::new();
client.add_blocks(2, EachBlockWith::Uncle);
let queue = RwLock::new(VecDeque::new());
let block = client.block(BlockId::Latest).unwrap();
let mut sync = ChainSync::new(SyncConfig::default(), &client);
sync.peers.insert(0,
PeerInfo {
// Messaging protocol
protocol_version: 2,
genesis: H256::zero(),
network_id: 0,
latest_hash: client.block_hash_delta_minus(1),
difficulty: None,
asking: PeerAsking::Nothing,
asking_blocks: Vec::new(),
asking_hash: None,
ask_time: 0,
last_sent_transactions: HashSet::new(),
expired: false,
confirmation: super::ForkConfirmation::Confirmed,
snapshot_number: None,
snapshot_hash: None,
asking_snapshot_data: None,
block_set: None,
});
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &queue, None);
sync.propagate_proposed_blocks(&mut io, &[block]);
// 1 message should be sent
assert_eq!(1, io.packets.len());
// NEW_BLOCK_PACKET
assert_eq!(0x07, io.packets[0].packet_id);
}
#[test]
@ -2327,25 +2409,25 @@ mod tests {
client.add_blocks(100, EachBlockWith::Uncle);
client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peer_count = sync.propagate_new_transactions(&mut io);
// Try to propagate same transactions for the second time
let peer_count2 = sync.propagate_new_transactions(&mut io);
// Even after new block transactions should not be propagated twice
sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]);
sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
// Try to propagate same transactions for the third time
let peer_count3 = sync.propagate_new_transactions(&mut io);
// 1 message should be sent
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
// 1 peer should be updated but only once
assert_eq!(1, peer_count);
assert_eq!(0, peer_count2);
assert_eq!(0, peer_count3);
// TRANSACTIONS_PACKET
assert_eq!(0x02, io.queue[0].packet_id);
assert_eq!(0x02, io.packets[0].packet_id);
}
#[test]
@ -2354,21 +2436,21 @@ mod tests {
client.add_blocks(100, EachBlockWith::Uncle);
client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peer_count = sync.propagate_new_transactions(&mut io);
io.chain.insert_transaction_to_queue();
// New block import should trigger propagation.
sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]);
sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
// 2 messages should be sent
assert_eq!(2, io.queue.len());
assert_eq!(2, io.packets.len());
// 1 peer should receive the message
assert_eq!(1, peer_count);
// TRANSACTIONS_PACKET
assert_eq!(0x02, io.queue[0].packet_id);
assert_eq!(0x02, io.queue[1].packet_id);
assert_eq!(0x02, io.packets[0].packet_id);
assert_eq!(0x02, io.packets[1].packet_id);
}
#[test]
@ -2377,31 +2459,34 @@ mod tests {
client.add_blocks(100, EachBlockWith::Uncle);
client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
// should send some
{
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peer_count = sync.propagate_new_transactions(&mut io);
assert_eq!(1, io.queue.len());
assert_eq!(1, io.packets.len());
assert_eq!(1, peer_count);
}
// Insert some more
client.insert_transaction_to_queue();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
// Propagate new transactions
let peer_count2 = sync.propagate_new_transactions(&mut io);
// And now the peer should have all transactions
let peer_count3 = sync.propagate_new_transactions(&mut io);
let (peer_count2, peer_count3) = {
let mut io = TestIo::new(&mut client, &ss, &queue, None);
// Propagate new transactions
let peer_count2 = sync.propagate_new_transactions(&mut io);
// And now the peer should have all transactions
let peer_count3 = sync.propagate_new_transactions(&mut io);
(peer_count2, peer_count3)
};
// 2 messages should be sent (in total)
assert_eq!(2, io.queue.len());
assert_eq!(2, queue.read().len());
// 1 peer should be updated but only once after inserting new transaction
assert_eq!(1, peer_count2);
assert_eq!(0, peer_count3);
// TRANSACTIONS_PACKET
assert_eq!(0x02, io.queue[0].packet_id);
assert_eq!(0x02, io.queue[1].packet_id);
assert_eq!(0x02, queue.read()[0].packet_id);
assert_eq!(0x02, queue.read()[1].packet_id);
}
#[test]
@ -2410,9 +2495,9 @@ mod tests {
client.add_blocks(100, EachBlockWith::Uncle);
client.insert_transaction_to_queue();
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
sync.propagate_new_transactions(&mut io);
let stats = sync.transactions_stats();
@ -2426,11 +2511,11 @@ mod tests {
let block_data = get_dummy_block(11, client.chain_info().best_block_hash);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
//sync.have_common_block = true;
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let block = UntrustedRlp::new(&block_data);
@ -2446,10 +2531,10 @@ mod tests {
let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let block = UntrustedRlp::new(&block_data);
@ -2462,10 +2547,10 @@ mod tests {
fn handles_peer_new_block_empty() {
let mut client = TestBlockChainClient::new();
client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let empty_data = vec![];
let block = UntrustedRlp::new(&empty_data);
@ -2479,10 +2564,10 @@ mod tests {
fn handles_peer_new_hashes() {
let mut client = TestBlockChainClient::new();
client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let hashes_data = get_dummy_hashes();
let hashes_rlp = UntrustedRlp::new(&hashes_data);
@ -2496,10 +2581,10 @@ mod tests {
fn handles_peer_new_hashes_empty() {
let mut client = TestBlockChainClient::new();
client.add_blocks(10, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let empty_hashes_data = vec![];
let hashes_rlp = UntrustedRlp::new(&empty_hashes_data);
@ -2515,16 +2600,16 @@ mod tests {
fn hashes_rlp_mutually_acceptable() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io);
let peers = sync.get_lagging_peers(&chain_info);
sync.propagate_new_hashes(&chain_info, &mut io, &peers);
let data = &io.queue[0].data.clone();
let data = &io.packets[0].data.clone();
let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(data));
assert!(result.is_ok());
}
@ -2535,16 +2620,16 @@ mod tests {
fn block_rlp_mutually_acceptable() {
let mut client = TestBlockChainClient::new();
client.add_blocks(100, EachBlockWith::Uncle);
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
let chain_info = client.chain_info();
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
let peers = sync.get_lagging_peers(&chain_info, &io);
let peers = sync.get_lagging_peers(&chain_info);
sync.propagate_blocks(&chain_info, &mut io, &[], &peers);
let data = &io.queue[0].data.clone();
let data = &io.packets[0].data.clone();
let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(data));
assert!(result.is_ok());
}
@ -2572,11 +2657,11 @@ mod tests {
// when
{
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks);
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 1);
}
@ -2587,11 +2672,11 @@ mod tests {
client.set_nonce(view.transactions()[0].sender().unwrap(), U256::from(1));
}
{
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&client, &ss, &queue, None);
io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks);
sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
}
// then
@ -2612,15 +2697,15 @@ mod tests {
let good_blocks = vec![client.block_hash_delta_minus(2)];
let retracted_blocks = vec![client.block_hash_delta_minus(1)];
let mut queue = VecDeque::new();
let queue = RwLock::new(VecDeque::new());
let ss = TestSnapshotService::new();
let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
let mut io = TestIo::new(&mut client, &ss, &queue, None);
// when
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 0);
sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
// then
let status = io.chain.miner.status();

View File

@ -101,7 +101,7 @@ fn forked_with_misbehaving_peer() {
::env_logger::init().ok();
let mut net = TestNet::new(3);
// peer 0 is on a totally different chain with higher total difficulty
net.peer_mut(0).chain = TestBlockChainClient::new_with_extra_data(b"fork".to_vec());
net.peer_mut(0).chain = Arc::new(TestBlockChainClient::new_with_extra_data(b"fork".to_vec()));
net.peer(0).chain.add_blocks(50, EachBlockWith::Nothing);
net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing);
net.peer(2).chain.add_blocks(10, EachBlockWith::Nothing);

View File

@ -15,7 +15,9 @@
// along with Parity. If not, see <http://www.gnu.org/licenses/>.
use util::*;
use ethcore::client::BlockChainClient;
use io::{IoHandler, IoContext, IoChannel};
use ethcore::client::{BlockChainClient, Client, MiningBlockChainClient};
use ethcore::service::ClientIoMessage;
use ethcore::spec::Spec;
use ethcore::miner::MinerService;
use ethcore::transaction::*;
@ -24,55 +26,171 @@ use ethkey::KeyPair;
use super::helpers::*;
use SyncConfig;
#[test]
fn test_authority_round() {
::env_logger::init().ok();
struct TestIoHandler {
client: Arc<Client>,
}
let s1 = KeyPair::from_secret("1".sha3()).unwrap();
let s2 = KeyPair::from_secret("0".sha3()).unwrap();
let spec_factory = || {
let spec = Spec::new_test_round();
let account_provider = AccountProvider::transient_provider();
account_provider.insert_account(s1.secret().clone(), "").unwrap();
account_provider.insert_account(s2.secret().clone(), "").unwrap();
spec.engine.register_account_provider(Arc::new(account_provider));
spec
};
let mut net = TestNet::new_with_spec(2, SyncConfig::default(), spec_factory);
let mut net = &mut *net;
// Push transaction to both clients. Only one of them gets lucky to mine a block.
net.peer(0).chain.miner().set_author(s1.address());
net.peer(0).chain.engine().set_signer(s1.address(), "".to_owned());
net.peer(1).chain.miner().set_author(s2.address());
net.peer(1).chain.engine().set_signer(s2.address(), "".to_owned());
let tx1 = Transaction {
nonce: 0.into(),
impl IoHandler<ClientIoMessage> for TestIoHandler {
fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
match *net_message {
ClientIoMessage::UpdateSealing => self.client.update_sealing(),
ClientIoMessage::SubmitSeal(ref hash, ref seal) => self.client.submit_seal(*hash, seal.clone()),
ClientIoMessage::BroadcastMessage(ref message) => self.client.broadcast_consensus_message(message.clone()),
ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) {
panic!("Invalid message received: {}", e);
},
_ => {} // ignore other messages
}
}
}
fn new_tx(secret: &H256, nonce: U256) -> SignedTransaction {
Transaction {
nonce: nonce.into(),
gas_price: 0.into(),
gas: 21000.into(),
action: Action::Call(Address::default()),
value: 0.into(),
data: Vec::new(),
}.sign(s1.secret(), None);
// exchange statuses
net.sync_steps(5);
net.peer(0).chain.miner().import_own_transaction(&net.peer(0).chain, tx1).unwrap();
}.sign(secret, None)
}
#[test]
fn authority_round() {
let s0 = KeyPair::from_secret("1".sha3()).unwrap();
let s1 = KeyPair::from_secret("0".sha3()).unwrap();
let spec_factory = || {
let spec = Spec::new_test_round();
let account_provider = AccountProvider::transient_provider();
account_provider.insert_account(s0.secret().clone(), "").unwrap();
account_provider.insert_account(s1.secret().clone(), "").unwrap();
spec.engine.register_account_provider(Arc::new(account_provider));
spec
};
let mut net = TestNet::with_spec(2, SyncConfig::default(), spec_factory);
let mut net = &mut *net;
let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() });
let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() });
// Push transaction to both clients. Only one of them gets lucky to produce a block.
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
net.peer(0).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
net.peer(1).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
// exchange statuses
net.sync();
// Trigger block proposal
net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap();
// Sync a block
net.sync();
assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1);
assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1);
let tx2 = Transaction {
nonce: 0.into(),
gas_price: 0.into(),
gas: 21000.into(),
action: Action::Call(Address::default()),
value: 0.into(),
data: Vec::new(),
}.sign(s2.secret(), None);
net.peer(1).chain.miner().import_own_transaction(&net.peer(1).chain, tx2).unwrap();
net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap();
// Move to next proposer step
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
net.peer(1).chain.miner().update_sealing(&net.peer(1).chain);
net.sync();
assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2);
assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2);
// Fork the network
net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap();
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
assert_eq!(net.peer(0).chain.chain_info().best_block_number, 3);
net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap();
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
assert_eq!(net.peer(1).chain.chain_info().best_block_number, 3);
// Reorg to the correct one.
net.sync();
let ci0 = net.peer(0).chain.chain_info();
let ci1 = net.peer(1).chain.chain_info();
assert_eq!(ci0.best_block_number, 3);
assert_eq!(ci1.best_block_number, 3);
assert_eq!(ci0.best_block_hash, ci1.best_block_hash);
}
#[test]
fn tendermint() {
let s0 = KeyPair::from_secret("1".sha3()).unwrap();
let s1 = KeyPair::from_secret("0".sha3()).unwrap();
let spec_factory = || {
let spec = Spec::new_test_tendermint();
let account_provider = AccountProvider::transient_provider();
account_provider.insert_account(s0.secret().clone(), "").unwrap();
account_provider.insert_account(s1.secret().clone(), "").unwrap();
spec.engine.register_account_provider(Arc::new(account_provider));
spec
};
let mut net = TestNet::with_spec(2, SyncConfig::default(), spec_factory);
let mut net = &mut *net;
let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() });
let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() });
// Push transaction to both clients. Only one of them issues a proposal.
net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
trace!(target: "poa", "Peer 0 is {}.", s0.address());
net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
trace!(target: "poa", "Peer 1 is {}.", s1.address());
net.peer(0).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
net.peer(1).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
// Exchange statuses
net.sync();
// Propose
net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap();
net.sync();
// Propose timeout, synchronous for now
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
// Prevote, precommit and commit
net.sync();
assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1);
assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1);
net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap();
// Commit timeout
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
// Propose
net.sync();
// Propose timeout
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
// Prevote, precommit and commit
net.sync();
assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2);
assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2);
net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap();
net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap();
// Peers get disconnected.
// Commit
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
// Propose
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap();
net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap();
// Send different prevotes
net.sync();
// Prevote timeout
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
// Precommit and commit
net.sync();
// Propose timeout
net.peer(0).chain.engine().step();
net.peer(1).chain.engine().step();
net.sync();
let ci0 = net.peer(0).chain.chain_info();
let ci1 = net.peer(1).chain.chain_info();
assert_eq!(ci0.best_block_number, 3);
assert_eq!(ci1.best_block_number, 3);
assert_eq!(ci0.best_block_hash, ci1.best_block_hash);
}

View File

@ -45,14 +45,15 @@ impl FlushingBlockChainClient for TestBlockChainClient {}
pub struct TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
pub chain: &'p C,
pub snapshot_service: &'p TestSnapshotService,
pub queue: &'p mut VecDeque<TestPacket>,
pub queue: &'p RwLock<VecDeque<TestPacket>>,
pub sender: Option<PeerId>,
pub to_disconnect: HashSet<PeerId>,
pub packets: Vec<TestPacket>,
overlay: RwLock<HashMap<BlockNumber, Bytes>>,
}
impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
pub fn new(chain: &'p C, ss: &'p TestSnapshotService, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p, C> {
pub fn new(chain: &'p C, ss: &'p TestSnapshotService, queue: &'p RwLock<VecDeque<TestPacket>>, sender: Option<PeerId>) -> TestIo<'p, C> {
TestIo {
chain: chain,
snapshot_service: ss,
@ -60,10 +61,17 @@ impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
sender: sender,
to_disconnect: HashSet::new(),
overlay: RwLock::new(HashMap::new()),
packets: Vec::new(),
}
}
}
impl<'p, C> Drop for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
fn drop(&mut self) {
self.queue.write().extend(self.packets.drain(..));
}
}
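This `Drop` impl is the heart of the test-harness change: each `TestIo` now buffers outgoing packets locally in `packets` and only flushes them into the shared `RwLock<VecDeque<..>>` when it goes out of scope, so several `TestIo`s can share one queue without holding a write lock across a whole dispatch. A small sketch of that buffer-then-flush-on-drop pattern using std's `RwLock` (the tree uses `util`'s lock types, hence no `unwrap()` there); the `Outbox` name and `u32` packets are illustrative:

use std::collections::VecDeque;
use std::sync::RwLock;

struct Outbox<'a> {
    shared: &'a RwLock<VecDeque<u32>>,
    local: Vec<u32>,
}

impl<'a> Outbox<'a> {
    fn new(shared: &'a RwLock<VecDeque<u32>>) -> Self {
        Outbox { shared, local: Vec::new() }
    }
    fn send(&mut self, packet: u32) {
        // no lock taken here; packets pile up locally
        self.local.push(packet);
    }
}

impl<'a> Drop for Outbox<'a> {
    fn drop(&mut self) {
        // flush everything into the shared queue in one short write-lock
        self.shared.write().unwrap().extend(self.local.drain(..));
    }
}

fn main() {
    let queue = RwLock::new(VecDeque::new());
    {
        let mut io = Outbox::new(&queue);
        io.send(1);
        io.send(2);
    } // dropping the Outbox flushes its packets
    assert_eq!(queue.read().unwrap().len(), 2);
}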
impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
fn disable_peer(&mut self, peer_id: PeerId) {
self.disconnect_peer(peer_id);
@ -78,7 +86,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
}
fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
self.packets.push(TestPacket {
data: data,
packet_id: packet_id,
recipient: self.sender.unwrap()
@ -87,7 +95,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
}
fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
self.queue.push_back(TestPacket {
self.packets.push(TestPacket {
data: data,
packet_id: packet_id,
recipient: peer_id,
@ -100,7 +108,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
}
fn chain(&self) -> &BlockChainClient {
self.chain
&*self.chain
}
fn snapshot_service(&self) -> &SnapshotService {
@ -131,7 +139,7 @@ pub struct TestPacket {
}
pub struct TestPeer<C> where C: FlushingBlockChainClient {
pub chain: C,
pub chain: Arc<C>,
pub snapshot_service: Arc<TestSnapshotService>,
pub sync: RwLock<ChainSync>,
pub queue: RwLock<VecDeque<TestPacket>>,
@ -167,7 +175,7 @@ impl TestNet<TestBlockChainClient> {
net.peers.push(Arc::new(TestPeer {
sync: RwLock::new(sync),
snapshot_service: ss,
chain: chain,
chain: Arc::new(chain),
queue: RwLock::new(VecDeque::new()),
}));
}
@ -176,7 +184,7 @@ impl TestNet<TestBlockChainClient> {
}
impl TestNet<EthcoreClient> {
pub fn new_with_spec<F>(n: usize, config: SyncConfig, spec_factory: F) -> GuardedTempResult<TestNet<EthcoreClient>>
pub fn with_spec<F>(n: usize, config: SyncConfig, spec_factory: F) -> GuardedTempResult<TestNet<EthcoreClient>>
where F: Fn() -> Spec
{
let mut net = TestNet {
@ -192,17 +200,17 @@ impl TestNet<EthcoreClient> {
let db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
let spec = spec_factory();
let client = Arc::try_unwrap(EthcoreClient::new(
let client = EthcoreClient::new(
ClientConfig::default(),
&spec,
client_dir.as_path(),
Arc::new(Miner::with_spec(&spec)),
IoChannel::disconnected(),
&db_config
).unwrap()).ok().unwrap();
).unwrap();
let ss = Arc::new(TestSnapshotService::new());
let sync = ChainSync::new(config.clone(), &client);
let sync = ChainSync::new(config.clone(), &*client);
let peer = Arc::new(TestPeer {
sync: RwLock::new(sync),
snapshot_service: ss,
@ -229,33 +237,38 @@ impl<C> TestNet<C> where C: FlushingBlockChainClient {
}
pub fn start(&mut self) {
if self.started {
return;
}
for peer in 0..self.peers.len() {
for client in 0..self.peers.len() {
if peer != client {
let p = &self.peers[peer];
p.sync.write().update_targets(&p.chain);
p.sync.write().on_peer_connected(&mut TestIo::new(&p.chain, &p.snapshot_service, &mut p.queue.write(), Some(client as PeerId)), client as PeerId);
p.sync.write().update_targets(&*p.chain);
p.sync.write().on_peer_connected(&mut TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(client as PeerId)), client as PeerId);
}
}
}
self.started = true;
}
pub fn sync_step(&mut self) {
for peer in 0..self.peers.len() {
self.peers[peer].chain.flush();
let packet = self.peers[peer].queue.write().pop_front();
if let Some(packet) = packet {
let disconnecting = {
let p = &self.peers[packet.recipient];
let mut queue = p.queue.write();
trace!("--- {} -> {} ---", peer, packet.recipient);
let to_disconnect = {
let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(peer as PeerId));
let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(peer as PeerId));
ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
io.to_disconnect
p.chain.flush();
io.to_disconnect.clone()
};
for d in &to_disconnect {
// notify this peer that disconnecting peers are disconnecting
let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(*d));
let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(*d));
p.sync.write().on_peer_aborting(&mut io, *d);
self.disconnect_events.push((peer, *d));
}
@ -264,8 +277,7 @@ impl<C> TestNet<C> where C: FlushingBlockChainClient {
for d in &disconnecting {
// notify other peers that this peer is disconnecting
let p = &self.peers[*d];
let mut queue = p.queue.write();
let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(peer as PeerId));
let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(peer as PeerId));
p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
}
}
@ -277,15 +289,14 @@ impl<C> TestNet<C> where C: FlushingBlockChainClient {
pub fn sync_step_peer(&mut self, peer_num: usize) {
let peer = self.peer(peer_num);
peer.chain.flush();
let mut queue = peer.queue.write();
peer.sync.write().maintain_peers(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
peer.sync.write().maintain_sync(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
peer.sync.write().propagate_new_transactions(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
peer.sync.write().maintain_peers(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
peer.sync.write().maintain_sync(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
peer.sync.write().propagate_new_transactions(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
}
pub fn restart_peer(&mut self, i: usize) {
let peer = self.peer(i);
peer.sync.write().restart(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut peer.queue.write(), None));
peer.sync.write().restart(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
}
pub fn sync(&mut self) -> u32 {
@ -299,10 +310,7 @@ impl<C> TestNet<C> where C: FlushingBlockChainClient {
}
pub fn sync_steps(&mut self, count: usize) {
if !self.started {
self.start();
self.started = true;
}
self.start();
for _ in 0..count {
self.sync_step();
}
@ -314,8 +322,7 @@ impl<C> TestNet<C> where C: FlushingBlockChainClient {
pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
let peer = self.peer(peer_id);
let mut queue = peer.queue.write();
peer.sync.write().chain_new_blocks(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None), &[], &[], &[], &[], &[]);
peer.sync.write().chain_new_blocks(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None), &[], &[], &[], &[], &[], &[]);
}
}
@ -326,21 +333,26 @@ impl ChainNotify for TestPeer<EthcoreClient> {
enacted: Vec<H256>,
retracted: Vec<H256>,
sealed: Vec<H256>,
proposed: Vec<Bytes>,
_duration: u64)
{
let mut queue = self.queue.write();
let mut io = TestIo::new(&self.chain, &self.snapshot_service, &mut queue, None);
let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None);
self.sync.write().chain_new_blocks(
&mut io,
&imported,
&invalid,
&enacted,
&retracted,
&sealed);
&sealed,
&proposed);
}
fn start(&self) {}
fn stop(&self) {}
}
fn broadcast(&self, message: Vec<u8>) {
let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None);
self.sync.write().propagate_consensus_packet(&mut io, message.clone());
}
}

View File

@ -329,11 +329,18 @@ impl<Message> Handler for IoManager<Message> where Message: Send + Clone + Sync
}
}
#[derive(Clone)]
enum Handlers<Message> where Message: Send + Clone {
SharedCollection(Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>),
Single(Weak<IoHandler<Message>>),
}
/// Allows sending messages into the event loop. All the IO handlers will get the message
/// in the `message` callback.
pub struct IoChannel<Message> where Message: Send + Clone{
channel: Option<Sender<IoMessage<Message>>>,
handlers: Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>,
handlers: Handlers<Message>,
}
impl<Message> Clone for IoChannel<Message> where Message: Send + Clone + Sync + 'static {
@ -348,19 +355,29 @@ impl<Message> Clone for IoChannel<Message> where Message: Send + Clone + Sync +
impl<Message> IoChannel<Message> where Message: Send + Clone + Sync + 'static {
/// Send a message through the channel
pub fn send(&self, message: Message) -> Result<(), IoError> {
if let Some(ref channel) = self.channel {
try!(channel.send(IoMessage::UserMessage(message)));
match self.channel {
Some(ref channel) => try!(channel.send(IoMessage::UserMessage(message))),
None => try!(self.send_sync(message))
}
Ok(())
}
/// Send a message through the channel and handle it synchronously
pub fn send_sync(&self, message: Message) -> Result<(), IoError> {
if let Some(handlers) = self.handlers.upgrade() {
for id in 0 .. MAX_HANDLERS {
if let Some(h) = handlers.read().get(id) {
let handler = h.clone();
handler.message(&IoContext::new(self.clone(), id), &message);
match self.handlers {
Handlers::SharedCollection(ref handlers) => {
if let Some(handlers) = handlers.upgrade() {
for id in 0 .. MAX_HANDLERS {
if let Some(h) = handlers.read().get(id) {
let handler = h.clone();
handler.message(&IoContext::new(self.clone(), id), &message);
}
}
}
},
Handlers::Single(ref handler) => {
if let Some(handler) = handler.upgrade() {
handler.message(&IoContext::new(self.clone(), 0), &message);
}
}
}
@ -378,14 +395,21 @@ impl<Message> IoChannel<Message> where Message: Send + Clone + Sync + 'static {
pub fn disconnected() -> IoChannel<Message> {
IoChannel {
channel: None,
handlers: Weak::default(),
handlers: Handlers::SharedCollection(Weak::default()),
}
}
/// Create a new synchronous channel to a given handler.
pub fn to_handler(handler: Weak<IoHandler<Message>>) -> IoChannel<Message> {
IoChannel {
channel: None,
handlers: Handlers::Single(handler),
}
}
fn new(channel: Sender<IoMessage<Message>>, handlers: Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>) -> IoChannel<Message> {
IoChannel {
channel: Some(channel),
handlers: handlers,
handlers: Handlers::SharedCollection(handlers),
}
}
}
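The `Handlers` enum lets an `IoChannel` point either at the event loop's whole handler collection or, through the new `to_handler` constructor, at a single handler invoked synchronously; the PoA sync tests above use the latter to drive engine messages without a running event loop. A condensed std-only sketch of that dispatch shape (the `Handler` trait, the string message type, and the struct names are stand-ins, and the `IoContext` plumbing is omitted):

use std::sync::{Arc, RwLock, Weak};

trait Handler: Send + Sync {
    fn message(&self, msg: &str);
}

enum Handlers {
    SharedCollection(Weak<RwLock<Vec<Arc<dyn Handler>>>>),
    Single(Weak<dyn Handler>),
}

struct Channel { handlers: Handlers }

impl Channel {
    /// Synchronous channel straight to one handler (mirrors `IoChannel::to_handler`).
    fn to_handler(handler: Weak<dyn Handler>) -> Channel {
        Channel { handlers: Handlers::Single(handler) }
    }

    fn send_sync(&self, msg: &str) {
        match self.handlers {
            Handlers::SharedCollection(ref handlers) => {
                // fan the message out to every registered handler, if the loop still exists
                if let Some(handlers) = handlers.upgrade() {
                    for h in handlers.read().unwrap().iter() {
                        h.message(msg);
                    }
                }
            },
            Handlers::Single(ref handler) => {
                // deliver directly and synchronously to the one wired-up handler
                if let Some(h) = handler.upgrade() {
                    h.message(msg);
                }
            }
        }
    }
}

struct Printer;
impl Handler for Printer {
    fn message(&self, msg: &str) { println!("got: {}", msg); }
}

fn main() {
    let h: Arc<dyn Handler> = Arc::new(Printer);
    // single-handler channel, as used by the PoA sync tests
    let single = Channel::to_handler(Arc::downgrade(&h));
    single.send_sync("hello");

    // collection-backed channel, as used by the normal event loop
    let all: Arc<RwLock<Vec<Arc<dyn Handler>>>> = Arc::new(RwLock::new(vec![h.clone()]));
    let shared = Channel { handlers: Handlers::SharedCollection(Arc::downgrade(&all)) };
    shared.send_sync("world");
}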