diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 84d10e1bc..28604d0cd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -231,10 +231,10 @@ linux-armv6:
   stage: build
   image: ethcore/rust-armv6:latest
   only:
-#    - beta
+    - beta
 #    - tags
 #    - stable
-    - triggers
+#    - triggers
   script:
     - export CC=arm-linux-gnueabi-gcc
     - export CXX=arm-linux-gnueabi-g++
@@ -312,8 +312,8 @@ darwin:
     - stable
     - triggers
   script:
-    - cargo build -j 8 --release -p ethstore #$CARGOFLAGS
     - cargo build -j 8 --release #$CARGOFLAGS
+    - cargo build -j 8 --release -p ethstore #$CARGOFLAGS
     - rm -rf parity.md5
     - md5sum target/release/parity > parity.md5
     - packagesbuild -v mac/Parity.pkgproj
@@ -350,7 +350,7 @@ windows:
     - set RUST_BACKTRACE=1
     - set RUSTFLAGS=%RUSTFLAGS%
     - rustup default stable-x86_64-pc-windows-msvc
-    - cargo build -j 8 --release #%CARGOFLAGS%
+    - cargo build --release #%CARGOFLAGS%
     - curl -sL --url "https://github.com/ethcore/win-build/raw/master/SimpleFC.dll" -o nsis\SimpleFC.dll
     - curl -sL --url "https://github.com/ethcore/win-build/raw/master/vc_redist.x64.exe" -o nsis\vc_redist.x64.exe
     - signtool sign /f %keyfile% /p %certpass% target\release\parity.exe
@@ -408,7 +408,7 @@ test-darwin:
 test-windows:
   stage: test
   only:
-    - triggers
+#    - triggers
   before_script:
     - git submodule update --init --recursive
   script:
diff --git a/Cargo.lock b/Cargo.lock
index d6b90f602..d22edd0d7 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3,6 +3,7 @@ name = "parity"
 version = "1.5.0"
 dependencies = [
  "ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)",
  "ctrlc 1.1.1 (git+https://github.com/ethcore/rust-ctrlc.git)",
  "daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -32,9 +33,11 @@ dependencies = [
  "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
  "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
  "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "parity-rpc-client 1.4.0",
  "regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)",
  "rlp 0.1.0",
  "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rpc-cli 1.4.0",
  "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)",
  "rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)",
  "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -42,7 +45,7 @@ dependencies = [
  "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)",
  "toml 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
 ]

 [[package]]
@@ -58,6 +61,17 @@ name = "ansi_term"
 version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"

+[[package]]
+name = "app_dirs"
+version = "1.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
 [[package]]
 name = "arrayvec"
version = "0.3.16" @@ -196,7 +210,7 @@ source = "git+https://github.com/ethcore/rust-ctrlc.git#f4927770f89eca80ec250911 dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -277,7 +291,7 @@ name = "ethash" version = "1.5.0" dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "sha3 0.1.0", ] @@ -395,7 +409,7 @@ dependencies = [ "crossbeam 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.1 (git+https://github.com/ethcore/mio)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -501,7 +515,7 @@ dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "mio 0.6.1 (git+https://github.com/ethcore/mio)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", @@ -531,6 +545,7 @@ dependencies = [ "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc.git)", "jsonrpc-ipc-server 0.2.4 (git+https://github.com/ethcore/jsonrpc.git)", + "jsonrpc-macros 0.1.0 (git+https://github.com/ethcore/jsonrpc.git)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -597,7 +612,7 @@ dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "lru-cache 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.1.68 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", @@ -659,7 +674,7 @@ dependencies = [ "itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)", "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", @@ -688,7 
+703,7 @@ dependencies = [ "ethkey 0.2.0", "heapsize 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "semver 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -729,6 +744,14 @@ dependencies = [ "miniz-sys 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "futures" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "gcc" version = "0.3.35" @@ -845,7 +868,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -860,8 +883,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" -version = "4.0.0" -source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", @@ -870,10 +893,22 @@ dependencies = [ "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-core" +version = "4.0.0" +source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a" +dependencies = [ + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_codegen 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jsonrpc-http-server" version = "6.1.1" -source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53" +source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/ethcore/hyper)", "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)", @@ -884,7 +919,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "0.2.4" -source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53" +source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a" dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -896,10 +931,19 @@ dependencies = [ "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-macros" +version = "0.1.0" +source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a" 
+dependencies = [ + "jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)", + "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jsonrpc-tcp-server" version = "0.1.0" -source = "git+https://github.com/ethcore/jsonrpc.git#1500da1b9613a0a17fc0109d825f3ccc60199a53" +source = "git+https://github.com/ethcore/jsonrpc.git#33262d626a294a00c20435dec331058ba65e224a" dependencies = [ "bytes 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -916,7 +960,7 @@ name = "kernel32-sys" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1017,7 +1061,7 @@ dependencies = [ "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1033,7 +1077,7 @@ dependencies = [ "nix 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1048,7 +1092,7 @@ dependencies = [ "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1064,7 +1108,7 @@ dependencies = [ "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", "nix 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1074,7 +1118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1085,7 +1129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1114,7 +1158,7 @@ dependencies = [ "cfg-if 0.1.0 
(registry+https://github.com/rust-lang/crates.io-index)", "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1253,6 +1297,15 @@ name = "odds" version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "ole32-sys" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "owning_ref" version = "0.2.2" @@ -1272,6 +1325,26 @@ dependencies = [ "syntex_syntax 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-rpc-client" +version = "1.4.0" +dependencies = [ + "ethcore-rpc 1.5.0", + "ethcore-signer 1.5.0", + "ethcore-util 1.5.0", + "futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)", + "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.8.4 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)", + "tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)", +] + [[package]] name = "parity-ui" version = "1.5.0" @@ -1291,7 +1364,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/ethcore/js-precompiled.git#eb9d978ed5ad1c514b37e89c716f80b3c8d613b5" +source = "git+https://github.com/ethcore/js-precompiled.git#2cdda91549dfeebd94775b348a443f8ee5446e9f" dependencies = [ "parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1304,12 +1377,12 @@ dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] name = "parking_lot" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1325,7 +1398,7 @@ dependencies = [ "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "smallvec 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1548,7 +1621,30 @@ 
dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rpassword" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", + "termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "rpc-cli" +version = "1.4.0" +dependencies = [ + "ethcore-bigint 0.1.2", + "ethcore-rpc 1.5.0", + "ethcore-util 1.5.0", + "futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-rpc-client 1.4.0", + "rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1652,6 +1748,15 @@ dependencies = [ "gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "shell32-sys" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "siphasher" version = "0.1.1" @@ -1786,13 +1891,21 @@ name = "target_info" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "tempdir" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "term" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1801,7 +1914,7 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1836,7 +1949,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "libc 0.2.16 (registry+https://github.com/rust-lang/crates.io-index)", - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1932,7 +2045,7 @@ name = "vecio" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1963,7 +2076,7 @@ dependencies = [ [[package]] name = "winapi" -version = 
"0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -1991,10 +2104,15 @@ name = "ws2_32-sys" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)", + "winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)", "winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "xdg" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "xml-rs" version = "0.3.4" @@ -2025,6 +2143,7 @@ dependencies = [ [metadata] "checksum aho-corasick 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "67077478f0a03952bed2e6786338d400d40c25e9836e08ad50af96607317fd03" "checksum ansi_term 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)" = "1f46cd5b1d660c938e3f92dfe7a73d832b3281479363dd0cd9c1c2fbf60f7962" +"checksum app_dirs 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b7d1c0d48a81bbb13043847f957971f4d87c81542d80ece5e84ba3cba4058fd4" "checksum arrayvec 0.3.16 (registry+https://github.com/rust-lang/crates.io-index)" = "16e3bdb2f54b3ace0285975d59a97cf8ed3855294b2b6bc651fcf22a9c352975" "checksum aster 0.17.0 (registry+https://github.com/rust-lang/crates.io-index)" = "07d344974f0a155f091948aa389fb1b912d3a58414fbdb9c8d446d193ee3496a" "checksum aster 0.25.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4df293303e8a52e1df7984ac1415e195f5fcbf51e4bb7bda54557861a3954a08" @@ -2055,6 +2174,7 @@ dependencies = [ "checksum ethabi 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f7b0c53453517f620847be51943db329276ae52f2e210cfc659e81182864be2f" "checksum fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "b1ee15a7050e5580b3712877157068ea713b245b080ff302ae2ca973cfcd9baa" "checksum flate2 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "3eeb481e957304178d2e782f2da1257f1434dfecbae883bafb61ada2a9fea3bb" +"checksum futures 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)" = "0bad0a2ac64b227fdc10c254051ae5af542cf19c9328704fd4092f7914196897" "checksum gcc 0.3.35 (registry+https://github.com/rust-lang/crates.io-index)" = "91ecd03771effb0c968fd6950b37e89476a578aaf1c70297d8e92b6516ec3312" "checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb" "checksum hamming 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "65043da274378d68241eb9a8f8f8aa54e349136f7b8e12f63e3ef44043cc30e1" @@ -2068,9 +2188,11 @@ dependencies = [ "checksum isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7408a548dc0e406b7912d9f84c261cc533c1866e047644a811c133c56041ac0c" "checksum itertools 0.4.13 (registry+https://github.com/rust-lang/crates.io-index)" = "086e1fa5fe48840b1cfdef3a20c7e3115599f8d5c4c87ef32a794a7cdd184d76" "checksum itoa 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ae3088ea4baeceb0284ee9eea42f591226e6beaecf65373e41b38d95a1b8e7a1" +"checksum jsonrpc-core 3.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3c5094610b07f28f3edaf3947b732dadb31dbba4941d4d0c1c7a8350208f4414" "checksum jsonrpc-core 4.0.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-http-server 6.1.1 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-ipc-server 0.2.4 
(git+https://github.com/ethcore/jsonrpc.git)" = "" +"checksum jsonrpc-macros 0.1.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum jsonrpc-tcp-server 0.1.0 (git+https://github.com/ethcore/jsonrpc.git)" = "" "checksum kernel32-sys 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "7507624b29483431c0ba2d82aece8ca6cdba9382bff4ddd0f7490560c056098d" "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" @@ -2110,11 +2232,12 @@ dependencies = [ "checksum num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "51fedae97a05f7353612fe017ab705a37e6db8f4d67c5c6fe739a9e70d6eed09" "checksum number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)" = "084d05f4bf60621a9ac9bde941a410df548f4de9545f06e5ee9d3aef4b97cd77" "checksum odds 0.2.12 (registry+https://github.com/rust-lang/crates.io-index)" = "b28c06e81b0f789122d415d6394b5fe849bde8067469f4c2980d3cdc10c78ec1" +"checksum ole32-sys 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5d2c49021782e5233cd243168edfa8037574afed4eba4bbaf538b3d8d1789d8c" "checksum owning_ref 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "8d91377085359426407a287ab16884a0111ba473aa6844ff01d4ec20ce3d75e7" "checksum parity-dapps-glue 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98378dec0a185da2b7180308752f0bad73aaa949c3e0a3b0528d0e067945f7ab" "checksum parity-ui-precompiled 1.4.0 (git+https://github.com/ethcore/js-precompiled.git)" = "" "checksum parking_lot 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "968f685642555d2f7e202c48b8b11de80569e9bfea817f7f12d7c61aac62d4e6" -"checksum parking_lot 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "dbc5847584161f273e69edc63c1a86254a22f570a0b5dd87aa6f9773f6f7d125" +"checksum parking_lot 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "e1435e7a2a00dfebededd6c6bdbd54008001e94b4a2aadd6aef0dc4c56317621" "checksum parking_lot_core 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fb1b97670a2ffadce7c397fb80a3d687c4f3060140b885621ef1653d0e5d5068" "checksum phf 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "447d9d45f2e0b4a9b532e808365abf18fc211be6ca217202fcd45236ef12f026" "checksum phf_codegen 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "8af7ae7c3f75a502292b491e5cc0a1f69e3407744abe6e57e2a3b712bb82f01d" @@ -2141,6 +2264,7 @@ dependencies = [ "checksum rocksdb-sys 0.3.0 (git+https://github.com/ethcore/rust-rocksdb)" = "" "checksum rotor 0.6.3 (git+https://github.com/ethcore/rotor)" = "" "checksum rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "5d3a99497c5c544e629cc8b359ae5ede321eba5fa8e5a8078f3ced727a976c3f" +"checksum rpassword 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab6e42be826e215f30ff830904f8f4a0933c6e2ae890e1af8b408f5bae60081e" "checksum rust-crypto 0.2.36 (registry+https://github.com/rust-lang/crates.io-index)" = "f76d05d3993fd5f4af9434e8e436db163a12a9d40e1a58a726f27a01dfd12a2a" "checksum rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)" = "6159e4e6e559c81bd706afe9c8fd68f547d3e851ce12e76b1de7914bab61691b" "checksum rustc_version 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "c5f5376ea5e30ce23c03eb77cbe4962b988deead10910c372b226388b594c084" @@ -2152,6 +2276,7 @@ dependencies = [ "checksum serde_codegen_internals 0.7.0 
(registry+https://github.com/rust-lang/crates.io-index)" = "f877e2781ed0a323295d1c9f0e26556117b5a11489fc47b1848dfb98b3173d21" "checksum serde_json 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e10f8a9d94b06cf5d3bef66475f04c8ff90950f1be7004c357ff9472ccbaebc" "checksum sha1 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "cc30b1e1e8c40c121ca33b86c23308a090d19974ef001b4bf6e61fd1a0fb095c" +"checksum shell32-sys 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "72f20b8f3c060374edb8046591ba28f62448c369ccbdc7b02075103fb3a9e38d" "checksum siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "5c44e42fa187b5a8782489cf7740cc27c3125806be2bf33563cf5e02e9533fcd" "checksum slab 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)" = "d807fd58c4181bbabed77cb3b891ba9748241a552bcc5be698faaebefc54f46e" "checksum slab 0.2.0 (git+https://github.com/carllerche/slab?rev=5476efcafb)" = "" @@ -2170,6 +2295,7 @@ dependencies = [ "checksum syntex_syntax 0.33.0 (registry+https://github.com/rust-lang/crates.io-index)" = "44bded3cabafc65c90b663b1071bd2d198a9ab7515e6ce729e4570aaf53c407e" "checksum syntex_syntax 0.42.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7628a0506e8f9666fdabb5f265d0059b059edac9a3f810bda077abb5d826bd8d" "checksum target_info 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" +"checksum tempdir 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "87974a6f5c1dfb344d733055601650059a3363de2a6104819293baff662132d6" "checksum term 0.2.14 (registry+https://github.com/rust-lang/crates.io-index)" = "f2077e54d38055cf1ca0fd7933a2e00cd3ec8f6fed352b2a377f06dcdaaf3281" "checksum term 0.4.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3deff8a2b3b6607d6d7cc32ac25c0b33709453ca9cceac006caac51e963cf94a" "checksum termios 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "d5d9cf598a6d7ce700a4e6a9199da127e6819a61e64b68609683cc9a01b5683a" @@ -2193,10 +2319,11 @@ dependencies = [ "checksum vergen 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "56b639f935488eb40f06d17c3e3bcc3054f6f75d264e187b1107c8d1cba8d31c" "checksum void 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" "checksum webpki 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "813503a5985585e0812d430cd1328ee322f47f66629c8ed4ecab939cf9e92f91" -"checksum winapi 0.2.6 (registry+https://github.com/rust-lang/crates.io-index)" = "4dfaaa8fbdaa618fa6914b59b2769d690dd7521920a18d84b42d254678dd5fd4" +"checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a" "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc" "checksum ws 0.5.3 (git+https://github.com/ethcore/ws-rs.git?branch=mio-upstream-stable)" = "" "checksum ws2_32-sys 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d59cefebd0c892fa2dd6de581e937301d8552cb44489cdff035c6187cb63fa5e" +"checksum xdg 2.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77b831a5ba77110f438f0ac5583aafeb087f70432998ba6b7dcb1d32185db453" "checksum xml-rs 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "65e74b96bd3179209dc70a980da6df843dff09e46eee103a0376c0949257e3ef" 
"checksum xmltree 0.3.2 (registry+https://github.com/rust-lang/crates.io-index)" = "472a9d37c7c53ab2391161df5b89b1f3bf76dab6ab150d7941ecbdd832282082" "checksum zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)" = "3ceb33a75b3d0608942302eed325b59d2c3ed777cc6c01627ae14e5697c6a31c" diff --git a/Cargo.toml b/Cargo.toml index bce959f61..2d16e0bd4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ isatty = "0.1" toml = "0.2" serde = "0.8.0" serde_json = "0.8.0" +app_dirs = "1.1.1" hyper = { version = "0.9", default-features = false } ctrlc = { git = "https://github.com/ethcore/rust-ctrlc.git" } fdlimit = "0.1" @@ -47,6 +48,8 @@ rlp = { path = "util/rlp" } ethcore-stratum = { path = "stratum" } ethcore-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} +rpc-cli = { path = "rpc_cli" } +parity-rpc-client = { path = "rpc_client" } ethcore-light = { path = "ethcore/light" } [target.'cfg(windows)'.dependencies] diff --git a/dapps/src/apps/fetcher.rs b/dapps/src/apps/fetcher.rs index 2430af035..006858e73 100644 --- a/dapps/src/apps/fetcher.rs +++ b/dapps/src/apps/fetcher.rs @@ -120,7 +120,7 @@ impl ContentFetcher { // Content is already being fetched Some(&mut ContentStatus::Fetching(ref fetch_control)) => { trace!(target: "dapps", "Content fetching in progress. Waiting..."); - (None, fetch_control.to_handler(control)) + (None, fetch_control.to_async_handler(path, control)) }, // We need to start fetching the content None => { @@ -129,11 +129,12 @@ impl ContentFetcher { let content = self.resolver.resolve(content_hex); let cache = self.cache.clone(); - let on_done = move |id: String, result: Option| { + let id = content_id.clone(); + let on_done = move |result: Option| { let mut cache = cache.lock(); match result { Some(endpoint) => { - cache.insert(id, ContentStatus::Ready(endpoint)); + cache.insert(id.clone(), ContentStatus::Ready(endpoint)); }, // In case of error None => { @@ -150,6 +151,7 @@ impl ContentFetcher { Some(URLHintResult::Dapp(dapp)) => { let (handler, fetch_control) = ContentFetcherHandler::new( dapp.url(), + path, control, DappInstaller { id: content_id.clone(), @@ -165,6 +167,7 @@ impl ContentFetcher { Some(URLHintResult::Content(content)) => { let (handler, fetch_control) = ContentFetcherHandler::new( content.url, + path, control, ContentInstaller { id: content_id.clone(), @@ -248,43 +251,45 @@ struct ContentInstaller { id: String, mime: String, content_path: PathBuf, - on_done: Box) + Send>, + on_done: Box) + Send>, } impl ContentValidator for ContentInstaller { type Error = ValidationError; - fn validate_and_install(&self, path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { - // Create dir - try!(fs::create_dir_all(&self.content_path)); + fn validate_and_install(&self, path: PathBuf) -> Result { + let validate = || { + // Create dir + try!(fs::create_dir_all(&self.content_path)); - // Validate hash - let mut file_reader = io::BufReader::new(try!(fs::File::open(&path))); - let hash = try!(sha3(&mut file_reader)); - let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); - if id != hash { - return Err(ValidationError::HashMismatch { - expected: id, - got: hash, - }); - } + // Validate hash + let mut file_reader = io::BufReader::new(try!(fs::File::open(&path))); + let hash = try!(sha3(&mut file_reader)); + let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); + if id != hash { + return Err(ValidationError::HashMismatch { + expected: 
id, + got: hash, + }); + } - // And prepare path for a file - let filename = path.file_name().expect("We always fetch a file."); - let mut content_path = self.content_path.clone(); - content_path.push(&filename); + // And prepare path for a file + let filename = path.file_name().expect("We always fetch a file."); + let mut content_path = self.content_path.clone(); + content_path.push(&filename); - if content_path.exists() { - try!(fs::remove_dir_all(&content_path)) - } + if content_path.exists() { + try!(fs::remove_dir_all(&content_path)) + } - try!(fs::copy(&path, &content_path)); + try!(fs::copy(&path, &content_path)); + Ok(LocalPageEndpoint::single_file(content_path, self.mime.clone(), PageCache::Enabled)) + }; - Ok((self.id.clone(), LocalPageEndpoint::single_file(content_path, self.mime.clone(), PageCache::Enabled))) - } - - fn done(&self, endpoint: Option) { - (self.on_done)(self.id.clone(), endpoint) + // Make sure to always call on_done (even in case of errors)! + let result = validate(); + (self.on_done)(result.as_ref().ok().cloned()); + result } } @@ -292,7 +297,7 @@ impl ContentValidator for ContentInstaller { struct DappInstaller { id: String, dapps_path: PathBuf, - on_done: Box) + Send>, + on_done: Box) + Send>, embeddable_on: Option<(String, u16)>, } @@ -331,69 +336,68 @@ impl DappInstaller { impl ContentValidator for DappInstaller { type Error = ValidationError; - fn validate_and_install(&self, app_path: PathBuf) -> Result<(String, LocalPageEndpoint), ValidationError> { - trace!(target: "dapps", "Opening dapp bundle at {:?}", app_path); - let mut file_reader = io::BufReader::new(try!(fs::File::open(app_path))); - let hash = try!(sha3(&mut file_reader)); - let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); - if id != hash { - return Err(ValidationError::HashMismatch { - expected: id, - got: hash, - }); - } - let file = file_reader.into_inner(); - // Unpack archive - let mut zip = try!(zip::ZipArchive::new(file)); - // First find manifest file - let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip)); - // Overwrite id to match hash - manifest.id = self.id.clone(); + fn validate_and_install(&self, path: PathBuf) -> Result { + trace!(target: "dapps", "Opening dapp bundle at {:?}", path); + let validate = || { + let mut file_reader = io::BufReader::new(try!(fs::File::open(path))); + let hash = try!(sha3(&mut file_reader)); + let id = try!(self.id.as_str().parse().map_err(|_| ValidationError::InvalidContentId)); + if id != hash { + return Err(ValidationError::HashMismatch { + expected: id, + got: hash, + }); + } + let file = file_reader.into_inner(); + // Unpack archive + let mut zip = try!(zip::ZipArchive::new(file)); + // First find manifest file + let (mut manifest, manifest_dir) = try!(Self::find_manifest(&mut zip)); + // Overwrite id to match hash + manifest.id = self.id.clone(); - let target = self.dapp_target_path(&manifest); + let target = self.dapp_target_path(&manifest); - // Remove old directory - if target.exists() { - warn!(target: "dapps", "Overwriting existing dapp: {}", manifest.id); - try!(fs::remove_dir_all(target.clone())); - } + // Remove old directory + if target.exists() { + warn!(target: "dapps", "Overwriting existing dapp: {}", manifest.id); + try!(fs::remove_dir_all(target.clone())); + } - // Unpack zip - for i in 0..zip.len() { - let mut file = try!(zip.by_index(i)); - // TODO [todr] Check if it's consistent on windows. 
- let is_dir = file.name().chars().rev().next() == Some('/'); + // Unpack zip + for i in 0..zip.len() { + let mut file = try!(zip.by_index(i)); + // TODO [todr] Check if it's consistent on windows. + let is_dir = file.name().chars().rev().next() == Some('/'); - let file_path = PathBuf::from(file.name()); - let location_in_manifest_base = file_path.strip_prefix(&manifest_dir); - // Create files that are inside manifest directory - if let Ok(location_in_manifest_base) = location_in_manifest_base { - let p = target.join(location_in_manifest_base); - // Check if it's a directory - if is_dir { - try!(fs::create_dir_all(p)); - } else { - let mut target = try!(fs::File::create(p)); - try!(io::copy(&mut file, &mut target)); + let file_path = PathBuf::from(file.name()); + let location_in_manifest_base = file_path.strip_prefix(&manifest_dir); + // Create files that are inside manifest directory + if let Ok(location_in_manifest_base) = location_in_manifest_base { + let p = target.join(location_in_manifest_base); + // Check if it's a directory + if is_dir { + try!(fs::create_dir_all(p)); + } else { + let mut target = try!(fs::File::create(p)); + try!(io::copy(&mut file, &mut target)); + } } } - } - // Write manifest - let manifest_str = try!(serialize_manifest(&manifest).map_err(ValidationError::ManifestSerialization)); - let manifest_path = target.join(MANIFEST_FILENAME); - let mut manifest_file = try!(fs::File::create(manifest_path)); - try!(manifest_file.write_all(manifest_str.as_bytes())); + // Write manifest + let manifest_str = try!(serialize_manifest(&manifest).map_err(ValidationError::ManifestSerialization)); + let manifest_path = target.join(MANIFEST_FILENAME); + let mut manifest_file = try!(fs::File::create(manifest_path)); + try!(manifest_file.write_all(manifest_str.as_bytes())); + // Create endpoint + let endpoint = LocalPageEndpoint::new(target, manifest.clone().into(), PageCache::Enabled, self.embeddable_on.clone()); + Ok(endpoint) + }; - // Create endpoint - let app = LocalPageEndpoint::new(target, manifest.clone().into(), PageCache::Enabled, self.embeddable_on.clone()); - - // Return modified app manifest - Ok((manifest.id.clone(), app)) - } - - fn done(&self, endpoint: Option) { - (self.on_done)(self.id.clone(), endpoint) + let result = validate(); + (self.on_done)(result.as_ref().ok().cloned()); + result } } diff --git a/dapps/src/handlers/fetch.rs b/dapps/src/handlers/fetch.rs index 6fb524293..d62b425d9 100644 --- a/dapps/src/handlers/fetch.rs +++ b/dapps/src/handlers/fetch.rs @@ -22,35 +22,41 @@ use std::sync::{mpsc, Arc}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::{Instant, Duration}; use util::Mutex; -use url::Url; use fetch::{Client, Fetch, FetchResult}; use hyper::{server, Decoder, Encoder, Next, Method, Control}; use hyper::net::HttpStream; +use hyper::uri::RequestUri; use hyper::status::StatusCode; -use handlers::{ContentHandler, Redirection, extract_url}; -use page::LocalPageEndpoint; +use endpoint::EndpointPath; +use handlers::ContentHandler; +use page::{LocalPageEndpoint, PageHandlerWaiting}; const FETCH_TIMEOUT: u64 = 30; enum FetchState { + Waiting, NotStarted(String), Error(ContentHandler), InProgress(mpsc::Receiver), - Done(String, LocalPageEndpoint, Redirection), + Done(LocalPageEndpoint, Box), +} + +enum WaitResult { + Error(ContentHandler), + Done(LocalPageEndpoint), } pub trait ContentValidator { type Error: fmt::Debug + fmt::Display; - fn validate_and_install(&self, app: PathBuf) -> Result<(String, LocalPageEndpoint), Self::Error>; - fn 
done(&self, Option); + fn validate_and_install(&self, path: PathBuf) -> Result; } pub struct FetchControl { abort: Arc, - listeners: Mutex)>>, + listeners: Mutex)>>, deadline: Instant, } @@ -65,9 +71,10 @@ impl Default for FetchControl { } impl FetchControl { - fn notify FetchState>(&self, status: F) { + fn notify WaitResult>(&self, status: F) { let mut listeners = self.listeners.lock(); for (control, sender) in listeners.drain(..) { + trace!(target: "dapps", "Resuming request waiting for content..."); if let Err(e) = sender.send(status()) { trace!(target: "dapps", "Waiting listener notification failed: {:?}", e); } else { @@ -78,9 +85,9 @@ impl FetchControl { fn set_status(&self, status: &FetchState) { match *status { - FetchState::Error(ref handler) => self.notify(|| FetchState::Error(handler.clone())), - FetchState::Done(ref id, ref endpoint, ref handler) => self.notify(|| FetchState::Done(id.clone(), endpoint.clone(), handler.clone())), - FetchState::NotStarted(_) | FetchState::InProgress(_) => {}, + FetchState::Error(ref handler) => self.notify(|| WaitResult::Error(handler.clone())), + FetchState::Done(ref endpoint, _) => self.notify(|| WaitResult::Done(endpoint.clone())), + FetchState::NotStarted(_) | FetchState::InProgress(_) | FetchState::Waiting => {}, } } @@ -88,44 +95,66 @@ impl FetchControl { self.abort.store(true, Ordering::SeqCst); } - pub fn to_handler(&self, control: Control) -> Box + Send> { + pub fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box + Send> { let (tx, rx) = mpsc::channel(); self.listeners.lock().push((control, tx)); Box::new(WaitingHandler { receiver: rx, - state: None, + state: FetchState::Waiting, + uri: RequestUri::default(), + path: path, }) } } pub struct WaitingHandler { - receiver: mpsc::Receiver, - state: Option, + receiver: mpsc::Receiver, + state: FetchState, + uri: RequestUri, + path: EndpointPath, } impl server::Handler for WaitingHandler { - fn on_request(&mut self, _request: server::Request) -> Next { + fn on_request(&mut self, request: server::Request) -> Next { + self.uri = request.uri().clone(); Next::wait() } - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - self.state = self.receiver.try_recv().ok(); - Next::write() + fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { + let result = self.receiver.try_recv().ok(); + self.state = match result { + Some(WaitResult::Error(handler)) => FetchState::Error(handler), + Some(WaitResult::Done(endpoint)) => { + let mut page_handler = endpoint.to_page_handler(self.path.clone()); + page_handler.set_uri(&self.uri); + FetchState::Done(endpoint, page_handler) + }, + None => { + warn!("A result for waiting request was not received."); + FetchState::Waiting + }, + }; + + match self.state { + FetchState::Done(_, ref mut handler) => handler.on_request_readable(decoder), + FetchState::Error(ref mut handler) => handler.on_request_readable(decoder), + _ => Next::write(), + } } fn on_response(&mut self, res: &mut server::Response) -> Next { match self.state { - Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response(res), - Some(FetchState::Error(ref mut handler)) => handler.on_response(res), + FetchState::Done(_, ref mut handler) => handler.on_response(res), + FetchState::Error(ref mut handler) => handler.on_response(res), _ => Next::end(), } } fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { match self.state { - Some(FetchState::Done(_, _, ref mut handler)) => handler.on_response_writable(encoder), - 
Some(FetchState::Error(ref mut handler)) => handler.on_response_writable(encoder), + FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder), + FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), _ => Next::end(), } } @@ -137,29 +166,19 @@ pub struct ContentFetcherHandler { status: FetchState, client: Option, installer: H, - request_url: Option, + path: EndpointPath, + uri: RequestUri, embeddable_on: Option<(String, u16)>, } -impl Drop for ContentFetcherHandler { - fn drop(&mut self) { - let result = match self.status { - FetchState::Done(_, ref result, _) => Some(result.clone()), - _ => None, - }; - self.installer.done(result); - } -} - impl ContentFetcherHandler { - pub fn new( url: String, + path: EndpointPath, control: Control, handler: H, embeddable_on: Option<(String, u16)>, ) -> (Self, Arc) { - let fetch_control = Arc::new(FetchControl::default()); let client = Client::default(); let handler = ContentFetcherHandler { @@ -168,7 +187,8 @@ impl ContentFetcherHandler { client: Some(client), status: FetchState::NotStarted(url), installer: handler, - request_url: None, + path: path, + uri: RequestUri::default(), embeddable_on: embeddable_on, }; @@ -192,7 +212,6 @@ impl ContentFetcherHandler { impl server::Handler for ContentFetcherHandler { fn on_request(&mut self, request: server::Request) -> Next { - self.request_url = extract_url(&request); let status = if let FetchState::NotStarted(ref url) = self.status { Some(match *request.method() { // Start fetching content @@ -205,8 +224,8 @@ impl server::Handler for ContentFetcherHandler< Ok(receiver) => FetchState::InProgress(receiver), Err(e) => FetchState::Error(ContentHandler::error( StatusCode::BadGateway, - "Unable To Start Dapp Download", - "Could not initialize download of the dapp. It might be a problem with the remote server.", + "Unable To Start Content Download", + "Could not initialize download of the content. 
It might be a problem with the remote server.", Some(&format!("{}", e)), self.embeddable_on.clone(), )), @@ -227,6 +246,7 @@ impl server::Handler for ContentFetcherHandler< self.fetch_control.set_status(&status); self.status = status; } + self.uri = request.uri().clone(); Next::read() } @@ -266,11 +286,10 @@ impl server::Handler for ContentFetcherHandler< self.embeddable_on.clone(), )) }, - Ok((id, result)) => { - let url: String = self.request_url.take() - .map(|url| url.raw.into_string()) - .expect("Request URL always read in on_request; qed"); - FetchState::Done(id, result, Redirection::new(&url)) + Ok(endpoint) => { + let mut handler = endpoint.to_page_handler(self.path.clone()); + handler.set_uri(&self.uri); + FetchState::Done(endpoint, handler) }, }; // Remove temporary zip file @@ -306,7 +325,7 @@ impl server::Handler for ContentFetcherHandler< fn on_response(&mut self, res: &mut server::Response) -> Next { match self.status { - FetchState::Done(_, _, ref mut handler) => handler.on_response(res), + FetchState::Done(_, ref mut handler) => handler.on_response(res), FetchState::Error(ref mut handler) => handler.on_response(res), _ => Next::end(), } @@ -314,7 +333,7 @@ impl server::Handler for ContentFetcherHandler< fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { match self.status { - FetchState::Done(_, _, ref mut handler) => handler.on_response_writable(encoder), + FetchState::Done(_, ref mut handler) => handler.on_response_writable(encoder), FetchState::Error(ref mut handler) => handler.on_response_writable(encoder), _ => Next::end(), } diff --git a/dapps/src/page/handler.rs b/dapps/src/page/handler.rs index 382dfa5d1..ba7a7ee04 100644 --- a/dapps/src/page/handler.rs +++ b/dapps/src/page/handler.rs @@ -83,13 +83,19 @@ impl Default for PageCache { } } +/// A generic type for `PageHandler` allowing to set the URL. +/// Used by dapps fetching to set the URL after the content was downloaded. +pub trait PageHandlerWaiting: server::Handler + Send { + fn set_uri(&mut self, uri: &RequestUri); +} + /// A handler for a single webapp. /// Resolves correct paths and serves as a plumbing code between /// hyper server and dapp. pub struct PageHandler { /// A Dapp. pub app: T, - /// File currently being served (or `None` if file does not exist). + /// File currently being served pub file: ServedFile, /// Optional prefix to strip from path. pub prefix: Option, @@ -101,6 +107,21 @@ pub struct PageHandler { pub cache: PageCache, } +impl PageHandlerWaiting for PageHandler { + fn set_uri(&mut self, uri: &RequestUri) { + trace!(target: "dapps", "Setting URI: {:?}", uri); + self.file = match *uri { + RequestUri::AbsolutePath { ref path, .. } => { + self.app.file(&self.extract_path(path)) + }, + RequestUri::AbsoluteUri(ref url) => { + self.app.file(&self.extract_path(url.path())) + }, + _ => None, + }.map_or_else(|| ServedFile::new(self.safe_to_embed_on.clone()), |f| ServedFile::File(f)); + } +} + impl PageHandler { fn extract_path(&self, path: &str) -> String { let app_id = &self.path.app_id; @@ -124,15 +145,7 @@ impl PageHandler { impl server::Handler for PageHandler { fn on_request(&mut self, req: server::Request) -> Next { - self.file = match *req.uri() { - RequestUri::AbsolutePath { ref path, .. 
} => { - self.app.file(&self.extract_path(path)) - }, - RequestUri::AbsoluteUri(ref url) => { - self.app.file(&self.extract_path(url.path())) - }, - _ => None, - }.map_or_else(|| ServedFile::new(self.safe_to_embed_on.clone()), |f| ServedFile::File(f)); + self.set_uri(req.uri()); Next::write() } diff --git a/dapps/src/page/local.rs b/dapps/src/page/local.rs index 77c91019d..e8ab9ce14 100644 --- a/dapps/src/page/local.rs +++ b/dapps/src/page/local.rs @@ -18,7 +18,7 @@ use mime_guess; use std::io::{Seek, Read, SeekFrom}; use std::fs; use std::path::{Path, PathBuf}; -use page::handler::{self, PageCache}; +use page::handler::{self, PageCache, PageHandlerWaiting}; use endpoint::{Endpoint, EndpointInfo, EndpointPath, Handler}; #[derive(Debug, Clone)] @@ -54,6 +54,36 @@ impl LocalPageEndpoint { pub fn path(&self) -> PathBuf { self.path.clone() } + + fn page_handler_with_mime(&self, path: EndpointPath, mime: &str) -> handler::PageHandler { + handler::PageHandler { + app: LocalSingleFile { path: self.path.clone(), mime: mime.into() }, + prefix: None, + path: path, + file: handler::ServedFile::new(None), + safe_to_embed_on: self.embeddable_on.clone(), + cache: self.cache, + } + } + + fn page_handler(&self, path: EndpointPath) -> handler::PageHandler { + handler::PageHandler { + app: LocalDapp { path: self.path.clone() }, + prefix: None, + path: path, + file: handler::ServedFile::new(None), + safe_to_embed_on: self.embeddable_on.clone(), + cache: self.cache, + } + } + + pub fn to_page_handler(&self, path: EndpointPath) -> Box { + if let Some(ref mime) = self.mime { + Box::new(self.page_handler_with_mime(path, mime)) + } else { + Box::new(self.page_handler(path)) + } + } } impl Endpoint for LocalPageEndpoint { @@ -63,23 +93,9 @@ impl Endpoint for LocalPageEndpoint { fn to_handler(&self, path: EndpointPath) -> Box { if let Some(ref mime) = self.mime { - Box::new(handler::PageHandler { - app: LocalSingleFile { path: self.path.clone(), mime: mime.clone() }, - prefix: None, - path: path, - file: handler::ServedFile::new(None), - safe_to_embed_on: self.embeddable_on.clone(), - cache: self.cache, - }) + Box::new(self.page_handler_with_mime(path, mime)) } else { - Box::new(handler::PageHandler { - app: LocalDapp { path: self.path.clone() }, - prefix: None, - path: path, - file: handler::ServedFile::new(None), - safe_to_embed_on: self.embeddable_on.clone(), - cache: self.cache, - }) + Box::new(self.page_handler(path)) } } } diff --git a/dapps/src/page/mod.rs b/dapps/src/page/mod.rs index 9619f1b10..5c2b008f8 100644 --- a/dapps/src/page/mod.rs +++ b/dapps/src/page/mod.rs @@ -21,5 +21,5 @@ mod handler; pub use self::local::LocalPageEndpoint; pub use self::builtin::PageEndpoint; -pub use self::handler::PageCache; +pub use self::handler::{PageCache, PageHandlerWaiting}; diff --git a/ethcore/res/authority_round.json b/ethcore/res/authority_round.json index 85beb51b4..efc0cdeb4 100644 --- a/ethcore/res/authority_round.json +++ b/ethcore/res/authority_round.json @@ -21,8 +21,9 @@ }, "genesis": { "seal": { - "generic": { - "rlp": "0xc28080" + "authority_round": { + "step": "0x0", + "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" } }, "difficulty": "0x20000", diff --git a/ethcore/res/basic_authority.json b/ethcore/res/basic_authority.json index 51276d487..db4374160 100644 --- a/ethcore/res/basic_authority.json +++ b/ethcore/res/basic_authority.json @@ -17,10 +17,7 @@ }, "genesis": { "seal": { - "generic": { - "fields": 
1, - "rlp": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" - } + "generic": "0x11bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" }, "difficulty": "0x20000", "author": "0x0000000000000000000000000000000000000000", diff --git a/ethcore/res/ethereum/classic.json b/ethcore/res/ethereum/classic.json index 7c1e9454e..223978ca4 100644 --- a/ethcore/res/ethereum/classic.json +++ b/ethcore/res/ethereum/classic.json @@ -1,6 +1,6 @@ { "name": "Ethereum Classic", - "forkName": "classic", + "dataDir": "classic", "engine": { "Ethash": { "params": { @@ -10,14 +10,15 @@ "durationLimit": "0x0d", "blockReward": "0x4563918244F40000", "registrar" : "0xc6d9d2cd449a754c494264e1809c50e34d64562b", - "homesteadTransition": "0x118c30", - "eip150Transition": "0x2625a0", - "eip155Transition": "0x7fffffffffffffff", - "eip160Transition": "0x7fffffffffffffff", + "homesteadTransition": 1150000, + "eip150Transition": 2500000, + "eip155Transition": 3000000, + "eip160Transition": 3000000, + "ecip1010PauseTransition": 3000000, + "ecip1010ContinueTransition": 5000000, + "eip161abcTransition": "0x7fffffffffffffff", - "eip161dTransition": "0x7fffffffffffffff", - "ecip1010PauseTransition": "0x2dc6c0", - "ecip1010ContinueTransition": "0x4c4b40" + "eip161dTransition": "0x7fffffffffffffff" } } }, diff --git a/ethcore/res/ethereum/expanse.json b/ethcore/res/ethereum/expanse.json index 8d580b6f5..d8cbd5b0c 100644 --- a/ethcore/res/ethereum/expanse.json +++ b/ethcore/res/ethereum/expanse.json @@ -1,6 +1,6 @@ { "name": "Expanse", - "forkName": "expanse", + "dataDir": "expanse", "engine": { "Ethash": { "params": { diff --git a/ethcore/res/ethereum/frontier.json b/ethcore/res/ethereum/frontier.json index 3a9dce456..91a8ae9e6 100644 --- a/ethcore/res/ethereum/frontier.json +++ b/ethcore/res/ethereum/frontier.json @@ -1,5 +1,6 @@ { "name": "Frontier/Homestead", + "dataDir": "ethereum", "engine": { "Ethash": { "params": { diff --git a/ethcore/res/ethereum/morden.json b/ethcore/res/ethereum/morden.json index 6e725e8bf..d21756250 100644 --- a/ethcore/res/ethereum/morden.json +++ b/ethcore/res/ethereum/morden.json @@ -1,5 +1,6 @@ { "name": "Morden", + "dataDir": "test", "engine": { "Ethash": { "params": { @@ -9,12 +10,15 @@ "durationLimit": "0x0d", "blockReward": "0x4563918244F40000", "registrar": "0x52dff57a8a1532e6afb3dc07e2af58bb9eb05b3d", - "homesteadTransition": "0x789b0", - "eip150Transition": "0x1b34d8", - "eip155Transition": 1885000, - "eip160Transition": 1885000, - "eip161abcTransition": 1885000, - "eip161dTransition": 1885000 + "homesteadTransition": 494000, + "eip150Transition": 1783000, + "eip155Transition": 1915000, + "eip160Transition": 1915000, + "ecip1010PauseTransition": 1915000, + "ecip1010ContinueTransition": 3415000, + + "eip161abcTransition": "0x7fffffffffffffff", + "eip161dTransition": "0x7fffffffffffffff" } } }, diff --git a/ethcore/res/ethereum/ropsten.json b/ethcore/res/ethereum/ropsten.json index 62282801d..d388ce9a1 100644 --- a/ethcore/res/ethereum/ropsten.json +++ b/ethcore/res/ethereum/ropsten.json @@ -1,5 +1,6 @@ { "name": "Ropsten", + "dataDir": "test", "engine": { "Ethash": { "params": { diff --git a/ethcore/res/instant_seal.json b/ethcore/res/instant_seal.json index fbb650102..6a3964e89 100644 --- a/ethcore/res/instant_seal.json +++ b/ethcore/res/instant_seal.json @@ -4,29 +4,27 @@ "InstantSeal": null }, "params": { - "accountStartNonce": "0x0100000", + "accountStartNonce": "0x0", "maximumExtraDataSize": "0x20", "minGasLimit": "0x1388", - "networkID" : "0x2" + "networkID" 
: "0x11" }, "genesis": { "seal": { - "generic": { - "rlp": "0x0" - } + "generic": "0x0" }, "difficulty": "0x20000", "author": "0x0000000000000000000000000000000000000000", "timestamp": "0x00", "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", "extraData": "0x", - "gasLimit": "0x2fefd8" + "gasLimit": "0x5B8D80" }, "accounts": { - "0000000000000000000000000000000000000001": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, - "0000000000000000000000000000000000000002": { "balance": "1", "nonce": "1048576", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, - "0000000000000000000000000000000000000003": { "balance": "1", "nonce": "1048576", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, - "0000000000000000000000000000000000000004": { "balance": "1", "nonce": "1048576", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, - "0x00a329c0648769a73afac7f9381e08fb43dbea72": { "balance": "1606938044258990275541962092341162602522202993782792835301376", "nonce": "1048576" } + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": "ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "0x00a329c0648769a73afac7f9381e08fb43dbea72": { "balance": "1606938044258990275541962092341162602522202993782792835301376" } } } diff --git a/ethcore/res/tendermint.json b/ethcore/res/tendermint.json new file mode 100644 index 000000000..778757107 --- /dev/null +++ b/ethcore/res/tendermint.json @@ -0,0 +1,44 @@ +{ + "name": "TestBFT", + "engine": { + "Tendermint": { + "params": { + "gasLimitBoundDivisor": "0x0400", + "authorities" : [ + "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1", + "0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e" + ] + } + } + }, + "params": { + "accountStartNonce": "0x0", + "maximumExtraDataSize": "0x20", + "minGasLimit": "0x1388", + "networkID" : "0x2323" + }, + "genesis": { + "seal": { + "tendermint": { + "round": "0x0", + "proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "precommits": [ + "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + ] + } + }, + "difficulty": "0x20000", + "author": "0x0000000000000000000000000000000000000000", + "timestamp": "0x00", + "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "extraData": "0x", + "gasLimit": "0x2fefd8" + }, + "accounts": { + "0000000000000000000000000000000000000001": { "balance": "1", "builtin": { "name": "ecrecover", "pricing": { "linear": { "base": 3000, "word": 0 } } } }, + "0000000000000000000000000000000000000002": { "balance": "1", "builtin": { "name": "sha256", "pricing": { "linear": { "base": 60, "word": 12 } } } }, + "0000000000000000000000000000000000000003": { "balance": "1", "builtin": { "name": 
"ripemd160", "pricing": { "linear": { "base": 600, "word": 120 } } } }, + "0000000000000000000000000000000000000004": { "balance": "1", "builtin": { "name": "identity", "pricing": { "linear": { "base": 15, "word": 3 } } } }, + "9cce34f7ab185c7aba1b7c8140d620b4bda941d6": { "balance": "1606938044258990275541962092341162602522202993782792835301376" } + } +} diff --git a/ethcore/src/account_provider/mod.rs b/ethcore/src/account_provider/mod.rs index 1175f2d02..dab19dbc0 100644 --- a/ethcore/src/account_provider/mod.rs +++ b/ethcore/src/account_provider/mod.rs @@ -18,14 +18,14 @@ mod stores; -use self::stores::{AddressBook, DappsSettingsStore}; +use self::stores::{AddressBook, DappsSettingsStore, NewDappsPolicy}; use std::fmt; use std::collections::HashMap; use std::time::{Instant, Duration}; -use util::{Mutex, RwLock}; -use ethstore::{SecretStore, Error as SSError, SafeAccount, EthStore}; -use ethstore::dir::{KeyDirectory}; +use util::RwLock; +use ethstore::{SimpleSecretStore, SecretStore, Error as SSError, EthStore, EthMultiStore, random_string}; +use ethstore::dir::MemoryDirectory; use ethstore::ethkey::{Address, Message, Public, Secret, Random, Generator}; use ethjson::misc::AccountMeta; pub use ethstore::ethkey::Signature; @@ -73,58 +73,47 @@ impl From for Error { } } -#[derive(Default)] -struct NullDir { - accounts: RwLock>, -} - -impl KeyDirectory for NullDir { - fn load(&self) -> Result, SSError> { - Ok(self.accounts.read().values().cloned().collect()) - } - - fn insert(&self, account: SafeAccount) -> Result { - self.accounts.write().insert(account.address.clone(), account.clone()); - Ok(account) - } - - fn remove(&self, address: &Address) -> Result<(), SSError> { - self.accounts.write().remove(address); - Ok(()) - } -} - /// Dapp identifier pub type DappId = String; +fn transient_sstore() -> EthMultiStore { + EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed") +} + +type AccountToken = String; + /// Account management. /// Responsible for unlocking accounts. pub struct AccountProvider { - unlocked: Mutex>, - sstore: Box, + unlocked: RwLock>, address_book: RwLock, dapps_settings: RwLock, + /// Accounts on disk + sstore: Box, + /// Accounts unlocked with rolling tokens + transient_sstore: EthMultiStore, } impl AccountProvider { /// Creates new account provider. pub fn new(sstore: Box) -> Self { AccountProvider { - unlocked: Mutex::new(HashMap::new()), + unlocked: RwLock::new(HashMap::new()), address_book: RwLock::new(AddressBook::new(sstore.local_path().into())), dapps_settings: RwLock::new(DappsSettingsStore::new(sstore.local_path().into())), sstore: sstore, + transient_sstore: transient_sstore(), } } /// Creates not disk backed provider. pub fn transient_provider() -> Self { AccountProvider { - unlocked: Mutex::new(HashMap::new()), + unlocked: RwLock::new(HashMap::new()), address_book: RwLock::new(AddressBook::transient()), dapps_settings: RwLock::new(DappsSettingsStore::transient()), - sstore: Box::new(EthStore::open(Box::new(NullDir::default())) - .expect("NullDir load always succeeds; qed")) + sstore: Box::new(EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory load always succeeds; qed")), + transient_sstore: transient_sstore(), } } @@ -167,10 +156,49 @@ impl AccountProvider { Ok(accounts) } + /// Sets a whitelist of accounts exposed for unknown dapps. + /// `None` means that all accounts will be visible. 
+ pub fn set_new_dapps_whitelist(&self, accounts: Option>) -> Result<(), Error> { + self.dapps_settings.write().set_policy(match accounts { + None => NewDappsPolicy::AllAccounts, + Some(accounts) => NewDappsPolicy::Whitelist(accounts), + }); + Ok(()) + } + + /// Gets a whitelist of accounts exposed for unknown dapps. + /// `None` means that all accounts will be visible. + pub fn new_dapps_whitelist(&self) -> Result>, Error> { + Ok(match self.dapps_settings.read().policy() { + NewDappsPolicy::AllAccounts => None, + NewDappsPolicy::Whitelist(accounts) => Some(accounts), + }) + } + + /// Gets a list of dapps recently requesting accounts. + pub fn recent_dapps(&self) -> Result, Error> { + Ok(self.dapps_settings.read().recent_dapps()) + } + + /// Marks dapp as recently used. + pub fn note_dapp_used(&self, dapp: DappId) -> Result<(), Error> { + let mut dapps = self.dapps_settings.write(); + dapps.mark_dapp_used(dapp.clone()); + Ok(()) + } + /// Gets addresses visile for dapp. pub fn dapps_addresses(&self, dapp: DappId) -> Result, Error> { - let accounts = self.dapps_settings.read().get(); - Ok(accounts.get(&dapp).map(|settings| settings.accounts.clone()).unwrap_or_else(Vec::new)) + let dapps = self.dapps_settings.read(); + + let accounts = dapps.settings().get(&dapp).map(|settings| settings.accounts.clone()); + match accounts { + Some(accounts) => Ok(accounts), + None => match dapps.policy() { + NewDappsPolicy::AllAccounts => self.accounts(), + NewDappsPolicy::Whitelist(accounts) => Ok(accounts), + } + } } /// Sets addresses visile for dapp. @@ -231,11 +259,8 @@ impl AccountProvider { /// Returns `true` if the password for `account` is `password`. `false` if not. pub fn test_password(&self, account: &Address, password: &str) -> Result { - match self.sstore.sign(account, password, &Default::default()) { - Ok(_) => Ok(true), - Err(SSError::InvalidPassword) => Ok(false), - Err(e) => Err(Error::SStore(e)), - } + self.sstore.test_password(account, password) + .map_err(Into::into) } /// Permanently removes an account. @@ -256,7 +281,7 @@ impl AccountProvider { let _ = try!(self.sstore.sign(&account, &password, &Default::default())); // check if account is already unlocked pernamently, if it is, do nothing - let mut unlocked = self.unlocked.lock(); + let mut unlocked = self.unlocked.write(); if let Some(data) = unlocked.get(&account) { if let Unlock::Perm = data.unlock { return Ok(()) @@ -273,7 +298,7 @@ impl AccountProvider { } fn password(&self, account: &Address) -> Result { - let mut unlocked = self.unlocked.lock(); + let mut unlocked = self.unlocked.write(); let data = try!(unlocked.get(account).ok_or(Error::NotUnlocked)).clone(); if let Unlock::Temp = data.unlock { unlocked.remove(account).expect("data exists: so key must exist: qed"); @@ -304,7 +329,7 @@ impl AccountProvider { /// Checks if given account is unlocked pub fn is_unlocked(&self, account: Address) -> bool { - let unlocked = self.unlocked.lock(); + let unlocked = self.unlocked.read(); unlocked.get(&account).is_some() } @@ -314,6 +339,48 @@ impl AccountProvider { Ok(try!(self.sstore.sign(&account, &password, &message))) } + /// Signs given message with supplied token. Returns a token to use in next signing within this session. 
+ pub fn sign_with_token(&self, account: Address, token: AccountToken, message: Message) -> Result<(Signature, AccountToken), Error> { + let is_std_password = try!(self.sstore.test_password(&account, &token)); + + let new_token = random_string(16); + let signature = if is_std_password { + // Insert to transient store + try!(self.sstore.copy_account(&self.transient_sstore, &account, &token, &new_token)); + // sign + try!(self.sstore.sign(&account, &token, &message)) + } else { + // check transient store + try!(self.transient_sstore.change_password(&account, &token, &new_token)); + // and sign + try!(self.transient_sstore.sign(&account, &new_token, &message)) + }; + + Ok((signature, new_token)) + } + + /// Decrypts a message with given token. Returns a token to use in next operation for this account. + pub fn decrypt_with_token(&self, account: Address, token: AccountToken, shared_mac: &[u8], message: &[u8]) + -> Result<(Vec, AccountToken), Error> + { + let is_std_password = try!(self.sstore.test_password(&account, &token)); + + let new_token = random_string(16); + let message = if is_std_password { + // Insert to transient store + try!(self.sstore.copy_account(&self.transient_sstore, &account, &token, &new_token)); + // decrypt + try!(self.sstore.decrypt(&account, &token, shared_mac, message)) + } else { + // check transient store + try!(self.transient_sstore.change_password(&account, &token, &new_token)); + // and decrypt + try!(self.transient_sstore.decrypt(&account, &token, shared_mac, message)) + }; + + Ok((message, new_token)) + } + /// Decrypts a message. If password is not provided the account must be unlocked. pub fn decrypt(&self, account: Address, password: Option, shared_mac: &[u8], message: &[u8]) -> Result, Error> { let password = try!(password.map(Ok).unwrap_or_else(|| self.password(&account))); @@ -370,15 +437,33 @@ mod tests { assert!(ap.unlock_account_timed(kp.address(), "test1".into(), 60000).is_err()); assert!(ap.unlock_account_timed(kp.address(), "test".into(), 60000).is_ok()); assert!(ap.sign(kp.address(), None, Default::default()).is_ok()); - ap.unlocked.lock().get_mut(&kp.address()).unwrap().unlock = Unlock::Timed(Instant::now()); + ap.unlocked.write().get_mut(&kp.address()).unwrap().unlock = Unlock::Timed(Instant::now()); assert!(ap.sign(kp.address(), None, Default::default()).is_err()); } + #[test] + fn should_sign_and_return_token() { + // given + let kp = Random.generate().unwrap(); + let ap = AccountProvider::transient_provider(); + assert!(ap.insert_account(kp.secret().clone(), "test").is_ok()); + + // when + let (_signature, token) = ap.sign_with_token(kp.address(), "test".into(), Default::default()).unwrap(); + + // then + ap.sign_with_token(kp.address(), token.clone(), Default::default()) + .expect("First usage of token should be correct."); + assert!(ap.sign_with_token(kp.address(), token, Default::default()).is_err(), "Second usage of the same token should fail."); + } + #[test] fn should_set_dapps_addresses() { // given let ap = AccountProvider::transient_provider(); let app = "app1".to_owned(); + // set `AllAccounts` policy + ap.set_new_dapps_whitelist(None).unwrap(); // when ap.set_dapps_addresses(app.clone(), vec![1.into(), 2.into()]).unwrap(); @@ -386,4 +471,23 @@ mod tests { // then assert_eq!(ap.dapps_addresses(app.clone()).unwrap(), vec![1.into(), 2.into()]); } + + #[test] + fn should_set_dapps_policy() { + // given + let ap = AccountProvider::transient_provider(); + let address = ap.new_account("test").unwrap(); + + // When returning nothing + 
ap.set_new_dapps_whitelist(Some(vec![])).unwrap(); + assert_eq!(ap.dapps_addresses("app1".into()).unwrap(), vec![]); + + // change to all + ap.set_new_dapps_whitelist(None).unwrap(); + assert_eq!(ap.dapps_addresses("app1".into()).unwrap(), vec![address]); + + // change to a whitelist + ap.set_new_dapps_whitelist(Some(vec![1.into()])).unwrap(); + assert_eq!(ap.dapps_addresses("app1".into()).unwrap(), vec![1.into()]); + } } diff --git a/ethcore/src/account_provider/stores.rs b/ethcore/src/account_provider/stores.rs index d7e96243c..d4f2093ee 100644 --- a/ethcore/src/account_provider/stores.rs +++ b/ethcore/src/account_provider/stores.rs @@ -17,11 +17,11 @@ //! Address Book and Dapps Settings Store use std::{fs, fmt, hash, ops}; -use std::collections::HashMap; +use std::collections::{HashMap, VecDeque}; use std::path::PathBuf; use ethstore::ethkey::Address; -use ethjson::misc::{AccountMeta, DappsSettings as JsonSettings}; +use ethjson::misc::{AccountMeta, DappsSettings as JsonSettings, NewDappsPolicy as JsonNewDappsPolicy}; use account_provider::DappId; /// Disk-backed map from Address to String. Uses JSON. @@ -105,43 +105,106 @@ impl From for JsonSettings { } } +/// Dapps user settings +#[derive(Debug, Clone, Eq, PartialEq)] +pub enum NewDappsPolicy { + AllAccounts, + Whitelist(Vec
), +} + +impl From for NewDappsPolicy { + fn from(s: JsonNewDappsPolicy) -> Self { + match s { + JsonNewDappsPolicy::AllAccounts => NewDappsPolicy::AllAccounts, + JsonNewDappsPolicy::Whitelist(accounts) => NewDappsPolicy::Whitelist( + accounts.into_iter().map(Into::into).collect() + ), + } + } +} + +impl From for JsonNewDappsPolicy { + fn from(s: NewDappsPolicy) -> Self { + match s { + NewDappsPolicy::AllAccounts => JsonNewDappsPolicy::AllAccounts, + NewDappsPolicy::Whitelist(accounts) => JsonNewDappsPolicy::Whitelist( + accounts.into_iter().map(Into::into).collect() + ), + } + } +} + +const MAX_RECENT_DAPPS: usize = 10; + /// Disk-backed map from DappId to Settings. Uses JSON. pub struct DappsSettingsStore { - cache: DiskMap, + /// Dapps Settings + settings: DiskMap, + /// New Dapps Policy + policy: DiskMap, + /// Recently Accessed Dapps (transient) + recent: VecDeque, } impl DappsSettingsStore { /// Creates new store at given directory path. pub fn new(path: String) -> Self { let mut r = DappsSettingsStore { - cache: DiskMap::new(path, "dapps_accounts.json".into()) + settings: DiskMap::new(path.clone(), "dapps_accounts.json".into()), + policy: DiskMap::new(path.clone(), "dapps_policy.json".into()), + recent: VecDeque::with_capacity(MAX_RECENT_DAPPS), }; - r.cache.revert(JsonSettings::read_dapps_settings); + r.settings.revert(JsonSettings::read_dapps_settings); + r.policy.revert(JsonNewDappsPolicy::read_new_dapps_policy); r } /// Creates transient store (no changes are saved to disk). pub fn transient() -> Self { DappsSettingsStore { - cache: DiskMap::transient() + settings: DiskMap::transient(), + policy: DiskMap::transient(), + recent: VecDeque::with_capacity(MAX_RECENT_DAPPS), } } /// Get copy of the dapps settings - pub fn get(&self) -> HashMap { - self.cache.clone() + pub fn settings(&self) -> HashMap { + self.settings.clone() } - fn save(&self) { - self.cache.save(JsonSettings::write_dapps_settings) + /// Returns current new dapps policy + pub fn policy(&self) -> NewDappsPolicy { + self.policy.get("default").cloned().unwrap_or(NewDappsPolicy::AllAccounts) } + /// Returns recent dapps (in order of last request) + pub fn recent_dapps(&self) -> Vec { + self.recent.iter().cloned().collect() + } + + /// Marks recent dapp as used + pub fn mark_dapp_used(&mut self, dapp: DappId) { + self.recent.retain(|id| id != &dapp); + self.recent.push_front(dapp); + while self.recent.len() > MAX_RECENT_DAPPS { + self.recent.pop_back(); + } + } + + /// Sets current new dapps policy + pub fn set_policy(&mut self, policy: NewDappsPolicy) { + self.policy.insert("default".into(), policy); + self.policy.save(JsonNewDappsPolicy::write_new_dapps_policy); + } + + /// Sets accounts for specific dapp. pub fn set_accounts(&mut self, id: DappId, accounts: Vec
) { { - let mut settings = self.cache.entry(id).or_insert_with(DappsSettings::default); + let mut settings = self.settings.entry(id).or_insert_with(DappsSettings::default); settings.accounts = accounts; } - self.save(); + self.settings.save(JsonSettings::write_dapps_settings); } } @@ -216,7 +279,7 @@ impl DiskMap { #[cfg(test)] mod tests { - use super::{AddressBook, DappsSettingsStore, DappsSettings}; + use super::{AddressBook, DappsSettingsStore, DappsSettings, NewDappsPolicy}; use std::collections::HashMap; use ethjson::misc::AccountMeta; use devtools::RandomTempPath; @@ -232,25 +295,6 @@ mod tests { assert_eq!(b.get(), hash_map![1.into() => AccountMeta{name: "One".to_owned(), meta: "{1:1}".to_owned(), uuid: None}]); } - #[test] - fn should_save_and_reload_dapps_settings() { - // given - let temp = RandomTempPath::create_dir(); - let path = temp.as_str().to_owned(); - let mut b = DappsSettingsStore::new(path.clone()); - - // when - b.set_accounts("dappOne".into(), vec![1.into(), 2.into()]); - - // then - let b = DappsSettingsStore::new(path); - assert_eq!(b.get(), hash_map![ - "dappOne".into() => DappsSettings { - accounts: vec![1.into(), 2.into()], - } - ]); - } - #[test] fn should_remove_address() { let temp = RandomTempPath::create_dir(); @@ -268,4 +312,58 @@ mod tests { 3.into() => AccountMeta{name: "Three".to_owned(), meta: "{}".to_owned(), uuid: None} ]); } + + #[test] + fn should_save_and_reload_dapps_settings() { + // given + let temp = RandomTempPath::create_dir(); + let path = temp.as_str().to_owned(); + let mut b = DappsSettingsStore::new(path.clone()); + + // when + b.set_accounts("dappOne".into(), vec![1.into(), 2.into()]); + + // then + let b = DappsSettingsStore::new(path); + assert_eq!(b.settings(), hash_map![ + "dappOne".into() => DappsSettings { + accounts: vec![1.into(), 2.into()], + } + ]); + } + + #[test] + fn should_maintain_a_list_of_recent_dapps() { + let mut store = DappsSettingsStore::transient(); + assert!(store.recent_dapps().is_empty(), "Initially recent dapps should be empty."); + + store.mark_dapp_used("dapp1".into()); + assert_eq!(store.recent_dapps(), vec!["dapp1".to_owned()]); + + store.mark_dapp_used("dapp2".into()); + assert_eq!(store.recent_dapps(), vec!["dapp2".to_owned(), "dapp1".to_owned()]); + + store.mark_dapp_used("dapp1".into()); + assert_eq!(store.recent_dapps(), vec!["dapp1".to_owned(), "dapp2".to_owned()]); + } + + #[test] + fn should_store_dapps_policy() { + // given + let temp = RandomTempPath::create_dir(); + let path = temp.as_str().to_owned(); + let mut store = DappsSettingsStore::new(path.clone()); + + // Test default policy + assert_eq!(store.policy(), NewDappsPolicy::AllAccounts); + + // when + store.set_policy(NewDappsPolicy::Whitelist(vec![1.into(), 2.into()])); + + // then + let store = DappsSettingsStore::new(path); + assert_eq!(store.policy.clone(), hash_map![ + "default".into() => NewDappsPolicy::Whitelist(vec![1.into(), 2.into()]) + ]); + } } diff --git a/ethcore/src/client/chain_notify.rs b/ethcore/src/client/chain_notify.rs index a9f8d0204..a6dbbeacc 100644 --- a/ethcore/src/client/chain_notify.rs +++ b/ethcore/src/client/chain_notify.rs @@ -15,7 +15,7 @@ // along with Parity. If not, see . use ipc::IpcConfig; -use util::H256; +use util::{H256, Bytes}; /// Represents what has to be handled by actor listening to chain events #[ipc] @@ -27,6 +27,8 @@ pub trait ChainNotify : Send + Sync { _enacted: Vec, _retracted: Vec, _sealed: Vec, + // Block bytes. 
+ _proposed: Vec, _duration: u64) { // does nothing by default } @@ -41,6 +43,9 @@ pub trait ChainNotify : Send + Sync { // does nothing by default } + /// fires when chain broadcasts a message + fn broadcast(&self, _data: Vec) {} + /// fires when new transactions are received from a peer fn transactions_received(&self, _hashes: Vec, diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index dce594617..93b850051 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -24,8 +24,8 @@ use time::precise_time_ns; // util use util::{Bytes, PerfTimer, Itertools, Mutex, RwLock, Hashable}; use util::{journaldb, TrieFactory, Trie}; -use util::trie::TrieSpec; use util::{U256, H256, Address, H2048, Uint, FixedHash}; +use util::trie::TrieSpec; use util::kvdb::*; // other @@ -396,9 +396,10 @@ impl Client { /// This is triggered by a message coming from a block queue when the block is ready for insertion pub fn import_verified_blocks(&self) -> usize { let max_blocks_to_import = 4; - let (imported_blocks, import_results, invalid_blocks, imported, duration, is_empty) = { + let (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration, is_empty) = { let mut imported_blocks = Vec::with_capacity(max_blocks_to_import); let mut invalid_blocks = HashSet::new(); + let mut proposed_blocks = Vec::with_capacity(max_blocks_to_import); let mut import_results = Vec::with_capacity(max_blocks_to_import); let _import_lock = self.import_lock.lock(); @@ -417,12 +418,17 @@ impl Client { continue; } if let Ok(closed_block) = self.check_and_close_block(&block) { - imported_blocks.push(header.hash()); + if self.engine.is_proposal(&block.header) { + self.block_queue.mark_as_good(&[header.hash()]); + proposed_blocks.push(block.bytes); + } else { + imported_blocks.push(header.hash()); - let route = self.commit_block(closed_block, &header.hash(), &block.bytes); - import_results.push(route); + let route = self.commit_block(closed_block, &header.hash(), &block.bytes); + import_results.push(route); - self.report.write().accrue_block(&block); + self.report.write().accrue_block(&block); + } } else { invalid_blocks.insert(header.hash()); } @@ -436,7 +442,7 @@ impl Client { } let is_empty = self.block_queue.mark_as_good(&imported_blocks); let duration_ns = precise_time_ns() - start; - (imported_blocks, import_results, invalid_blocks, imported, duration_ns, is_empty) + (imported_blocks, import_results, invalid_blocks, imported, proposed_blocks, duration_ns, is_empty) }; { @@ -454,6 +460,7 @@ impl Client { enacted.clone(), retracted.clone(), Vec::new(), + proposed_blocks.clone(), duration, ); }); @@ -577,9 +584,10 @@ impl Client { self.miner.clone() } - /// Used by PoA to try sealing on period change. - pub fn update_sealing(&self) { - self.miner.update_sealing(self) + + /// Replace io channel. Useful for testing. + pub fn set_io_channel(&self, io_channel: IoChannel) { + *self.io_channel.lock() = io_channel; } /// Attempt to get a copy of a specific block's final state. 
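Note on the ChainNotify hunk above: `new_blocks` gains a `proposed` list (raw bytes of proposal blocks that are broadcast but never inserted into the chain) and a new `broadcast` hook fired when the client relays a consensus message; both keep do-nothing default bodies, so existing listeners compile unchanged. As a rough sketch only (the generic parameters are stripped in this rendering; the payload is assumed to be `Bytes`, i.e. `Vec<u8>`, and the import paths are assumed), a listener interested solely in consensus traffic could override just the new hook:

use util::Bytes;          // assumed: Bytes = Vec<u8> in ethcore-util
use client::ChainNotify;  // assumed re-export of the trait patched above

/// Illustrative listener: every other ChainNotify method keeps its
/// default no-op body, so only the new hook is overridden.
struct ConsensusLogger;

impl ChainNotify for ConsensusLogger {
    // Assumed signature: Client::broadcast_consensus_message forwards the
    // engine's message bytes here unchanged.
    fn broadcast(&self, data: Bytes) {
        println!("consensus message broadcast: {} bytes", data.len());
    }
}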
@@ -1290,6 +1298,18 @@ impl BlockChainClient for Client { self.miner.pending_transactions(self.chain.read().best_block_number()) } + fn queue_consensus_message(&self, message: Bytes) { + let channel = self.io_channel.lock().clone(); + if let Err(e) = channel.send(ClientIoMessage::NewMessage(message)) { + debug!("Ignoring the message, error queueing: {}", e); + } + } + + fn broadcast_consensus_message(&self, message: Bytes) { + self.notify(|notify| notify.broadcast(message.clone())); + } + + fn signing_network_id(&self) -> Option { self.engine.signing_network_id(&self.latest_env_info()) } @@ -1314,7 +1334,6 @@ impl BlockChainClient for Client { } impl MiningBlockChainClient for Client { - fn latest_schedule(&self) -> Schedule { self.engine.schedule(&self.latest_env_info()) } @@ -1357,6 +1376,30 @@ impl MiningBlockChainClient for Client { &self.factories.vm } + fn update_sealing(&self) { + self.miner.update_sealing(self) + } + + fn submit_seal(&self, block_hash: H256, seal: Vec) { + if self.miner.submit_seal(self, block_hash, seal).is_err() { + warn!(target: "poa", "Wrong internal seal submission!") + } + } + + fn broadcast_proposal_block(&self, block: SealedBlock) { + self.notify(|notify| { + notify.new_blocks( + vec![], + vec![], + vec![], + vec![], + vec![], + vec![block.rlp_bytes()], + 0, + ); + }); + } + fn import_sealed_block(&self, block: SealedBlock) -> ImportResult { let h = block.header().hash(); let start = precise_time_ns(); @@ -1381,6 +1424,7 @@ impl MiningBlockChainClient for Client { enacted.clone(), retracted.clone(), vec![h.clone()], + vec![], precise_time_ns() - start, ); }); @@ -1416,6 +1460,12 @@ impl ::client::ProvingBlockChainClient for Client { } } +impl Drop for Client { + fn drop(&mut self) { + self.engine.stop(); + } +} + #[cfg(test)] mod tests { diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 44954f99d..81c948b00 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -90,6 +90,8 @@ pub struct TestBlockChainClient { pub ancient_block: RwLock>, /// First block info. pub first_block: RwLock>, + /// Traces to return + pub traces: RwLock>>, } /// Used for generating test client blocks. 
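Note on the TestBlockChainClient changes around this point: the old `unimplemented!()` trace methods are replaced by canned data kept in the new `traces` field, so RPC tests can exercise the trace endpoints. The pattern is just an `RwLock<Option<Vec<_>>>` that a test fills in. The snippet below is a standalone, simplified illustration of that pattern using std types and a `String` stand-in for `LocalizedTrace`; it is not the ethcore code itself:

use std::sync::RwLock;

/// Stand-in for the stubbed-trace pattern: queries return whatever a test
/// installed, or None when nothing has been set.
struct StubTraces {
    traces: RwLock<Option<Vec<String>>>, // String stands in for LocalizedTrace
}

impl StubTraces {
    fn new() -> Self {
        StubTraces { traces: RwLock::new(None) }
    }

    /// Mirrors `filter_traces`: clone the full canned list.
    fn filter_traces(&self) -> Option<Vec<String>> {
        self.traces.read().unwrap().clone()
    }

    /// Mirrors `trace`: first canned entry, if any.
    fn trace(&self) -> Option<String> {
        self.traces.read().unwrap().clone().and_then(|v| v.into_iter().next())
    }
}

fn main() {
    let stub = StubTraces::new();
    assert_eq!(stub.filter_traces(), None);
    *stub.traces.write().unwrap() = Some(vec!["trace-1".to_owned(), "trace-2".to_owned()]);
    assert_eq!(stub.trace(), Some("trace-1".to_owned()));
}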
@@ -151,6 +153,7 @@ impl TestBlockChainClient { latest_block_timestamp: RwLock::new(10_000_000), ancient_block: RwLock::new(None), first_block: RwLock::new(None), + traces: RwLock::new(None), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block client.genesis_hash = client.last_hash.read().clone(); @@ -360,6 +363,18 @@ impl MiningBlockChainClient for TestBlockChainClient { fn import_sealed_block(&self, _block: SealedBlock) -> ImportResult { Ok(H256::default()) } + + fn broadcast_proposal_block(&self, _block: SealedBlock) {} + + fn update_sealing(&self) { + self.miner.update_sealing(self) + } + + fn submit_seal(&self, block_hash: H256, seal: Vec) { + if self.miner.submit_seal(self, block_hash, seal).is_err() { + warn!(target: "poa", "Wrong internal seal submission!") + } + } } impl BlockChainClient for TestBlockChainClient { @@ -642,19 +657,19 @@ impl BlockChainClient for TestBlockChainClient { } fn filter_traces(&self, _filter: TraceFilter) -> Option> { - unimplemented!(); + self.traces.read().clone() } fn trace(&self, _trace: TraceId) -> Option { - unimplemented!(); + self.traces.read().clone().and_then(|vec| vec.into_iter().next()) } fn transaction_traces(&self, _trace: TransactionId) -> Option> { - unimplemented!(); + self.traces.read().clone() } fn block_traces(&self, _trace: BlockId) -> Option> { - unimplemented!(); + self.traces.read().clone() } fn queue_transactions(&self, transactions: Vec, _peer_id: usize) { @@ -663,6 +678,12 @@ impl BlockChainClient for TestBlockChainClient { self.miner.import_external_transactions(self, txs); } + fn queue_consensus_message(&self, message: Bytes) { + self.spec.engine.handle_message(&message).unwrap(); + } + + fn broadcast_consensus_message(&self, _message: Bytes) {} + fn pending_transactions(&self) -> Vec { self.miner.pending_transactions(self.chain_info().best_block_number) } diff --git a/ethcore/src/client/traits.rs b/ethcore/src/client/traits.rs index f44a4496f..ad2bed95f 100644 --- a/ethcore/src/client/traits.rs +++ b/ethcore/src/client/traits.rs @@ -202,6 +202,12 @@ pub trait BlockChainClient : Sync + Send { /// Queue transactions for importing. fn queue_transactions(&self, transactions: Vec, peer_id: usize); + /// Queue conensus engine message. + fn queue_consensus_message(&self, message: Bytes); + + /// Used by PoA to communicate with peers. + fn broadcast_consensus_message(&self, message: Bytes); + /// list all transactions fn pending_transactions(&self) -> Vec; @@ -273,6 +279,15 @@ pub trait MiningBlockChainClient: BlockChainClient { /// Returns EvmFactory. fn vm_factory(&self) -> &EvmFactory; + /// Used by PoA to try sealing on period change. + fn update_sealing(&self); + + /// Used by PoA to submit gathered signatures. + fn submit_seal(&self, block_hash: H256, seal: Vec); + + /// Broadcast a block proposal. + fn broadcast_proposal_block(&self, block: SealedBlock); + /// Import sealed block. Skips all verifications. 
fn import_sealed_block(&self, block: SealedBlock) -> ImportResult; diff --git a/ethcore/src/engines/authority_round.rs b/ethcore/src/engines/authority_round.rs index 9f78d8cec..8d1c004c5 100644 --- a/ethcore/src/engines/authority_round.rs +++ b/ethcore/src/engines/authority_round.rs @@ -25,7 +25,7 @@ use rlp::{UntrustedRlp, Rlp, View, encode}; use account_provider::AccountProvider; use block::*; use spec::CommonParams; -use engines::Engine; +use engines::{Engine, Seal, EngineError}; use header::Header; use error::{Error, BlockError}; use blockchain::extras::BlockDetails; @@ -225,8 +225,8 @@ impl Engine for AuthorityRound { /// /// This operation is synchronous and may (quite reasonably) not be available, in which `false` will /// be returned. - fn generate_seal(&self, block: &ExecutedBlock) -> Option> { - if self.proposed.load(AtomicOrdering::SeqCst) { return None; } + fn generate_seal(&self, block: &ExecutedBlock) -> Seal { + if self.proposed.load(AtomicOrdering::SeqCst) { return Seal::None; } let header = block.header(); let step = self.step(); if self.is_step_proposer(step, header.author()) { @@ -235,7 +235,8 @@ impl Engine for AuthorityRound { if let Ok(signature) = ap.sign(*header.author(), self.password.read().clone(), header.bare_hash()) { trace!(target: "poa", "generate_seal: Issuing a block for step {}.", step); self.proposed.store(true, AtomicOrdering::SeqCst); - return Some(vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]); + let rlps = vec![encode(&step).to_vec(), encode(&(&*signature as &[u8])).to_vec()]; + return Seal::Regular(rlps); } else { warn!(target: "poa", "generate_seal: FAIL: Accounts secret key unavailable."); } @@ -245,7 +246,7 @@ impl Engine for AuthorityRound { } else { trace!(target: "poa", "generate_seal: Not a proposer for step {}.", step); } - None + Seal::None } /// Check the number of seal fields. @@ -288,7 +289,7 @@ impl Engine for AuthorityRound { // Check if parent is from a previous step. if step == try!(header_step(parent)) { trace!(target: "poa", "Multiple blocks proposed for step {}.", step); - try!(Err(BlockError::DoubleVote(header.author().clone()))); + try!(Err(EngineError::DoubleVote(header.author().clone()))); } let gas_limit_divisor = self.our_params.gas_limit_bound_divisor; @@ -347,6 +348,7 @@ mod tests { use tests::helpers::*; use account_provider::AccountProvider; use spec::Spec; + use engines::Seal; #[test] fn has_valid_metadata() { @@ -416,17 +418,17 @@ mod tests { let b2 = b2.close_and_lock(); engine.set_signer(addr1, "1".into()); - if let Some(seal) = engine.generate_seal(b1.block()) { + if let Seal::Regular(seal) = engine.generate_seal(b1.block()) { assert!(b1.clone().try_seal(engine, seal).is_ok()); // Second proposal is forbidden. - assert!(engine.generate_seal(b1.block()).is_none()); + assert!(engine.generate_seal(b1.block()) == Seal::None); } engine.set_signer(addr2, "2".into()); - if let Some(seal) = engine.generate_seal(b2.block()) { + if let Seal::Regular(seal) = engine.generate_seal(b2.block()) { assert!(b2.clone().try_seal(engine, seal).is_ok()); // Second proposal is forbidden. 
- assert!(engine.generate_seal(b2.block()).is_none()); + assert!(engine.generate_seal(b2.block()) == Seal::None); } } diff --git a/ethcore/src/engines/basic_authority.rs b/ethcore/src/engines/basic_authority.rs index 5676365da..37ac4066b 100644 --- a/ethcore/src/engines/basic_authority.rs +++ b/ethcore/src/engines/basic_authority.rs @@ -21,7 +21,7 @@ use account_provider::AccountProvider; use block::*; use builtin::Builtin; use spec::CommonParams; -use engines::Engine; +use engines::{Engine, Seal}; use env_info::EnvInfo; use error::{BlockError, Error}; use evm::Schedule; @@ -112,20 +112,20 @@ impl Engine for BasicAuthority { /// /// This operation is synchronous and may (quite reasonably) not be available, in which `false` will /// be returned. - fn generate_seal(&self, block: &ExecutedBlock) -> Option> { + fn generate_seal(&self, block: &ExecutedBlock) -> Seal { if let Some(ref ap) = *self.account_provider.lock() { let header = block.header(); let message = header.bare_hash(); // account should be pernamently unlocked, otherwise sealing will fail if let Ok(signature) = ap.sign(*block.header().author(), self.password.read().clone(), message) { - return Some(vec![::rlp::encode(&(&*signature as &[u8])).to_vec()]); + return Seal::Regular(vec![::rlp::encode(&(&*signature as &[u8])).to_vec()]); } else { trace!(target: "basicauthority", "generate_seal: FAIL: accounts secret key unavailable"); } } else { trace!(target: "basicauthority", "generate_seal: FAIL: accounts not provided"); } - None + Seal::None } fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> result::Result<(), Error> { @@ -199,6 +199,7 @@ mod tests { use account_provider::AccountProvider; use header::Header; use spec::Spec; + use engines::Seal; /// Create a new test chain spec with `BasicAuthority` consensus engine. fn new_test_authority() -> Spec { @@ -269,8 +270,9 @@ mod tests { let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, addr, (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); - let seal = engine.generate_seal(b.block()).unwrap(); - assert!(b.try_seal(engine, seal).is_ok()); + if let Seal::Regular(seal) = engine.generate_seal(b.block()) { + assert!(b.try_seal(engine, seal).is_ok()); + } } #[test] diff --git a/ethcore/src/engines/instant_seal.rs b/ethcore/src/engines/instant_seal.rs index f15ccba81..74f71168c 100644 --- a/ethcore/src/engines/instant_seal.rs +++ b/ethcore/src/engines/instant_seal.rs @@ -17,12 +17,11 @@ use std::collections::BTreeMap; use util::Address; use builtin::Builtin; -use engines::Engine; +use engines::{Engine, Seal}; use env_info::EnvInfo; use spec::CommonParams; use evm::Schedule; use block::ExecutedBlock; -use util::Bytes; /// An engine which does not provide any consensus mechanism, just seals blocks internally. 
pub struct InstantSeal { @@ -54,13 +53,13 @@ impl Engine for InstantSeal { } fn schedule(&self, _env_info: &EnvInfo) -> Schedule { - Schedule::new_post_eip150(usize::max_value(), false, false, false) + Schedule::new_post_eip150(usize::max_value(), true, true, true) } fn is_sealer(&self, _author: &Address) -> Option { Some(true) } - fn generate_seal(&self, _block: &ExecutedBlock) -> Option> { - Some(Vec::new()) + fn generate_seal(&self, _block: &ExecutedBlock) -> Seal { + Seal::Regular(Vec::new()) } } @@ -72,6 +71,7 @@ mod tests { use spec::Spec; use header::Header; use block::*; + use engines::Seal; #[test] fn instant_can_seal() { @@ -84,8 +84,9 @@ mod tests { let last_hashes = Arc::new(vec![genesis_header.hash()]); let b = OpenBlock::new(engine, Default::default(), false, db, &genesis_header, last_hashes, Address::default(), (3141562.into(), 31415620.into()), vec![]).unwrap(); let b = b.close_and_lock(); - let seal = engine.generate_seal(b.block()).unwrap(); - assert!(b.try_seal(engine, seal).is_ok()); + if let Seal::Regular(seal) = engine.generate_seal(b.block()) { + assert!(b.try_seal(engine, seal).is_ok()); + } } #[test] diff --git a/ethcore/src/engines/mod.rs b/ethcore/src/engines/mod.rs index 5e5c5530f..a3e57dd65 100644 --- a/ethcore/src/engines/mod.rs +++ b/ethcore/src/engines/mod.rs @@ -20,11 +20,13 @@ mod null_engine; mod instant_seal; mod basic_authority; mod authority_round; +mod tendermint; pub use self::null_engine::NullEngine; pub use self::instant_seal::InstantSeal; pub use self::basic_authority::BasicAuthority; pub use self::authority_round::AuthorityRound; +pub use self::tendermint::Tendermint; use util::*; use account_provider::AccountProvider; @@ -42,6 +44,47 @@ use ethereum::ethash; use blockchain::extras::BlockDetails; use views::HeaderView; +/// Voting errors. +#[derive(Debug)] +pub enum EngineError { + /// Signature does not belong to an authority. + NotAuthorized(Address), + /// The same author issued different votes at the same step. + DoubleVote(Address), + /// The received block is from an incorrect proposer. + NotProposer(Mismatch
), + /// Message was not expected. + UnexpectedMessage, + /// Seal field has an unexpected size. + BadSealFieldSize(OutOfBounds), +} + +impl fmt::Display for EngineError { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + use self::EngineError::*; + let msg = match *self { + DoubleVote(ref address) => format!("Author {} issued too many blocks.", address), + NotProposer(ref mis) => format!("Author is not a current proposer: {}", mis), + NotAuthorized(ref address) => format!("Signer {} is not authorized.", address), + UnexpectedMessage => "This Engine should not be fed messages.".into(), + BadSealFieldSize(ref oob) => format!("Seal field has an unexpected length: {}", oob), + }; + + f.write_fmt(format_args!("Engine error ({})", msg)) + } +} + +/// Seal type. +#[derive(Debug, PartialEq, Eq)] +pub enum Seal { + /// Proposal seal; should be broadcasted, but not inserted into blockchain. + Proposal(Vec), + /// Regular block seal; should be part of the blockchain. + Regular(Vec), + /// Engine does generate seal for this block right now. + None, +} + /// A consensus mechanism for the chain. Generally either proof-of-work or proof-of-stake-based. /// Provides hooks into each of the major parts of block import. pub trait Engine : Sync + Send { @@ -94,7 +137,7 @@ pub trait Engine : Sync + Send { /// /// This operation is synchronous and may (quite reasonably) not be available, in which None will /// be returned. - fn generate_seal(&self, _block: &ExecutedBlock) -> Option> { None } + fn generate_seal(&self, _block: &ExecutedBlock) -> Seal { Seal::None } /// Phase 1 quick block verification. Only does checks that are cheap. `block` (the header's full block) /// may be provided for additional checks. Returns either a null `Ok` or a general error detailing the problem with import. @@ -133,6 +176,10 @@ pub trait Engine : Sync + Send { header.set_gas_limit(parent.gas_limit().clone()); } + /// Handle any potential consensus messages; + /// updating consensus state and potentially issuing a new one. + fn handle_message(&self, _message: &[u8]) -> Result<(), Error> { Err(EngineError::UnexpectedMessage.into()) } + // TODO: builtin contract routing - to do this properly, it will require removing the built-in configuration-reading logic // from Spec into here and removing the Spec::builtins field. /// Determine whether a particular address is a builtin contract. @@ -153,9 +200,16 @@ pub trait Engine : Sync + Send { ethash::is_new_best_block(best_total_difficulty, parent_details, new_header) } + /// Find out if the block is a proposal block and should not be inserted into the DB. + /// Takes a header of a fully verified block. + fn is_proposal(&self, _verified_header: &Header) -> bool { false } + /// Register an account which signs consensus messages. fn set_signer(&self, _address: Address, _password: String) {} + /// Stops any services that the may hold the Engine and makes it safe to drop. + fn stop(&self) {} + /// Add a channel for communication with Client which can be used for sealing. fn register_message_channel(&self, _message_channel: IoChannel) {} diff --git a/ethcore/src/engines/tendermint/message.rs b/ethcore/src/engines/tendermint/message.rs new file mode 100644 index 000000000..3e5da592d --- /dev/null +++ b/ethcore/src/engines/tendermint/message.rs @@ -0,0 +1,279 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. 
+ +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tendermint message handling. + +use util::*; +use super::{Height, Round, BlockHash, Step}; +use error::Error; +use header::Header; +use rlp::*; +use ethkey::{recover, public_to_address}; + +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct ConsensusMessage { + pub signature: H520, + pub height: Height, + pub round: Round, + pub step: Step, + pub block_hash: Option, +} + + +fn consensus_round(header: &Header) -> Result { + let round_rlp = header.seal().get(0).expect("seal passed basic verification; seal has 3 fields; qed"); + UntrustedRlp::new(round_rlp.as_slice()).as_val() +} + +impl ConsensusMessage { + pub fn new(signature: H520, height: Height, round: Round, step: Step, block_hash: Option) -> Self { + ConsensusMessage { + signature: signature, + height: height, + round: round, + step: step, + block_hash: block_hash, + } + } + + pub fn new_proposal(header: &Header) -> Result { + Ok(ConsensusMessage { + signature: try!(UntrustedRlp::new(header.seal().get(1).expect("seal passed basic verification; seal has 3 fields; qed").as_slice()).as_val()), + height: header.number() as Height, + round: try!(consensus_round(header)), + step: Step::Propose, + block_hash: Some(header.bare_hash()), + }) + } + + pub fn new_commit(proposal: &ConsensusMessage, signature: H520) -> Self { + ConsensusMessage { + signature: signature, + height: proposal.height, + round: proposal.round, + step: Step::Precommit, + block_hash: proposal.block_hash, + } + } + + pub fn is_height(&self, height: Height) -> bool { + self.height == height + } + + pub fn is_round(&self, height: Height, round: Round) -> bool { + self.height == height && self.round == round + } + + pub fn is_step(&self, height: Height, round: Round, step: Step) -> bool { + self.height == height && self.round == round && self.step == step + } + + pub fn is_block_hash(&self, h: Height, r: Round, s: Step, block_hash: Option) -> bool { + self.height == h && self.round == r && self.step == s && self.block_hash == block_hash + } + + pub fn is_aligned(&self, m: &ConsensusMessage) -> bool { + self.is_block_hash(m.height, m.round, m.step, m.block_hash) + } + + pub fn verify(&self) -> Result { + let full_rlp = ::rlp::encode(self); + let block_info = Rlp::new(&full_rlp).at(1); + let public_key = try!(recover(&self.signature.into(), &block_info.as_raw().sha3())); + Ok(public_to_address(&public_key)) + } + + pub fn precommit_hash(&self) -> H256 { + message_info_rlp(self.height, self.round, Step::Precommit, self.block_hash).sha3() + } +} + +impl PartialOrd for ConsensusMessage { + fn partial_cmp(&self, m: &ConsensusMessage) -> Option { + Some(self.cmp(m)) + } +} + +impl Step { + fn number(&self) -> u8 { + match *self { + Step::Propose => 0, + Step::Prevote => 1, + Step::Precommit => 2, + Step::Commit => 3, + } + } +} + +impl Ord for ConsensusMessage { + fn cmp(&self, m: &ConsensusMessage) -> Ordering { + if self.height != m.height { + self.height.cmp(&m.height) + } else 
if self.round != m.round { + self.round.cmp(&m.round) + } else if self.step != m.step { + self.step.number().cmp(&m.step.number()) + } else { + self.signature.cmp(&m.signature) + } + } +} + +impl Decodable for Step { + fn decode(decoder: &D) -> Result where D: Decoder { + match try!(decoder.as_rlp().as_val()) { + 0u8 => Ok(Step::Propose), + 1 => Ok(Step::Prevote), + 2 => Ok(Step::Precommit), + _ => Err(DecoderError::Custom("Invalid step.")), + } + } +} + +impl Encodable for Step { + fn rlp_append(&self, s: &mut RlpStream) { + s.append(&self.number()); + } +} + +/// (signature, height, round, step, block_hash) +impl Decodable for ConsensusMessage { + fn decode(decoder: &D) -> Result where D: Decoder { + let rlp = decoder.as_rlp(); + let m = try!(rlp.at(1)); + let block_message: H256 = try!(m.val_at(3)); + Ok(ConsensusMessage { + signature: try!(rlp.val_at(0)), + height: try!(m.val_at(0)), + round: try!(m.val_at(1)), + step: try!(m.val_at(2)), + block_hash: match block_message.is_zero() { + true => None, + false => Some(block_message), + } + }) + } +} + +impl Encodable for ConsensusMessage { + fn rlp_append(&self, s: &mut RlpStream) { + let info = message_info_rlp(self.height, self.round, self.step, self.block_hash); + s.begin_list(2) + .append(&self.signature) + .append_raw(&info, 1); + } +} + +pub fn message_info_rlp(height: Height, round: Round, step: Step, block_hash: Option) -> Bytes { + // TODO: figure out whats wrong with nested list encoding + let mut s = RlpStream::new_list(5); + s.append(&height).append(&round).append(&step).append(&block_hash.unwrap_or_else(H256::zero)); + s.out() +} + + +pub fn message_full_rlp(signature: &H520, vote_info: &Bytes) -> Bytes { + let mut s = RlpStream::new_list(2); + s.append(signature).append_raw(vote_info, 1); + s.out() +} + +#[cfg(test)] +mod tests { + use util::*; + use rlp::*; + use super::super::Step; + use super::*; + use account_provider::AccountProvider; + use header::Header; + + #[test] + fn encode_decode() { + let message = ConsensusMessage { + signature: H520::default(), + height: 10, + round: 123, + step: Step::Precommit, + block_hash: Some("1".sha3()) + }; + let raw_rlp = ::rlp::encode(&message).to_vec(); + let rlp = Rlp::new(&raw_rlp); + assert_eq!(message, rlp.as_val()); + + let message = ConsensusMessage { + signature: H520::default(), + height: 1314, + round: 0, + step: Step::Prevote, + block_hash: None + }; + let raw_rlp = ::rlp::encode(&message); + let rlp = Rlp::new(&raw_rlp); + assert_eq!(message, rlp.as_val()); + } + + #[test] + fn generate_and_verify() { + let tap = Arc::new(AccountProvider::transient_provider()); + let addr = tap.insert_account("0".sha3(), "0").unwrap(); + tap.unlock_account_permanently(addr, "0".into()).unwrap(); + + let mi = message_info_rlp(123, 2, Step::Precommit, Some(H256::default())); + + let raw_rlp = message_full_rlp(&tap.sign(addr, None, mi.sha3()).unwrap().into(), &mi); + + let rlp = UntrustedRlp::new(&raw_rlp); + let message: ConsensusMessage = rlp.as_val().unwrap(); + match message.verify() { Ok(a) if a == addr => {}, _ => panic!(), }; + } + + #[test] + fn proposal_message() { + let mut header = Header::default(); + let seal = vec![ + ::rlp::encode(&0u8).to_vec(), + ::rlp::encode(&H520::default()).to_vec(), + Vec::new() + ]; + header.set_seal(seal); + let message = ConsensusMessage::new_proposal(&header).unwrap(); + assert_eq!( + message, + ConsensusMessage { + signature: Default::default(), + height: 0, + round: 0, + step: Step::Propose, + block_hash: Some(header.bare_hash()) + } + ); + } + + 
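// A hedged, illustrative extra test (not part of the original patch): it
// exercises the Ord impl above, which orders ConsensusMessage by height,
// then round, then step number, and finally by signature bytes.
#[test]
fn message_ordering() {
    let msg = |height, round, step| ConsensusMessage {
        signature: H520::default(),
        height: height,
        round: round,
        step: step,
        block_hash: None,
    };
    // Height dominates round and step.
    assert!(msg(1, 5, Step::Precommit) < msg(2, 0, Step::Propose));
    // Within a height, the later round is greater.
    assert!(msg(2, 1, Step::Propose) > msg(2, 0, Step::Precommit));
    // Within a round, Propose < Prevote < Precommit < Commit.
    assert!(msg(2, 1, Step::Prevote) < msg(2, 1, Step::Precommit));
}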
#[test] + fn message_info_from_header() { + let header = Header::default(); + let pro = ConsensusMessage { + signature: Default::default(), + height: 0, + round: 0, + step: Step::Propose, + block_hash: Some(header.bare_hash()) + }; + let pre = message_info_rlp(0, 0, Step::Precommit, Some(header.bare_hash())); + + assert_eq!(pro.precommit_hash(), pre.sha3()); + } +} diff --git a/ethcore/src/engines/tendermint/mod.rs b/ethcore/src/engines/tendermint/mod.rs new file mode 100644 index 000000000..bb6d54ca5 --- /dev/null +++ b/ethcore/src/engines/tendermint/mod.rs @@ -0,0 +1,962 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +/// Tendermint BFT consensus engine with round robin proof-of-authority. +/// At each blockchain `Height` there can be multiple `Round`s of voting. +/// Signatures always sign `Height`, `Round`, `Step` and `BlockHash` which is a block hash without seal. +/// First a block with `Seal::Proposal` is issued by the designated proposer. +/// Next the `Round` proceeds through `Prevote` and `Precommit` `Step`s. +/// Block is issued when there is enough `Precommit` votes collected on a particular block at the end of a `Round`. +/// Once enough votes have been gathered the proposer issues that block in the `Commit` step. + +mod message; +mod transition; +mod params; +mod vote_collector; + +use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; +use util::*; +use error::{Error, BlockError}; +use header::Header; +use builtin::Builtin; +use env_info::EnvInfo; +use transaction::SignedTransaction; +use rlp::{UntrustedRlp, View}; +use ethkey::{recover, public_to_address}; +use account_provider::AccountProvider; +use block::*; +use spec::CommonParams; +use engines::{Engine, Seal, EngineError}; +use blockchain::extras::BlockDetails; +use views::HeaderView; +use evm::Schedule; +use io::{IoService, IoChannel}; +use service::ClientIoMessage; +use self::message::*; +use self::transition::TransitionHandler; +use self::params::TendermintParams; +use self::vote_collector::VoteCollector; + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Step { + Propose, + Prevote, + Precommit, + Commit +} + +impl Step { + pub fn is_pre(self) -> bool { + match self { + Step::Prevote | Step::Precommit => true, + _ => false, + } + } +} + +pub type Height = usize; +pub type Round = usize; +pub type BlockHash = H256; + +/// Engine using `Tendermint` consensus algorithm, suitable for EVM chain. +pub struct Tendermint { + params: CommonParams, + our_params: TendermintParams, + builtins: BTreeMap, + step_service: IoService, + /// Address to be used as authority. + authority: RwLock
, + /// Password used for signing messages. + password: RwLock>, + /// Blockchain height. + height: AtomicUsize, + /// Consensus round. + round: AtomicUsize, + /// Consensus step. + step: RwLock, + /// Vote accumulator. + votes: VoteCollector, + /// Channel for updating the sealing. + message_channel: Mutex>>, + /// Used to sign messages and proposals. + account_provider: Mutex>>, + /// Message for the last PoLC. + lock_change: RwLock>, + /// Last lock round. + last_lock: AtomicUsize, + /// Bare hash of the proposed block, used for seal submission. + proposal: RwLock>, +} + +impl Tendermint { + /// Create a new instance of Tendermint engine + pub fn new(params: CommonParams, our_params: TendermintParams, builtins: BTreeMap) -> Result, Error> { + let engine = Arc::new( + Tendermint { + params: params, + our_params: our_params, + builtins: builtins, + step_service: try!(IoService::::start()), + authority: RwLock::new(Address::default()), + password: RwLock::new(None), + height: AtomicUsize::new(1), + round: AtomicUsize::new(0), + step: RwLock::new(Step::Propose), + votes: VoteCollector::new(), + message_channel: Mutex::new(None), + account_provider: Mutex::new(None), + lock_change: RwLock::new(None), + last_lock: AtomicUsize::new(0), + proposal: RwLock::new(None), + }); + let handler = TransitionHandler { engine: Arc::downgrade(&engine) }; + try!(engine.step_service.register_handler(Arc::new(handler))); + Ok(engine) + } + + fn update_sealing(&self) { + if let Some(ref channel) = *self.message_channel.lock() { + match channel.send(ClientIoMessage::UpdateSealing) { + Ok(_) => trace!(target: "poa", "UpdateSealing message sent."), + Err(err) => warn!(target: "poa", "Could not send a sealing message {}.", err), + } + } + } + + fn submit_seal(&self, block_hash: H256, seal: Vec) { + if let Some(ref channel) = *self.message_channel.lock() { + match channel.send(ClientIoMessage::SubmitSeal(block_hash, seal)) { + Ok(_) => trace!(target: "poa", "SubmitSeal message sent."), + Err(err) => warn!(target: "poa", "Could not send a sealing message {}.", err), + } + } + } + + fn broadcast_message(&self, message: Bytes) { + let channel = self.message_channel.lock().clone(); + if let Some(ref channel) = channel { + match channel.send(ClientIoMessage::BroadcastMessage(message)) { + Ok(_) => trace!(target: "poa", "BroadcastMessage message sent."), + Err(err) => warn!(target: "poa", "broadcast_message: Could not send a sealing message {}.", err), + } + } else { + warn!(target: "poa", "broadcast_message: No IoChannel available."); + } + } + + fn generate_message(&self, block_hash: Option) -> Option { + if let Some(ref ap) = *self.account_provider.lock() { + let h = self.height.load(AtomicOrdering::SeqCst); + let r = self.round.load(AtomicOrdering::SeqCst); + let s = self.step.read(); + let vote_info = message_info_rlp(h, r, *s, block_hash); + let authority = self.authority.read(); + match ap.sign(*authority, self.password.read().clone(), vote_info.sha3()).map(Into::into) { + Ok(signature) => { + let message_rlp = message_full_rlp(&signature, &vote_info); + let message = ConsensusMessage::new(signature, h, r, *s, block_hash); + self.votes.vote(message.clone(), *authority); + debug!(target: "poa", "Generated {:?} as {}.", message, *authority); + self.handle_valid_message(&message); + + Some(message_rlp) + }, + Err(e) => { + trace!(target: "poa", "Could not sign the message {}", e); + None + }, + } + } else { + warn!(target: "poa", "No AccountProvider available."); + None + } + } + + fn 
generate_and_broadcast_message(&self, block_hash: Option) { + if let Some(message) = self.generate_message(block_hash) { + self.broadcast_message(message); + } + } + + /// Broadcast all messages since last issued block to get the peers up to speed. + fn broadcast_old_messages(&self) { + for m in self.votes.get_up_to(self.height.load(AtomicOrdering::SeqCst)).into_iter() { + self.broadcast_message(m); + } + } + + fn to_next_height(&self, height: Height) { + let new_height = height + 1; + debug!(target: "poa", "Received a Commit, transitioning to height {}.", new_height); + self.last_lock.store(0, AtomicOrdering::SeqCst); + self.height.store(new_height, AtomicOrdering::SeqCst); + self.round.store(0, AtomicOrdering::SeqCst); + *self.lock_change.write() = None; + } + + /// Use via step_service to transition steps. + fn to_step(&self, step: Step) { + if let Err(io_err) = self.step_service.send_message(step) { + warn!(target: "poa", "Could not proceed to step {}.", io_err) + } + *self.step.write() = step; + match step { + Step::Propose => { + *self.proposal.write() = None; + self.update_sealing() + }, + Step::Prevote => { + let block_hash = match *self.lock_change.read() { + Some(ref m) if !self.should_unlock(m.round) => m.block_hash, + _ => self.proposal.read().clone(), + }; + self.generate_and_broadcast_message(block_hash); + }, + Step::Precommit => { + trace!(target: "poa", "to_step: Precommit."); + let block_hash = match *self.lock_change.read() { + Some(ref m) if self.is_round(m) && m.block_hash.is_some() => { + trace!(target: "poa", "Setting last lock: {}", m.round); + self.last_lock.store(m.round, AtomicOrdering::SeqCst); + m.block_hash + }, + _ => None, + }; + self.generate_and_broadcast_message(block_hash); + }, + Step::Commit => { + trace!(target: "poa", "to_step: Commit."); + // Commit the block using a complete signature set. + let round = self.round.load(AtomicOrdering::SeqCst); + let height = self.height.load(AtomicOrdering::SeqCst); + if let Some(block_hash) = *self.proposal.read() { + // Generate seal and remove old votes. + if self.is_proposer(&*self.authority.read()).is_ok() { + if let Some(seal) = self.votes.seal_signatures(height, round, block_hash) { + trace!(target: "poa", "Collected seal: {:?}", seal); + let seal = vec![ + ::rlp::encode(&round).to_vec(), + ::rlp::encode(&seal.proposal).to_vec(), + ::rlp::encode(&seal.votes).to_vec() + ]; + self.submit_seal(block_hash, seal); + self.to_next_height(height); + } else { + warn!(target: "poa", "Not enough votes found!"); + } + } + } + }, + } + } + + fn is_authority(&self, address: &Address) -> bool { + self.our_params.authorities.contains(address) + } + + fn is_above_threshold(&self, n: usize) -> bool { + n > self.our_params.authority_n * 2/3 + } + + /// Check if address is a proposer for given round. + fn is_round_proposer(&self, height: Height, round: Round, address: &Address) -> Result<(), EngineError> { + let ref p = self.our_params; + let proposer_nonce = height + round; + trace!(target: "poa", "is_proposer: Proposer nonce: {}", proposer_nonce); + let proposer = p.authorities.get(proposer_nonce % p.authority_n).expect("There are authority_n authorities; taking number modulo authority_n gives number in authority_n range; qed"); + if proposer == address { + Ok(()) + } else { + Err(EngineError::NotProposer(Mismatch { expected: proposer.clone(), found: address.clone() })) + } + } + + /// Check if address is the current proposer. 
+ fn is_proposer(&self, address: &Address) -> Result<(), EngineError> { + self.is_round_proposer(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst), address) + } + + fn is_height(&self, message: &ConsensusMessage) -> bool { + message.is_height(self.height.load(AtomicOrdering::SeqCst)) + } + + fn is_round(&self, message: &ConsensusMessage) -> bool { + message.is_round(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst)) + } + + fn increment_round(&self, n: Round) { + trace!(target: "poa", "increment_round: New round."); + self.round.fetch_add(n, AtomicOrdering::SeqCst); + } + + fn should_unlock(&self, lock_change_round: Round) -> bool { + self.last_lock.load(AtomicOrdering::SeqCst) < lock_change_round + && lock_change_round < self.round.load(AtomicOrdering::SeqCst) + } + + + fn has_enough_any_votes(&self) -> bool { + let step_votes = self.votes.count_step_votes(self.height.load(AtomicOrdering::SeqCst), self.round.load(AtomicOrdering::SeqCst), *self.step.read()); + self.is_above_threshold(step_votes) + } + + fn has_enough_future_step_votes(&self, message: &ConsensusMessage) -> bool { + if message.round > self.round.load(AtomicOrdering::SeqCst) { + let step_votes = self.votes.count_step_votes(message.height, message.round, message.step); + self.is_above_threshold(step_votes) + } else { + false + } + } + + fn has_enough_aligned_votes(&self, message: &ConsensusMessage) -> bool { + let aligned_count = self.votes.count_aligned_votes(&message); + self.is_above_threshold(aligned_count) + } + + fn handle_valid_message(&self, message: &ConsensusMessage) { + let is_newer_than_lock = match *self.lock_change.read() { + Some(ref lock) => message > lock, + None => true, + }; + let lock_change = is_newer_than_lock + && message.step == Step::Prevote + && message.block_hash.is_some() + && self.has_enough_aligned_votes(message); + if lock_change { + trace!(target: "poa", "handle_valid_message: Lock change."); + *self.lock_change.write() = Some(message.clone()); + } + // Check if it can affect the step transition. + if self.is_height(message) { + let next_step = match *self.step.read() { + Step::Precommit if self.has_enough_aligned_votes(message) => { + if message.block_hash.is_none() { + self.increment_round(1); + Some(Step::Propose) + } else { + Some(Step::Commit) + } + }, + Step::Precommit if self.has_enough_future_step_votes(message) => { + self.increment_round(message.round - self.round.load(AtomicOrdering::SeqCst)); + Some(Step::Precommit) + }, + // Avoid counting twice. 
+ Step::Prevote if lock_change => Some(Step::Precommit), + Step::Prevote if self.has_enough_aligned_votes(message) => Some(Step::Precommit), + Step::Prevote if self.has_enough_future_step_votes(message) => { + self.increment_round(message.round - self.round.load(AtomicOrdering::SeqCst)); + Some(Step::Prevote) + }, + _ => None, + }; + + if let Some(step) = next_step { + trace!(target: "poa", "Transition to {:?} triggered.", step); + self.to_step(step); + } + } + } +} + +impl Engine for Tendermint { + fn name(&self) -> &str { "Tendermint" } + fn version(&self) -> SemanticVersion { SemanticVersion::new(1, 0, 0) } + /// (consensus round, proposal signature, authority signatures) + fn seal_fields(&self) -> usize { 3 } + + fn params(&self) -> &CommonParams { &self.params } + fn builtins(&self) -> &BTreeMap { &self.builtins } + + fn maximum_uncle_count(&self) -> usize { 0 } + fn maximum_uncle_age(&self) -> usize { 0 } + + /// Additional engine-specific information for the user/developer concerning `header`. + fn extra_info(&self, header: &Header) -> BTreeMap { + let message = ConsensusMessage::new_proposal(header).expect("Invalid header."); + map![ + "signature".into() => message.signature.to_string(), + "height".into() => message.height.to_string(), + "round".into() => message.round.to_string(), + "block_hash".into() => message.block_hash.as_ref().map(ToString::to_string).unwrap_or("".into()) + ] + } + + fn schedule(&self, _env_info: &EnvInfo) -> Schedule { + Schedule::new_post_eip150(usize::max_value(), true, true, true) + } + + fn populate_from_parent(&self, header: &mut Header, parent: &Header, gas_floor_target: U256, _gas_ceil_target: U256) { + header.set_difficulty(parent.difficulty().clone()); + header.set_gas_limit({ + let gas_limit = parent.gas_limit().clone(); + let bound_divisor = self.our_params.gas_limit_bound_divisor; + if gas_limit < gas_floor_target { + min(gas_floor_target, gas_limit + gas_limit / bound_divisor - 1.into()) + } else { + max(gas_floor_target, gas_limit - gas_limit / bound_divisor + 1.into()) + } + }); + } + + /// Should this node participate. + fn is_sealer(&self, address: &Address) -> Option { + Some(self.is_authority(address)) + } + + /// Attempt to seal generate a proposal seal. + fn generate_seal(&self, block: &ExecutedBlock) -> Seal { + if let Some(ref ap) = *self.account_provider.lock() { + let header = block.header(); + let author = header.author(); + // Only proposer can generate seal if None was generated. + if self.is_proposer(author).is_err() || self.proposal.read().is_some() { + return Seal::None; + } + + let height = header.number() as Height; + let round = self.round.load(AtomicOrdering::SeqCst); + let bh = Some(header.bare_hash()); + let vote_info = message_info_rlp(height, round, Step::Propose, bh.clone()); + if let Ok(signature) = ap.sign(*author, self.password.read().clone(), vote_info.sha3()).map(H520::from) { + // Insert Propose vote. + debug!(target: "poa", "Submitting proposal {} at height {} round {}.", header.bare_hash(), height, round); + self.votes.vote(ConsensusMessage::new(signature, height, round, Step::Propose, bh), *author); + // Remember proposal for later seal submission. 
+ *self.proposal.write() = bh; + Seal::Proposal(vec![ + ::rlp::encode(&round).to_vec(), + ::rlp::encode(&signature).to_vec(), + ::rlp::EMPTY_LIST_RLP.to_vec() + ]) + } else { + warn!(target: "poa", "generate_seal: FAIL: accounts secret key unavailable"); + Seal::None + } + } else { + warn!(target: "poa", "generate_seal: FAIL: accounts not provided"); + Seal::None + } + } + + fn handle_message(&self, rlp: &[u8]) -> Result<(), Error> { + let rlp = UntrustedRlp::new(rlp); + let message: ConsensusMessage = try!(rlp.as_val()); + if !self.votes.is_old_or_known(&message) { + let sender = public_to_address(&try!(recover(&message.signature.into(), &try!(rlp.at(1)).as_raw().sha3()))); + if !self.is_authority(&sender) { + try!(Err(EngineError::NotAuthorized(sender))); + } + self.broadcast_message(rlp.as_raw().to_vec()); + trace!(target: "poa", "Handling a valid {:?} from {}.", message, sender); + self.votes.vote(message.clone(), sender); + self.handle_valid_message(&message); + } + Ok(()) + } + + fn verify_block_basic(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { + let seal_length = header.seal().len(); + if seal_length == self.seal_fields() { + let signatures_len = header.seal()[2].len(); + if signatures_len >= 1 { + Ok(()) + } else { + Err(From::from(EngineError::BadSealFieldSize(OutOfBounds { + min: Some(1), + max: None, + found: signatures_len + }))) + } + } else { + Err(From::from(BlockError::InvalidSealArity( + Mismatch { expected: self.seal_fields(), found: seal_length } + ))) + } + + } + + fn verify_block_unordered(&self, header: &Header, _block: Option<&[u8]>) -> Result<(), Error> { + let proposal = try!(ConsensusMessage::new_proposal(header)); + let proposer = try!(proposal.verify()); + if !self.is_authority(&proposer) { + try!(Err(EngineError::NotAuthorized(proposer))) + } + + let precommit_hash = proposal.precommit_hash(); + let ref signatures_field = header.seal()[2]; + let mut signature_count = 0; + let mut origins = HashSet::new(); + for rlp in UntrustedRlp::new(signatures_field).iter() { + let precommit: ConsensusMessage = ConsensusMessage::new_commit(&proposal, try!(rlp.as_val())); + let address = match self.votes.get(&precommit) { + Some(a) => a, + None => public_to_address(&try!(recover(&precommit.signature.into(), &precommit_hash))), + }; + if !self.our_params.authorities.contains(&address) { + try!(Err(EngineError::NotAuthorized(address.to_owned()))) + } + + if origins.insert(address) { + signature_count += 1; + } else { + warn!(target: "poa", "verify_block_unordered: Duplicate signature from {} on the seal.", address); + try!(Err(BlockError::InvalidSeal)); + } + } + + // Check if its a proposal if there is not enough precommits. + if !self.is_above_threshold(signature_count) { + let signatures_len = signatures_field.len(); + // Proposal has to have an empty signature list. 
+ if signatures_len != 1 { + try!(Err(EngineError::BadSealFieldSize(OutOfBounds { + min: Some(1), + max: Some(1), + found: signatures_len + }))); + } + try!(self.is_round_proposer(proposal.height, proposal.round, &proposer)); + } + Ok(()) + } + + fn verify_block_family(&self, header: &Header, parent: &Header, _block: Option<&[u8]>) -> Result<(), Error> { + if header.number() == 0 { + try!(Err(BlockError::RidiculousNumber(OutOfBounds { min: Some(1), max: None, found: header.number() }))); + } + + let gas_limit_divisor = self.our_params.gas_limit_bound_divisor; + let min_gas = parent.gas_limit().clone() - parent.gas_limit().clone() / gas_limit_divisor; + let max_gas = parent.gas_limit().clone() + parent.gas_limit().clone() / gas_limit_divisor; + if header.gas_limit() <= &min_gas || header.gas_limit() >= &max_gas { + try!(Err(BlockError::InvalidGasLimit(OutOfBounds { min: Some(min_gas), max: Some(max_gas), found: header.gas_limit().clone() }))); + } + + Ok(()) + } + + fn verify_transaction_basic(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> { + try!(t.check_low_s()); + Ok(()) + } + + fn verify_transaction(&self, t: &SignedTransaction, _header: &Header) -> Result<(), Error> { + t.sender().map(|_|()) // Perform EC recovery and cache sender + } + + fn set_signer(&self, address: Address, password: String) { + *self.authority.write() = address; + *self.password.write() = Some(password); + self.to_step(Step::Propose); + } + + fn stop(&self) { + self.step_service.stop() + } + + fn is_new_best_block(&self, _best_total_difficulty: U256, best_header: HeaderView, _parent_details: &BlockDetails, new_header: &HeaderView) -> bool { + let new_number = new_header.number(); + let best_number = best_header.number(); + trace!(target: "poa", "new_header: {}, best_header: {}", new_number, best_number); + if new_number != best_number { + new_number > best_number + } else { + let new_seal = new_header.seal(); + let best_seal = best_header.seal(); + let new_signatures = new_seal.get(2).expect("Tendermint seal should have three elements.").len(); + let best_signatures = best_seal.get(2).expect("Tendermint seal should have three elements.").len(); + if new_signatures > best_signatures { + true + } else { + let new_round: Round = ::rlp::Rlp::new(&new_seal.get(0).expect("Tendermint seal should have three elements.")).as_val(); + let best_round: Round = ::rlp::Rlp::new(&best_seal.get(0).expect("Tendermint seal should have three elements.")).as_val(); + new_round > best_round + } + } + } + + fn is_proposal(&self, header: &Header) -> bool { + let signatures_len = header.seal()[2].len(); + // Signatures have to be an empty list rlp. + let proposal = ConsensusMessage::new_proposal(header).expect("block went through full verification; this Engine verifies new_proposal creation; qed"); + if signatures_len != 1 { + // New Commit received, skip to next height. + trace!(target: "poa", "Received a commit for height {}, round {}.", proposal.height, proposal.round); + self.to_next_height(proposal.height); + return false; + } + let proposer = proposal.verify().expect("block went through full verification; this Engine tries verify; qed"); + debug!(target: "poa", "Received a new proposal for height {}, round {} from {}.", proposal.height, proposal.round, proposer); + if self.is_round(&proposal) { + *self.proposal.write() = proposal.block_hash.clone(); + } + self.votes.vote(proposal, proposer); + true + } + + /// Equivalent to a timeout: to be used for tests. 
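A small sketch, using u64 in place of U256, of the gas-limit rule that `populate_from_parent` applies and `verify_block_family` re-checks above: each block may only move the limit toward the floor target by roughly parent_gas / bound_divisor. The function and figures below are illustrative only; the test-timeout `step` helper documented just above continues below.

fn next_gas_limit(parent_gas: u64, floor_target: u64, bound_divisor: u64) -> u64 {
    // Mirror of `populate_from_parent`: creep toward the floor target, bounded by the divisor.
    let delta = parent_gas / bound_divisor;
    if parent_gas < floor_target {
        (parent_gas + delta - 1).min(floor_target)
    } else {
        (parent_gas - delta + 1).max(floor_target)
    }
}

fn main() {
    // With the default divisor of 0x0400 (1024) the limit moves in small steps.
    assert_eq!(next_gas_limit(4_000_000, 4_700_000, 1024), 4_003_905);
    // Once the target is reached it stays there.
    assert_eq!(next_gas_limit(4_700_000, 4_700_000, 1024), 4_700_000);
}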
+ fn step(&self) { + let next_step = match *self.step.read() { + Step::Propose => { + trace!(target: "poa", "Propose timeout."); + Step::Prevote + }, + Step::Prevote if self.has_enough_any_votes() => { + trace!(target: "poa", "Prevote timeout."); + Step::Precommit + }, + Step::Prevote => { + trace!(target: "poa", "Prevote timeout without enough votes."); + self.broadcast_old_messages(); + Step::Prevote + }, + Step::Precommit if self.has_enough_any_votes() => { + trace!(target: "poa", "Precommit timeout."); + self.increment_round(1); + Step::Propose + }, + Step::Precommit => { + trace!(target: "poa", "Precommit timeout without enough votes."); + self.broadcast_old_messages(); + Step::Precommit + }, + Step::Commit => { + trace!(target: "poa", "Commit timeout."); + Step::Propose + }, + }; + self.to_step(next_step); + } + + fn register_message_channel(&self, message_channel: IoChannel) { + trace!(target: "poa", "Register the IoChannel."); + *self.message_channel.lock() = Some(message_channel); + } + + fn register_account_provider(&self, account_provider: Arc) { + *self.account_provider.lock() = Some(account_provider); + } +} + +#[cfg(test)] +mod tests { + use util::*; + use util::trie::TrieSpec; + use io::{IoContext, IoHandler}; + use block::*; + use error::{Error, BlockError}; + use header::Header; + use io::IoChannel; + use env_info::EnvInfo; + use tests::helpers::*; + use account_provider::AccountProvider; + use service::ClientIoMessage; + use spec::Spec; + use engines::{Engine, EngineError, Seal}; + use super::*; + use super::message::*; + + /// Accounts inserted with "0" and "1" are authorities. First proposer is "0". + fn setup() -> (Spec, Arc) { + let tap = Arc::new(AccountProvider::transient_provider()); + let spec = Spec::new_test_tendermint(); + spec.engine.register_account_provider(tap.clone()); + (spec, tap) + } + + fn propose_default(spec: &Spec, proposer: Address) -> (LockedBlock, Vec) { + let mut db_result = get_temp_state_db(); + let mut db = db_result.take(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); + let genesis_header = spec.genesis_header(); + let last_hashes = Arc::new(vec![genesis_header.hash()]); + let b = OpenBlock::new(spec.engine.as_ref(), Default::default(), false, db.boxed_clone(), &genesis_header, last_hashes, proposer, (3141562.into(), 31415620.into()), vec![]).unwrap(); + let b = b.close_and_lock(); + if let Seal::Proposal(seal) = spec.engine.generate_seal(b.block()) { + (b, seal) + } else { + panic!() + } + } + + fn vote(engine: &Arc, signer: F, height: usize, round: usize, step: Step, block_hash: Option) -> Bytes where F: FnOnce(H256) -> Result { + let mi = message_info_rlp(height, round, step, block_hash); + let m = message_full_rlp(&signer(mi.sha3()).unwrap().into(), &mi); + engine.handle_message(&m).unwrap(); + m + } + + fn proposal_seal(tap: &Arc, header: &Header, round: Round) -> Vec { + let author = header.author(); + let vote_info = message_info_rlp(header.number() as Height, round, Step::Propose, Some(header.bare_hash())); + let signature = tap.sign(*author, None, vote_info.sha3()).unwrap(); + vec![ + ::rlp::encode(&round).to_vec(), + ::rlp::encode(&H520::from(signature)).to_vec(), + ::rlp::EMPTY_LIST_RLP.to_vec() + ] + } + + fn precommit_signatures(tap: &Arc, height: Height, round: Round, bare_hash: Option, v1: H160, v2: H160) -> Bytes { + let vote_info = message_info_rlp(height, round, Step::Precommit, bare_hash); + ::rlp::encode(&vec![ + H520::from(tap.sign(v1, None, vote_info.sha3()).unwrap()), + 
H520::from(tap.sign(v2, None, vote_info.sha3()).unwrap()) + ]).to_vec() + } + + fn insert_and_unlock(tap: &Arc, acc: &str) -> Address { + let addr = tap.insert_account(acc.sha3(), acc).unwrap(); + tap.unlock_account_permanently(addr, acc.into()).unwrap(); + addr + } + + fn insert_and_register(tap: &Arc, engine: &Arc, acc: &str) -> Address { + let addr = insert_and_unlock(tap, acc); + engine.set_signer(addr.clone(), acc.into()); + addr + } + + struct TestIo { + received: RwLock> + } + + impl TestIo { + fn new() -> Arc { Arc::new(TestIo { received: RwLock::new(Vec::new()) }) } + } + + impl IoHandler for TestIo { + fn message(&self, _io: &IoContext, net_message: &ClientIoMessage) { + self.received.write().push(net_message.clone()); + } + } + + #[test] + fn has_valid_metadata() { + let engine = Spec::new_test_tendermint().engine; + assert!(!engine.name().is_empty()); + assert!(engine.version().major >= 1); + } + + #[test] + fn can_return_schedule() { + let engine = Spec::new_test_tendermint().engine; + let schedule = engine.schedule(&EnvInfo { + number: 10000000, + author: 0.into(), + timestamp: 0, + difficulty: 0.into(), + last_hashes: Arc::new(vec![]), + gas_used: 0.into(), + gas_limit: 0.into(), + }); + + assert!(schedule.stack_limit > 0); + } + + #[test] + fn verification_fails_on_short_seal() { + let engine = Spec::new_test_tendermint().engine; + let header = Header::default(); + + let verify_result = engine.verify_block_basic(&header, None); + + match verify_result { + Err(Error::Block(BlockError::InvalidSealArity(_))) => {}, + Err(_) => { panic!("should be block seal-arity mismatch error (got {:?})", verify_result); }, + _ => { panic!("Should be error, got Ok"); }, + } + } + + #[test] + fn allows_correct_proposer() { + let (spec, tap) = setup(); + let engine = spec.engine; + + let mut header = Header::default(); + let validator = insert_and_unlock(&tap, "0"); + header.set_author(validator); + let seal = proposal_seal(&tap, &header, 0); + header.set_seal(seal); + // Good proposer. + assert!(engine.verify_block_unordered(&header.clone(), None).is_ok()); + + let validator = insert_and_unlock(&tap, "1"); + header.set_author(validator); + let seal = proposal_seal(&tap, &header, 0); + header.set_seal(seal); + // Bad proposer. + match engine.verify_block_unordered(&header, None) { + Err(Error::Engine(EngineError::NotProposer(_))) => {}, + _ => panic!(), + } + + let random = insert_and_unlock(&tap, "101"); + header.set_author(random); + let seal = proposal_seal(&tap, &header, 0); + header.set_seal(seal); + // Not authority. + match engine.verify_block_unordered(&header, None) { + Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, + _ => panic!(), + }; + engine.stop(); + } + + #[test] + fn seal_signatures_checking() { + let (spec, tap) = setup(); + let engine = spec.engine; + + let mut header = Header::default(); + let proposer = insert_and_unlock(&tap, "1"); + header.set_author(proposer); + let mut seal = proposal_seal(&tap, &header, 0); + + let vote_info = message_info_rlp(0, 0, Step::Precommit, Some(header.bare_hash())); + let signature1 = tap.sign(proposer, None, vote_info.sha3()).unwrap(); + + seal[2] = ::rlp::encode(&vec![H520::from(signature1.clone())]).to_vec(); + header.set_seal(seal.clone()); + + // One good signature is not enough. 
+ match engine.verify_block_unordered(&header, None) { + Err(Error::Engine(EngineError::BadSealFieldSize(_))) => {}, + _ => panic!(), + } + + let voter = insert_and_unlock(&tap, "0"); + let signature0 = tap.sign(voter, None, vote_info.sha3()).unwrap(); + + seal[2] = ::rlp::encode(&vec![H520::from(signature1.clone()), H520::from(signature0.clone())]).to_vec(); + header.set_seal(seal.clone()); + + assert!(engine.verify_block_unordered(&header, None).is_ok()); + + let bad_voter = insert_and_unlock(&tap, "101"); + let bad_signature = tap.sign(bad_voter, None, vote_info.sha3()).unwrap(); + + seal[2] = ::rlp::encode(&vec![H520::from(signature1), H520::from(bad_signature)]).to_vec(); + header.set_seal(seal); + + // One good and one bad signature. + match engine.verify_block_unordered(&header, None) { + Err(Error::Engine(EngineError::NotAuthorized(_))) => {}, + _ => panic!(), + }; + engine.stop(); + } + + #[test] + fn can_generate_seal() { + let (spec, tap) = setup(); + + let proposer = insert_and_register(&tap, &spec.engine, "1"); + + let (b, seal) = propose_default(&spec, proposer); + assert!(b.try_seal(spec.engine.as_ref(), seal).is_ok()); + spec.engine.stop(); + } + + #[test] + fn can_recognize_proposal() { + let (spec, tap) = setup(); + + let proposer = insert_and_register(&tap, &spec.engine, "1"); + + let (b, seal) = propose_default(&spec, proposer); + let sealed = b.seal(spec.engine.as_ref(), seal).unwrap(); + assert!(spec.engine.is_proposal(sealed.header())); + spec.engine.stop(); + } + + #[test] + fn relays_messages() { + let (spec, tap) = setup(); + let engine = spec.engine.clone(); + let mut db_result = get_temp_state_db(); + let mut db = db_result.take(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); + + let v0 = insert_and_register(&tap, &engine, "0"); + let v1 = insert_and_register(&tap, &engine, "1"); + + let h = 0; + let r = 0; + + // Propose + let (b, _) = propose_default(&spec, v1.clone()); + let proposal = Some(b.header().bare_hash()); + + // Register IoHandler remembers messages. + let test_io = TestIo::new(); + let channel = IoChannel::to_handler(Arc::downgrade(&(test_io.clone() as Arc>))); + engine.register_message_channel(channel); + + let prevote_current = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal); + + let precommit_current = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Precommit, proposal); + + let prevote_future = vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h + 1, r, Step::Prevote, proposal); + + // Relays all valid present and future messages. + assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(prevote_current))); + assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(precommit_current))); + assert!(test_io.received.read().contains(&ClientIoMessage::BroadcastMessage(prevote_future))); + engine.stop(); + } + + #[test] + fn seal_submission() { + let (spec, tap) = setup(); + let engine = spec.engine.clone(); + let mut db_result = get_temp_state_db(); + let mut db = db_result.take(); + spec.ensure_db_good(&mut db, &TrieFactory::new(TrieSpec::Secure)).unwrap(); + + let v0 = insert_and_register(&tap, &engine, "0"); + let v1 = insert_and_register(&tap, &engine, "1"); + + let h = 1; + let r = 0; + + // Register IoHandler remembers messages. 
+ let test_io = TestIo::new(); + let channel = IoChannel::to_handler(Arc::downgrade(&(test_io.clone() as Arc>))); + engine.register_message_channel(channel); + + // Propose + let (b, mut seal) = propose_default(&spec, v1.clone()); + let proposal = Some(b.header().bare_hash()); + engine.step(); + + // Prevote. + vote(&engine, |mh| tap.sign(v1, None, mh).map(H520::from), h, r, Step::Prevote, proposal); + vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Prevote, proposal); + vote(&engine, |mh| tap.sign(v1, None, mh).map(H520::from), h, r, Step::Precommit, proposal); + vote(&engine, |mh| tap.sign(v0, None, mh).map(H520::from), h, r, Step::Precommit, proposal); + + seal[2] = precommit_signatures(&tap, h, r, Some(b.header().bare_hash()), v1, v0); + let first = test_io.received.read().contains(&ClientIoMessage::SubmitSeal(proposal.unwrap(), seal.clone())); + seal[2] = precommit_signatures(&tap, h, r, Some(b.header().bare_hash()), v0, v1); + let second = test_io.received.read().contains(&ClientIoMessage::SubmitSeal(proposal.unwrap(), seal)); + + assert!(first ^ second); + engine.stop(); + } +} diff --git a/ethcore/src/engines/tendermint/params.rs b/ethcore/src/engines/tendermint/params.rs new file mode 100644 index 000000000..cf723713b --- /dev/null +++ b/ethcore/src/engines/tendermint/params.rs @@ -0,0 +1,72 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tendermint specific parameters. + +use ethjson; +use super::transition::TendermintTimeouts; +use util::{Address, U256}; +use time::Duration; + +/// `Tendermint` params. +#[derive(Debug, Clone)] +pub struct TendermintParams { + /// Gas limit divisor. + pub gas_limit_bound_divisor: U256, + /// List of authorities. + pub authorities: Vec
, + /// Number of authorities. + pub authority_n: usize, + /// Timeout durations for different steps. + pub timeouts: TendermintTimeouts, +} + +impl Default for TendermintParams { + fn default() -> Self { + let authorities = vec!["0x7d577a597b2742b498cb5cf0c26cdcd726d39e6e".into(), "0x82a978b3f5962a5b0957d9ee9eef472ee55b42f1".into()]; + let val_n = authorities.len(); + TendermintParams { + gas_limit_bound_divisor: 0x0400.into(), + authorities: authorities, + authority_n: val_n, + timeouts: TendermintTimeouts::default(), + } + } +} + +fn to_duration(ms: ethjson::uint::Uint) -> Duration { + let ms: usize = ms.into(); + Duration::milliseconds(ms as i64) +} + +impl From for TendermintParams { + fn from(p: ethjson::spec::TendermintParams) -> Self { + let val: Vec<_> = p.authorities.into_iter().map(Into::into).collect(); + let val_n = val.len(); + let dt = TendermintTimeouts::default(); + TendermintParams { + gas_limit_bound_divisor: p.gas_limit_bound_divisor.into(), + authorities: val, + authority_n: val_n, + timeouts: TendermintTimeouts { + propose: p.timeout_propose.map_or(dt.propose, to_duration), + prevote: p.timeout_prevote.map_or(dt.prevote, to_duration), + precommit: p.timeout_precommit.map_or(dt.precommit, to_duration), + commit: p.timeout_commit.map_or(dt.commit, to_duration), + }, + } + } +} diff --git a/ethcore/src/engines/tendermint/transition.rs b/ethcore/src/engines/tendermint/transition.rs new file mode 100644 index 000000000..83b390d74 --- /dev/null +++ b/ethcore/src/engines/tendermint/transition.rs @@ -0,0 +1,96 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tendermint timeout handling. + +use std::sync::Weak; +use time::Duration; +use io::{IoContext, IoHandler, TimerToken}; +use super::{Tendermint, Step}; +use engines::Engine; + +pub struct TransitionHandler { + pub engine: Weak, +} + +/// Base timeout of each step in ms. +#[derive(Debug, Clone)] +pub struct TendermintTimeouts { + pub propose: Duration, + pub prevote: Duration, + pub precommit: Duration, + pub commit: Duration, +} + +impl TendermintTimeouts { + pub fn for_step(&self, step: Step) -> Duration { + match step { + Step::Propose => self.propose, + Step::Prevote => self.prevote, + Step::Precommit => self.precommit, + Step::Commit => self.commit, + } + } +} + +impl Default for TendermintTimeouts { + fn default() -> Self { + TendermintTimeouts { + propose: Duration::milliseconds(10000), + prevote: Duration::milliseconds(10000), + precommit: Duration::milliseconds(10000), + commit: Duration::milliseconds(10000), + } + } +} + +/// Timer token representing the consensus step timeouts. 
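The per-step timeouts above are what drive the engine between consensus steps: when a step's timer fires, the handler calls `engine.step()` (see the `timeout` implementation just below), which picks the next step. A plain-std sketch of that cycle, with the `has_enough_votes` flag standing in for the real vote accounting; the `ENGINE_TIMEOUT_TOKEN` constant the doc comment above refers to continues right after this sketch.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Step { Propose, Prevote, Precommit, Commit }

fn on_timeout(current: Step, has_enough_votes: bool) -> Step {
    match current {
        Step::Propose => Step::Prevote,
        Step::Prevote if has_enough_votes => Step::Precommit,
        Step::Prevote => Step::Prevote,       // keep waiting, rebroadcast old votes
        Step::Precommit if has_enough_votes => Step::Propose, // move to the next round
        Step::Precommit => Step::Precommit,
        Step::Commit => Step::Propose,        // move to the next height
    }
}

fn main() {
    assert_eq!(on_timeout(Step::Propose, false), Step::Prevote);
    assert_eq!(on_timeout(Step::Prevote, false), Step::Prevote);
    assert_eq!(on_timeout(Step::Prevote, true), Step::Precommit);
    assert_eq!(on_timeout(Step::Commit, true), Step::Propose);
}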
+pub const ENGINE_TIMEOUT_TOKEN: TimerToken = 23; + +fn set_timeout(io: &IoContext, timeout: Duration) { + io.register_timer_once(ENGINE_TIMEOUT_TOKEN, timeout.num_milliseconds() as u64) + .unwrap_or_else(|e| warn!(target: "poa", "Failed to set consensus step timeout: {}.", e)) +} + +impl IoHandler for TransitionHandler { + fn initialize(&self, io: &IoContext) { + if let Some(engine) = self.engine.upgrade() { + set_timeout(io, engine.our_params.timeouts.propose) + } + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer == ENGINE_TIMEOUT_TOKEN { + if let Some(engine) = self.engine.upgrade() { + engine.step(); + } + } + } + + fn message(&self, io: &IoContext, next_step: &Step) { + if let Some(engine) = self.engine.upgrade() { + if let Err(io_err) = io.clear_timer(ENGINE_TIMEOUT_TOKEN) { + warn!(target: "poa", "Could not remove consensus timer {}.", io_err) + } + match *next_step { + Step::Propose => set_timeout(io, engine.our_params.timeouts.propose), + Step::Prevote => set_timeout(io, engine.our_params.timeouts.prevote), + Step::Precommit => set_timeout(io, engine.our_params.timeouts.precommit), + Step::Commit => set_timeout(io, engine.our_params.timeouts.commit), + }; + } + } +} diff --git a/ethcore/src/engines/tendermint/vote_collector.rs b/ethcore/src/engines/tendermint/vote_collector.rs new file mode 100644 index 000000000..be592bc8f --- /dev/null +++ b/ethcore/src/engines/tendermint/vote_collector.rs @@ -0,0 +1,272 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Collects votes on hashes at each height and round. + +use util::*; +use super::message::ConsensusMessage; +use super::{Height, Round, Step}; + +#[derive(Debug)] +pub struct VoteCollector { + /// Storing all Proposals, Prevotes and Precommits. + votes: RwLock>, +} + +#[derive(Debug)] +pub struct SealSignatures { + pub proposal: H520, + pub votes: Vec, +} + +impl PartialEq for SealSignatures { + fn eq(&self, other: &SealSignatures) -> bool { + self.proposal == other.proposal + && self.votes.iter().collect::>() == other.votes.iter().collect::>() + } +} + +impl Eq for SealSignatures {} + +impl VoteCollector { + pub fn new() -> VoteCollector { + let mut collector = BTreeMap::new(); + // Insert dummy message to fulfill invariant: "only messages newer than the oldest are inserted". + collector.insert(ConsensusMessage { + signature: H520::default(), + height: 0, + round: 0, + step: Step::Propose, + block_hash: None + }, + Address::default()); + VoteCollector { votes: RwLock::new(collector) } + } + + /// Insert vote if it is newer than the oldest one. + pub fn vote(&self, message: ConsensusMessage, voter: Address) -> Option
{ + self.votes.write().insert(message, voter) + } + + pub fn is_old_or_known(&self, message: &ConsensusMessage) -> bool { + self.votes.read().get(message).map_or(false, |a| { + trace!(target: "poa", "Known message from {}: {:?}.", a, message); + true + }) || { + let guard = self.votes.read(); + let is_old = guard.keys().next().map_or(true, |oldest| message <= oldest); + if is_old { trace!(target: "poa", "Old message {:?}.", message); } + is_old + } + } + + /// Throws out messages older than message, leaves message as marker for the oldest. + pub fn throw_out_old(&self, message: &ConsensusMessage) { + let mut guard = self.votes.write(); + let new_collector = guard.split_off(message); + *guard = new_collector; + } + + pub fn seal_signatures(&self, height: Height, round: Round, block_hash: H256) -> Option { + let bh = Some(block_hash); + let (proposal, votes) = { + let guard = self.votes.read(); + let mut current_signatures = guard.keys().skip_while(|m| !m.is_block_hash(height, round, Step::Propose, bh)); + let proposal = current_signatures.next().cloned(); + let votes = current_signatures + .skip_while(|m| !m.is_block_hash(height, round, Step::Precommit, bh)) + .filter(|m| m.is_block_hash(height, round, Step::Precommit, bh)) + .cloned() + .collect::>(); + (proposal, votes) + }; + if votes.is_empty() { + return None; + } + // Remove messages that are no longer relevant. + votes.last().map(|m| self.throw_out_old(m)); + let mut votes_vec: Vec<_> = votes.into_iter().map(|m| m.signature).collect(); + votes_vec.sort(); + proposal.map(|p| SealSignatures { + proposal: p.signature, + votes: votes_vec, + }) + } + + pub fn count_aligned_votes(&self, message: &ConsensusMessage) -> usize { + let guard = self.votes.read(); + guard.keys() + .skip_while(|m| !m.is_aligned(message)) + // sorted by signature so might not be continuous + .filter(|m| m.is_aligned(message)) + .count() + } + + pub fn count_step_votes(&self, height: Height, round: Round, step: Step) -> usize { + let guard = self.votes.read(); + let current = guard.iter().skip_while(|&(m, _)| !m.is_step(height, round, step)); + let mut origins = HashSet::new(); + let mut n = 0; + for (message, origin) in current { + if message.is_step(height, round, step) { + if origins.insert(origin) { + n += 1; + } else { + warn!("count_step_votes: Authority {} has cast multiple step votes, this indicates malicious behaviour.", origin) + } + } + } + n + } + + pub fn get_up_to(&self, height: Height) -> Vec { + let guard = self.votes.read(); + guard + .keys() + .filter(|m| m.step.is_pre()) + .take_while(|m| m.height <= height) + .map(|m| ::rlp::encode(m).to_vec()) + .collect() + } + + pub fn get(&self, message: &ConsensusMessage) -> Option
{ + let guard = self.votes.read(); + guard.get(message).cloned() + } +} + +#[cfg(test)] +mod tests { + use util::*; + use super::*; + use super::super::{Height, Round, BlockHash, Step}; + use super::super::message::ConsensusMessage; + + fn random_vote(collector: &VoteCollector, signature: H520, h: Height, r: Round, step: Step, block_hash: Option) -> Option { + full_vote(collector, signature, h, r, step, block_hash, H160::random()) + } + + fn full_vote(collector: &VoteCollector, signature: H520, h: Height, r: Round, step: Step, block_hash: Option, address: Address) -> Option { + collector.vote(ConsensusMessage { signature: signature, height: h, round: r, step: step, block_hash: block_hash }, address) + } + + #[test] + fn seal_retrieval() { + let collector = VoteCollector::new(); + let bh = Some("1".sha3()); + let h = 1; + let r = 2; + let mut signatures = Vec::new(); + for _ in 0..5 { + signatures.push(H520::random()); + } + // Wrong height proposal. + random_vote(&collector, signatures[4].clone(), h - 1, r, Step::Propose, bh.clone()); + // Good proposal + random_vote(&collector, signatures[0].clone(), h, r, Step::Propose, bh.clone()); + // Wrong block proposal. + random_vote(&collector, signatures[0].clone(), h, r, Step::Propose, Some("0".sha3())); + // Wrong block precommit. + random_vote(&collector, signatures[3].clone(), h, r, Step::Precommit, Some("0".sha3())); + // Wrong round proposal. + random_vote(&collector, signatures[0].clone(), h, r - 1, Step::Propose, bh.clone()); + // Prevote. + random_vote(&collector, signatures[0].clone(), h, r, Step::Prevote, bh.clone()); + // Relevant precommit. + random_vote(&collector, signatures[2].clone(), h, r, Step::Precommit, bh.clone()); + // Replcated vote. + random_vote(&collector, signatures[2].clone(), h, r, Step::Precommit, bh.clone()); + // Wrong round precommit. + random_vote(&collector, signatures[4].clone(), h, r + 1, Step::Precommit, bh.clone()); + // Wrong height precommit. + random_vote(&collector, signatures[3].clone(), h + 1, r, Step::Precommit, bh.clone()); + // Relevant precommit. + random_vote(&collector, signatures[1].clone(), h, r, Step::Precommit, bh.clone()); + // Wrong round precommit, same signature. + random_vote(&collector, signatures[1].clone(), h, r + 1, Step::Precommit, bh.clone()); + // Wrong round precommit. 
+ random_vote(&collector, signatures[4].clone(), h, r - 1, Step::Precommit, bh.clone()); + let seal = SealSignatures { + proposal: signatures[0], + votes: signatures[1..3].to_vec() + }; + assert_eq!(seal, collector.seal_signatures(h, r, bh.unwrap()).unwrap()); + } + + #[test] + fn count_votes() { + let collector = VoteCollector::new(); + // good prevote + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3())); + random_vote(&collector, H520::random(), 3, 1, Step::Prevote, Some("0".sha3())); + // good precommit + random_vote(&collector, H520::random(), 3, 2, Step::Precommit, Some("0".sha3())); + random_vote(&collector, H520::random(), 3, 3, Step::Precommit, Some("0".sha3())); + // good prevote + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3())); + // good prevote + let same_sig = H520::random(); + random_vote(&collector, same_sig.clone(), 3, 2, Step::Prevote, Some("1".sha3())); + random_vote(&collector, same_sig, 3, 2, Step::Prevote, Some("1".sha3())); + // good precommit + random_vote(&collector, H520::random(), 3, 2, Step::Precommit, Some("1".sha3())); + // good prevote + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3())); + random_vote(&collector, H520::random(), 2, 2, Step::Precommit, Some("2".sha3())); + + assert_eq!(collector.count_step_votes(3, 2, Step::Prevote), 4); + assert_eq!(collector.count_step_votes(3, 2, Step::Precommit), 2); + + let message = ConsensusMessage { + signature: H520::default(), + height: 3, + round: 2, + step: Step::Prevote, + block_hash: Some("1".sha3()) + }; + assert_eq!(collector.count_aligned_votes(&message), 2); + } + + #[test] + fn remove_old() { + let collector = VoteCollector::new(); + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3())); + random_vote(&collector, H520::random(), 3, 1, Step::Prevote, Some("0".sha3())); + random_vote(&collector, H520::random(), 3, 3, Step::Precommit, Some("0".sha3())); + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3())); + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3())); + random_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3())); + random_vote(&collector, H520::random(), 2, 2, Step::Precommit, Some("2".sha3())); + + let message = ConsensusMessage { + signature: H520::default(), + height: 3, + round: 2, + step: Step::Precommit, + block_hash: Some("1".sha3()) + }; + collector.throw_out_old(&message); + assert_eq!(collector.votes.read().len(), 1); + } + + #[test] + fn malicious_authority() { + let collector = VoteCollector::new(); + full_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("0".sha3()), Address::default()); + full_vote(&collector, H520::random(), 3, 2, Step::Prevote, Some("1".sha3()), Address::default()); + assert_eq!(collector.count_step_votes(3, 2, Step::Prevote), 1); + } +} diff --git a/ethcore/src/error.rs b/ethcore/src/error.rs index 4afbe25b8..846972c02 100644 --- a/ethcore/src/error.rs +++ b/ethcore/src/error.rs @@ -24,6 +24,7 @@ use client::Error as ClientError; use ipc::binary::{BinaryConvertError, BinaryConvertable}; use types::block_import_error::BlockImportError; use snapshot::Error as SnapshotError; +use engines::EngineError; use ethkey::Error as EthkeyError; pub use types::executed::{ExecutionError, CallError}; @@ -167,8 +168,6 @@ pub enum BlockError { UnknownParent(H256), /// Uncle parent given is unknown. UnknownUncleParent(H256), - /// The same author issued different votes at the same step. 
- DoubleVote(H160), } impl fmt::Display for BlockError { @@ -202,7 +201,6 @@ impl fmt::Display for BlockError { RidiculousNumber(ref oob) => format!("Implausible block number. {}", oob), UnknownParent(ref hash) => format!("Unknown parent: {}", hash), UnknownUncleParent(ref hash) => format!("Unknown uncle parent: {}", hash), - DoubleVote(ref address) => format!("Author {} issued too many blocks.", address), }; f.write_fmt(format_args!("Block error ({})", msg)) @@ -263,6 +261,8 @@ pub enum Error { Snappy(::util::snappy::InvalidInput), /// Snapshot error. Snapshot(SnapshotError), + /// Consensus vote error. + Engine(EngineError), /// Ethkey error. Ethkey(EthkeyError), } @@ -285,6 +285,7 @@ impl fmt::Display for Error { Error::StdIo(ref err) => err.fmt(f), Error::Snappy(ref err) => err.fmt(f), Error::Snapshot(ref err) => err.fmt(f), + Error::Engine(ref err) => err.fmt(f), Error::Ethkey(ref err) => err.fmt(f), } } @@ -383,6 +384,12 @@ impl From for Error { } } +impl From for Error { + fn from(err: EngineError) -> Error { + Error::Engine(err) + } +} + impl From for Error { fn from(err: EthkeyError) -> Error { Error::Ethkey(err) diff --git a/ethcore/src/miner/miner.rs b/ethcore/src/miner/miner.rs index a6a63ccaf..eb050f0bd 100644 --- a/ethcore/src/miner/miner.rs +++ b/ethcore/src/miner/miner.rs @@ -26,12 +26,12 @@ use state::{State, CleanupMode}; use client::{MiningBlockChainClient, Executive, Executed, EnvInfo, TransactOptions, BlockId, CallAnalytics, TransactionId}; use client::TransactionImportResult; use executive::contract_address; -use block::{ClosedBlock, SealedBlock, IsBlock, Block}; +use block::{ClosedBlock, IsBlock, Block}; use error::*; use transaction::{Action, SignedTransaction}; use receipt::{Receipt, RichReceipt}; use spec::Spec; -use engines::Engine; +use engines::{Engine, Seal}; use miner::{MinerService, MinerStatus, TransactionQueue, PrioritizationStrategy, AccountDetails, TransactionOrigin}; use miner::banning_queue::{BanningTransactionQueue, Threshold}; use miner::work_notify::WorkPoster; @@ -466,34 +466,43 @@ impl Miner { } } - /// Attempts to perform internal sealing (one that does not require work) to return Ok(sealed), - /// Err(Some(block)) returns for unsuccesful sealing while Err(None) indicates misspecified engine. - fn seal_block_internally(&self, block: ClosedBlock) -> Result> { - trace!(target: "miner", "seal_block_internally: attempting internal seal."); - let s = self.engine.generate_seal(block.block()); - if let Some(seal) = s { - trace!(target: "miner", "seal_block_internally: managed internal seal. importing..."); - block.lock().try_seal(&*self.engine, seal).or_else(|(e, _)| { - warn!("prepare_sealing: ERROR: try_seal failed when given internally generated seal: {}", e); - Err(None) - }) - } else { - trace!(target: "miner", "seal_block_internally: unable to generate seal internally"); - Err(Some(block)) - } - } - - /// Uses Engine to seal the block internally and then imports it to chain. + /// Attempts to perform internal sealing (one that does not require work) and handles the result depending on the type of Seal. 
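The `Engine(EngineError)` variant and `From` impl added to error.rs above are what let consensus code surface engine failures through the crate-wide `Error` with `try!` or `?`. A self-contained sketch of that pattern, with toy variants standing in for the real ones; the miner's `seal_and_import_block_internally` documented just above continues below.

use std::fmt;

#[derive(Debug)]
enum EngineError { NotAuthorized(String) }

#[derive(Debug)]
enum Error { Engine(EngineError) }

impl From<EngineError> for Error {
    fn from(err: EngineError) -> Error { Error::Engine(err) }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Error::Engine(ref err) => write!(f, "Engine error ({:?})", err),
        }
    }
}

fn check_authorized(ok: bool) -> Result<(), Error> {
    if !ok {
        // The conversion goes through the From impl above, as try!/? would do.
        return Err(EngineError::NotAuthorized("badc0de".into()).into());
    }
    Ok(())
}

fn main() {
    assert!(check_authorized(true).is_ok());
    println!("{}", check_authorized(false).unwrap_err());
}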
fn seal_and_import_block_internally(&self, chain: &MiningBlockChainClient, block: ClosedBlock) -> bool { if !block.transactions().is_empty() || self.forced_sealing() { - if let Ok(sealed) = self.seal_block_internally(block) { - if chain.import_sealed_block(sealed).is_ok() { - trace!(target: "miner", "import_block_internally: imported internally sealed block"); - return true - } + trace!(target: "miner", "seal_block_internally: attempting internal seal."); + match self.engine.generate_seal(block.block()) { + // Save proposal for later seal submission and broadcast it. + Seal::Proposal(seal) => { + trace!(target: "miner", "Received a Proposal seal."); + { + let mut sealing_work = self.sealing_work.lock(); + sealing_work.queue.push(block.clone()); + sealing_work.queue.use_last_ref(); + } + block + .lock() + .seal(&*self.engine, seal) + .map(|sealed| { chain.broadcast_proposal_block(sealed); true }) + .unwrap_or_else(|e| { + warn!("ERROR: seal failed when given internally generated seal: {}", e); + false + }) + }, + // Directly import a regular sealed block. + Seal::Regular(seal) => + block + .lock() + .seal(&*self.engine, seal) + .map(|sealed| chain.import_sealed_block(sealed).is_ok()) + .unwrap_or_else(|e| { + warn!("ERROR: seal failed when given internally generated seal: {}", e); + false + }), + Seal::None => false, } + } else { + false } - false } /// Prepares work which has to be done to seal. @@ -1024,7 +1033,6 @@ impl MinerService for Miner { self.transaction_queue.lock().last_nonce(address) } - /// Update sealing if required. /// Prepare the block and work if the Engine does not seal internally. fn update_sealing(&self, chain: &MiningBlockChainClient) { @@ -1039,7 +1047,9 @@ impl MinerService for Miner { let (block, original_work_hash) = self.prepare_block(chain); if self.seals_internally { trace!(target: "miner", "update_sealing: engine indicates internal sealing"); - self.seal_and_import_block_internally(chain, block); + if self.seal_and_import_block_internally(chain, block) { + trace!(target: "miner", "update_sealing: imported internally sealed block"); + } } else { trace!(target: "miner", "update_sealing: engine does not seal internally, preparing work"); self.prepare_work(block, original_work_hash); diff --git a/ethcore/src/service.rs b/ethcore/src/service.rs index d809de51a..732d12a5b 100644 --- a/ethcore/src/service.rs +++ b/ethcore/src/service.rs @@ -20,7 +20,7 @@ use util::*; use io::*; use spec::Spec; use error::*; -use client::{Client, ClientConfig, ChainNotify}; +use client::{Client, BlockChainClient, MiningBlockChainClient, ClientConfig, ChainNotify}; use miner::Miner; use snapshot::ManifestData; use snapshot::service::{Service as SnapshotService, ServiceParams as SnapServiceParams}; @@ -28,11 +28,9 @@ use std::sync::atomic::AtomicBool; #[cfg(feature="ipc")] use nanoipc; -#[cfg(feature="ipc")] -use client::BlockChainClient; /// Message type for external and internal events -#[derive(Clone)] +#[derive(Clone, PartialEq, Eq, Debug)] pub enum ClientIoMessage { /// Best Block Hash in chain has been changed NewChainHead, @@ -50,6 +48,12 @@ pub enum ClientIoMessage { TakeSnapshot(u64), /// Trigger sealing update (useful for internal sealing). UpdateSealing, + /// Submit seal (useful for internal sealing). + SubmitSeal(H256, Vec), + /// Broadcast a message to the network. + BroadcastMessage(Bytes), + /// New consensus message received. + NewMessage(Bytes) } /// Client service setup. Creates and registers client and network services with the IO subsystem. 
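A compact sketch of the three-way outcome that the reworked `seal_and_import_block_internally` above handles: a `Proposal` seal is queued and broadcast for the other validators to vote on, a `Regular` seal is imported directly, and `None` means the engine had nothing to seal this time. Types and return strings below are illustrative only.

type SealFields = Vec<Vec<u8>>;

enum Seal {
    Proposal(SealFields),
    Regular(SealFields),
    None,
}

fn handle_generated_seal(seal: Seal) -> &'static str {
    match seal {
        Seal::Proposal(_) => "queued block and broadcast proposal",
        Seal::Regular(_) => "sealed block imported into the chain",
        Seal::None => "nothing sealed",
    }
}

fn main() {
    assert_eq!(handle_generated_seal(Seal::None), "nothing sealed");
    assert_eq!(handle_generated_seal(Seal::Proposal(vec![vec![0u8]])), "queued block and broadcast proposal");
}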
@@ -77,9 +81,6 @@ impl ClientService { panic_handler.forward_from(&io_service); info!("Configured for {} using {} engine", Colour::White.bold().paint(spec.name.clone()), Colour::Yellow.bold().paint(spec.engine.name())); - if spec.fork_name.is_some() { - warn!("Your chain is an alternative fork. {}", Colour::Red.bold().paint("TRANSACTIONS MAY BE REPLAYED ON THE MAINNET!")); - } let mut db_config = DatabaseConfig::with_columns(::db::NUM_COLUMNS); @@ -220,9 +221,11 @@ impl IoHandler for ClientIoHandler { debug!(target: "snapshot", "Failed to initialize periodic snapshot thread: {:?}", e); } }, - ClientIoMessage::UpdateSealing => { - trace!(target: "authorityround", "message: UpdateSealing"); - self.client.update_sealing() + ClientIoMessage::UpdateSealing => self.client.update_sealing(), + ClientIoMessage::SubmitSeal(ref hash, ref seal) => self.client.submit_seal(*hash, seal.clone()), + ClientIoMessage::BroadcastMessage(ref message) => self.client.broadcast_consensus_message(message.clone()), + ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) { + trace!(target: "poa", "Invalid message received: {}", e); }, _ => {} // ignore other messages } diff --git a/ethcore/src/snapshot/watcher.rs b/ethcore/src/snapshot/watcher.rs index 2ee186020..91d94174e 100644 --- a/ethcore/src/snapshot/watcher.rs +++ b/ethcore/src/snapshot/watcher.rs @@ -23,7 +23,7 @@ use service::ClientIoMessage; use views::HeaderView; use io::IoChannel; -use util::hash::H256; +use util::{H256, Bytes}; use std::sync::Arc; @@ -107,6 +107,7 @@ impl ChainNotify for Watcher { _: Vec, _: Vec, _: Vec, + _: Vec, _duration: u64) { if self.oracle.is_major_importing() { return } @@ -174,6 +175,7 @@ mod tests { vec![], vec![], vec![], + vec![], 0, ); } diff --git a/ethcore/src/spec/seal.rs b/ethcore/src/spec/seal.rs index eaf951189..967ffc22b 100644 --- a/ethcore/src/spec/seal.rs +++ b/ethcore/src/spec/seal.rs @@ -17,7 +17,7 @@ //! Spec seal. use rlp::*; -use util::hash::{H64, H256}; +use util::hash::{H64, H256, H520}; use ethjson; /// Classic ethereum seal. @@ -32,23 +32,55 @@ impl Into for Ethereum { fn into(self) -> Generic { let mut s = RlpStream::new_list(2); s.append(&self.mix_hash).append(&self.nonce); - Generic { - rlp: s.out() - } + Generic(s.out()) } } -/// Generic seal. -pub struct Generic { - /// Seal rlp. - pub rlp: Vec, +/// AuthorityRound seal. +pub struct AuthorityRound { + /// Seal step. + pub step: usize, + /// Seal signature. + pub signature: H520, } +/// Tendermint seal. +pub struct Tendermint { + /// Seal round. + pub round: usize, + /// Proposal seal signature. + pub proposal: H520, + /// Precommit seal signatures. + pub precommits: Vec, +} + +impl Into for AuthorityRound { + fn into(self) -> Generic { + let mut s = RlpStream::new_list(2); + s.append(&self.step).append(&self.signature); + Generic(s.out()) + } +} + +impl Into for Tendermint { + fn into(self) -> Generic { + let mut s = RlpStream::new_list(3); + s.append(&self.round).append(&self.proposal).append(&self.precommits); + Generic(s.out()) + } +} + +pub struct Generic(pub Vec); + /// Genesis seal type. pub enum Seal { /// Classic ethereum seal. Ethereum(Ethereum), - /// Generic seal. + /// AuthorityRound seal. + AuthorityRound(AuthorityRound), + /// Tendermint seal. + Tendermint(Tendermint), + /// Generic RLP seal. 
Generic(Generic), } @@ -59,9 +91,16 @@ impl From for Seal { nonce: eth.nonce.into(), mix_hash: eth.mix_hash.into() }), - ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic { - rlp: g.rlp.into() - }) + ethjson::spec::Seal::AuthorityRound(ar) => Seal::AuthorityRound(AuthorityRound { + step: ar.step.into(), + signature: ar.signature.into() + }), + ethjson::spec::Seal::Tendermint(tender) => Seal::Tendermint(Tendermint { + round: tender.round.into(), + proposal: tender.proposal.into(), + precommits: tender.precommits.into_iter().map(Into::into).collect() + }), + ethjson::spec::Seal::Generic(g) => Seal::Generic(Generic(g.into())), } } } @@ -70,7 +109,9 @@ impl Into for Seal { fn into(self) -> Generic { match self { Seal::Generic(generic) => generic, - Seal::Ethereum(eth) => eth.into() + Seal::Ethereum(eth) => eth.into(), + Seal::AuthorityRound(ar) => ar.into(), + Seal::Tendermint(tender) => tender.into(), } } } diff --git a/ethcore/src/spec/spec.rs b/ethcore/src/spec/spec.rs index 5d0cc8360..bdcd5eee2 100644 --- a/ethcore/src/spec/spec.rs +++ b/ethcore/src/spec/spec.rs @@ -18,7 +18,7 @@ use util::*; use builtin::Builtin; -use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound}; +use engines::{Engine, NullEngine, InstantSeal, BasicAuthority, AuthorityRound, Tendermint}; use pod_state::*; use account_db::*; use header::{BlockNumber, Header}; @@ -66,8 +66,8 @@ pub struct Spec { pub name: String, /// What engine are we using for this? pub engine: Arc, - /// The fork identifier for this chain. Only needed to distinguish two chains sharing the same genesis. - pub fork_name: Option, + /// Name of the subdir inside the main data dir to use for chain data and settings. + pub data_dir: String, /// Known nodes on the network in enode format. 
pub nodes: Vec, @@ -107,13 +107,13 @@ impl From for Spec { fn from(s: ethjson::spec::Spec) -> Self { let builtins = s.accounts.builtins().into_iter().map(|p| (p.0.into(), From::from(p.1))).collect(); let g = Genesis::from(s.genesis); - let seal: GenericSeal = g.seal.into(); + let GenericSeal(seal_rlp) = g.seal.into(); let params = CommonParams::from(s.params); Spec { - name: s.name.into(), + name: s.name.clone().into(), params: params.clone(), engine: Spec::engine(s.engine, params, builtins), - fork_name: s.fork_name.map(Into::into), + data_dir: s.data_dir.unwrap_or(s.name).into(), nodes: s.nodes.unwrap_or_else(Vec::new), parent_hash: g.parent_hash, transactions_root: g.transactions_root, @@ -124,7 +124,7 @@ impl From for Spec { gas_used: g.gas_used, timestamp: g.timestamp, extra_data: g.extra_data, - seal_rlp: seal.rlp, + seal_rlp: seal_rlp, state_root_memo: RwLock::new(g.state_root), genesis_state: From::from(s.accounts), } @@ -146,7 +146,8 @@ impl Spec { ethjson::spec::Engine::InstantSeal => Arc::new(InstantSeal::new(params, builtins)), ethjson::spec::Engine::Ethash(ethash) => Arc::new(ethereum::Ethash::new(params, From::from(ethash.params), builtins)), ethjson::spec::Engine::BasicAuthority(basic_authority) => Arc::new(BasicAuthority::new(params, From::from(basic_authority.params), builtins)), - ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Consensus engine could not be started."), + ethjson::spec::Engine::AuthorityRound(authority_round) => AuthorityRound::new(params, From::from(authority_round.params), builtins).expect("Failed to start AuthorityRound consensus engine."), + ethjson::spec::Engine::Tendermint(tendermint) => Tendermint::new(params, From::from(tendermint.params), builtins).expect("Failed to start the Tendermint consensus engine."), } } @@ -208,7 +209,7 @@ impl Spec { /// Overwrite the genesis components. pub fn overwrite_genesis_params(&mut self, g: Genesis) { - let seal: GenericSeal = g.seal.into(); + let GenericSeal(seal_rlp) = g.seal.into(); self.parent_hash = g.parent_hash; self.transactions_root = g.transactions_root; self.receipts_root = g.receipts_root; @@ -218,7 +219,7 @@ impl Spec { self.gas_used = g.gas_used; self.timestamp = g.timestamp; self.extra_data = g.extra_data; - self.seal_rlp = seal.rlp; + self.seal_rlp = seal_rlp; self.state_root_memo = RwLock::new(g.state_root); } @@ -275,6 +276,10 @@ impl Spec { /// Create a new Spec with AuthorityRound consensus which does internal sealing (not requiring work). /// Accounts with secrets "0".sha3() and "1".sha3() are the authorities. pub fn new_test_round() -> Self { load_bundled!("authority_round") } + + /// Create a new Spec with Tendermint consensus which does internal sealing (not requiring work). + /// Account "0".sha3() and "1".sha3() are a authorities. 
+ pub fn new_test_tendermint() -> Self { load_bundled!("tendermint") } } #[cfg(test)] diff --git a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 79c1a4a2c..d38449f7a 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -457,7 +457,6 @@ impl StateDB { #[cfg(test)] mod tests { - use util::{U256, H256, FixedHash, Address, DBTransaction}; use tests::helpers::*; use state::Account; @@ -531,4 +530,3 @@ mod tests { assert!(s.get_cached_account(&address).is_none()); } } - diff --git a/ethcore/src/types/trace_types/localized.rs b/ethcore/src/types/trace_types/localized.rs index 57abea362..f65c47415 100644 --- a/ethcore/src/types/trace_types/localized.rs +++ b/ethcore/src/types/trace_types/localized.rs @@ -21,7 +21,7 @@ use super::trace::{Action, Res}; use header::BlockNumber; /// Localized trace. -#[derive(Debug, PartialEq, Binary)] +#[derive(Debug, PartialEq, Clone, Binary)] pub struct LocalizedTrace { /// Type of action performed by a transaction. pub action: Action, diff --git a/ethkey/src/signature.rs b/ethkey/src/signature.rs index 97a2e0715..ad595cfb9 100644 --- a/ethkey/src/signature.rs +++ b/ethkey/src/signature.rs @@ -18,6 +18,7 @@ use std::ops::{Deref, DerefMut}; use std::cmp::PartialEq; use std::{mem, fmt}; use std::str::FromStr; +use std::hash::{Hash, Hasher}; use secp256k1::{Message as SecpMessage, RecoverableSignature, RecoveryId, Error as SecpError}; use secp256k1::key::{SecretKey, PublicKey}; use rustc_serialize::hex::{ToHex, FromHex}; @@ -116,6 +117,18 @@ impl Default for Signature { } } +impl Hash for Signature { + fn hash(&self, state: &mut H) { + H520::from(self.0).hash(state); + } +} + +impl Clone for Signature { + fn clone(&self) -> Self { + Signature(self.0) + } +} + impl From<[u8; 65]> for Signature { fn from(s: [u8; 65]) -> Self { Signature(s) diff --git a/ethstore/src/dir/disk.rs b/ethstore/src/dir/disk.rs index 80e95fb66..d049def48 100644 --- a/ethstore/src/dir/disk.rs +++ b/ethstore/src/dir/disk.rs @@ -18,7 +18,6 @@ use std::{fs, io}; use std::path::{PathBuf, Path}; use std::collections::HashMap; use time; -use ethkey::Address; use {json, SafeAccount, Error}; use json::Uuid; use super::KeyDirectory; @@ -106,6 +105,11 @@ impl KeyDirectory for DiskDirectory { Ok(accounts) } + fn update(&self, account: SafeAccount) -> Result { + // Disk store handles updates correctly iff filename is the same + self.insert(account) + } + fn insert(&self, account: SafeAccount) -> Result { // transform account into key file let keyfile: json::KeyFile = account.clone().into(); @@ -138,12 +142,12 @@ impl KeyDirectory for DiskDirectory { Ok(account) } - fn remove(&self, address: &Address) -> Result<(), Error> { + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { // enumerate all entries in keystore // and find entry with given address let to_remove = try!(self.files()) .into_iter() - .find(|&(_, ref account)| &account.address == address); + .find(|&(_, ref acc)| acc == account); // remove it match to_remove { diff --git a/ethstore/src/dir/geth.rs b/ethstore/src/dir/geth.rs index 40c3d938a..fe2ba8d1d 100644 --- a/ethstore/src/dir/geth.rs +++ b/ethstore/src/dir/geth.rs @@ -16,7 +16,6 @@ use std::env; use std::path::PathBuf; -use ethkey::Address; use {SafeAccount, Error}; use super::{KeyDirectory, DiskDirectory, DirectoryType}; @@ -89,7 +88,11 @@ impl KeyDirectory for GethDirectory { self.dir.insert(account) } - fn remove(&self, address: &Address) -> Result<(), Error> { - self.dir.remove(address) + fn update(&self, account: SafeAccount) -> Result { + 
self.dir.update(account) + } + + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + self.dir.remove(account) } } diff --git a/ethstore/src/dir/memory.rs b/ethstore/src/dir/memory.rs new file mode 100644 index 000000000..c4f20f0e9 --- /dev/null +++ b/ethstore/src/dir/memory.rs @@ -0,0 +1,67 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::HashMap; +use parking_lot::RwLock; +use itertools::Itertools; +use ethkey::Address; + +use {SafeAccount, Error}; +use super::KeyDirectory; + +#[derive(Default)] +pub struct MemoryDirectory { + accounts: RwLock>>, +} + +impl KeyDirectory for MemoryDirectory { + fn load(&self) -> Result, Error> { + Ok(self.accounts.read().values().cloned().flatten().collect()) + } + + fn update(&self, account: SafeAccount) -> Result { + let mut lock = self.accounts.write(); + let mut accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); + // If the filename is the same we just need to replace the entry + accounts.retain(|acc| acc.filename != account.filename); + accounts.push(account.clone()); + Ok(account) + } + + fn insert(&self, account: SafeAccount) -> Result { + let mut lock = self.accounts.write(); + let mut accounts = lock.entry(account.address.clone()).or_insert_with(Vec::new); + accounts.push(account.clone()); + Ok(account) + } + + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + let mut accounts = self.accounts.write(); + let is_empty = if let Some(mut accounts) = accounts.get_mut(&account.address) { + if let Some(position) = accounts.iter().position(|acc| acc == account) { + accounts.remove(position); + } + accounts.is_empty() + } else { + false + }; + if is_empty { + accounts.remove(&account.address); + } + Ok(()) + } +} + diff --git a/ethstore/src/dir/mod.rs b/ethstore/src/dir/mod.rs index 8f5a8a7ad..9b9051e69 100644 --- a/ethstore/src/dir/mod.rs +++ b/ethstore/src/dir/mod.rs @@ -14,12 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
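Aside, not part of the patch: a minimal sketch of the contract behind the new `update` hook, written as an in-crate test against the `MemoryDirectory` added above. It is only illustrative; it assumes the `SafeAccount::create` signature shown in this diff and that a freshly created account carries no filename, so `update` matches and replaces it while `insert` simply appends.

#[test]
fn update_replaces_insert_appends() {
    use dir::{KeyDirectory, MemoryDirectory};
    use ethkey::{Generator, Random};
    use account::SafeAccount;

    let dir = MemoryDirectory::default();
    let keypair = Random.generate().unwrap();
    let account = SafeAccount::create(&keypair, [0u8; 16], "pass", 1024, "".to_owned(), "{}".to_owned());

    // A fresh insert stores one copy of the key file.
    dir.insert(account.clone()).unwrap();
    assert_eq!(dir.load().unwrap().len(), 1);

    // `update` replaces the entry sharing the same filename, so the count stays at one.
    dir.update(account.clone()).unwrap();
    assert_eq!(dir.load().unwrap().len(), 1);

    // A second `insert`, by contrast, appends another copy for the same address.
    dir.insert(account).unwrap();
    assert_eq!(dir.load().unwrap().len(), 2);
}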
-use ethkey::Address; use std::path::{PathBuf}; use {SafeAccount, Error}; mod disk; mod geth; +mod memory; mod parity; pub enum DirectoryType { @@ -30,10 +30,12 @@ pub enum DirectoryType { pub trait KeyDirectory: Send + Sync { fn load(&self) -> Result, Error>; fn insert(&self, account: SafeAccount) -> Result; - fn remove(&self, address: &Address) -> Result<(), Error>; + fn update(&self, account: SafeAccount) -> Result; + fn remove(&self, account: &SafeAccount) -> Result<(), Error>; fn path(&self) -> Option<&PathBuf> { None } } pub use self::disk::DiskDirectory; pub use self::geth::GethDirectory; +pub use self::memory::MemoryDirectory; pub use self::parity::ParityDirectory; diff --git a/ethstore/src/dir/parity.rs b/ethstore/src/dir/parity.rs index 8c5e9c2d6..75c21ea13 100644 --- a/ethstore/src/dir/parity.rs +++ b/ethstore/src/dir/parity.rs @@ -16,7 +16,6 @@ use std::env; use std::path::PathBuf; -use ethkey::Address; use {SafeAccount, Error}; use super::{KeyDirectory, DiskDirectory, DirectoryType}; @@ -68,7 +67,11 @@ impl KeyDirectory for ParityDirectory { self.dir.insert(account) } - fn remove(&self, address: &Address) -> Result<(), Error> { - self.dir.remove(address) + fn update(&self, account: SafeAccount) -> Result { + self.dir.update(account) + } + + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + self.dir.remove(account) } } diff --git a/ethstore/src/ethstore.rs b/ethstore/src/ethstore.rs index ec1f82626..e100594de 100644 --- a/ethstore/src/ethstore.rs +++ b/ethstore/src/ethstore.rs @@ -16,23 +16,19 @@ use std::collections::BTreeMap; use std::mem; -use ethkey::KeyPair; +use parking_lot::RwLock; + use crypto::KEY_ITERATIONS; use random::Random; -use ethkey::{Signature, Address, Message, Secret, Public}; +use ethkey::{Signature, Address, Message, Secret, Public, KeyPair}; use dir::KeyDirectory; use account::SafeAccount; -use {Error, SecretStore}; -use json; -use json::Uuid; -use parking_lot::RwLock; use presale::PresaleWallet; -use import; +use json::{self, Uuid}; +use {import, Error, SimpleSecretStore, SecretStore}; pub struct EthStore { - dir: Box, - iterations: u32, - cache: RwLock>, + store: EthMultiStore, } impl EthStore { @@ -41,57 +37,46 @@ impl EthStore { } pub fn open_with_iterations(directory: Box, iterations: u32) -> Result { - let accounts = try!(directory.load()); - let cache = accounts.into_iter().map(|account| (account.address.clone(), account)).collect(); - let store = EthStore { - dir: directory, - iterations: iterations, - cache: RwLock::new(cache), - }; - Ok(store) - } - - fn save(&self, account: SafeAccount) -> Result<(), Error> { - // save to file - let account = try!(self.dir.insert(account.clone())); - - // update cache - let mut cache = self.cache.write(); - cache.insert(account.address.clone(), account); - Ok(()) - } - - fn reload_accounts(&self) -> Result<(), Error> { - let mut cache = self.cache.write(); - let accounts = try!(self.dir.load()); - let new_accounts: BTreeMap<_, _> = accounts.into_iter().map(|account| (account.address.clone(), account)).collect(); - mem::replace(&mut *cache, new_accounts); - Ok(()) + Ok(EthStore { + store: try!(EthMultiStore::open_with_iterations(directory, iterations)), + }) } fn get(&self, address: &Address) -> Result { - { - let cache = self.cache.read(); - if let Some(account) = cache.get(address) { - return Ok(account.clone()) - } - } - try!(self.reload_accounts()); - let cache = self.cache.read(); - cache.get(address).cloned().ok_or(Error::InvalidAccount) + let mut accounts = 
try!(self.store.get(address)).into_iter(); + accounts.next().ok_or(Error::InvalidAccount) + } +} + +impl SimpleSecretStore for EthStore { + fn insert_account(&self, secret: Secret, password: &str) -> Result { + self.store.insert_account(secret, password) + } + + fn accounts(&self) -> Result, Error> { + self.store.accounts() + } + + fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> { + self.store.change_password(address, old_password, new_password) + } + + fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> { + self.store.remove_account(address, password) + } + + fn sign(&self, address: &Address, password: &str, message: &Message) -> Result { + let account = try!(self.get(address)); + account.sign(password, message) + } + + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let account = try!(self.get(account)); + account.decrypt(password, shared_mac, message) } } impl SecretStore for EthStore { - fn insert_account(&self, secret: Secret, password: &str) -> Result { - let keypair = try!(KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed)); - let id: [u8; 16] = Random::random(); - let account = SafeAccount::create(&keypair, id, password, self.iterations, "".to_owned(), "{}".to_owned()); - let address = account.address.clone(); - try!(self.save(account)); - Ok(address) - } - fn import_presale(&self, json: &[u8], password: &str) -> Result { let json_wallet = try!(json::PresaleWallet::load(json).map_err(|_| Error::InvalidKeyFile("Invalid JSON format".to_owned()))); let wallet = PresaleWallet::from(json_wallet); @@ -105,48 +90,20 @@ impl SecretStore for EthStore { let secret = try!(safe_account.crypto.secret(password).map_err(|_| Error::InvalidPassword)); safe_account.address = try!(KeyPair::from_secret(secret)).address(); let address = safe_account.address.clone(); - try!(self.save(safe_account)); + try!(self.store.import(safe_account)); Ok(address) } - fn accounts(&self) -> Result, Error> { - try!(self.reload_accounts()); - Ok(self.cache.read().keys().cloned().collect()) - } - - fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> { - // change password + fn test_password(&self, address: &Address, password: &str) -> Result { let account = try!(self.get(address)); - let account = try!(account.change_password(old_password, new_password, self.iterations)); - - // save to file - self.save(account) + Ok(account.check_password(password)) } - fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> { - let can_remove = { - let account = try!(self.get(address)); - account.check_password(password) - }; - - if can_remove { - try!(self.dir.remove(address)); - let mut cache = self.cache.write(); - cache.remove(address); - Ok(()) - } else { - Err(Error::InvalidPassword) - } - } - - fn sign(&self, address: &Address, password: &str, message: &Message) -> Result { + fn copy_account(&self, new_store: &SimpleSecretStore, address: &Address, password: &str, new_password: &str) -> Result<(), Error> { let account = try!(self.get(address)); - account.sign(password, message) - } - - fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { - let account = try!(self.get(account)); - account.decrypt(password, shared_mac, message) + let secret = try!(account.crypto.secret(password)); + try!(new_store.insert_account(secret, new_password)); + 
Ok(()) } fn public(&self, account: &Address, password: &str) -> Result { @@ -170,23 +127,25 @@ impl SecretStore for EthStore { } fn set_name(&self, address: &Address, name: String) -> Result<(), Error> { - let mut account = try!(self.get(address)); + let old = try!(self.get(address)); + let mut account = old.clone(); account.name = name; // save to file - self.save(account) + self.store.update(old, account) } fn set_meta(&self, address: &Address, meta: String) -> Result<(), Error> { - let mut account = try!(self.get(address)); + let old = try!(self.get(address)); + let mut account = old.clone(); account.meta = meta; // save to file - self.save(account) + self.store.update(old, account) } fn local_path(&self) -> String { - self.dir.path().map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|| String::new()) + self.store.dir.path().map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|| String::new()) } fn list_geth_accounts(&self, testnet: bool) -> Vec
{ @@ -194,6 +153,288 @@ impl SecretStore for EthStore { } fn import_geth_accounts(&self, desired: Vec
, testnet: bool) -> Result, Error> { - import::import_geth_accounts(&*self.dir, desired.into_iter().collect(), testnet) + import::import_geth_accounts(&*self.store.dir, desired.into_iter().collect(), testnet) } } + +/// Similar to `EthStore` but may store many accounts (with different passwords) for the same `Address` +pub struct EthMultiStore { + dir: Box, + iterations: u32, + cache: RwLock>>, +} + +impl EthMultiStore { + + pub fn open(directory: Box) -> Result { + Self::open_with_iterations(directory, KEY_ITERATIONS as u32) + } + + pub fn open_with_iterations(directory: Box, iterations: u32) -> Result { + let store = EthMultiStore { + dir: directory, + iterations: iterations, + cache: Default::default(), + }; + try!(store.reload_accounts()); + Ok(store) + } + + fn reload_accounts(&self) -> Result<(), Error> { + let mut cache = self.cache.write(); + let accounts = try!(self.dir.load()); + + let mut new_accounts = BTreeMap::new(); + for account in accounts { + let mut entry = new_accounts.entry(account.address.clone()).or_insert_with(Vec::new); + entry.push(account); + } + mem::replace(&mut *cache, new_accounts); + Ok(()) + } + + fn get(&self, address: &Address) -> Result, Error> { + { + let cache = self.cache.read(); + if let Some(accounts) = cache.get(address) { + if !accounts.is_empty() { + return Ok(accounts.clone()) + } + } + } + + try!(self.reload_accounts()); + let cache = self.cache.read(); + let accounts = try!(cache.get(address).cloned().ok_or(Error::InvalidAccount)); + if accounts.is_empty() { + Err(Error::InvalidAccount) + } else { + Ok(accounts) + } + } + + fn import(&self, account: SafeAccount) -> Result<(), Error> { + // save to file + let account = try!(self.dir.insert(account)); + + // update cache + let mut cache = self.cache.write(); + let mut accounts = cache.entry(account.address.clone()).or_insert_with(Vec::new); + accounts.push(account); + Ok(()) + } + + fn update(&self, old: SafeAccount, new: SafeAccount) -> Result<(), Error> { + // save to file + let account = try!(self.dir.update(new)); + + // update cache + let mut cache = self.cache.write(); + let mut accounts = cache.entry(account.address.clone()).or_insert_with(Vec::new); + // Remove old account + accounts.retain(|acc| acc != &old); + // And push updated to the end + accounts.push(account); + Ok(()) + + } + +} + +impl SimpleSecretStore for EthMultiStore { + fn insert_account(&self, secret: Secret, password: &str) -> Result { + let keypair = try!(KeyPair::from_secret(secret).map_err(|_| Error::CreationFailed)); + let id: [u8; 16] = Random::random(); + let account = SafeAccount::create(&keypair, id, password, self.iterations, "".to_owned(), "{}".to_owned()); + let address = account.address.clone(); + try!(self.import(account)); + Ok(address) + } + + fn accounts(&self) -> Result, Error> { + try!(self.reload_accounts()); + Ok(self.cache.read().keys().cloned().collect()) + } + + fn remove_account(&self, address: &Address, password: &str) -> Result<(), Error> { + let accounts = try!(self.get(address)); + + for account in accounts { + // Skip if password is invalid + if !account.check_password(password) { + continue; + } + + // Remove from dir + try!(self.dir.remove(&account)); + + // Remove from cache + let mut cache = self.cache.write(); + let is_empty = { + let mut accounts = cache.get_mut(address).expect("Entry exists, because it was returned by `get`; qed"); + if let Some(position) = accounts.iter().position(|acc| acc == &account) { + accounts.remove(position); + } + accounts.is_empty() + }; + + if is_empty { + 
cache.remove(address); + } + + return Ok(()); + } + Err(Error::InvalidPassword) + } + + fn change_password(&self, address: &Address, old_password: &str, new_password: &str) -> Result<(), Error> { + let accounts = try!(self.get(address)); + for account in accounts { + // Change password + let new_account = try!(account.change_password(old_password, new_password, self.iterations)); + try!(self.update(account, new_account)); + } + Ok(()) + } + + fn sign(&self, address: &Address, password: &str, message: &Message) -> Result { + let accounts = try!(self.get(address)); + for account in accounts { + if account.check_password(password) { + return account.sign(password, message); + } + } + + Err(Error::InvalidPassword) + } + + fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error> { + let accounts = try!(self.get(account)); + for account in accounts { + if account.check_password(password) { + return account.decrypt(password, shared_mac, message); + } + } + Err(Error::InvalidPassword) + } +} + +#[cfg(test)] +mod tests { + + use dir::MemoryDirectory; + use ethkey::{Random, Generator, KeyPair}; + use secret_store::{SimpleSecretStore, SecretStore}; + use super::{EthStore, EthMultiStore}; + + fn keypair() -> KeyPair { + Random.generate().unwrap() + } + + fn store() -> EthStore { + EthStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always loads successfully; qed") + } + + fn multi_store() -> EthMultiStore { + EthMultiStore::open(Box::new(MemoryDirectory::default())).expect("MemoryDirectory always loads successfully; qed") + } + + #[test] + fn should_insert_account_successfully() { + // given + let store = store(); + let keypair = keypair(); + + // when + let address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + + // then + assert_eq!(address, keypair.address()); + assert!(store.get(&address).is_ok(), "Should contain account."); + assert_eq!(store.accounts().unwrap().len(), 1, "Should have one account."); + } + + #[test] + fn should_update_meta_and_name() { + // given + let store = store(); + let keypair = keypair(); + let address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + assert_eq!(&store.meta(&address).unwrap(), "{}"); + assert_eq!(&store.name(&address).unwrap(), ""); + + // when + store.set_meta(&address, "meta".into()).unwrap(); + store.set_name(&address, "name".into()).unwrap(); + + // then + assert_eq!(&store.meta(&address).unwrap(), "meta"); + assert_eq!(&store.name(&address).unwrap(), "name"); + assert_eq!(store.accounts().unwrap().len(), 1); + } + + #[test] + fn should_remove_account() { + // given + let store = store(); + let keypair = keypair(); + let address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + + // when + store.remove_account(&address, "test").unwrap(); + + // then + assert_eq!(store.accounts().unwrap().len(), 0, "Should remove account."); + } + + #[test] + fn should_return_true_if_password_is_correct() { + // given + let store = store(); + let keypair = keypair(); + let address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + + // when + let res1 = store.test_password(&address, "x").unwrap(); + let res2 = store.test_password(&address, "test").unwrap(); + + assert!(!res1, "First password should be invalid."); + assert!(res2, "Second password should be correct."); + } + + #[test] + fn multistore_should_be_able_to_have_the_same_account_twice() { + // given + let store = multi_store(); + let keypair = keypair(); + let
address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + let address2 = store.insert_account(keypair.secret().clone(), "xyz").unwrap(); + assert_eq!(address, address2); + + // when + assert!(store.remove_account(&address, "test").is_ok(), "First password should work."); + assert_eq!(store.accounts().unwrap().len(), 1); + + assert!(store.remove_account(&address, "xyz").is_ok(), "Second password should work too."); + assert_eq!(store.accounts().unwrap().len(), 0); + } + + #[test] + fn should_copy_account() { + // given + let store = store(); + let multi_store = multi_store(); + let keypair = keypair(); + let address = store.insert_account(keypair.secret().clone(), "test").unwrap(); + assert_eq!(multi_store.accounts().unwrap().len(), 0); + + // when + store.copy_account(&multi_store, &address, "test", "xyz").unwrap(); + + // then + assert!(store.test_password(&address, "test").unwrap(), "First password should work for store."); + assert!(multi_store.sign(&address, "xyz", &Default::default()).is_ok(), "Second password should work for second store."); + assert_eq!(multi_store.accounts().unwrap().len(), 1); + } + +} diff --git a/ethstore/src/lib.rs b/ethstore/src/lib.rs index a55ad207a..e38b04ee4 100644 --- a/ethstore/src/lib.rs +++ b/ethstore/src/lib.rs @@ -50,8 +50,8 @@ mod secret_store; pub use self::account::SafeAccount; pub use self::error::Error; -pub use self::ethstore::EthStore; +pub use self::ethstore::{EthStore, EthMultiStore}; pub use self::import::{import_accounts, read_geth_accounts}; pub use self::presale::PresaleWallet; -pub use self::secret_store::SecretStore; -pub use self::random::random_phrase; +pub use self::secret_store::{SimpleSecretStore, SecretStore}; +pub use self::random::{random_phrase, random_string}; diff --git a/ethstore/src/random.rs b/ethstore/src/random.rs index 6140f0fae..baee08e62 100644 --- a/ethstore/src/random.rs +++ b/ethstore/src/random.rs @@ -51,10 +51,16 @@ pub fn random_phrase(words: usize) -> String { .map(|s| s.to_owned()) .collect(); } - let mut rng = OsRng::new().unwrap(); + let mut rng = OsRng::new().expect("Not able to operate without random source."); (0..words).map(|_| rng.choose(&WORDS).unwrap()).join(" ") } +/// Generate a random string of given length. 
+pub fn random_string(length: usize) -> String { + let mut rng = OsRng::new().expect("Not able to operate without random source."); + rng.gen_ascii_chars().take(length).collect() +} + #[cfg(test)] mod tests { use super::random_phrase; diff --git a/ethstore/src/secret_store.rs b/ethstore/src/secret_store.rs index 2b3afb2ea..e3eea59c6 100644 --- a/ethstore/src/secret_store.rs +++ b/ethstore/src/secret_store.rs @@ -18,18 +18,25 @@ use ethkey::{Address, Message, Signature, Secret, Public}; use Error; use json::Uuid; -pub trait SecretStore: Send + Sync { +pub trait SimpleSecretStore: Send + Sync { fn insert_account(&self, secret: Secret, password: &str) -> Result; - fn import_presale(&self, json: &[u8], password: &str) -> Result; - fn import_wallet(&self, json: &[u8], password: &str) -> Result; fn change_password(&self, account: &Address, old_password: &str, new_password: &str) -> Result<(), Error>; fn remove_account(&self, account: &Address, password: &str) -> Result<(), Error>; fn sign(&self, account: &Address, password: &str, message: &Message) -> Result; fn decrypt(&self, account: &Address, password: &str, shared_mac: &[u8], message: &[u8]) -> Result, Error>; - fn public(&self, account: &Address, password: &str) -> Result; fn accounts(&self) -> Result, Error>; +} + +pub trait SecretStore: SimpleSecretStore { + fn import_presale(&self, json: &[u8], password: &str) -> Result; + fn import_wallet(&self, json: &[u8], password: &str) -> Result; + fn copy_account(&self, new_store: &SimpleSecretStore, account: &Address, password: &str, new_password: &str) -> Result<(), Error>; + fn test_password(&self, account: &Address, password: &str) -> Result; + + fn public(&self, account: &Address, password: &str) -> Result; + fn uuid(&self, account: &Address) -> Result; fn name(&self, account: &Address) -> Result; fn meta(&self, account: &Address) -> Result; diff --git a/ethstore/tests/api.rs b/ethstore/tests/api.rs index 0b3e3ca23..6485c3347 100644 --- a/ethstore/tests/api.rs +++ b/ethstore/tests/api.rs @@ -19,7 +19,7 @@ extern crate ethstore; mod util; -use ethstore::{SecretStore, EthStore}; +use ethstore::{EthStore, SimpleSecretStore}; use ethstore::ethkey::{Random, Generator, Secret, KeyPair, verify_address}; use ethstore::dir::DiskDirectory; use util::TransientDir; diff --git a/ethstore/tests/util/transient_dir.rs b/ethstore/tests/util/transient_dir.rs index 839e9722d..6a22a602d 100644 --- a/ethstore/tests/util/transient_dir.rs +++ b/ethstore/tests/util/transient_dir.rs @@ -18,7 +18,6 @@ use std::path::PathBuf; use std::{env, fs}; use rand::{Rng, OsRng}; use ethstore::dir::{KeyDirectory, DiskDirectory}; -use ethstore::ethkey::Address; use ethstore::{Error, SafeAccount}; pub fn random_dir() -> PathBuf { @@ -64,11 +63,15 @@ impl KeyDirectory for TransientDir { self.dir.load() } + fn update(&self, account: SafeAccount) -> Result { + self.dir.update(account) + } + fn insert(&self, account: SafeAccount) -> Result { self.dir.insert(account) } - fn remove(&self, address: &Address) -> Result<(), Error> { - self.dir.remove(address) + fn remove(&self, account: &SafeAccount) -> Result<(), Error> { + self.dir.remove(account) } } diff --git a/js/.stylelintrc.json b/js/.stylelintrc.json new file mode 100644 index 000000000..9248483c6 --- /dev/null +++ b/js/.stylelintrc.json @@ -0,0 +1,8 @@ +{ + "extends": "stylelint-config-standard", + "rules": { + "selector-pseudo-class-no-unknown": [ + true, { "ignorePseudoClasses": ["global"] } + ] + } +} diff --git a/js/package.json b/js/package.json index 95b15bb83..3b0be9671 
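Aside, not part of the patch: a short sketch of what the `SimpleSecretStore` / `SecretStore` split above buys. Helpers that only create accounts and sign can be written against the smaller trait and then work for `EthStore` and `EthMultiStore` alike, and the multi-store additionally keeps several key files (with different passwords) for one address. The snippet assumes only the APIs visible in this diff plus the existing `ethstore::ethkey` re-export.

extern crate ethstore;

use ethstore::{EthMultiStore, SimpleSecretStore, Error};
use ethstore::dir::MemoryDirectory;
use ethstore::ethkey::{Address, Generator, Random, Secret};

// Generic over SimpleSecretStore, so it compiles for EthStore and EthMultiStore alike.
fn insert_twice<S: SimpleSecretStore>(store: &S, secret: Secret) -> Result<Address, Error> {
    let address = try!(store.insert_account(secret.clone(), "first password"));
    try!(store.insert_account(secret, "second password"));
    Ok(address)
}

fn main() {
    let store = EthMultiStore::open(Box::new(MemoryDirectory::default())).unwrap();
    let secret = Random.generate().unwrap().secret().clone();

    let address = insert_twice(&store, secret).unwrap();

    // Both key files live under the same address; either password can sign.
    assert!(store.sign(&address, "first password", &Default::default()).is_ok());
    assert!(store.sign(&address, "second password", &Default::default()).is_ok());
}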
100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "0.2.119", + "version": "0.2.127", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", @@ -38,8 +38,11 @@ "start:app": "node webpack/dev.server", "clean": "rm -rf ./build ./coverage", "coveralls": "npm run testCoverage && coveralls < coverage/lcov.info", - "lint": "eslint --ignore-path .gitignore ./src/", - "lint:cached": "eslint --cache --ignore-path .gitignore ./src/", + "lint": "npm run lint:css && npm run lint:js", + "lint:cached": "npm run lint:css && npm run lint:js:cached", + "lint:css": "stylelint ./src/**/*.css", + "lint:js": "eslint --ignore-path .gitignore ./src/", + "lint:js:cached": "eslint --cache --ignore-path .gitignore ./src/", "test": "NODE_ENV=test mocha 'src/**/*.spec.js'", "test:coverage": "NODE_ENV=test istanbul cover _mocha -- 'src/**/*.spec.js'", "test:e2e": "NODE_ENV=test mocha 'src/**/*.e2e.js'", @@ -118,6 +121,8 @@ "sinon-as-promised": "4.0.2", "sinon-chai": "2.8.0", "style-loader": "0.13.1", + "stylelint": "7.6.0", + "stylelint-config-standard": "15.0.0", "url-loader": "0.5.7", "webpack": "2.1.0-beta.27", "webpack-dev-middleware": "1.8.4", diff --git a/js/src/3rdparty/email-verification/index.js b/js/src/3rdparty/email-verification/index.js new file mode 100644 index 000000000..5b81f3f95 --- /dev/null +++ b/js/src/3rdparty/email-verification/index.js @@ -0,0 +1,33 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import { stringify } from 'querystring'; + +export const postToServer = (query, isTestnet = false) => { + const port = isTestnet ? 28443 : 18443; + query = stringify(query); + return fetch(`https://email-verification.parity.io:${port}/?` + query, { + method: 'POST', mode: 'cors', cache: 'no-store' + }) + .then((res) => { + return res.json().then((data) => { + if (res.ok) { + return data.message; + } + throw new Error(data.message || 'unknown error'); + }); + }); +}; diff --git a/js/src/3rdparty/email-verification/terms-of-service.js b/js/src/3rdparty/email-verification/terms-of-service.js new file mode 100644 index 000000000..263b7e8f0 --- /dev/null +++ b/js/src/3rdparty/email-verification/terms-of-service.js @@ -0,0 +1,23 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +import React from 'react'; + +export default ( +
+  <ul>
+    <li>todo</li>
+  </ul>
+); diff --git a/js/src/3rdparty/sms-verification/index.js b/js/src/3rdparty/sms-verification/index.js index c50b2331a..65761223b 100644 --- a/js/src/3rdparty/sms-verification/index.js +++ b/js/src/3rdparty/sms-verification/index.js @@ -15,17 +15,6 @@ // along with Parity. If not, see . import { stringify } from 'querystring'; -import React from 'react'; - -export const termsOfService = ( -
-  <ul>
-    <li>This privacy notice relates to your use of the Parity SMS verification service. We take your privacy seriously and deal in an honest, direct and transparent way when it comes to your data.</li>
-    <li>We collect your phone number when you use this service. This is temporarily kept in memory, and then encrypted and stored in our EU servers. We only retain the cryptographic hash of the number to prevent duplicated accounts. You consent to this use.</li>
-    <li>You pay a fee for the cost of this service using the account you want to verify.</li>
-    <li>Your phone number is transmitted to a third party US SMS verification service Twilio for the sole purpose of the SMS verification. You consent to this use. Twilio’s privacy policy is here: https://www.twilio.com/legal/privacy/developer.</li>
-    <li>Parity Technology Limited is registered in England and Wales under company number 09760015 and complies with the Data Protection Act 1998 (UK). You may contact us via email at admin@parity.io. Our general privacy policy can be found here: https://ethcore.io/legal.html.</li>
-  </ul>
-); export const postToServer = (query, isTestnet = false) => { const port = isTestnet ? 8443 : 443; diff --git a/js/src/3rdparty/sms-verification/terms-of-service.js b/js/src/3rdparty/sms-verification/terms-of-service.js new file mode 100644 index 000000000..f61b3c97d --- /dev/null +++ b/js/src/3rdparty/sms-verification/terms-of-service.js @@ -0,0 +1,27 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import React from 'react'; + +export default ( +
+  <ul>
+    <li>This privacy notice relates to your use of the Parity SMS verification service. We take your privacy seriously and deal in an honest, direct and transparent way when it comes to your data.</li>
+    <li>We collect your phone number when you use this service. This is temporarily kept in memory, and then encrypted and stored in our EU servers. We only retain the cryptographic hash of the number to prevent duplicated accounts. You consent to this use.</li>
+    <li>You pay a fee for the cost of this service using the account you want to verify.</li>
+    <li>Your phone number is transmitted to a third party US SMS verification service Twilio for the sole purpose of the SMS verification. You consent to this use. Twilio’s privacy policy is here: https://www.twilio.com/legal/privacy/developer.</li>
+    <li>Parity Technology Limited is registered in England and Wales under company number 09760015 and complies with the Data Protection Act 1998 (UK). You may contact us via email at admin@parity.io. Our general privacy policy can be found here: https://ethcore.io/legal.html.</li>
+  </ul>
+); diff --git a/js/src/api/contract/contract.js b/js/src/api/contract/contract.js index 95dcf2e72..68c0371a1 100644 --- a/js/src/api/contract/contract.js +++ b/js/src/api/contract/contract.js @@ -342,7 +342,8 @@ export default class Contract { options: _options, autoRemove, callback, - filterId + filterId, + id: subscriptionId }; if (skipInitFetch) { @@ -452,13 +453,13 @@ export default class Contract { }) ) .then((logsArray) => { - logsArray.forEach((logs, subscriptionId) => { + logsArray.forEach((logs, index) => { if (!logs || !logs.length) { return; } try { - this.sendData(subscriptionId, null, this.parseEventLogs(logs)); + this._sendData(subscriptions[index].id, null, this.parseEventLogs(logs)); } catch (error) { console.error('_sendSubscriptionChanges', error); } diff --git a/js/src/api/util/identity.js b/js/src/api/util/identity.js index 6a25590e3..e4a95891f 100644 --- a/js/src/api/util/identity.js +++ b/js/src/api/util/identity.js @@ -1,9 +1,30 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + import blockies from 'blockies'; +// jsdom doesn't have all the browser features, blockies fail +const TEST_ENV = process.env.NODE_ENV === 'test'; + export function createIdentityImg (address, scale = 8) { - return blockies({ - seed: (address || '').toLowerCase(), - size: 8, - scale - }).toDataURL(); + return TEST_ENV + ? 
'' + : blockies({ + seed: (address || '').toLowerCase(), + size: 8, + scale + }).toDataURL(); } diff --git a/js/src/contracts/abi/email-verification.json b/js/src/contracts/abi/email-verification.json new file mode 100644 index 000000000..6a7f5a6d0 --- /dev/null +++ b/js/src/contracts/abi/email-verification.json @@ -0,0 +1 @@ +[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"","type":"bytes32"}],"name":"reverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"},{"name":"_puzzle","type":"bytes32"},{"name":"_emailHash","type":"bytes32"}],"name":"puzzle","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_emailHash","type":"bytes32"}],"name":"request","outputs":[],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"uint256"}],"name":"setFee","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_code","type":"bytes32"}],"name":"confirm","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"},{"indexed":false,"name":"emailHash","type":"bytes32"}],"name":"Requested","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"},{"indexed":true,"name":"emailHash","type":"bytes32"},{"indexed":false,"name":"puzzle","type":"bytes32"}],"name":"Puzzled","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"}],"name":"Confirmed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"who","type":"address"}],"name":"Revoked","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"old","type":"address"},{"indexed":true,"name":"current","type":"address"}],"name":"NewOwner","type":"event"}] \ No newline at end of file diff --git a/js/src/contracts/abi/index.js b/js/src/contracts/abi/index.js index 35f96b924..ec5b49383 100644 --- a/js/src/contracts/abi/index.js +++ b/js/src/contracts/abi/index.js @@ -19,6 +19,7 @@ import basiccoin from './basiccoin.json'; import basiccoinmanager from './basiccoinmanager.json'; import dappreg from './dappreg.json'; import eip20 from './eip20.json'; +import emailverification from './email-verification.json'; import gavcoin from './gavcoin.json'; import githubhint from 
'./githubhint.json'; import owned from './owned.json'; @@ -34,6 +35,7 @@ export { basiccoinmanager, dappreg, eip20, + emailverification, gavcoin, githubhint, owned, diff --git a/js/src/contracts/badgereg.js b/js/src/contracts/badgereg.js index 6cf3d8bc9..8075f456e 100644 --- a/js/src/contracts/badgereg.js +++ b/js/src/contracts/badgereg.js @@ -18,7 +18,8 @@ import { bytesToHex, hex2Ascii } from '~/api/util/format'; import ABI from './abi/certifier.json'; -const ZERO = '0x0000000000000000000000000000000000000000000000000000000000000000'; +const ZERO20 = '0x0000000000000000000000000000000000000000'; +const ZERO32 = '0x0000000000000000000000000000000000000000000000000000000000000000'; export default class BadgeReg { constructor (api, registry) { @@ -26,32 +27,57 @@ export default class BadgeReg { this._registry = registry; registry.getContract('badgereg'); - this.certifiers = {}; // by name + this.certifiers = []; // by id this.contracts = {}; // by name } - fetchCertifier (name) { - if (this.certifiers[name]) { - return Promise.resolve(this.certifiers[name]); + certifierCount () { + return this._registry.getContract('badgereg') + .then((badgeReg) => { + return badgeReg.instance.badgeCount.call({}, []) + .then((count) => count.valueOf()); + }); + } + + fetchCertifier (id) { + if (this.certifiers[id]) { + return Promise.resolve(this.certifiers[id]); } return this._registry.getContract('badgereg') .then((badgeReg) => { - return badgeReg.instance.fromName.call({}, [name]) - .then(([ id, address ]) => { - return Promise.all([ - badgeReg.instance.meta.call({}, [id, 'TITLE']), - badgeReg.instance.meta.call({}, [id, 'IMG']) - ]) - .then(([ title, img ]) => { - title = bytesToHex(title); - title = title === ZERO ? null : hex2Ascii(title); - if (bytesToHex(img) === ZERO) img = null; + return badgeReg.instance.badge.call({}, [ id ]); + }) + .then(([ address, name ]) => { + if (address === ZERO20) { + throw new Error(`Certifier ${id} does not exist.`); + } - const data = { address, name, title, icon: img }; - this.certifiers[name] = data; - return data; - }); - }); + name = bytesToHex(name); + name = name === ZERO32 + ? null + : hex2Ascii(name); + return this.fetchMeta(id) + .then(({ title, icon }) => { + const data = { address, id, name, title, icon }; + this.certifiers[id] = data; + return data; + }); + }); + } + + fetchMeta (id) { + return this._registry.getContract('badgereg') + .then((badgeReg) => { + return Promise.all([ + badgeReg.instance.meta.call({}, [id, 'TITLE']), + badgeReg.instance.meta.call({}, [id, 'IMG']) + ]); + }) + .then(([ title, icon ]) => { + title = bytesToHex(title); + title = title === ZERO32 ? 
null : hex2Ascii(title); + if (bytesToHex(icon) === ZERO32) icon = null; + return { title, icon }; }); } diff --git a/js/src/contracts/contracts.js b/js/src/contracts/contracts.js index 8cab3252d..a1a81a8ee 100644 --- a/js/src/contracts/contracts.js +++ b/js/src/contracts/contracts.js @@ -19,7 +19,7 @@ import Registry from './registry'; import SignatureReg from './signaturereg'; import TokenReg from './tokenreg'; import GithubHint from './githubhint'; -import * as smsVerification from './sms-verification'; +import * as verification from './verification'; import BadgeReg from './badgereg'; let instance = null; @@ -58,7 +58,11 @@ export default class Contracts { } get smsVerification () { - return smsVerification; + return verification; + } + + get emailVerification () { + return verification; } static create (api) { diff --git a/js/src/contracts/sms-verification.js b/js/src/contracts/verification.js similarity index 100% rename from js/src/contracts/sms-verification.js rename to js/src/contracts/verification.js diff --git a/js/src/modals/SMSVerification/index.js b/js/src/inject.js similarity index 94% rename from js/src/modals/SMSVerification/index.js rename to js/src/inject.js index e26958f8c..56095809e 100644 --- a/js/src/modals/SMSVerification/index.js +++ b/js/src/inject.js @@ -14,4 +14,5 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -export default from './SMSVerification'; +import './parity'; +import './web3'; diff --git a/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js b/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js index ae3c92e48..0e54e9ef4 100644 --- a/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js +++ b/js/src/modals/ExecuteContract/DetailsStep/detailsStep.js @@ -58,23 +58,23 @@ export default class DetailsStep extends Component {
{ this.renderWarning() } + error={ fromAddressError } + hint='the account to transact with' + label='from account' + onChange={ onFromAddressChange } + value={ fromAddress } /> { this.renderFunctionSelect() } { this.renderParameters() }
+ hint='the amount to send to with the transaction' + label='transaction value (in ETH)' + onSubmit={ onAmountChange } + value={ amount } />
. + +import { mount } from 'enzyme'; +import React from 'react'; +import sinon from 'sinon'; + +import { ContextProvider, muiTheme } from '~/ui'; + +import DetailsStep from './'; + +import { CONTRACT } from '../executeContract.test.js'; + +let component; +let onAmountChange; +let onClose; +let onFromAddressChange; +let onFuncChange; +let onGasEditClick; +let onValueChange; + +function render (props) { + onAmountChange = sinon.stub(); + onClose = sinon.stub(); + onFromAddressChange = sinon.stub(); + onFuncChange = sinon.stub(); + onGasEditClick = sinon.stub(); + onValueChange = sinon.stub(); + + component = mount( + + + + ); + + return component; +} + +describe('modals/ExecuteContract/DetailsStep', () => { + it('renders', () => { + expect(render({ accounts: {}, values: [ true ], valuesError: [ null ] })).to.be.ok; + }); + + describe('parameter values', () => { + beforeEach(() => { + render({ + accounts: {}, + func: CONTRACT.functions[0], + values: [ false ], + valuesError: [ null ] + }); + }); + + describe('bool parameters', () => { + it('toggles from false to true', () => { + component.find('DropDownMenu').last().simulate('change', { target: { value: 'true' } }); + + expect(onValueChange).to.have.been.calledWith(null, 0, true); + }); + }); + }); +}); diff --git a/js/src/modals/ExecuteContract/executeContract.js b/js/src/modals/ExecuteContract/executeContract.js index afcc826b7..74f36df34 100644 --- a/js/src/modals/ExecuteContract/executeContract.js +++ b/js/src/modals/ExecuteContract/executeContract.js @@ -25,6 +25,7 @@ import ContentClear from 'material-ui/svg-icons/content/clear'; import NavigationArrowBack from 'material-ui/svg-icons/navigation/arrow-back'; import NavigationArrowForward from 'material-ui/svg-icons/navigation/arrow-forward'; +import { toWei } from '~/api/util/wei'; import { BusyStep, Button, CompletedStep, GasPriceEditor, IdentityIcon, Modal, TxHash } from '~/ui'; import { MAX_GAS_ESTIMATION } from '~/util/constants'; import { validateAddress, validateUint } from '~/util/validation'; @@ -56,12 +57,12 @@ class ExecuteContract extends Component { } static propTypes = { - isTest: PropTypes.bool, - fromAddress: PropTypes.string, accounts: PropTypes.object, balances: PropTypes.object, - contract: PropTypes.object, + contract: PropTypes.object.isRequired, + fromAddress: PropTypes.string, gasLimit: PropTypes.object.isRequired, + isTest: PropTypes.bool, onClose: PropTypes.func.isRequired, onFromAddressChange: PropTypes.func.isRequired } @@ -77,11 +78,11 @@ class ExecuteContract extends Component { funcError: null, gasEdit: false, rejected: false, - step: STEP_DETAILS, sending: false, + step: STEP_DETAILS, + txhash: null, values: [], - valuesError: [], - txhash: null + valuesError: [] } componentDidMount () { @@ -255,10 +256,6 @@ class ExecuteContract extends Component { valueError = validateAddress(_value).addressError; break; - case 'bool': - value = _value === 'true'; - break; - case 'uint': valueError = validateUint(_value).valueError; break; @@ -278,13 +275,12 @@ class ExecuteContract extends Component { } estimateGas = (_fromAddress) => { - const { api } = this.context; const { fromAddress } = this.props; const { amount, func, values } = this.state; const options = { gas: MAX_GAS_ESTIMATION, from: _fromAddress || fromAddress, - value: api.util.toWei(amount || 0) + value: toWei(amount || 0) }; if (!func) { diff --git a/js/src/modals/ExecuteContract/executeContract.spec.js b/js/src/modals/ExecuteContract/executeContract.spec.js new file mode 100644 index 000000000..6a8cc692d 
--- /dev/null +++ b/js/src/modals/ExecuteContract/executeContract.spec.js @@ -0,0 +1,69 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +import { shallow } from 'enzyme'; +import React from 'react'; +import sinon from 'sinon'; + +import ExecuteContract from './'; + +import { CONTRACT, STORE } from './executeContract.test.js'; + +let component; +let onClose; +let onFromAddressChange; + +function render (props) { + onClose = sinon.stub(); + onFromAddressChange = sinon.stub(); + + component = shallow( + , + { context: { api: {}, store: STORE } } + ).find('ExecuteContract').shallow(); + + return component; +} + +describe('modals/ExecuteContract/DetailsStep', () => { + it('renders', () => { + expect(render({ accounts: {} })).to.be.ok; + }); + + describe('instance functions', () => { + beforeEach(() => { + render({ + accounts: {} + }); + }); + + describe('onValueChange', () => { + it('toggles boolean from false to true', () => { + component.setState({ + func: CONTRACT.functions[0], + values: [false] + }); + component.instance().onValueChange(null, 0, true); + + expect(component.state().values).to.deep.equal([true]); + }); + }); + }); +}); diff --git a/js/src/modals/ExecuteContract/executeContract.test.js b/js/src/modals/ExecuteContract/executeContract.test.js new file mode 100644 index 000000000..212aba2c8 --- /dev/null +++ b/js/src/modals/ExecuteContract/executeContract.test.js @@ -0,0 +1,64 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +import BigNumber from 'bignumber.js'; +import sinon from 'sinon'; + +const CONTRACT = { + functions: [ + { + name: 'test_a', + signature: 'test_a', + estimateGas: sinon.stub().resolves(new BigNumber(123)), + inputs: [ + { + name: 'test_bool', + kind: { + type: 'bool' + } + } + ], + abi: { + inputs: [ + { + name: 'test_bool', + type: 'bool' + } + ] + } + } + ] +}; + +const STORE = { + dispatch: sinon.stub(), + subscribe: sinon.stub(), + getState: () => { + return { + balances: { + balances: {} + }, + nodeStatus: { + gasLimit: new BigNumber(123) + } + }; + } +}; + +export { + CONTRACT, + STORE +}; diff --git a/js/src/modals/FirstRun/firstRun.js b/js/src/modals/FirstRun/firstRun.js index a273d6e63..03bc8d770 100644 --- a/js/src/modals/FirstRun/firstRun.js +++ b/js/src/modals/FirstRun/firstRun.js @@ -15,6 +15,7 @@ // along with Parity. If not, see . import React, { Component, PropTypes } from 'react'; +import { connect } from 'react-redux'; import ActionDone from 'material-ui/svg-icons/action/done'; import ActionDoneAll from 'material-ui/svg-icons/action/done-all'; import NavigationArrowForward from 'material-ui/svg-icons/navigation/arrow-forward'; @@ -35,14 +36,15 @@ import ParityLogo from '../../../assets/images/parity-logo-black-no-text.svg'; const STAGE_NAMES = ['welcome', 'terms', 'new account', 'recovery', 'completed']; -export default class FirstRun extends Component { +class FirstRun extends Component { static contextTypes = { api: PropTypes.object.isRequired, store: PropTypes.object.isRequired } static propTypes = { - visible: PropTypes.bool, + hasAccounts: PropTypes.bool.isRequired, + visible: PropTypes.bool.isRequired, onClose: PropTypes.func.isRequired } @@ -109,6 +111,7 @@ export default class FirstRun extends Component { } renderDialogActions () { + const { hasAccounts } = this.props; const { canCreate, stage, hasAcceptedTnc } = this.state; switch (stage) { @@ -130,13 +133,26 @@ export default class FirstRun extends Component { ); case 2: - return ( + const buttons = [
{ children } diff --git a/js/src/views/Account/account.js b/js/src/views/Account/account.js index 9e4c56166..82b2e6b71 100644 --- a/js/src/views/Account/account.js +++ b/js/src/views/Account/account.js @@ -23,7 +23,7 @@ import ContentSend from 'material-ui/svg-icons/content/send'; import LockIcon from 'material-ui/svg-icons/action/lock'; import VerifyIcon from 'material-ui/svg-icons/action/verified-user'; -import { EditMeta, DeleteAccount, Shapeshift, SMSVerification, Transfer, PasswordManager } from '~/modals'; +import { EditMeta, DeleteAccount, Shapeshift, Verification, Transfer, PasswordManager } from '~/modals'; import { Actionbar, Button, Page } from '~/ui'; import shapeshiftBtn from '~/../assets/images/shapeshift-btn.png'; @@ -31,8 +31,10 @@ import shapeshiftBtn from '~/../assets/images/shapeshift-btn.png'; import Header from './Header'; import Transactions from './Transactions'; import { setVisibleAccounts } from '~/redux/providers/personalActions'; +import { fetchCertifiers, fetchCertifications } from '~/redux/providers/certifications/actions'; -import VerificationStore from '~/modals/SMSVerification/store'; +import SMSVerificationStore from '~/modals/Verification/sms-store'; +import EmailVerificationStore from '~/modals/Verification/email-store'; import styles from './account.css'; @@ -43,6 +45,8 @@ class Account extends Component { static propTypes = { setVisibleAccounts: PropTypes.func.isRequired, + fetchCertifiers: PropTypes.func.isRequired, + fetchCertifications: PropTypes.func.isRequired, images: PropTypes.object.isRequired, params: PropTypes.object, @@ -62,6 +66,7 @@ class Account extends Component { } componentDidMount () { + this.props.fetchCertifiers(); this.setVisibleAccounts(); } @@ -72,15 +77,6 @@ class Account extends Component { if (prevAddress !== nextAddress) { this.setVisibleAccounts(nextProps); } - - const { isTestnet } = nextProps; - if (typeof isTestnet === 'boolean' && !this.state.verificationStore) { - const { api } = this.context; - const { address } = nextProps.params; - this.setState({ - verificationStore: new VerificationStore(api, address, isTestnet) - }); - } } componentWillUnmount () { @@ -88,9 +84,10 @@ class Account extends Component { } setVisibleAccounts (props = this.props) { - const { params, setVisibleAccounts } = props; + const { params, setVisibleAccounts, fetchCertifications } = props; const addresses = [ params.address ]; setVisibleAccounts(addresses); + fetchCertifications(params.address); } render () { @@ -228,8 +225,9 @@ class Account extends Component { const { address } = this.props.params; return ( - ); @@ -303,6 +301,22 @@ class Account extends Component { this.setState({ showVerificationDialog: true }); } + selectVerificationMethod = (name) => { + const { isTestnet } = this.props; + if (typeof isTestnet !== 'boolean' || this.state.verificationStore) return; + + const { api } = this.context; + const { address } = this.props.params; + + let verificationStore = null; + if (name === 'sms') { + verificationStore = new SMSVerificationStore(api, address, isTestnet); + } else if (name === 'email') { + verificationStore = new EmailVerificationStore(api, address, isTestnet); + } + this.setState({ verificationStore }); + } + onVerificationClose = () => { this.setState({ showVerificationDialog: false }); } @@ -344,7 +358,9 @@ function mapStateToProps (state) { function mapDispatchToProps (dispatch) { return bindActionCreators({ - setVisibleAccounts + setVisibleAccounts, + fetchCertifiers, + fetchCertifications }, dispatch); } diff --git 
a/js/src/views/Accounts/List/list.js b/js/src/views/Accounts/List/list.js index f08c9fdb0..d5bdd9662 100644 --- a/js/src/views/Accounts/List/list.js +++ b/js/src/views/Accounts/List/list.js @@ -15,22 +15,29 @@ // along with Parity. If not, see . import React, { Component, PropTypes } from 'react'; +import { connect } from 'react-redux'; +import { bindActionCreators } from 'redux'; import { Container } from '~/ui'; +import { fetchCertifiers, fetchCertifications } from '~/redux/providers/certifications/actions'; import Summary from '../Summary'; import styles from './list.css'; -export default class List extends Component { +class List extends Component { static propTypes = { accounts: PropTypes.object, - walletsOwners: PropTypes.object, balances: PropTypes.object, - link: PropTypes.string, - search: PropTypes.array, + certifications: PropTypes.object.isRequired, empty: PropTypes.bool, + link: PropTypes.string, order: PropTypes.string, orderFallback: PropTypes.string, + search: PropTypes.array, + walletsOwners: PropTypes.object, + + fetchCertifiers: PropTypes.func.isRequired, + fetchCertifications: PropTypes.func.isRequired, handleAddSearchToken: PropTypes.func }; @@ -42,8 +49,16 @@ export default class List extends Component { ); } + componentWillMount () { + const { accounts, fetchCertifiers, fetchCertifications } = this.props; + fetchCertifiers(); + for (let address in accounts) { + fetchCertifications(address); + } + } + renderAccounts () { - const { accounts, balances, link, empty, handleAddSearchToken, walletsOwners } = this.props; + const { accounts, balances, empty, link, walletsOwners, handleAddSearchToken } = this.props; if (empty) { return ( @@ -72,7 +87,9 @@ export default class List extends Component { account={ account } balance={ balance } owners={ owners } - handleAddSearchToken={ handleAddSearchToken } /> + handleAddSearchToken={ handleAddSearchToken } + showCertifications + />
); }); @@ -207,3 +224,20 @@ export default class List extends Component { }); } } + +function mapStateToProps (state) { + const { certifications } = state; + return { certifications }; +} + +function mapDispatchToProps (dispatch) { + return bindActionCreators({ + fetchCertifiers, + fetchCertifications + }, dispatch); +} + +export default connect( + mapStateToProps, + mapDispatchToProps +)(List); diff --git a/js/src/views/Accounts/Summary/summary.js b/js/src/views/Accounts/Summary/summary.js index 98d4642fd..3b1d64d18 100644 --- a/js/src/views/Accounts/Summary/summary.js +++ b/js/src/views/Accounts/Summary/summary.js @@ -21,6 +21,7 @@ import { isEqual } from 'lodash'; import ReactTooltip from 'react-tooltip'; import { Balance, Container, ContainerTitle, IdentityIcon, IdentityName, Tags, Input } from '~/ui'; +import Certifications from '~/ui/Certifications'; import { nullableProptype } from '~/util/proptypes'; import styles from '../accounts.css'; @@ -36,12 +37,14 @@ export default class Summary extends Component { link: PropTypes.string, name: PropTypes.string, noLink: PropTypes.bool, + showCertifications: PropTypes.bool, handleAddSearchToken: PropTypes.func, owners: nullableProptype(PropTypes.array) }; static defaultProps = { - noLink: false + noLink: false, + showCertifications: false }; shouldComponentUpdate (nextProps) { @@ -115,6 +118,7 @@ export default class Summary extends Component { { this.renderOwners() } { this.renderBalance() } + { this.renderCertifications() } ); } @@ -181,4 +185,15 @@ export default class Summary extends Component { ); } + + renderCertifications () { + const { showCertifications, account } = this.props; + if (!showCertifications) { + return null; + } + + return ( + + ); + } } diff --git a/js/src/views/Application/store.js b/js/src/views/Application/store.js index bb42721cd..b9f960eff 100644 --- a/js/src/views/Application/store.js +++ b/js/src/views/Application/store.js @@ -16,14 +16,17 @@ import { action, observable } from 'mobx'; -const showFirstRun = window.localStorage.getItem('showFirstRun') !== '0'; - export default class Store { - @observable firstrunVisible = showFirstRun; + @observable firstrunVisible = false; constructor (api) { this._api = api; + const value = window.localStorage.getItem('showFirstRun'); + if (value) { + this.firstrunVisible = JSON.parse(value); + } + this._checkAccounts(); } @@ -33,7 +36,7 @@ export default class Store { @action toggleFirstrun = (visible = false) => { this.firstrunVisible = visible; - window.localStorage.setItem('showFirstRun', visible ? '1' : '0'); + window.localStorage.setItem('showFirstRun', JSON.stringify(!!visible)); } _checkAccounts () { diff --git a/js/webpack/dev.server.js b/js/webpack/dev.server.js index 9e8bd1524..fc107488a 100644 --- a/js/webpack/dev.server.js +++ b/js/webpack/dev.server.js @@ -15,6 +15,7 @@ // along with Parity. If not, see . 
const webpack = require('webpack'); +const WebpackStats = require('webpack/lib/Stats'); const webpackDevMiddleware = require('webpack-dev-middleware'); const webpackHotMiddleware = require('webpack-hot-middleware'); @@ -59,12 +60,24 @@ app.use(webpackHotMiddleware(compiler, { })); app.use(webpackDevMiddleware(compiler, { - noInfo: false, - quiet: true, + noInfo: true, + quiet: false, progress: true, publicPath: webpackConfig.output.publicPath, stats: { colors: true + }, + reporter: function (data) { + // @see https://github.com/webpack/webpack/blob/324d309107f00cfc38ec727521563d309339b2ec/lib/Stats.js#L790 + // Accepted values: none, errors-only, minimal, normal, verbose + const options = WebpackStats.presetToOptions('minimal'); + options.timings = true; + + const output = data.stats.toString(options); + + process.stdout.write('\n'); + process.stdout.write(output); + process.stdout.write('\n\n'); } })); diff --git a/js/webpack/libraries.js b/js/webpack/libraries.js index 923d799dc..a4e57d7d1 100644 --- a/js/webpack/libraries.js +++ b/js/webpack/libraries.js @@ -28,7 +28,7 @@ module.exports = { context: path.join(__dirname, '../src'), entry: { // library - 'inject': ['./web3.js'], + 'inject': ['./inject.js'], 'web3': ['./web3.js'], 'parity': ['./parity.js'] }, diff --git a/json/src/hash.rs b/json/src/hash.rs index 9aad33115..b09c596f1 100644 --- a/json/src/hash.rs +++ b/json/src/hash.rs @@ -20,7 +20,7 @@ use std::str::FromStr; use serde::{Deserialize, Deserializer, Serialize, Serializer, Error}; use serde::de::Visitor; use rustc_serialize::hex::ToHex; -use util::hash::{H64 as Hash64, H160 as Hash160, H256 as Hash256, H2048 as Hash2048}; +use util::hash::{H64 as Hash64, H160 as Hash160, H256 as Hash256, H520 as Hash520, H2048 as Hash2048}; macro_rules! impl_hash { @@ -87,6 +87,7 @@ macro_rules! impl_hash { impl_hash!(H64, Hash64); impl_hash!(Address, Hash160); impl_hash!(H256, Hash256); +impl_hash!(H520, Hash520); impl_hash!(Bloom, Hash2048); #[cfg(test)] diff --git a/json/src/misc/dapps_settings.rs b/json/src/misc/dapps_settings.rs index 74a12a331..fdbb671eb 100644 --- a/json/src/misc/dapps_settings.rs +++ b/json/src/misc/dapps_settings.rs @@ -49,3 +49,32 @@ impl DappsSettings { serde_json::to_writer(writer, &m.iter().map(|(a, m)| (a.clone().into(), m.clone().into())).collect::>()) } } + +/// Accounts policy for new dapps. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +pub enum NewDappsPolicy { + /// All accounts are exposed by default. + AllAccounts, + /// Only accounts listed here are exposed by default for new dapps. 
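Aside: the new H520 alias in json/src/hash.rs exists to carry raw ECDSA signatures, which the seal changes below store as 65-byte (520-bit) values. A trivial, illustrative check of that arithmetic (not code from the diff):

    fn main() {
        // An ECDSA signature is r (32 bytes) + s (32 bytes) + v (1 byte) = 65 bytes.
        let signature_bytes = 32 + 32 + 1;
        assert_eq!(signature_bytes * 8, 520);
    }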
+ Whitelist(Vec), +} + +impl NewDappsPolicy { + /// Read a hash map of `String -> NewDappsPolicy` + pub fn read_new_dapps_policy(reader: R) -> Result, serde_json::Error> where + R: io::Read, + S: From + Clone, + { + serde_json::from_reader(reader).map(|ok: HashMap| + ok.into_iter().map(|(a, m)| (a.into(), m.into())).collect() + ) + } + + /// Write a hash map of `String -> NewDappsPolicy` + pub fn write_new_dapps_policy(m: &HashMap, writer: &mut W) -> Result<(), serde_json::Error> where + W: io::Write, + S: Into + Clone, + { + serde_json::to_writer(writer, &m.iter().map(|(a, m)| (a.clone().into(), m.clone().into())).collect::>()) + } +} diff --git a/json/src/misc/mod.rs b/json/src/misc/mod.rs index b64afcc31..0a1e93fa9 100644 --- a/json/src/misc/mod.rs +++ b/json/src/misc/mod.rs @@ -19,5 +19,5 @@ mod account_meta; mod dapps_settings; -pub use self::dapps_settings::DappsSettings; +pub use self::dapps_settings::{DappsSettings, NewDappsPolicy}; pub use self::account_meta::AccountMeta; diff --git a/json/src/spec/engine.rs b/json/src/spec/engine.rs index d36327e70..c95693b5e 100644 --- a/json/src/spec/engine.rs +++ b/json/src/spec/engine.rs @@ -19,6 +19,7 @@ use spec::Ethash; use spec::BasicAuthority; use spec::AuthorityRound; +use spec::Tendermint; /// Engine deserialization. #[derive(Debug, PartialEq, Deserialize)] @@ -33,6 +34,8 @@ pub enum Engine { BasicAuthority(BasicAuthority), /// AuthorityRound engine. AuthorityRound(AuthorityRound), + /// Tendermint engine. + Tendermint(Tendermint) } #[cfg(test)] diff --git a/json/src/spec/mod.rs b/json/src/spec/mod.rs index 19b9974d9..d923be069 100644 --- a/json/src/spec/mod.rs +++ b/json/src/spec/mod.rs @@ -27,15 +27,17 @@ pub mod state; pub mod ethash; pub mod basic_authority; pub mod authority_round; +pub mod tendermint; pub use self::account::Account; pub use self::builtin::{Builtin, Pricing, Linear}; pub use self::genesis::Genesis; pub use self::params::Params; pub use self::spec::Spec; -pub use self::seal::{Seal, Ethereum, Generic}; +pub use self::seal::{Seal, Ethereum, AuthorityRoundSeal, TendermintSeal}; pub use self::engine::Engine; pub use self::state::State; pub use self::ethash::{Ethash, EthashParams}; pub use self::basic_authority::{BasicAuthority, BasicAuthorityParams}; pub use self::authority_round::{AuthorityRound, AuthorityRoundParams}; +pub use self::tendermint::{Tendermint, TendermintParams}; diff --git a/json/src/spec/seal.rs b/json/src/spec/seal.rs index eba75840a..1cf1e86e6 100644 --- a/json/src/spec/seal.rs +++ b/json/src/spec/seal.rs @@ -16,7 +16,8 @@ //! Spec seal deserialization. -use hash::{H64, H256}; +use hash::*; +use uint::Uint; use bytes::Bytes; /// Ethereum seal. @@ -29,11 +30,24 @@ pub struct Ethereum { pub mix_hash: H256, } -/// Generic seal. +/// AuthorityRound seal. #[derive(Debug, PartialEq, Deserialize)] -pub struct Generic { - /// Seal rlp. - pub rlp: Bytes, +pub struct AuthorityRoundSeal { + /// Seal step. + pub step: Uint, + /// Seal signature. + pub signature: H520, +} + +/// Tendermint seal. +#[derive(Debug, PartialEq, Deserialize)] +pub struct TendermintSeal { + /// Seal round. + pub round: Uint, + /// Proposal seal signature. + pub proposal: H520, + /// Proposal seal signature. + pub precommits: Vec, } /// Seal variants. @@ -42,9 +56,15 @@ pub enum Seal { /// Ethereum seal. #[serde(rename="ethereum")] Ethereum(Ethereum), + /// AuthorityRound seal. + #[serde(rename="authority_round")] + AuthorityRound(AuthorityRoundSeal), + /// Tendermint seal. 
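Aside: the NewDappsPolicy helpers added in json/src/misc/dapps_settings.rs read and write a map from a string key to a policy. A sketch of how such a map round-trips through JSON, assuming present-day serde derive and serde_json as dependencies; the address type is simplified to a plain string rather than the crate's own type:

    use std::collections::HashMap;
    use serde::{Deserialize, Serialize};

    // Simplified stand-in for the new policy enum; addresses are plain strings here.
    #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
    enum NewDappsPolicy {
        AllAccounts,
        Whitelist(Vec<String>),
    }

    fn main() {
        let mut policies = HashMap::new();
        policies.insert(
            "default".to_string(),
            NewDappsPolicy::Whitelist(vec!["0xc6d9d2cd449a754c494264e1809c50e34d64562b".to_string()]),
        );

        let json = serde_json::to_string(&policies).unwrap();
        let back: HashMap<String, NewDappsPolicy> = serde_json::from_str(&json).unwrap();
        assert_eq!(policies, back);
    }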
+ #[serde(rename="tendermint")] + Tendermint(TendermintSeal), /// Generic seal. #[serde(rename="generic")] - Generic(Generic), + Generic(Bytes), } #[cfg(test)] @@ -53,15 +73,26 @@ mod tests { use spec::Seal; #[test] - fn builtin_deserialization() { + fn seal_deserialization() { let s = r#"[{ "ethereum": { "nonce": "0x0000000000000042", "mixHash": "0x0000000000000000000000000000000000000000000000000000000000000000" } },{ - "generic": { - "rlp": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" + "generic": "0xe011bbe8db4e347b4e8c937c1c8370e4b5ed33adb3db69cbdb7a38e1e50b1b82fa" + },{ + "authority_round": { + "step": "0x0", + "signature": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + },{ + "tendermint": { + "round": "0x0", + "proposal": "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "precommits": [ + "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + ] } }]"#; let _deserialized: Vec = serde_json::from_str(s).unwrap(); diff --git a/json/src/spec/spec.rs b/json/src/spec/spec.rs index 22d6b36d8..fb289f11b 100644 --- a/json/src/spec/spec.rs +++ b/json/src/spec/spec.rs @@ -27,8 +27,8 @@ pub struct Spec { /// Spec name. pub name: String, /// Special fork name. - #[serde(rename="forkName")] - pub fork_name: Option, + #[serde(rename="dataDir")] + pub data_dir: Option, /// Engine. pub engine: Engine, /// Spec params. @@ -57,6 +57,7 @@ mod tests { fn spec_deserialization() { let s = r#"{ "name": "Morden", + "dataDir": "morden", "engine": { "Ethash": { "params": { diff --git a/json/src/spec/tendermint.rs b/json/src/spec/tendermint.rs new file mode 100644 index 000000000..6858602da --- /dev/null +++ b/json/src/spec/tendermint.rs @@ -0,0 +1,67 @@ +// Copyright 2015, 2016 Ethcore (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Tendermint params deserialization. + +use uint::Uint; +use hash::Address; + +/// Tendermint params deserialization. +#[derive(Debug, PartialEq, Deserialize)] +pub struct TendermintParams { + /// Gas limit divisor. + #[serde(rename="gasLimitBoundDivisor")] + pub gas_limit_bound_divisor: Uint, + /// Valid authorities + pub authorities: Vec
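Aside: the new seal variants in json/src/spec/seal.rs are externally tagged, so the key of the JSON object ("authority_round", "tendermint", "generic") selects the variant, as the updated test shows. A minimal sketch of the same shape using present-day serde derive and serde_json, with hex values kept as plain strings instead of the crate's Uint/H520 types:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    struct TendermintSeal {
        round: String,
        proposal: String,
        precommits: Vec<String>,
    }

    // Externally tagged: the JSON object key selects the variant.
    #[derive(Debug, Deserialize)]
    enum Seal {
        #[serde(rename = "tendermint")]
        Tendermint(TendermintSeal),
        #[serde(rename = "generic")]
        Generic(String),
    }

    fn main() {
        let json = r#"{
            "tendermint": {
                "round": "0x0",
                "proposal": "0x00",
                "precommits": ["0x00"]
            }
        }"#;
        println!("{:?}", serde_json::from_str::<Seal>(json).unwrap());
    }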
, + /// Propose step timeout in milliseconds. + #[serde(rename="timeoutPropose")] + pub timeout_propose: Option, + /// Prevote step timeout in milliseconds. + #[serde(rename="timeoutPrevote")] + pub timeout_prevote: Option, + /// Precommit step timeout in milliseconds. + #[serde(rename="timeoutPrecommit")] + pub timeout_precommit: Option, + /// Commit step timeout in milliseconds. + #[serde(rename="timeoutCommit")] + pub timeout_commit: Option, +} + +/// Tendermint engine deserialization. +#[derive(Debug, PartialEq, Deserialize)] +pub struct Tendermint { + /// Ethash params. + pub params: TendermintParams, +} + +#[cfg(test)] +mod tests { + use serde_json; + use spec::tendermint::Tendermint; + + #[test] + fn basic_authority_deserialization() { + let s = r#"{ + "params": { + "gasLimitBoundDivisor": "0x0400", + "authorities" : ["0xc6d9d2cd449a754c494264e1809c50e34d64562b"] + } + }"#; + + let _deserialized: Tendermint = serde_json::from_str(s).unwrap(); + } +} diff --git a/parity/account.rs b/parity/account.rs index e790eb596..15e712413 100644 --- a/parity/account.rs +++ b/parity/account.rs @@ -14,23 +14,32 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::path::PathBuf; use ethcore::ethstore::{EthStore, SecretStore, import_accounts, read_geth_accounts}; use ethcore::ethstore::dir::DiskDirectory; use ethcore::account_provider::AccountProvider; use helpers::{password_prompt, password_from_file}; +use params::SpecType; #[derive(Debug, PartialEq)] pub enum AccountCmd { New(NewAccount), - List(String), + List(ListAccounts), Import(ImportAccounts), ImportFromGeth(ImportFromGethAccounts) } +#[derive(Debug, PartialEq)] +pub struct ListAccounts { + pub path: String, + pub spec: SpecType, +} + #[derive(Debug, PartialEq)] pub struct NewAccount { pub iterations: u32, pub path: String, + pub spec: SpecType, pub password_file: Option, } @@ -38,6 +47,7 @@ pub struct NewAccount { pub struct ImportAccounts { pub from: Vec, pub to: String, + pub spec: SpecType, } /// Parameters for geth accounts' import @@ -47,18 +57,22 @@ pub struct ImportFromGethAccounts { pub testnet: bool, /// directory to import accounts to pub to: String, + pub spec: SpecType, } pub fn execute(cmd: AccountCmd) -> Result { match cmd { AccountCmd::New(new_cmd) => new(new_cmd), - AccountCmd::List(path) => list(path), + AccountCmd::List(list_cmd) => list(list_cmd), AccountCmd::Import(import_cmd) => import(import_cmd), AccountCmd::ImportFromGeth(import_geth_cmd) => import_geth(import_geth_cmd) } } -fn keys_dir(path: String) -> Result { +fn keys_dir(path: String, spec: SpecType) -> Result { + let spec = try!(spec.spec()); + let mut path = PathBuf::from(&path); + path.push(spec.data_dir); DiskDirectory::create(path).map_err(|e| format!("Could not open keys directory: {}", e)) } @@ -75,15 +89,15 @@ fn new(n: NewAccount) -> Result { None => try!(password_prompt()), }; - let dir = Box::new(try!(keys_dir(n.path))); + let dir = Box::new(try!(keys_dir(n.path, n.spec))); let secret_store = Box::new(try!(secret_store(dir, Some(n.iterations)))); let acc_provider = AccountProvider::new(secret_store); let new_account = try!(acc_provider.new_account(&password).map_err(|e| format!("Could not create new account: {}", e))); Ok(format!("{:?}", new_account)) } -fn list(path: String) -> Result { - let dir = Box::new(try!(keys_dir(path))); +fn list(list_cmd: ListAccounts) -> Result { + let dir = Box::new(try!(keys_dir(list_cmd.path, list_cmd.spec))); let secret_store = 
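Aside: keys_dir in parity/account.rs now nests key files under a per-chain subdirectory taken from the spec's data_dir, so different chains no longer share one keys directory. A path-only sketch of that change (the base path below is made up for the example; the real function also opens a DiskDirectory at the result):

    use std::path::PathBuf;

    // Illustrative only: append the chain's data_dir to the configured keys path.
    fn keys_dir(base: &str, spec_data_dir: &str) -> PathBuf {
        let mut path = PathBuf::from(base);
        path.push(spec_data_dir);
        path
    }

    fn main() {
        // "morden" is the dataDir used by the Morden spec in the test above.
        let dir = keys_dir("/home/alice/.local/share/parity/keys", "morden");
        println!("{}", dir.display());
    }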
Box::new(try!(secret_store(dir, None))); let acc_provider = AccountProvider::new(secret_store); let accounts = acc_provider.accounts(); @@ -96,7 +110,7 @@ fn list(path: String) -> Result { } fn import(i: ImportAccounts) -> Result { - let to = try!(keys_dir(i.to)); + let to = try!(keys_dir(i.to, i.spec)); let mut imported = 0; for path in &i.from { let from = DiskDirectory::at(path); @@ -109,7 +123,7 @@ fn import_geth(i: ImportFromGethAccounts) -> Result { use std::io::ErrorKind; use ethcore::ethstore::Error; - let dir = Box::new(try!(keys_dir(i.to))); + let dir = Box::new(try!(keys_dir(i.to, i.spec))); let secret_store = Box::new(try!(secret_store(dir, None))); let geth_accounts = read_geth_accounts(i.testnet); match secret_store.import_geth_accounts(geth_accounts, i.testnet) { diff --git a/parity/blockchain.rs b/parity/blockchain.rs index 10fe1d32e..94e9e7709 100644 --- a/parity/blockchain.rs +++ b/parity/blockchain.rs @@ -64,11 +64,19 @@ impl FromStr for DataFormat { #[derive(Debug, PartialEq)] pub enum BlockchainCmd { + Kill(KillBlockchain), Import(ImportBlockchain), Export(ExportBlockchain), ExportState(ExportState), } +#[derive(Debug, PartialEq)] +pub struct KillBlockchain { + pub spec: SpecType, + pub dirs: Directories, + pub pruning: Pruning, +} + #[derive(Debug, PartialEq)] pub struct ImportBlockchain { pub spec: SpecType, @@ -128,6 +136,7 @@ pub struct ExportState { pub fn execute(cmd: BlockchainCmd) -> Result { match cmd { + BlockchainCmd::Kill(kill_cmd) => kill_db(kill_cmd), BlockchainCmd::Import(import_cmd) => execute_import(import_cmd), BlockchainCmd::Export(export_cmd) => execute_export(export_cmd), BlockchainCmd::ExportState(export_cmd) => execute_export_state(export_cmd), @@ -140,9 +149,6 @@ fn execute_import(cmd: ImportBlockchain) -> Result { // Setup panic handler let panic_handler = PanicHandler::new_in_arc(); - // create dirs used by parity - try!(cmd.dirs.create_dirs(false, false)); - // load spec file let spec = try!(cmd.spec.spec()); @@ -150,7 +156,7 @@ fn execute_import(cmd: ImportBlockchain) -> Result { let genesis_hash = spec.genesis_header().hash(); // database paths - let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir.clone()); // user defaults path let user_defaults_path = db_dirs.user_defaults_path(); @@ -174,7 +180,10 @@ fn execute_import(cmd: ImportBlockchain) -> Result { let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path()))); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))); + + // create dirs used by parity + try!(cmd.dirs.create_dirs(false, false)); // prepare client config let mut client_config = to_client_config( @@ -311,9 +320,6 @@ fn start_client( wal: bool, cache_config: CacheConfig) -> Result { - // create dirs used by parity - try!(dirs.create_dirs(false, false)); - // load spec file let spec = try!(spec.spec()); @@ -321,7 +327,7 @@ fn start_client( let genesis_hash = spec.genesis_header().hash(); // database paths - let db_dirs = dirs.database(genesis_hash, spec.fork_name.clone()); + let db_dirs = dirs.database(genesis_hash, None, spec.data_dir.clone()); // user defaults path let user_defaults_path = db_dirs.user_defaults_path(); @@ -345,7 +351,10 @@ fn start_client( let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&db_dirs, algorithm, 
compaction.compaction_profile(db_dirs.fork_path().as_path()))); + try!(execute_upgrades(&db_dirs, algorithm, compaction.compaction_profile(db_dirs.db_root_path().as_path()))); + + // create dirs used by parity + try!(dirs.create_dirs(false, false)); // prepare client config let client_config = to_client_config(&cache_config, Mode::Active, tracing, fat_db, compaction, wal, VMType::default(), "".into(), algorithm, pruning_history, true); @@ -473,6 +482,18 @@ fn execute_export_state(cmd: ExportState) -> Result { Ok("Export completed.".into()) } +pub fn kill_db(cmd: KillBlockchain) -> Result { + let spec = try!(cmd.spec.spec()); + let genesis_hash = spec.genesis_header().hash(); + let db_dirs = cmd.dirs.database(genesis_hash, None, spec.data_dir); + let user_defaults_path = db_dirs.user_defaults_path(); + let user_defaults = try!(UserDefaults::load(&user_defaults_path)); + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + let dir = db_dirs.db_path(algorithm); + try!(fs::remove_dir_all(&dir).map_err(|e| format!("Error removing database: {:?}", e))); + Ok("Database deleted.".to_owned()) +} + #[cfg(test)] mod test { use super::DataFormat; diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 39e4d5867..59f478bb3 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -16,6 +16,7 @@ #[macro_use] mod usage; +use dir::default_data_path; usage! { { @@ -31,16 +32,21 @@ usage! { cmd_import: bool, cmd_signer: bool, cmd_new_token: bool, + cmd_sign: bool, + cmd_reject: bool, cmd_snapshot: bool, cmd_restore: bool, cmd_ui: bool, cmd_tools: bool, cmd_hash: bool, + cmd_kill: bool, + cmd_db: bool, // Arguments arg_pid_file: String, arg_file: Option, arg_path: Vec, + arg_id: Option, // Flags // -- Legacy Options @@ -80,8 +86,8 @@ usage! { flag_mode_timeout: u64 = 300u64, or |c: &Config| otry!(c.parity).mode_timeout.clone(), flag_mode_alarm: u64 = 3600u64, or |c: &Config| otry!(c.parity).mode_alarm.clone(), flag_chain: String = "homestead", or |c: &Config| otry!(c.parity).chain.clone(), - flag_db_path: String = "$HOME/.parity", or |c: &Config| otry!(c.parity).db_path.clone(), - flag_keys_path: String = "$HOME/.parity/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), + flag_db_path: String = default_data_path(), or |c: &Config| otry!(c.parity).db_path.clone(), + flag_keys_path: String = "$DATA/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), // -- Account Options @@ -100,7 +106,7 @@ usage! { or |c: &Config| otry!(c.ui).port.clone(), flag_ui_interface: String = "local", or |c: &Config| otry!(c.ui).interface.clone(), - flag_ui_path: String = "$HOME/.parity/signer", + flag_ui_path: String = "$DATA/signer", or |c: &Config| otry!(c.ui).path.clone(), // NOTE [todr] For security reasons don't put this to config files flag_ui_no_validation: bool = false, or |_| None, @@ -156,7 +162,7 @@ usage! { // IPC flag_no_ipc: bool = false, or |c: &Config| otry!(c.ipc).disable.clone(), - flag_ipc_path: String = "$HOME/.parity/jsonrpc.ipc", + flag_ipc_path: String = "$DATA/jsonrpc.ipc", or |c: &Config| otry!(c.ipc).path.clone(), flag_ipc_apis: String = "web3,eth,net,parity,parity_accounts,traces,rpc", or |c: &Config| otry!(c.ipc).apis.clone().map(|vec| vec.join(",")), @@ -170,7 +176,7 @@ usage! 
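Aside: the new `parity db kill` command resolves the chain-specific database directory (spec, genesis hash, pruning algorithm) and deletes it. A reduced sketch of the final step only, assuming the directory has already been resolved as kill_db does; the path in main is a placeholder:

    use std::fs;
    use std::path::Path;

    // Remove the resolved chain database directory, mapping the IO error to a
    // printable message the way kill_db does.
    fn remove_db(dir: &Path) -> Result<String, String> {
        fs::remove_dir_all(dir).map_err(|e| format!("Error removing database: {:?}", e))?;
        Ok("Database deleted.".to_owned())
    }

    fn main() {
        match remove_db(Path::new("./tmp-chain-db")) {
            Ok(msg) => println!("{}", msg),
            Err(err) => eprintln!("{}", err),
        }
    }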
{ or |c: &Config| otry!(c.dapps).interface.clone(), flag_dapps_hosts: String = "none", or |c: &Config| otry!(c.dapps).hosts.clone().map(|vec| vec.join(",")), - flag_dapps_path: String = "$HOME/.parity/dapps", + flag_dapps_path: String = "$DATA/dapps", or |c: &Config| otry!(c.dapps).path.clone(), flag_dapps_user: Option = None, or |c: &Config| otry!(c.dapps).user.clone().map(Some), @@ -271,7 +277,7 @@ usage! { or |c: &Config| otry!(c.vm).jit.clone(), // -- Miscellaneous Options - flag_config: String = "$HOME/.parity/config.toml", or |_| None, + flag_config: String = "$DATA/config.toml", or |_| None, flag_logging: Option = None, or |c: &Config| otry!(c.misc).logging.clone().map(Some), flag_log_file: Option = None, @@ -506,16 +512,21 @@ mod tests { cmd_blocks: false, cmd_import: false, cmd_signer: false, + cmd_sign: false, + cmd_reject: false, cmd_new_token: false, cmd_snapshot: false, cmd_restore: false, cmd_ui: false, cmd_tools: false, cmd_hash: false, + cmd_db: false, + cmd_kill: false, // Arguments arg_pid_file: "".into(), arg_file: None, + arg_id: None, arg_path: vec![], // -- Operating Options @@ -665,7 +676,7 @@ mod tests { // -- Miscellaneous Options flag_version: false, - flag_config: "$HOME/.parity/config.toml".into(), + flag_config: "$DATA/config.toml".into(), flag_logging: Some("own_tx=trace".into()), flag_log_file: Some("/var/log/parity.log".into()), flag_no_color: false, diff --git a/parity/cli/usage.rs b/parity/cli/usage.rs index cd94e0299..8c8b54c87 100644 --- a/parity/cli/usage.rs +++ b/parity/cli/usage.rs @@ -145,7 +145,7 @@ macro_rules! usage { } let config_file = raw_args.flag_config.clone().unwrap_or_else(|| raw_args.clone().into_args(Config::default()).flag_config); - let config_file = replace_home(&config_file); + let config_file = replace_home("", &config_file); let config = match (fs::File::open(&config_file), raw_args.flag_config.is_some()) { // Load config file (Ok(mut file), _) => { diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index c58672542..bbf7ac236 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -12,9 +12,13 @@ Usage: parity import [ ] [options] parity export (blocks | state) [ ] [options] parity signer new-token [options] + parity signer list [options] + parity signer sign [ ] [ --password FILE ] [options] + parity signer reject [options] parity snapshot [options] parity restore [ ] [options] parity tools hash + parity db kill [options] Operating Options: --mode MODE Set the operating mode. MODE can be one of: @@ -266,7 +270,7 @@ Footprint Options: of all accounts and storage keys. Doubles the size of the state database. BOOL may be one of on, off or auto. (default: {flag_fat_db}) - --scale-verifiers Automatically scale amount of verifier threads based on + --scale-verifiers Automatically scale amount of verifier threads based on workload. Not guaranteed to be faster. (default: {flag_scale_verifiers}) --num-verifiers INT Amount of verifier threads to use or to begin with, if verifier @@ -282,10 +286,8 @@ Import/Export Options: (default: {flag_format:?} = Import: auto, Export: binary) --no-seal-check Skip block seal check. (default: {flag_no_seal_check}) --at BLOCK Export state at the given block, which may be an - index, hash, or 'latest'. Note that taking snapshots at - non-recent blocks will only work with --pruning archive - (default: {flag_at}) - --no-storage Don't export account storge. (default: {flag_no_storage}) + index, hash, or 'latest'. (default: {flag_at}) + --no-storage Don't export account storage. 
(default: {flag_no_storage}) --no-code Don't export account code. (default: {flag_no_code}) --min-balance WEI Don't export accounts with balance less than specified. (default: {flag_min_balance:?}) diff --git a/parity/configuration.rs b/parity/configuration.rs index 59333c883..b9ae2e958 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -38,11 +38,13 @@ use dir::Directories; use dapps::Configuration as DappsConfiguration; use signer::{Configuration as SignerConfiguration}; use run::RunCmd; -use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, ExportState, DataFormat}; +use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, KillBlockchain, ExportState, DataFormat}; use presale::ImportWallet; -use account::{AccountCmd, NewAccount, ImportAccounts, ImportFromGethAccounts}; +use account::{AccountCmd, NewAccount, ListAccounts, ImportAccounts, ImportFromGethAccounts}; use snapshot::{self, SnapshotCommand}; +const AUTHCODE_FILENAME: &'static str = "authcodes"; + #[derive(Debug, PartialEq)] pub enum Cmd { Run(RunCmd), @@ -51,6 +53,21 @@ pub enum Cmd { ImportPresaleWallet(ImportWallet), Blockchain(BlockchainCmd), SignerToken(SignerConfiguration), + SignerSign { + id: Option, + pwfile: Option, + port: u16, + authfile: PathBuf, + }, + SignerList { + port: u16, + authfile: PathBuf + }, + SignerReject { + id: Option, + port: u16, + authfile: PathBuf + }, Snapshot(SnapshotCommand), Hash(Option), } @@ -103,24 +120,64 @@ impl Configuration { let cmd = if self.args.flag_version { Cmd::Version - } else if self.args.cmd_signer && self.args.cmd_new_token { - Cmd::SignerToken(signer_conf) + } else if self.args.cmd_signer { + let mut authfile = PathBuf::from(signer_conf.signer_path.clone()); + authfile.push(AUTHCODE_FILENAME); + + if self.args.cmd_new_token { + Cmd::SignerToken(signer_conf) + } else if self.args.cmd_sign { + let pwfile = self.args.flag_password.get(0).map(|pwfile| { + PathBuf::from(pwfile) + }); + Cmd::SignerSign { + id: self.args.arg_id, + pwfile: pwfile, + port: signer_conf.port, + authfile: authfile, + } + } else if self.args.cmd_reject { + Cmd::SignerReject { + id: self.args.arg_id, + port: signer_conf.port, + authfile: authfile, + } + } else if self.args.cmd_list { + Cmd::SignerList { + port: signer_conf.port, + authfile: authfile, + } + } else { + unreachable!(); + } } else if self.args.cmd_tools && self.args.cmd_hash { Cmd::Hash(self.args.arg_file) + } else if self.args.cmd_db && self.args.cmd_kill { + Cmd::Blockchain(BlockchainCmd::Kill(KillBlockchain { + spec: spec, + dirs: dirs, + pruning: pruning, + })) } else if self.args.cmd_account { let account_cmd = if self.args.cmd_new { let new_acc = NewAccount { iterations: self.args.flag_keys_iterations, path: dirs.keys, + spec: spec, password_file: self.args.flag_password.first().cloned(), }; AccountCmd::New(new_acc) } else if self.args.cmd_list { - AccountCmd::List(dirs.keys) + let list_acc = ListAccounts { + path: dirs.keys, + spec: spec, + }; + AccountCmd::List(list_acc) } else if self.args.cmd_import { let import_acc = ImportAccounts { from: self.args.arg_path.clone(), to: dirs.keys, + spec: spec, }; AccountCmd::Import(import_acc) } else { @@ -130,6 +187,7 @@ impl Configuration { } else if self.args.flag_import_geth_keys { let account_cmd = AccountCmd::ImportFromGeth( ImportFromGethAccounts { + spec: spec, to: dirs.keys, testnet: self.args.flag_testnet } @@ -139,6 +197,7 @@ impl Configuration { let presale_cmd = ImportWallet { iterations: self.args.flag_keys_iterations, path: dirs.keys, + 
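Aside: the new signer sub-commands (sign, reject, list) all need the token file, which configuration.rs locates by pushing the AUTHCODE_FILENAME constant onto the configured signer path. A minimal, illustrative sketch of that lookup (the helper name and the directory in main are made up for the example):

    use std::path::PathBuf;

    const AUTHCODE_FILENAME: &str = "authcodes";

    // The SignerSign/SignerReject/SignerList commands build this path by
    // appending AUTHCODE_FILENAME to the signer directory.
    fn authfile(signer_path: &str) -> PathBuf {
        let mut path = PathBuf::from(signer_path);
        path.push(AUTHCODE_FILENAME);
        path
    }

    fn main() {
        println!("{}", authfile("/home/alice/.local/share/parity/signer").display());
    }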
spec: spec, wallet_path: self.args.arg_path.first().unwrap().clone(), password_file: self.args.flag_password.first().cloned(), }; @@ -530,7 +589,7 @@ impl Configuration { ret.snapshot_peers = self.snapshot_peers(); ret.allow_ips = try!(self.allow_ips()); ret.max_pending_peers = self.max_pending_peers(); - let mut net_path = PathBuf::from(self.directories().db); + let mut net_path = PathBuf::from(self.directories().data); net_path.push("network"); ret.config_path = Some(net_path.to_str().unwrap().to_owned()); ret.reserved_nodes = try!(self.init_reserved_nodes()); @@ -624,18 +683,11 @@ impl Configuration { fn directories(&self) -> Directories { use util::path; - let db_path = replace_home(self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); + let data_path = replace_home("", self.args.flag_datadir.as_ref().unwrap_or(&self.args.flag_db_path)); - let keys_path = replace_home( - if self.args.flag_testnet { - "$HOME/.parity/testnet_keys" - } else { - &self.args.flag_keys_path - } - ); - - let dapps_path = replace_home(&self.args.flag_dapps_path); - let ui_path = replace_home(&self.args.flag_ui_path); + let keys_path = replace_home(&data_path, &self.args.flag_keys_path); + let dapps_path = replace_home(&data_path, &self.args.flag_dapps_path); + let ui_path = replace_home(&data_path, &self.args.flag_ui_path); if self.args.flag_geth && !cfg!(windows) { let geth_root = if self.args.flag_testnet { path::ethereum::test() } else { path::ethereum::default() }; @@ -644,7 +696,7 @@ impl Configuration { } if cfg!(feature = "ipc") && !cfg!(feature = "windows") { - let mut path_buf = PathBuf::from(db_path.clone()); + let mut path_buf = PathBuf::from(data_path.clone()); path_buf.push("ipc"); let ipc_path = path_buf.to_str().unwrap(); ::std::fs::create_dir_all(ipc_path).unwrap_or_else( @@ -654,7 +706,7 @@ impl Configuration { Directories { keys: keys_path, - db: db_path, + data: data_path, dapps: dapps_path, signer: ui_path, } @@ -664,7 +716,7 @@ impl Configuration { if self.args.flag_geth { geth_ipc_path(self.args.flag_testnet) } else { - parity_ipc_path(&self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) + parity_ipc_path(&self.directories().data, &self.args.flag_ipcpath.clone().unwrap_or(self.args.flag_ipc_path.clone())) } } @@ -732,12 +784,14 @@ mod tests { use ethcore_rpc::NetworkSettings; use ethcore::client::{VMType, BlockId}; use ethcore::miner::{MinerOptions, PrioritizationStrategy}; - use helpers::{replace_home, default_network_config}; + use helpers::{default_network_config}; use run::RunCmd; + use dir::Directories; use signer::{Configuration as SignerConfiguration}; use blockchain::{BlockchainCmd, ImportBlockchain, ExportBlockchain, DataFormat, ExportState}; use presale::ImportWallet; - use account::{AccountCmd, NewAccount, ImportAccounts}; + use params::SpecType; + use account::{AccountCmd, NewAccount, ImportAccounts, ListAccounts}; use devtools::{RandomTempPath}; use std::io::Write; use std::fs::{File, create_dir}; @@ -764,8 +818,9 @@ mod tests { let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::New(NewAccount { iterations: 10240, - path: replace_home("$HOME/.parity/keys"), + path: Directories::default().keys, password_file: None, + spec: SpecType::default(), }))); } @@ -774,7 +829,10 @@ mod tests { let args = vec!["parity", "account", "list"]; let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account( - AccountCmd::List(replace_home("$HOME/.parity/keys")), + AccountCmd::List(ListAccounts 
{ + path: Directories::default().keys, + spec: SpecType::default(), + }) )); } @@ -784,7 +842,8 @@ mod tests { let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Account(AccountCmd::Import(ImportAccounts { from: vec!["my_dir".into(), "another_dir".into()], - to: replace_home("$HOME/.parity/keys"), + to: Directories::default().keys, + spec: SpecType::default(), }))); } @@ -794,9 +853,10 @@ mod tests { let conf = parse(&args); assert_eq!(conf.into_command().unwrap().cmd, Cmd::ImportPresaleWallet(ImportWallet { iterations: 10240, - path: replace_home("$HOME/.parity/keys"), + path: Directories::default().keys, wallet_path: "my_wallet.json".into(), password_file: Some("pwd".into()), + spec: SpecType::default(), })); } @@ -895,7 +955,7 @@ mod tests { fn test_command_signer_new_token() { let args = vec!["parity", "signer", "new-token"]; let conf = parse(&args); - let expected = replace_home("$HOME/.parity/signer"); + let expected = Directories::default().signer; assert_eq!(conf.into_command().unwrap().cmd, Cmd::SignerToken(SignerConfiguration { enabled: true, signer_path: expected, @@ -1128,4 +1188,3 @@ mod tests { assert!(conf.init_reserved_nodes().is_ok()); } } - diff --git a/parity/dapps.rs b/parity/dapps.rs index 4eed929ca..e54638ed2 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -20,6 +20,7 @@ use rpc_apis; use ethcore::client::Client; use ethsync::SyncProvider; use helpers::replace_home; +use dir::default_data_path; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { @@ -34,6 +35,7 @@ pub struct Configuration { impl Default for Configuration { fn default() -> Self { + let data_dir = default_data_path(); Configuration { enabled: true, interface: "127.0.0.1".into(), @@ -41,7 +43,7 @@ impl Default for Configuration { hosts: Some(Vec::new()), user: None, pass: None, - dapps_path: replace_home("$HOME/.parity/dapps"), + dapps_path: replace_home(&data_dir, "$DATA/dapps"), } } } diff --git a/parity/dir.rs b/parity/dir.rs index 0a49cd528..f9c2f30c9 100644 --- a/parity/dir.rs +++ b/parity/dir.rs @@ -19,6 +19,7 @@ use std::path::{PathBuf, Path}; use util::{H64, H256}; use util::journaldb::Algorithm; use helpers::replace_home; +use app_dirs::{AppInfo, get_app_root, AppDataType}; // this const is irrelevent cause we do have migrations now, // but we still use it for backwards compatibility @@ -26,7 +27,7 @@ const LEGACY_CLIENT_DB_VER_STR: &'static str = "5.3"; #[derive(Debug, PartialEq)] pub struct Directories { - pub db: String, + pub data: String, pub keys: String, pub signer: String, pub dapps: String, @@ -34,18 +35,19 @@ pub struct Directories { impl Default for Directories { fn default() -> Self { + let data_dir = default_data_path(); Directories { - db: replace_home("$HOME/.parity"), - keys: replace_home("$HOME/.parity/keys"), - signer: replace_home("$HOME/.parity/signer"), - dapps: replace_home("$HOME/.parity/dapps"), + data: replace_home(&data_dir, "$DATA"), + keys: replace_home(&data_dir, "$DATA/keys"), + signer: replace_home(&data_dir, "$DATA/signer"), + dapps: replace_home(&data_dir, "$DATA/dapps"), } } } impl Directories { pub fn create_dirs(&self, dapps_enabled: bool, signer_enabled: bool) -> Result<(), String> { - try!(fs::create_dir_all(&self.db).map_err(|e| e.to_string())); + try!(fs::create_dir_all(&self.data).map_err(|e| e.to_string())); try!(fs::create_dir_all(&self.keys).map_err(|e| e.to_string())); if signer_enabled { try!(fs::create_dir_all(&self.signer).map_err(|e| e.to_string())); @@ -57,20 +59,38 @@ impl Directories { } /// Database 
paths. - pub fn database(&self, genesis_hash: H256, fork_name: Option) -> DatabaseDirectories { + pub fn database(&self, genesis_hash: H256, fork_name: Option, spec_name: String) -> DatabaseDirectories { DatabaseDirectories { - path: self.db.clone(), + path: self.data.clone(), genesis_hash: genesis_hash, fork_name: fork_name, + spec_name: spec_name, } } /// Get the ipc sockets path pub fn ipc_path(&self) -> PathBuf { - let mut dir = Path::new(&self.db).to_path_buf(); + let mut dir = Path::new(&self.data).to_path_buf(); dir.push("ipc"); dir } + + // TODO: remove in 1.7 + pub fn legacy_keys_path(&self, testnet: bool) -> PathBuf { + let mut dir = Path::new(&self.data).to_path_buf(); + if testnet { + dir.push("testnet_keys"); + } else { + dir.push("keys"); + } + dir + } + + pub fn keys_path(&self, spec_name: &str) -> PathBuf { + let mut dir = PathBuf::from(&self.keys); + dir.push(spec_name); + dir + } } #[derive(Debug, PartialEq)] @@ -78,52 +98,103 @@ pub struct DatabaseDirectories { pub path: String, pub genesis_hash: H256, pub fork_name: Option, + pub spec_name: String, } impl DatabaseDirectories { /// Base DB directory for the given fork. - pub fn fork_path(&self) -> PathBuf { + // TODO: remove in 1.7 + pub fn legacy_fork_path(&self) -> PathBuf { let mut dir = Path::new(&self.path).to_path_buf(); dir.push(format!("{:?}{}", H64::from(self.genesis_hash), self.fork_name.as_ref().map(|f| format!("-{}", f)).unwrap_or_default())); dir } - /// Get the root path for database - pub fn version_path(&self, pruning: Algorithm) -> PathBuf { - let mut dir = self.fork_path(); - dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); + pub fn spec_root_path(&self) -> PathBuf { + let mut dir = Path::new(&self.path).to_path_buf(); + dir.push("chains"); + dir.push(&self.spec_name); dir } - /// Get the path for the databases given the genesis_hash and information on the databases. pub fn client_path(&self, pruning: Algorithm) -> PathBuf { - let mut dir = self.version_path(pruning); + let mut dir = self.db_root_path(); + dir.push(pruning.as_internal_name_str()); dir.push("db"); dir } + pub fn db_root_path(&self) -> PathBuf { + let mut dir = self.spec_root_path(); + dir.push("db"); + dir.push(H64::from(self.genesis_hash).hex()); + dir + } + + pub fn db_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.db_root_path(); + dir.push(pruning.as_internal_name_str()); + dir + } + + /// Get the root path for database + // TODO: remove in 1.7 + pub fn legacy_version_path(&self, pruning: Algorithm) -> PathBuf { + let mut dir = self.legacy_fork_path(); + dir.push(format!("v{}-sec-{}", LEGACY_CLIENT_DB_VER_STR, pruning.as_internal_name_str())); + dir + } + /// Get user defaults path + // TODO: remove in 1.7 + pub fn legacy_user_defaults_path(&self) -> PathBuf { + let mut dir = self.legacy_fork_path(); + dir.push("user_defaults"); + dir + } + + /// Get user defaults path + // TODO: remove in 1.7 + pub fn legacy_snapshot_path(&self) -> PathBuf { + let mut dir = self.legacy_fork_path(); + dir.push("snapshot"); + dir + } + + /// Get user defaults path + // TODO: remove in 1.7 + pub fn legacy_network_path(&self) -> PathBuf { + let mut dir = self.legacy_fork_path(); + dir.push("network"); + dir + } + pub fn user_defaults_path(&self) -> PathBuf { - let mut dir = self.fork_path(); + let mut dir = self.spec_root_path(); dir.push("user_defaults"); dir } /// Get the path for the snapshot directory given the genesis hash and fork name. 
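Aside: the reworked DatabaseDirectories moves chain data under <data>/chains/<spec>/db/<short genesis hash>/<pruning>. A path-only reconstruction of that layout under stated assumptions; the real code derives the short genesis component from H64::from(genesis_hash).hex(), and every component in main is a placeholder:

    use std::path::PathBuf;

    fn chain_db_path(data: &str, spec_name: &str, genesis_short: &str, pruning: &str) -> PathBuf {
        let mut dir = PathBuf::from(data);
        dir.push("chains");
        dir.push(spec_name);     // spec_root_path()
        dir.push("db");
        dir.push(genesis_short); // db_root_path()
        dir.push(pruning);       // db_path()
        dir
    }

    fn main() {
        let dir = chain_db_path("/home/alice/.local/share/parity", "morden", "0011223344556677", "archive");
        println!("{}", dir.display());
    }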
pub fn snapshot_path(&self) -> PathBuf { - let mut dir = self.fork_path(); + let mut dir = self.db_root_path(); dir.push("snapshot"); dir } /// Get the path for the network directory. pub fn network_path(&self) -> PathBuf { - let mut dir = self.fork_path(); + let mut dir = self.spec_root_path(); dir.push("network"); dir } } +pub fn default_data_path() -> String { + let app_info = AppInfo { name: "parity", author: "parity" }; + get_app_root(AppDataType::UserData, &app_info).map(|p| p.to_string_lossy().into_owned()).unwrap_or_else(|_| "$HOME/.parity".to_owned()) +} + #[cfg(test)] mod tests { use super::Directories; @@ -131,11 +202,12 @@ mod tests { #[test] fn test_default_directories() { + let data_dir = super::default_data_path(); let expected = Directories { - db: replace_home("$HOME/.parity"), - keys: replace_home("$HOME/.parity/keys"), - signer: replace_home("$HOME/.parity/signer"), - dapps: replace_home("$HOME/.parity/dapps"), + data: replace_home(&data_dir, "$DATA"), + keys: replace_home(&data_dir, "$DATA/keys"), + signer: replace_home(&data_dir, "$DATA/signer"), + dapps: replace_home(&data_dir, "$DATA/dapps"), }; assert_eq!(expected, Directories::default()); } diff --git a/parity/helpers.rs b/parity/helpers.rs index d1b1e4027..f359d617f 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -24,7 +24,7 @@ use ethcore::client::{Mode, BlockId, VMType, DatabaseCompactionProfile, ClientCo use ethcore::miner::{PendingSet, GasLimit, PrioritizationStrategy}; use cache::CacheConfig; use dir::DatabaseDirectories; -use upgrade::upgrade; +use upgrade::{upgrade, upgrade_data_paths}; use migration::migrate; use ethsync::is_valid_node_url; @@ -132,9 +132,10 @@ pub fn to_price(s: &str) -> Result { } /// Replaces `$HOME` str with home directory path. -pub fn replace_home(arg: &str) -> String { +pub fn replace_home(base: &str, arg: &str) -> String { // the $HOME directory on mac os should be `~/Library` or `~/Library/Application Support` let r = arg.replace("$HOME", env::home_dir().unwrap().to_str().unwrap()); + let r = r.replace("$DATA", base ); r.replace("/", &::std::path::MAIN_SEPARATOR.to_string() ) } @@ -159,13 +160,13 @@ pub fn geth_ipc_path(testnet: bool) -> String { } /// Formats and returns parity ipc path. -pub fn parity_ipc_path(s: &str) -> String { +pub fn parity_ipc_path(base: &str, s: &str) -> String { // Windows path should not be hardcoded here. if cfg!(windows) { return r"\\.\pipe\parity.jsonrpc".to_owned(); } - replace_home(s) + replace_home(base, s) } /// Validates and formats bootnodes option. 
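Aside: replace_home in parity/helpers.rs now takes a base directory and substitutes a new $DATA placeholder in addition to $HOME, which is what lets the CLI defaults be written as "$DATA/keys", "$DATA/signer" and so on. A simplified stand-in for that substitution (the real function uses env::home_dir() and also normalises path separators):

    use std::env;

    fn replace_home(base: &str, arg: &str) -> String {
        let home = env::var("HOME").unwrap_or_default();
        arg.replace("$HOME", &home).replace("$DATA", base)
    }

    fn main() {
        // The data directory below is made up for the example.
        let keys = replace_home("/home/alice/.local/share/parity", "$DATA/keys");
        assert_eq!(keys, "/home/alice/.local/share/parity/keys");
    }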
@@ -187,7 +188,7 @@ pub fn to_bootnodes(bootnodes: &Option) -> Result, String> { pub fn default_network_config() -> ::ethsync::NetworkConfiguration { use ethsync::{NetworkConfiguration, AllowIP}; NetworkConfiguration { - config_path: Some(replace_home("$HOME/.parity/network")), + config_path: Some(replace_home(&::dir::default_data_path(), "$DATA/network")), net_config_path: None, listen_address: Some("0.0.0.0:30303".into()), public_address: None, @@ -261,6 +262,8 @@ pub fn execute_upgrades( compaction_profile: CompactionProfile ) -> Result<(), String> { + upgrade_data_paths(dirs, pruning); + match upgrade(Some(&dirs.path)) { Ok(upgrades_applied) if upgrades_applied > 0 => { debug!("Executed {} upgrade scripts - ok", upgrades_applied); @@ -271,7 +274,7 @@ pub fn execute_upgrades( _ => {}, } - let client_path = dirs.version_path(pruning); + let client_path = dirs.db_path(pruning); migrate(&client_path, pruning, compaction_profile).map_err(|e| format!("{}", e)) } diff --git a/parity/informant.rs b/parity/informant.rs index 1991146ea..99a3f17fe 100644 --- a/parity/informant.rs +++ b/parity/informant.rs @@ -23,7 +23,7 @@ use std::sync::atomic::{AtomicUsize, Ordering as AtomicOrdering}; use std::time::{Instant, Duration}; use isatty::{stdout_isatty}; use ethsync::{SyncProvider, ManageNetwork}; -use util::{Uint, RwLock, Mutex, H256, Colour}; +use util::{Uint, RwLock, Mutex, H256, Colour, Bytes}; use ethcore::client::*; use ethcore::views::BlockView; use ethcore::snapshot::service::Service as SnapshotService; @@ -176,14 +176,13 @@ impl Informant { } impl ChainNotify for Informant { - fn new_blocks(&self, imported: Vec, _invalid: Vec, _enacted: Vec, _retracted: Vec, _sealed: Vec, duration: u64) { + fn new_blocks(&self, imported: Vec, _invalid: Vec, _enacted: Vec, _retracted: Vec, _sealed: Vec, _proposed: Vec, duration: u64) { let mut last_import = self.last_import.lock(); let sync_state = self.sync.as_ref().map(|s| s.status().state); let importing = is_major_importing(sync_state, self.client.queue_info()); - let ripe = Instant::now() > *last_import + Duration::from_secs(1) && !importing; let txs_imported = imported.iter() - .take(imported.len() - if ripe {1} else {0}) + .take(imported.len().saturating_sub(if ripe { 1 } else { 0 })) .filter_map(|h| self.client.block(BlockId::Hash(*h))) .map(|b| BlockView::new(&b).transactions_count()) .sum(); diff --git a/parity/main.rs b/parity/main.rs index 8c1f88e1e..2feb9b8fa 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -54,6 +54,7 @@ extern crate ansi_term; extern crate regex; extern crate isatty; extern crate toml; +extern crate app_dirs; #[macro_use] extern crate ethcore_util as util; @@ -70,6 +71,8 @@ extern crate ethcore_stratum; #[cfg(feature = "dapps")] extern crate ethcore_dapps; +extern crate rpc_cli; + macro_rules! 
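Aside: the informant change swaps a plain subtraction for saturating_sub, because `imported.len() - 1` on an empty import list underflows a usize (panicking in debug builds) whenever the `ripe` condition holds. A small illustration of the difference:

    fn main() {
        let imported: Vec<u64> = Vec::new();
        let ripe = true;

        // The old `imported.len() - if ripe {1} else {0}` underflows when
        // `imported` is empty but `ripe` is true; saturating_sub bottoms out at zero.
        let take = imported.len().saturating_sub(if ripe { 1 } else { 0 });
        assert_eq!(take, 0);
    }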
dependency { ($dep_ty:ident, $url:expr) => { { @@ -146,6 +149,12 @@ fn execute(command: Execute) -> Result { Cmd::ImportPresaleWallet(presale_cmd) => presale::execute(presale_cmd), Cmd::Blockchain(blockchain_cmd) => blockchain::execute(blockchain_cmd), Cmd::SignerToken(signer_cmd) => signer::execute(signer_cmd), + Cmd::SignerSign { id, pwfile, port, authfile } => + rpc_cli::cmd_signer_sign(id, pwfile, port, authfile), + Cmd::SignerList { port, authfile } => + rpc_cli::cmd_signer_list(port, authfile), + Cmd::SignerReject { id, port, authfile } => + rpc_cli::cmd_signer_reject(id, port, authfile), Cmd::Snapshot(snapshot_cmd) => snapshot::execute(snapshot_cmd), } } diff --git a/parity/params.rs b/parity/params.rs index 81db4165d..9f549acee 100644 --- a/parity/params.rs +++ b/parity/params.rs @@ -49,7 +49,7 @@ impl str::FromStr for SpecType { let spec = match s { "frontier" | "homestead" | "mainnet" => SpecType::Mainnet, "frontier-dogmatic" | "homestead-dogmatic" | "classic" => SpecType::Classic, - "morden" | "testnet" => SpecType::Testnet, + "morden" | "testnet" | "classic-testnet" => SpecType::Testnet, "ropsten" => SpecType::Ropsten, "olympic" => SpecType::Olympic, "expanse" => SpecType::Expanse, @@ -76,6 +76,14 @@ impl SpecType { } } } + + pub fn legacy_fork_name(&self) -> Option { + match *self { + SpecType::Classic => Some("classic".to_owned()), + SpecType::Expanse => Some("expanse".to_owned()), + _ => None, + } + } } #[derive(Debug, PartialEq)] @@ -288,6 +296,8 @@ mod tests { assert_eq!(SpecType::Testnet, "morden".parse().unwrap()); assert_eq!(SpecType::Ropsten, "ropsten".parse().unwrap()); assert_eq!(SpecType::Olympic, "olympic".parse().unwrap()); + assert_eq!(SpecType::Classic, "classic".parse().unwrap()); + assert_eq!(SpecType::Testnet, "classic-testnet".parse().unwrap()); } #[test] diff --git a/parity/presale.rs b/parity/presale.rs index 7754e7e18..d8e19a96a 100644 --- a/parity/presale.rs +++ b/parity/presale.rs @@ -18,11 +18,13 @@ use ethcore::ethstore::{PresaleWallet, EthStore}; use ethcore::ethstore::dir::DiskDirectory; use ethcore::account_provider::AccountProvider; use helpers::{password_prompt, password_from_file}; +use params::SpecType; #[derive(Debug, PartialEq)] pub struct ImportWallet { pub iterations: u32, pub path: String, + pub spec: SpecType, pub wallet_path: String, pub password_file: Option, } diff --git a/parity/rpc.rs b/parity/rpc.rs index 2551b0736..b0668bf57 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -23,6 +23,7 @@ use ethcore_rpc::{RpcServerError, RpcServer as Server, IpcServerError}; use rpc_apis; use rpc_apis::ApiSet; use helpers::parity_ipc_path; +use dir::default_data_path; pub use ethcore_rpc::{IpcServer, Server as HttpServer}; @@ -58,9 +59,10 @@ pub struct IpcConfiguration { impl Default for IpcConfiguration { fn default() -> Self { + let data_dir = default_data_path(); IpcConfiguration { enabled: true, - socket_addr: parity_ipc_path("$HOME/.parity/jsonrpc.ipc"), + socket_addr: parity_ipc_path(&data_dir, "$DATA/jsonrpc.ipc"), apis: ApiSet::IpcContext, } } diff --git a/parity/run.rs b/parity/run.rs index e59df7039..140c2050c 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -41,6 +41,7 @@ use params::{ tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool }; use helpers::{to_client_config, execute_upgrades, passwords_from_files}; +use upgrade::upgrade_key_location; use dir::Directories; use cache::CacheConfig; use user_defaults::UserDefaults; @@ -129,9 +130,6 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { // increase 
max number of open files raise_fd_limit(); - // create dirs used by parity - try!(cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled)); - // load spec let spec = try!(cmd.spec.spec()); @@ -139,7 +137,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { let genesis_hash = spec.genesis_header().hash(); // database paths - let db_dirs = cmd.dirs.database(genesis_hash, spec.fork_name.clone()); + let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); // user defaults path let user_defaults_path = db_dirs.user_defaults_path(); @@ -166,7 +164,10 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.fork_path().as_path()))); + try!(execute_upgrades(&db_dirs, algorithm, cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()))); + + // create dirs used by parity + try!(cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled)); // run in daemon mode if let Some(pid_file) = cmd.daemon { @@ -175,7 +176,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { // display info about used pruning algorithm info!("Starting {}", Colour::White.bold().paint(version())); - info!("State DB configuation: {}{}{}", + info!("State DB configuration: {}{}{}", Colour::White.bold().paint(algorithm.as_str()), match fat_db { true => Colour::White.bold().paint(" +Fat").to_string(), @@ -217,7 +218,7 @@ pub fn execute(cmd: RunCmd, logger: Arc) -> Result<(), String> { let passwords = try!(passwords_from_files(&cmd.acc_conf.password_files)); // prepare account provider - let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, cmd.acc_conf, &passwords))); + let account_provider = Arc::new(try!(prepare_account_provider(&cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords))); // let the Engine access the accounts spec.engine.register_account_provider(account_provider.clone()); @@ -449,11 +450,13 @@ fn daemonize(_pid_file: String) -> Result<(), String> { Err("daemon is no supported on windows".into()) } -fn prepare_account_provider(dirs: &Directories, cfg: AccountsConfig, passwords: &[String]) -> Result { +fn prepare_account_provider(dirs: &Directories, data_dir: &str, cfg: AccountsConfig, passwords: &[String]) -> Result { use ethcore::ethstore::EthStore; use ethcore::ethstore::dir::DiskDirectory; - let dir = Box::new(try!(DiskDirectory::create(dirs.keys.clone()).map_err(|e| format!("Could not open keys directory: {}", e)))); + let path = dirs.keys_path(data_dir); + upgrade_key_location(&dirs.legacy_keys_path(cfg.testnet), &path); + let dir = Box::new(try!(DiskDirectory::create(&path).map_err(|e| format!("Could not open keys directory: {}", e)))); let account_service = AccountProvider::new(Box::new( try!(EthStore::open_with_iterations(dir, cfg.iterations).map_err(|e| format!("Could not open keys directory: {}", e))) )); diff --git a/parity/signer.rs b/parity/signer.rs index d247453f1..4d282888e 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -23,6 +23,7 @@ use util::path::restrict_permissions_owner; use rpc_apis; use ethcore_signer as signer; use helpers::replace_home; +use dir::default_data_path; pub use ethcore_signer::Server as SignerServer; const CODES_FILENAME: &'static str = "authcodes"; @@ -38,11 +39,12 @@ pub struct Configuration { impl Default for Configuration { fn default() -> Self { + let data_dir = 
default_data_path(); Configuration { enabled: true, port: 8180, interface: "127.0.0.1".into(), - signer_path: replace_home("$HOME/.parity/signer"), + signer_path: replace_home(&data_dir, "$DATA/signer"), skip_origin_validation: false, } } diff --git a/parity/snapshot.rs b/parity/snapshot.rs index 2963e9b84..d71ffe924 100644 --- a/parity/snapshot.rs +++ b/parity/snapshot.rs @@ -143,7 +143,7 @@ impl SnapshotCommand { let genesis_hash = spec.genesis_header().hash(); // database paths - let db_dirs = self.dirs.database(genesis_hash, spec.fork_name.clone()); + let db_dirs = self.dirs.database(genesis_hash, None, spec.data_dir.clone()); // user defaults path let user_defaults_path = db_dirs.user_defaults_path(); @@ -167,7 +167,7 @@ impl SnapshotCommand { let snapshot_path = db_dirs.snapshot_path(); // execute upgrades - try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.fork_path().as_path()))); + try!(execute_upgrades(&db_dirs, algorithm, self.compaction.compaction_profile(db_dirs.db_root_path().as_path()))); // prepare client config let client_config = to_client_config(&self.cache_config, Mode::Active, tracing, fat_db, self.compaction, self.wal, VMType::default(), "".into(), algorithm, self.pruning_history, true); diff --git a/parity/upgrade.rs b/parity/upgrade.rs index 401f6a722..5c49cb997 100644 --- a/parity/upgrade.rs +++ b/parity/upgrade.rs @@ -18,10 +18,14 @@ use semver::Version; use std::collections::*; -use std::fs::{File, create_dir_all}; +use std::fs::{self, File, create_dir_all}; use std::env; +use std::io; use std::io::{Read, Write}; -use std::path::PathBuf; +use std::path::{PathBuf, Path}; +use dir::{DatabaseDirectories, default_data_path}; +use helpers::replace_home; +use util::journaldb::Algorithm; #[cfg_attr(feature="dev", allow(enum_variant_names))] #[derive(Debug)] @@ -126,3 +130,84 @@ pub fn upgrade(db_path: Option<&str>) -> Result { upgrade_from_version(ver) }) } + +fn file_exists(path: &Path) -> bool { + match fs::metadata(&path) { + Err(ref e) if e.kind() == io::ErrorKind::NotFound => false, + _ => true, + } +} + +pub fn upgrade_key_location(from: &PathBuf, to: &PathBuf) { + let mut parent = to.clone(); + parent.pop(); + match fs::create_dir_all(&parent).and_then(|()| fs::read_dir(from)) { + Ok(entries) => { + let files: Vec<_> = entries.filter_map(|f| f.ok().and_then(|f| if f.file_type().ok().map_or(false, |f| f.is_file()) { f.file_name().to_str().map(|s| s.to_owned()) } else { None })).collect(); + let mut num: usize = 0; + for name in files { + let mut from = from.clone(); + from.push(&name); + let mut to = to.clone(); + to.push(&name); + if !file_exists(&to) { + if let Err(e) = fs::rename(&from, &to) { + debug!("Error upgrading key {:?}: {:?}", from, e); + } else { + num += 1; + } + } else { + debug!("Skipped upgrading key {:?}", from); + } + } + if num > 0 { + info!("Moved {} keys from {} to {}", num, from.to_string_lossy(), to.to_string_lossy()); + } + }, + Err(e) => { + warn!("Error moving keys from {:?} to {:?}: {:?}", from, to, e); + } + } +} + +fn upgrade_dir_location(source: &PathBuf, dest: &PathBuf) { + if file_exists(&source) { + if !file_exists(&dest) { + let mut parent = dest.clone(); + parent.pop(); + if let Err(e) = fs::create_dir_all(&parent).and_then(|()| fs::rename(&source, &dest)) { + debug!("Skipped path {:?} -> {:?} :{:?}", source, dest, e); + } else { + info!("Moved {} to {}", source.to_string_lossy(), dest.to_string_lossy()); + } + } else { + debug!("Skipped upgrading directory {:?}, Destination already exists at 
{:?}", source, dest); + } + } +} + +fn upgrade_user_defaults(dirs: &DatabaseDirectories) { + let source = dirs.legacy_user_defaults_path(); + let dest = dirs.user_defaults_path(); + if file_exists(&source) { + if !file_exists(&dest) { + if let Err(e) = fs::rename(&source, &dest) { + debug!("Skipped upgrading user defaults {:?}:{:?}", dest, e); + } + } else { + debug!("Skipped upgrading user defaults {:?}, File exists at {:?}", source, dest); + } + } +} + +pub fn upgrade_data_paths(dirs: &DatabaseDirectories, pruning: Algorithm) { + let legacy_root_path = replace_home("", "$HOME/.parity"); + let default_path = default_data_path(); + if legacy_root_path != dirs.path && dirs.path == default_path { + upgrade_dir_location(&PathBuf::from(legacy_root_path), &PathBuf::from(&dirs.path)); + } + upgrade_dir_location(&dirs.legacy_version_path(pruning), &dirs.db_path(pruning)); + upgrade_dir_location(&dirs.legacy_snapshot_path(), &dirs.snapshot_path()); + upgrade_dir_location(&dirs.legacy_network_path(), &dirs.network_path()); + upgrade_user_defaults(&dirs); +} diff --git a/rpc/Cargo.toml b/rpc/Cargo.toml index 5d974e59f..d766af674 100644 --- a/rpc/Cargo.toml +++ b/rpc/Cargo.toml @@ -15,6 +15,7 @@ serde_json = "0.8" jsonrpc-core = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-http-server = { git = "https://github.com/ethcore/jsonrpc.git" } jsonrpc-ipc-server = { git = "https://github.com/ethcore/jsonrpc.git" } +jsonrpc-macros = { git = "https://github.com/ethcore/jsonrpc.git" } ethcore-io = { path = "../util/io" } ethcore-util = { path = "../util" } ethcore = { path = "../ethcore" } diff --git a/rpc/src/lib.rs b/rpc/src/lib.rs index bbd4a5164..c5af5d727 100644 --- a/rpc/src/lib.rs +++ b/rpc/src/lib.rs @@ -42,6 +42,8 @@ extern crate fetch; extern crate log; #[macro_use] extern crate ethcore_util as util; +#[macro_use] +extern crate jsonrpc_macros; #[cfg(test)] extern crate ethjson; diff --git a/rpc/src/v1/helpers/auto_args.rs b/rpc/src/v1/helpers/auto_args.rs deleted file mode 100644 index 9c4e1d74a..000000000 --- a/rpc/src/v1/helpers/auto_args.rs +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2015, 2016 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Automatically serialize and deserialize parameters around a strongly-typed function. - -// because we reuse the type names as idents in the macros as a dirty hack to -// work around `concat_idents!` being unstable. -#![allow(non_snake_case)] - -use super::errors; - -use jsonrpc_core::{Error, Params, Value, from_params, to_value}; -use serde::{Serialize, Deserialize}; - -/// Auto-generates an RPC trait from trait definition. -/// -/// This just copies out all the methods, docs, and adds another -/// function `to_delegate` which will automatically wrap each strongly-typed -/// function in a wrapper which handles parameter and output type serialization. -/// -/// RPC functions may come in a couple forms: async and synchronous. 
-/// These are parsed with the custom `#[rpc]` attribute, which must follow -/// documentation. -/// -/// ## The #[rpc] attribute -/// -/// Valid forms: -/// - `#[rpc(name = "name_here")]` (a synchronous rpc function which should be bound to the given name) -/// - `#[rpc(async, name = "name_here")]` (an async rpc function which should be bound to the given name) -/// -/// Synchronous function format: -/// `fn foo(&self, Param1, Param2, Param3) -> Out`. -/// -/// Asynchronous RPC functions must come in this form: -/// `fn foo(&self, Param1, Param2, Param3, Ready); -/// -/// Anything else will be rejected by the code generator. -macro_rules! build_rpc_trait { - // entry-point. todo: make another for traits w/ bounds. - ( - $(#[$t_attr: meta])* - pub trait $name: ident { - $( - $( #[doc=$m_doc:expr] )* - #[ rpc( $($t:tt)* ) ] - fn $m_name: ident ( $($p: tt)* ) $( -> Result<$out: ty, Error> )* ; - )* - } - ) => { - $(#[$t_attr])* - pub trait $name: Sized + Send + Sync + 'static { - $( - $(#[doc=$m_doc])* - fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* ; - )* - - /// Transform this into an `IoDelegate`, automatically wrapping - /// the parameters. - fn to_delegate(self) -> ::jsonrpc_core::IoDelegate { - let mut del = ::jsonrpc_core::IoDelegate::new(self.into()); - $( - build_rpc_trait!(WRAP del => - ( $($t)* ) - fn $m_name ( $($p)* ) $( -> Result<$out, Error> )* - ); - )* - del - } - } - }; - - ( WRAP $del: expr => - (name = $name: expr) - fn $method: ident (&self $(, $param: ty)*) -> Result<$out: ty, Error> - ) => { - $del.add_method($name, move |base, params| { - (Self::$method as fn(&_ $(, $param)*) -> Result<$out, Error>).wrap_rpc(base, params) - }) - }; - - ( WRAP $del: expr => - (async, name = $name: expr) - fn $method: ident (&self, Ready<$out: ty> $(, $param: ty)*) - ) => { - $del.add_async_method($name, move |base, params, ready| { - (Self::$method as fn(&_, Ready<$out> $(, $param)*)).wrap_rpc(base, params, ready) - }) - }; -} - -/// A wrapper type without an implementation of `Deserialize` -/// which allows a special implementation of `Wrap` for functions -/// that take a trailing default parameter. -pub struct Trailing(pub T); - -/// A wrapper type for `jsonrpc_core`'s weakly-typed `Ready` struct. -pub struct Ready { - inner: ::jsonrpc_core::Ready, - _marker: ::std::marker::PhantomData, -} - -impl From<::jsonrpc_core::Ready> for Ready { - fn from(ready: ::jsonrpc_core::Ready) -> Self { - Ready { inner: ready, _marker: ::std::marker::PhantomData } - } -} - -impl Ready { - /// Respond withthe asynchronous result. - pub fn ready(self, result: Result) { - self.inner.ready(result.map(to_value)) - } -} - -/// Wrapper trait for synchronous RPC functions. -pub trait Wrap { - fn wrap_rpc(&self, base: &B, params: Params) -> Result; -} - -/// Wrapper trait for asynchronous RPC functions. -pub trait WrapAsync { - fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready); -} - -// special impl for no parameters. 
-impl Wrap for fn(&B) -> Result - where B: Send + Sync + 'static, OUT: Serialize -{ - fn wrap_rpc(&self, base: &B, params: Params) -> Result { - ::v1::helpers::params::expect_no_params(params) - .and_then(|()| (self)(base)) - .map(to_value) - } -} - -impl WrapAsync for fn(&B, Ready) - where B: Send + Sync + 'static, OUT: Serialize -{ - fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { - match ::v1::helpers::params::expect_no_params(params) { - Ok(()) => (self)(base, ready.into()), - Err(e) => ready.ready(Err(e)), - } - } -} - -// creates a wrapper implementation which deserializes the parameters, -// calls the function with concrete type, and serializes the output. -macro_rules! wrap { - ($($x: ident),+) => { - - // synchronous implementation - impl < - BASE: Send + Sync + 'static, - OUT: Serialize, - $($x: Deserialize,)+ - > Wrap for fn(&BASE, $($x,)+) -> Result { - fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { - from_params::<($($x,)+)>(params).and_then(|($($x,)+)| { - (self)(base, $($x,)+) - }).map(to_value) - } - } - - // asynchronous implementation - impl < - BASE: Send + Sync + 'static, - OUT: Serialize, - $($x: Deserialize,)+ - > WrapAsync for fn(&BASE, Ready, $($x,)+ ) { - fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { - match from_params::<($($x,)+)>(params) { - Ok(($($x,)+)) => (self)(base, ready.into(), $($x,)+), - Err(e) => ready.ready(Err(e)), - } - } - } - } -} - -// special impl for no parameters other than block parameter. -impl Wrap for fn(&B, Trailing) -> Result - where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize -{ - fn wrap_rpc(&self, base: &B, params: Params) -> Result { - let len = match params { - Params::Array(ref v) => v.len(), - Params::None => 0, - _ => return Err(errors::invalid_params("not an array", "")), - }; - - let (id,) = match len { - 0 => (T::default(),), - 1 => try!(from_params::<(T,)>(params)), - _ => return Err(Error::invalid_params()), - }; - - (self)(base, Trailing(id)).map(to_value) - } -} - -impl WrapAsync for fn(&B, Ready, Trailing) - where B: Send + Sync + 'static, OUT: Serialize, T: Default + Deserialize -{ - fn wrap_rpc(&self, base: &B, params: Params, ready: ::jsonrpc_core::Ready) { - let len = match params { - Params::Array(ref v) => v.len(), - Params::None => 0, - _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), - }; - - let id = match len { - 0 => Ok((T::default(),)), - 1 => from_params::<(T,)>(params), - _ => Err(Error::invalid_params()), - }; - - match id { - Ok((id,)) => (self)(base, ready.into(), Trailing(id)), - Err(e) => ready.ready(Err(e)), - } - } -} - -// similar to `wrap!`, but handles a single default trailing parameter -// accepts an additional argument indicating the number of non-trailing parameters. -macro_rules! 
wrap_with_trailing { - ($num: expr, $($x: ident),+) => { - // synchronous implementation - impl < - BASE: Send + Sync + 'static, - OUT: Serialize, - $($x: Deserialize,)+ - TRAILING: Default + Deserialize, - > Wrap for fn(&BASE, $($x,)+ Trailing) -> Result { - fn wrap_rpc(&self, base: &BASE, params: Params) -> Result { - let len = match params { - Params::Array(ref v) => v.len(), - Params::None => 0, - _ => return Err(errors::invalid_params("not an array", "")), - }; - - let params = match len - $num { - 0 => from_params::<($($x,)+)>(params) - .map(|($($x,)+)| ($($x,)+ TRAILING::default())), - 1 => from_params::<($($x,)+ TRAILING)>(params) - .map(|($($x,)+ id)| ($($x,)+ id)), - _ => Err(Error::invalid_params()), - }; - - let ($($x,)+ id) = try!(params); - (self)(base, $($x,)+ Trailing(id)).map(to_value) - } - } - - // asynchronous implementation - impl < - BASE: Send + Sync + 'static, - OUT: Serialize, - $($x: Deserialize,)+ - TRAILING: Default + Deserialize, - > WrapAsync for fn(&BASE, Ready, $($x,)+ Trailing) { - fn wrap_rpc(&self, base: &BASE, params: Params, ready: ::jsonrpc_core::Ready) { - let len = match params { - Params::Array(ref v) => v.len(), - Params::None => 0, - _ => return ready.ready(Err(errors::invalid_params("not an array", ""))), - }; - - let params = match len - $num { - 0 => from_params::<($($x,)+)>(params) - .map(|($($x,)+)| ($($x,)+ TRAILING::default())), - 1 => from_params::<($($x,)+ TRAILING)>(params) - .map(|($($x,)+ id)| ($($x,)+ id)), - _ => Err(Error::invalid_params()), - }; - - match params { - Ok(($($x,)+ id)) => (self)(base, ready.into(), $($x,)+ Trailing(id)), - Err(e) => ready.ready(Err(e)) - } - } - } - } -} - -wrap!(A, B, C, D, E); -wrap!(A, B, C, D); -wrap!(A, B, C); -wrap!(A, B); -wrap!(A); - -wrap_with_trailing!(5, A, B, C, D, E); -wrap_with_trailing!(4, A, B, C, D); -wrap_with_trailing!(3, A, B, C); -wrap_with_trailing!(2, A, B); -wrap_with_trailing!(1, A); \ No newline at end of file diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 93e99646b..122f8126b 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::fmt::Debug; +use std::ops::Deref; use rlp; use util::{Address, H256, U256, Uint, Bytes}; use util::bytes::ToPretty; @@ -37,46 +39,112 @@ use v1::types::{ pub const DEFAULT_MAC: [u8; 2] = [0, 0]; -pub fn execute(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: Option) -> Result +type AccountToken = String; + +#[derive(Debug, Clone, PartialEq)] +pub enum SignWith { + Nothing, + Password(String), + Token(AccountToken), +} + +#[derive(Debug)] +pub enum WithToken { + No(T), + Yes(T, AccountToken), +} + +impl Deref for WithToken { + type Target = T; + + fn deref(&self) -> &Self::Target { + match *self { + WithToken::No(ref v) => v, + WithToken::Yes(ref v, _) => v, + } + } +} + +impl WithToken { + pub fn map(self, f: F) -> WithToken where + S: Debug, + F: FnOnce(T) -> S, + { + match self { + WithToken::No(v) => WithToken::No(f(v)), + WithToken::Yes(v, token) => WithToken::Yes(f(v), token), + } + } + + pub fn into_value(self) -> T { + match self { + WithToken::No(v) => v, + WithToken::Yes(v, _) => v, + } + } +} + +impl From<(T, AccountToken)> for WithToken { + fn from(tuple: (T, AccountToken)) -> Self { + WithToken::Yes(tuple.0, tuple.1) + } +} + +pub fn execute(client: &C, miner: &M, accounts: &AccountProvider, payload: ConfirmationPayload, pass: SignWith) -> Result, Error> where C: MiningBlockChainClient, M: MinerService { match payload { ConfirmationPayload::SendTransaction(request) => { sign_and_dispatch(client, miner, accounts, request, pass) - .map(RpcH256::from) - .map(ConfirmationResponse::SendTransaction) + .map(|result| result + .map(RpcH256::from) + .map(ConfirmationResponse::SendTransaction) + ) }, ConfirmationPayload::SignTransaction(request) => { sign_no_dispatch(client, miner, accounts, request, pass) - .map(RpcRichRawTransaction::from) - .map(ConfirmationResponse::SignTransaction) + .map(|result| result + .map(RpcRichRawTransaction::from) + .map(ConfirmationResponse::SignTransaction) + ) }, ConfirmationPayload::Signature(address, hash) => { signature(accounts, address, hash, pass) - .map(RpcH520::from) - .map(ConfirmationResponse::Signature) + .map(|result| result + .map(RpcH520::from) + .map(ConfirmationResponse::Signature) + ) }, ConfirmationPayload::Decrypt(address, data) => { decrypt(accounts, address, data, pass) - .map(RpcBytes) - .map(ConfirmationResponse::Decrypt) + .map(|result| result + .map(RpcBytes) + .map(ConfirmationResponse::Decrypt) + ) }, } } -fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: Option) -> Result { - accounts.sign(address, password.clone(), hash).map_err(|e| match password { - Some(_) => errors::from_password_error(e), - None => errors::from_signing_error(e), +fn signature(accounts: &AccountProvider, address: Address, hash: H256, password: SignWith) -> Result, Error> { + match password.clone() { + SignWith::Nothing => accounts.sign(address, None, hash).map(WithToken::No), + SignWith::Password(pass) => accounts.sign(address, Some(pass), hash).map(WithToken::No), + SignWith::Token(token) => accounts.sign_with_token(address, token, hash).map(Into::into), + }.map_err(|e| match password { + SignWith::Nothing => errors::from_signing_error(e), + _ => errors::from_password_error(e), }) } -fn decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: Option) -> Result { - accounts.decrypt(address, password.clone(), &DEFAULT_MAC, &msg) - .map_err(|e| match password { - Some(_) => errors::from_password_error(e), - None => errors::from_signing_error(e), - }) +fn 
decrypt(accounts: &AccountProvider, address: Address, msg: Bytes, password: SignWith) -> Result, Error> { + match password.clone() { + SignWith::Nothing => accounts.decrypt(address, None, &DEFAULT_MAC, &msg).map(WithToken::No), + SignWith::Password(pass) => accounts.decrypt(address, Some(pass), &DEFAULT_MAC, &msg).map(WithToken::No), + SignWith::Token(token) => accounts.decrypt_with_token(address, token, &DEFAULT_MAC, &msg).map(Into::into), + }.map_err(|e| match password { + SignWith::Nothing => errors::from_signing_error(e), + _ => errors::from_password_error(e), + }) } pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: SignedTransaction) -> Result @@ -88,7 +156,7 @@ pub fn dispatch_transaction(client: &C, miner: &M, signed_transaction: Sig .map(|_| hash) } -pub fn sign_no_dispatch(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: Option) -> Result +pub fn sign_no_dispatch(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result, Error> where C: MiningBlockChainClient, M: MinerService { let network_id = client.signing_network_id(); @@ -110,20 +178,32 @@ pub fn sign_no_dispatch(client: &C, miner: &M, accounts: &AccountProvider, let hash = t.hash(network_id); let signature = try!(signature(accounts, address, hash, password)); - t.with_signature(signature, network_id) + signature.map(|sig| { + t.with_signature(sig, network_id) + }) }; Ok(signed_transaction) } -pub fn sign_and_dispatch(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: Option) -> Result +pub fn sign_and_dispatch(client: &C, miner: &M, accounts: &AccountProvider, filled: FilledTransactionRequest, password: SignWith) -> Result, Error> where C: MiningBlockChainClient, M: MinerService { let network_id = client.signing_network_id(); let signed_transaction = try!(sign_no_dispatch(client, miner, accounts, filled, password)); + let (signed_transaction, token) = match signed_transaction { + WithToken::No(signed_transaction) => (signed_transaction, None), + WithToken::Yes(signed_transaction, token) => (signed_transaction, Some(token)), + }; + trace!(target: "miner", "send_transaction: dispatching tx: {} for network ID {:?}", rlp::encode(&signed_transaction).to_vec().pretty(), network_id); - dispatch_transaction(&*client, &*miner, signed_transaction) + dispatch_transaction(&*client, &*miner, signed_transaction).map(|hash| { + match token { + Some(ref token) => WithToken::Yes(hash, token.clone()), + None => WithToken::No(hash), + } + }) } pub fn fill_optional_fields(request: TransactionRequest, client: &C, miner: &M) -> FilledTransactionRequest diff --git a/rpc/src/v1/helpers/mod.rs b/rpc/src/v1/helpers/mod.rs index 3c6d1a739..20ca49ec6 100644 --- a/rpc/src/v1/helpers/mod.rs +++ b/rpc/src/v1/helpers/mod.rs @@ -14,14 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#[macro_use] -pub mod auto_args; - #[macro_use] pub mod errors; pub mod dispatch; -pub mod params; pub mod block_import; mod poll_manager; diff --git a/rpc/src/v1/helpers/params.rs b/rpc/src/v1/helpers/params.rs deleted file mode 100644 index b533c1b89..000000000 --- a/rpc/src/v1/helpers/params.rs +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015, 2016 Parity Technologies (UK) Ltd. -// This file is part of Parity. 
- -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! Parameters parsing helpers - -use serde; -use jsonrpc_core::{Error, Params, from_params}; -use v1::types::BlockNumber; -use v1::helpers::errors; - -pub fn expect_no_params(params: Params) -> Result<(), Error> { - match params { - Params::None => Ok(()), - p => Err(errors::invalid_params("No parameters were expected", p)), - } -} - -/// Returns number of different parameters in given `Params` object. -pub fn params_len(params: &Params) -> usize { - match params { - &Params::Array(ref vec) => vec.len(), - _ => 0, - } -} - -/// Deserialize request parameters with optional third parameter `BlockNumber` defaulting to `BlockNumber::Latest`. -pub fn from_params_default_third(params: Params) -> Result<(F1, F2, BlockNumber, ), Error> where F1: serde::de::Deserialize, F2: serde::de::Deserialize { - match params_len(¶ms) { - 2 => from_params::<(F1, F2, )>(params).map(|(f1, f2)| (f1, f2, BlockNumber::Latest)), - _ => from_params::<(F1, F2, BlockNumber)>(params) - } -} - diff --git a/rpc/src/v1/impls/eth.rs b/rpc/src/v1/impls/eth.rs index 97134951c..055e57475 100644 --- a/rpc/src/v1/impls/eth.rs +++ b/rpc/src/v1/impls/eth.rs @@ -27,6 +27,7 @@ use time::get_time; use ethsync::{SyncProvider}; use ethcore::miner::{MinerService, ExternalMinerService}; use jsonrpc_core::*; +use jsonrpc_macros::Trailing; use util::{H256, Address, FixedHash, U256, H64, Uint}; use util::sha3::*; use util::{FromHex, Mutex}; @@ -51,7 +52,6 @@ use v1::types::{ use v1::helpers::{CallRequest as CRequest, errors, limit_logs}; use v1::helpers::dispatch::{dispatch_transaction, default_gas_price}; use v1::helpers::block_import::is_major_importing; -use v1::helpers::auto_args::Trailing; const EXTRA_INFO_PROOF: &'static str = "Object exists in in blockchain (fetched earlier), extra_info is always available if object exists; qed"; @@ -340,7 +340,11 @@ impl Eth for EthClient where let dapp = id.0; let store = take_weak!(self.accounts); - let accounts = try!(store.dapps_addresses(dapp.into()).map_err(|e| errors::internal("Could not fetch accounts.", e))); + let accounts = try!(store + .note_dapp_used(dapp.clone().into()) + .and_then(|_| store.dapps_addresses(dapp.into())) + .map_err(|e| errors::internal("Could not fetch accounts.", e)) + ); Ok(accounts.into_iter().map(Into::into).collect()) } diff --git a/rpc/src/v1/impls/parity.rs b/rpc/src/v1/impls/parity.rs index 1f995749a..2c6a498a1 100644 --- a/rpc/src/v1/impls/parity.rs +++ b/rpc/src/v1/impls/parity.rs @@ -32,6 +32,7 @@ use ethcore::mode::Mode; use ethcore::account_provider::AccountProvider; use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; use v1::traits::Parity; use v1::types::{ Bytes, U256, H160, H256, H512, @@ -41,7 +42,6 @@ use v1::types::{ }; use v1::helpers::{errors, SigningQueue, SignerService, NetworkSettings}; use v1::helpers::dispatch::DEFAULT_MAC; -use v1::helpers::auto_args::Trailing; /// Parity implementation. 
pub struct ParityClient where diff --git a/rpc/src/v1/impls/parity_accounts.rs b/rpc/src/v1/impls/parity_accounts.rs index bf53c7273..5fb21ccc7 100644 --- a/rpc/src/v1/impls/parity_accounts.rs +++ b/rpc/src/v1/impls/parity_accounts.rs @@ -164,19 +164,51 @@ impl ParityAccounts for ParityAccountsClient where C: MiningBlock fn set_dapps_addresses(&self, dapp: DappId, addresses: Vec) -> Result { let store = take_weak!(self.accounts); - let addresses = addresses.into_iter().map(Into::into).collect(); - store.set_dapps_addresses(dapp.into(), addresses) + store.set_dapps_addresses(dapp.into(), into_vec(addresses)) .map_err(|e| errors::account("Couldn't set dapps addresses.", e)) .map(|_| true) } + fn dapps_addresses(&self, dapp: DappId) -> Result, Error> { + let store = take_weak!(self.accounts); + + store.dapps_addresses(dapp.into()) + .map_err(|e| errors::account("Couldn't get dapps addresses.", e)) + .map(into_vec) + } + + fn set_new_dapps_whitelist(&self, whitelist: Option>) -> Result { + let store = take_weak!(self.accounts); + + store + .set_new_dapps_whitelist(whitelist.map(into_vec)) + .map_err(|e| errors::account("Couldn't set dapps whitelist.", e)) + .map(|_| true) + } + + fn new_dapps_whitelist(&self) -> Result>, Error> { + let store = take_weak!(self.accounts); + + store.new_dapps_whitelist() + .map_err(|e| errors::account("Couldn't get dapps whitelist.", e)) + .map(|accounts| accounts.map(into_vec)) + } + + fn recent_dapps(&self) -> Result, Error> { + let store = take_weak!(self.accounts); + + store.recent_dapps() + .map_err(|e| errors::account("Couldn't get recent dapps.", e)) + .map(into_vec) + } + fn import_geth_accounts(&self, addresses: Vec) -> Result, Error> { let store = take_weak!(self.accounts); store - .import_geth_accounts(addresses.into_iter().map(Into::into).collect(), false) - .map(|imported| imported.into_iter().map(Into::into).collect()) + .import_geth_accounts(into_vec(addresses), false) + .map(into_vec) .map_err(|e| errors::account("Couldn't import Geth accounts", e)) } @@ -184,10 +216,12 @@ impl ParityAccounts for ParityAccountsClient where C: MiningBlock try!(self.active()); let store = take_weak!(self.accounts); - Ok(store.list_geth_accounts(false) - .into_iter() - .map(Into::into) - .collect() - ) + Ok(into_vec(store.list_geth_accounts(false))) } } + +fn into_vec(a: Vec) -> Vec where + A: Into +{ + a.into_iter().map(Into::into).collect() +} diff --git a/rpc/src/v1/impls/parity_set.rs b/rpc/src/v1/impls/parity_set.rs index b63adaa67..6371a46ea 100644 --- a/rpc/src/v1/impls/parity_set.rs +++ b/rpc/src/v1/impls/parity_set.rs @@ -26,7 +26,7 @@ use fetch::{Client as FetchClient, Fetch}; use util::{Mutex, sha3}; use jsonrpc_core::Error; -use v1::helpers::auto_args::Ready; +use jsonrpc_macros::Ready; use v1::helpers::errors; use v1::traits::ParitySet; use v1::types::{Bytes, H160, H256, U256}; diff --git a/rpc/src/v1/impls/personal.rs b/rpc/src/v1/impls/personal.rs index cc4a40d4a..d1cdd9810 100644 --- a/rpc/src/v1/impls/personal.rs +++ b/rpc/src/v1/impls/personal.rs @@ -114,7 +114,7 @@ impl Personal for PersonalClient where C: MiningBl &*miner, &*accounts, request, - Some(password) - ).map(Into::into) + dispatch::SignWith::Password(password) + ).map(|v| v.into_value().into()) } } diff --git a/rpc/src/v1/impls/signer.rs b/rpc/src/v1/impls/signer.rs index 6e09a5ec8..95c2f9b94 100644 --- a/rpc/src/v1/impls/signer.rs +++ b/rpc/src/v1/impls/signer.rs @@ -26,9 +26,9 @@ use ethcore::miner::MinerService; use jsonrpc_core::Error; use v1::traits::Signer; -use 
v1::types::{TransactionModification, ConfirmationRequest, ConfirmationResponse, U256, Bytes}; +use v1::types::{TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, U256, Bytes}; use v1::helpers::{errors, SignerService, SigningQueue, ConfirmationPayload}; -use v1::helpers::dispatch::{self, dispatch_transaction}; +use v1::helpers::dispatch::{self, dispatch_transaction, WithToken}; /// Transactions confirmation (personal) rpc implementation. pub struct SignerClient where C: MiningBlockChainClient, M: MinerService { @@ -60,24 +60,10 @@ impl SignerClient where C: MiningBlockChainClient, take_weak!(self.client).keep_alive(); Ok(()) } -} -impl Signer for SignerClient where C: MiningBlockChainClient, M: MinerService { - - fn requests_to_confirm(&self) -> Result, Error> { - try!(self.active()); - let signer = take_weak!(self.signer); - - Ok(signer.requests() - .into_iter() - .map(Into::into) - .collect() - ) - } - - // TODO [ToDr] TransactionModification is redundant for some calls - // might be better to replace it in future - fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) -> Result { + fn confirm_internal(&self, id: U256, modification: TransactionModification, f: F) -> Result, Error> where + F: FnOnce(&C, &M, &AccountProvider, ConfirmationPayload) -> Result, Error>, + { try!(self.active()); let id = id.into(); @@ -97,14 +83,48 @@ impl Signer for SignerClient where C: MiningBlockC request.gas = gas.into(); } } + let result = f(&*client, &*miner, &*accounts, payload); // Execute - let result = dispatch::execute(&*client, &*miner, &*accounts, payload, Some(pass)); if let Ok(ref response) = result { - signer.request_confirmed(id, Ok(response.clone())); + signer.request_confirmed(id, Ok((*response).clone())); } result }).unwrap_or_else(|| Err(errors::invalid_params("Unknown RequestID", id))) } +} + +impl Signer for SignerClient where C: MiningBlockChainClient, M: MinerService { + + fn requests_to_confirm(&self) -> Result, Error> { + try!(self.active()); + let signer = take_weak!(self.signer); + + Ok(signer.requests() + .into_iter() + .map(Into::into) + .collect() + ) + } + + // TODO [ToDr] TransactionModification is redundant for some calls + // might be better to replace it in future + fn confirm_request(&self, id: U256, modification: TransactionModification, pass: String) -> Result { + self.confirm_internal(id, modification, move |client, miner, accounts, payload| { + dispatch::execute(client, miner, accounts, payload, dispatch::SignWith::Password(pass)) + }).map(|v| v.into_value()) + } + + fn confirm_request_with_token(&self, id: U256, modification: TransactionModification, token: String) -> Result { + self.confirm_internal(id, modification, move |client, miner, accounts, payload| { + dispatch::execute(client, miner, accounts, payload, dispatch::SignWith::Token(token)) + }).and_then(|v| match v { + WithToken::No(_) => Err(errors::internal("Unexpected response without token.", "")), + WithToken::Yes(response, token) => Ok(ConfirmationResponseWithToken { + result: response, + token: token, + }), + }) + } fn confirm_request_raw(&self, id: U256, bytes: Bytes) -> Result { try!(self.active()); diff --git a/rpc/src/v1/impls/signing.rs b/rpc/src/v1/impls/signing.rs index 5fc6558da..cc2c74fcc 100644 --- a/rpc/src/v1/impls/signing.rs +++ b/rpc/src/v1/impls/signing.rs @@ -25,7 +25,7 @@ use ethcore::miner::MinerService; use ethcore::client::MiningBlockChainClient; use jsonrpc_core::Error; -use v1::helpers::auto_args::Ready; 
+use jsonrpc_macros::Ready; use v1::helpers::{ errors, dispatch, SigningQueue, ConfirmationPromise, ConfirmationResult, ConfirmationPayload, SignerService @@ -99,7 +99,9 @@ impl SigningQueueClient where let sender = payload.sender(); if accounts.is_unlocked(sender) { - return dispatch::execute(&*client, &*miner, &*accounts, payload, None).map(DispatchResult::Value); + return dispatch::execute(&*client, &*miner, &*accounts, payload, dispatch::SignWith::Nothing) + .map(|v| v.into_value()) + .map(DispatchResult::Value); } take_weak!(self.signer).add_request(payload) diff --git a/rpc/src/v1/impls/signing_unsafe.rs b/rpc/src/v1/impls/signing_unsafe.rs index 2b3b81d57..9ad5828f2 100644 --- a/rpc/src/v1/impls/signing_unsafe.rs +++ b/rpc/src/v1/impls/signing_unsafe.rs @@ -24,7 +24,7 @@ use ethcore::miner::MinerService; use ethcore::client::MiningBlockChainClient; use jsonrpc_core::Error; -use v1::helpers::auto_args::Ready; +use jsonrpc_macros::Ready; use v1::helpers::errors; use v1::helpers::dispatch; use v1::traits::{EthSigning, ParitySigning}; @@ -76,7 +76,8 @@ impl SigningUnsafeClient where let accounts = take_weak!(self.accounts); let payload = dispatch::from_rpc(payload, &*client, &*miner); - dispatch::execute(&*client, &*miner, &*accounts, payload, None) + dispatch::execute(&*client, &*miner, &*accounts, payload, dispatch::SignWith::Nothing) + .map(|v| v.into_value()) } } diff --git a/rpc/src/v1/impls/traces.rs b/rpc/src/v1/impls/traces.rs index 2fd912284..5f42c0cf3 100644 --- a/rpc/src/v1/impls/traces.rs +++ b/rpc/src/v1/impls/traces.rs @@ -17,14 +17,16 @@ //! Traces api implementation. use std::sync::{Weak, Arc}; -use jsonrpc_core::*; + use rlp::{UntrustedRlp, View}; use ethcore::client::{BlockChainClient, CallAnalytics, TransactionId, TraceId}; use ethcore::miner::MinerService; use ethcore::transaction::{Transaction as EthTransaction, SignedTransaction, Action}; + +use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; use v1::traits::Traces; use v1::helpers::{errors, CallRequest as CRequest}; -use v1::helpers::params::from_params_default_third; use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; fn to_call_analytics(flags: Vec) -> CallAnalytics { @@ -73,90 +75,77 @@ impl TracesClient where C: BlockChainClient, M: MinerService { } impl Traces for TracesClient where C: BlockChainClient + 'static, M: MinerService + 'static { - fn filter(&self, params: Params) -> Result { + fn filter(&self, filter: TraceFilter) -> Result, Error> { try!(self.active()); - from_params::<(TraceFilter,)>(params) - .and_then(|(filter, )| { - let client = take_weak!(self.client); - let traces = client.filter_traces(filter.into()); - let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); - Ok(to_value(&traces)) - }) + + let client = take_weak!(self.client); + let traces = client.filter_traces(filter.into()); + let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); + Ok(traces) } - fn block_traces(&self, params: Params) -> Result { + fn block_traces(&self, block_number: BlockNumber) -> Result, Error> { try!(self.active()); - from_params::<(BlockNumber,)>(params) - .and_then(|(block_number,)| { - let client = take_weak!(self.client); - let traces = client.block_traces(block_number.into()); - let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); - Ok(to_value(&traces)) - }) + let client = 
take_weak!(self.client); + let traces = client.block_traces(block_number.into()); + let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); + Ok(traces) } - fn transaction_traces(&self, params: Params) -> Result { + fn transaction_traces(&self, transaction_hash: H256) -> Result, Error> { try!(self.active()); - from_params::<(H256,)>(params) - .and_then(|(transaction_hash,)| { - let client = take_weak!(self.client); - let traces = client.transaction_traces(TransactionId::Hash(transaction_hash.into())); - let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); - Ok(to_value(&traces)) - }) + + let client = take_weak!(self.client); + let traces = client.transaction_traces(TransactionId::Hash(transaction_hash.into())); + let traces = traces.map_or_else(Vec::new, |traces| traces.into_iter().map(LocalizedTrace::from).collect()); + Ok(traces) } - fn trace(&self, params: Params) -> Result { + fn trace(&self, transaction_hash: H256, address: Vec) -> Result, Error> { try!(self.active()); - from_params::<(H256, Vec)>(params) - .and_then(|(transaction_hash, address)| { - let client = take_weak!(self.client); - let id = TraceId { - transaction: TransactionId::Hash(transaction_hash.into()), - address: address.into_iter().map(|i| i.value()).collect() - }; - let trace = client.trace(id); - let trace = trace.map(LocalizedTrace::from); - Ok(to_value(&trace)) - }) + let client = take_weak!(self.client); + let id = TraceId { + transaction: TransactionId::Hash(transaction_hash.into()), + address: address.into_iter().map(|i| i.value()).collect() + }; + let trace = client.trace(id); + let trace = trace.map(LocalizedTrace::from); + + Ok(trace) } - fn call(&self, params: Params) -> Result { + fn call(&self, request: CallRequest, flags: Vec, block: Trailing) -> Result, Error> { try!(self.active()); - from_params_default_third(params) - .and_then(|(request, flags, block)| { - let request = CallRequest::into(request); - let signed = try!(self.sign_call(request)); - match take_weak!(self.client).call(&signed, block.into(), to_call_analytics(flags)) { - Ok(e) => Ok(to_value(&TraceResults::from(e))), - _ => Ok(Value::Null), - } - }) + let block = block.0; + + let request = CallRequest::into(request); + let signed = try!(self.sign_call(request)); + Ok(match take_weak!(self.client).call(&signed, block.into(), to_call_analytics(flags)) { + Ok(e) => Some(TraceResults::from(e)), + _ => None, + }) } - fn raw_transaction(&self, params: Params) -> Result { + fn raw_transaction(&self, raw_transaction: Bytes, flags: Vec, block: Trailing) -> Result, Error> { try!(self.active()); - from_params_default_third(params) - .and_then(|(raw_transaction, flags, block)| { - let raw_transaction = Bytes::to_vec(raw_transaction); - match UntrustedRlp::new(&raw_transaction).as_val() { - Ok(signed) => match take_weak!(self.client).call(&signed, block.into(), to_call_analytics(flags)) { - Ok(e) => Ok(to_value(&TraceResults::from(e))), - _ => Ok(Value::Null), - }, - Err(e) => Err(errors::invalid_params("Transaction is not valid RLP", e)), - } - }) + let block = block.0; + + let raw_transaction = Bytes::to_vec(raw_transaction); + match UntrustedRlp::new(&raw_transaction).as_val() { + Ok(signed) => Ok(match take_weak!(self.client).call(&signed, block.into(), to_call_analytics(flags)) { + Ok(e) => Some(TraceResults::from(e)), + _ => None, + }), + Err(e) => Err(errors::invalid_params("Transaction is not valid RLP", e)), + } } - fn 
replay_transaction(&self, params: Params) -> Result { + fn replay_transaction(&self, transaction_hash: H256, flags: Vec) -> Result, Error> { try!(self.active()); - from_params::<(H256, _)>(params) - .and_then(|(transaction_hash, flags)| { - match take_weak!(self.client).replay(TransactionId::Hash(transaction_hash.into()), to_call_analytics(flags)) { - Ok(e) => Ok(to_value(&TraceResults::from(e))), - _ => Ok(Value::Null), - } - }) + + Ok(match take_weak!(self.client).replay(TransactionId::Hash(transaction_hash.into()), to_call_analytics(flags)) { + Ok(e) => Some(TraceResults::from(e)), + _ => None, + }) } } diff --git a/rpc/src/v1/tests/mocked/eth.rs b/rpc/src/v1/tests/mocked/eth.rs index ea8409ef2..f428bbcbd 100644 --- a/rpc/src/v1/tests/mocked/eth.rs +++ b/rpc/src/v1/tests/mocked/eth.rs @@ -354,11 +354,18 @@ fn rpc_eth_gas_price() { #[test] fn rpc_eth_accounts() { let tester = EthTester::default(); - let _address = tester.accounts_provider.new_account("").unwrap(); + let address = tester.accounts_provider.new_account("").unwrap(); + tester.accounts_provider.set_new_dapps_whitelist(None).unwrap(); + // with current policy it should return the account + let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":[""#.to_owned() + &format!("0x{:?}", address) + r#""],"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + + tester.accounts_provider.set_new_dapps_whitelist(Some(vec![1.into()])).unwrap(); // even with some account it should return empty list (no dapp detected) let request = r#"{"jsonrpc": "2.0", "method": "eth_accounts", "params": [], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":[],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x0000000000000000000000000000000000000001"],"id":1}"#; assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); // when we add visible address it should return that. diff --git a/rpc/src/v1/tests/mocked/mod.rs b/rpc/src/v1/tests/mocked/mod.rs index 0ba0fc95c..0dbaa38f6 100644 --- a/rpc/src/v1/tests/mocked/mod.rs +++ b/rpc/src/v1/tests/mocked/mod.rs @@ -18,13 +18,14 @@ //! method calls properly. 
mod eth; +mod manage_network; mod net; -mod web3; -mod personal; mod parity; mod parity_accounts; mod parity_set; +mod personal; mod rpc; mod signer; mod signing; -mod manage_network; +mod traces; +mod web3; diff --git a/rpc/src/v1/tests/mocked/parity_accounts.rs b/rpc/src/v1/tests/mocked/parity_accounts.rs index a30b6c43c..851af9ebd 100644 --- a/rpc/src/v1/tests/mocked/parity_accounts.rs +++ b/rpc/src/v1/tests/mocked/parity_accounts.rs @@ -117,7 +117,7 @@ fn should_be_able_to_set_meta() { } #[test] -fn rpc_parity_set_dapps_accounts() { +fn rpc_parity_set_and_get_dapps_accounts() { // given let tester = setup(); assert_eq!(tester.accounts.dapps_addresses("app1".into()).unwrap(), vec![]); @@ -129,6 +129,52 @@ fn rpc_parity_set_dapps_accounts() { // then assert_eq!(tester.accounts.dapps_addresses("app1".into()).unwrap(), vec![10.into()]); + let request = r#"{"jsonrpc": "2.0", "method": "parity_getDappsAddresses","params":["app1"], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x000000000000000000000000000000000000000a"],"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_parity_set_and_get_new_dapps_whitelist() { + // given + let tester = setup(); + + // when set to whitelist + let request = r#"{"jsonrpc": "2.0", "method": "parity_setNewDappsWhitelist","params":[["0x000000000000000000000000000000000000000a"]], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + + // then + assert_eq!(tester.accounts.new_dapps_whitelist().unwrap(), Some(vec![10.into()])); + let request = r#"{"jsonrpc": "2.0", "method": "parity_getNewDappsWhitelist","params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["0x000000000000000000000000000000000000000a"],"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + + // when set to empty + let request = r#"{"jsonrpc": "2.0", "method": "parity_setNewDappsWhitelist","params":[null], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":true,"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); + + // then + assert_eq!(tester.accounts.new_dapps_whitelist().unwrap(), None); + let request = r#"{"jsonrpc": "2.0", "method": "parity_getNewDappsWhitelist","params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":null,"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_parity_recent_dapps() { + // given + let tester = setup(); + + // when + // trigger dapp usage + tester.accounts.note_dapp_used("dapp1".into()).unwrap(); + + // then + let request = r#"{"jsonrpc": "2.0", "method": "parity_listRecentDapps","params":[], "id": 1}"#; + let response = r#"{"jsonrpc":"2.0","result":["dapp1"],"id":1}"#; + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); } #[test] diff --git a/rpc/src/v1/tests/mocked/signer.rs b/rpc/src/v1/tests/mocked/signer.rs index 3537717d4..296d0d30c 100644 --- a/rpc/src/v1/tests/mocked/signer.rs +++ b/rpc/src/v1/tests/mocked/signer.rs @@ -209,6 +209,53 @@ fn should_confirm_transaction_and_dispatch() { assert_eq!(tester.miner.imported_transactions.lock().len(), 1); } +#[test] +fn should_confirm_transaction_with_token() { + // given + let tester = signer_tester(); + let address = tester.accounts.new_account("test").unwrap(); + let recipient = 
Address::from_str("d46e8dd67c5d32be8058bb8eb970870f07244567").unwrap(); + tester.signer.add_request(ConfirmationPayload::SendTransaction(FilledTransactionRequest { + from: address, + to: Some(recipient), + gas_price: U256::from(10_000), + gas: U256::from(10_000_000), + value: U256::from(1), + data: vec![], + nonce: None, + })).unwrap(); + + let t = Transaction { + nonce: U256::zero(), + gas_price: U256::from(0x1000), + gas: U256::from(10_000_000), + action: Action::Call(recipient), + value: U256::from(0x1), + data: vec![] + }; + let (signature, token) = tester.accounts.sign_with_token(address, "test".into(), t.hash(None)).unwrap(); + let t = t.with_signature(signature, None); + + assert_eq!(tester.signer.requests().len(), 1); + + // when + let request = r#"{ + "jsonrpc":"2.0", + "method":"signer_confirmRequestWithToken", + "params":["0x1", {"gasPrice":"0x1000"}, ""#.to_owned() + &token + r#""], + "id":1 + }"#; + let response = r#"{"jsonrpc":"2.0","result":{"result":""#.to_owned() + + format!("0x{:?}", t.hash()).as_ref() + + r#"","token":""#; + + // then + let result = tester.io.handle_request_sync(&request).unwrap(); + assert!(result.starts_with(&response), "Should return correct result. Expected: {:?}, Got: {:?}", response, result); + assert_eq!(tester.signer.requests().len(), 0); + assert_eq!(tester.miner.imported_transactions.lock().len(), 1); +} + #[test] fn should_confirm_transaction_with_rlp() { // given diff --git a/rpc/src/v1/tests/mocked/traces.rs b/rpc/src/v1/tests/mocked/traces.rs new file mode 100644 index 000000000..f9a9baa00 --- /dev/null +++ b/rpc/src/v1/tests/mocked/traces.rs @@ -0,0 +1,145 @@ +// Copyright 2015, 2016 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::sync::Arc; + +use ethcore::executed::{CallType, Executed}; +use ethcore::trace::trace::{Action, Res, Call}; +use ethcore::trace::LocalizedTrace; +use ethcore::client::{TestBlockChainClient}; + +use jsonrpc_core::{IoHandler, GenericIoHandler}; +use v1::tests::helpers::{TestMinerService}; +use v1::{Traces, TracesClient}; + +struct Tester { + _client: Arc, + _miner: Arc, + io: IoHandler, +} + +fn io() -> Tester { + let client = Arc::new(TestBlockChainClient::new()); + *client.traces.write() = Some(vec![LocalizedTrace { + action: Action::Call(Call { + from: 0xf.into(), + to: 0x10.into(), + value: 0x1.into(), + gas: 0x100.into(), + input: vec![1, 2, 3], + call_type: CallType::Call, + }), + result: Res::None, + subtraces: 0, + trace_address: vec![0], + transaction_number: 0, + transaction_hash: 5.into(), + block_number: 10, + block_hash: 10.into(), + }]); + *client.execution_result.write() = Some(Ok(Executed { + gas: 20_000.into(), + gas_used: 10_000.into(), + refunded: 0.into(), + cumulative_gas_used: 10_000.into(), + logs: vec![], + contracts_created: vec![], + output: vec![1, 2, 3], + trace: vec![], + vm_trace: None, + state_diff: None, + })); + let miner = Arc::new(TestMinerService::default()); + let traces = TracesClient::new(&client, &miner); + let io = IoHandler::new(); + io.add_delegate(traces.to_delegate()); + + Tester { + _client: client, + _miner: miner, + io: io, + } +} + +#[test] +fn rpc_trace_filter() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_filter","params": [{}],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_block() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_block","params": ["0x10"],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_transaction() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_transaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005"],"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":[{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"}],"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_get() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_get","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["0","0","0"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"action":{"callType":"call","from":"0x000000000000000000000000000000000000000f","gas":"0x100","input":"0x010203","to":"0x0000000000000000000000000000000000000010","value":"0x1"},"blockHash":"0x000000000000000000000000000000000000000000000000000000000000000a","blockNumber":10,"result":null,"subtraces":0,"traceAddress":[0],"transactionHash":"0x0000000000000000000000000000000000000000000000000000000000000005","transactionPosition":0,"type":"call"},"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_call() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_call","params":[{}, ["stateDiff", "vmTrace", "trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_raw_transaction() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_rawTransaction","params":["0xf869018609184e72a0008276c094d46e8dd67c5d32be8058bb8eb970870f07244567849184e72a801ba0617f39c1a107b63302449c476d96a6cb17a5842fc98ff0c5bcf4d5c4d8166b95a009fdb6097c6196b9bbafc3a59f02f38d91baeef23d0c60a8e4f23c7714cea3a9", ["stateDiff", "vmTrace", "trace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} + +#[test] +fn rpc_trace_replay_transaction() { + let tester = io(); + + let request = r#"{"jsonrpc":"2.0","method":"trace_replayTransaction","params":["0x0000000000000000000000000000000000000000000000000000000000000005", ["trace", "stateDiff", "vmTrace"]],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"output":"0x010203","stateDiff":null,"trace":[],"vmTrace":null},"id":1}"#; + + assert_eq!(tester.io.handle_request_sync(request), Some(response.to_owned())); +} diff --git a/rpc/src/v1/traits/eth.rs b/rpc/src/v1/traits/eth.rs index 14ee8ca62..2d52b7c70 100644 --- a/rpc/src/v1/traits/eth.rs +++ b/rpc/src/v1/traits/eth.rs @@ -21,7 +21,7 @@ use v1::types::{RichBlock, BlockNumber, Bytes, CallRequest, Filter, FilterChange use v1::types::{Log, Receipt, SyncStatus, Transaction, Work}; use v1::types::{H64, H160, H256, U256}; -use v1::helpers::auto_args::{Trailing, Wrap}; +use jsonrpc_macros::Trailing; build_rpc_trait! { /// Eth rpc interface. 
diff --git a/rpc/src/v1/traits/eth_signing.rs b/rpc/src/v1/traits/eth_signing.rs index aa306f3f0..442883339 100644 --- a/rpc/src/v1/traits/eth_signing.rs +++ b/rpc/src/v1/traits/eth_signing.rs @@ -16,7 +16,8 @@ //! Eth rpc interface. -use v1::helpers::auto_args::{WrapAsync, Ready}; +use jsonrpc_macros::Ready; + use v1::types::{Bytes, H160, H256, H520, TransactionRequest, RichRawTransaction}; build_rpc_trait! { diff --git a/rpc/src/v1/traits/net.rs b/rpc/src/v1/traits/net.rs index a34270826..ebe93472e 100644 --- a/rpc/src/v1/traits/net.rs +++ b/rpc/src/v1/traits/net.rs @@ -17,8 +17,6 @@ //! Net rpc interface. use jsonrpc_core::Error; -use v1::helpers::auto_args::Wrap; - build_rpc_trait! { /// Net rpc interface. pub trait Net { @@ -35,4 +33,4 @@ build_rpc_trait! { #[rpc(name = "net_listening")] fn is_listening(&self) -> Result; } -} \ No newline at end of file +} diff --git a/rpc/src/v1/traits/parity.rs b/rpc/src/v1/traits/parity.rs index ba6514168..fecc05667 100644 --- a/rpc/src/v1/traits/parity.rs +++ b/rpc/src/v1/traits/parity.rs @@ -15,10 +15,12 @@ // along with Parity. If not, see . //! Parity-specific rpc interface. -use jsonrpc_core::Error; use std::collections::BTreeMap; -use v1::helpers::auto_args::{Wrap, Trailing}; + +use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; + use v1::types::{ H160, H256, H512, U256, Bytes, Peers, Transaction, RpcSettings, Histogram, diff --git a/rpc/src/v1/traits/parity_accounts.rs b/rpc/src/v1/traits/parity_accounts.rs index e4393c149..bf360c3c2 100644 --- a/rpc/src/v1/traits/parity_accounts.rs +++ b/rpc/src/v1/traits/parity_accounts.rs @@ -16,9 +16,8 @@ //! Parity Accounts-related rpc interface. use std::collections::BTreeMap; -use jsonrpc_core::{Value, Error}; -use v1::helpers::auto_args::Wrap; +use jsonrpc_core::{Value, Error}; use v1::types::{H160, H256, DappId}; build_rpc_trait! { @@ -79,6 +78,24 @@ build_rpc_trait! { #[rpc(name = "parity_setDappsAddresses")] fn set_dapps_addresses(&self, DappId, Vec) -> Result; + /// Gets accounts exposed for particular dapp. + #[rpc(name = "parity_getDappsAddresses")] + fn dapps_addresses(&self, DappId) -> Result, Error>; + + /// Sets accounts exposed for new dapps. + /// `None` means that all accounts will be exposed. + #[rpc(name = "parity_setNewDappsWhitelist")] + fn set_new_dapps_whitelist(&self, Option>) -> Result; + + /// Gets accounts exposed for new dapps. + /// `None` means that all accounts will be exposed. + #[rpc(name = "parity_getNewDappsWhitelist")] + fn new_dapps_whitelist(&self) -> Result>, Error>; + + /// Sets accounts exposed for particular dapp. + #[rpc(name = "parity_listRecentDapps")] + fn recent_dapps(&self) -> Result, Error>; + /// Imports a number of Geth accounts, with the list provided as the argument. #[rpc(name = "parity_importGethAccounts")] fn import_geth_accounts(&self, Vec) -> Result, Error>; diff --git a/rpc/src/v1/traits/parity_set.rs b/rpc/src/v1/traits/parity_set.rs index 486f7fb42..6ae3610c8 100644 --- a/rpc/src/v1/traits/parity_set.rs +++ b/rpc/src/v1/traits/parity_set.rs @@ -17,8 +17,8 @@ //! Parity-specific rpc interface for operations altering the settings. use jsonrpc_core::Error; +use jsonrpc_macros::Ready; -use v1::helpers::auto_args::{Wrap, WrapAsync, Ready}; use v1::types::{Bytes, H160, H256, U256}; build_rpc_trait! { diff --git a/rpc/src/v1/traits/parity_signing.rs b/rpc/src/v1/traits/parity_signing.rs index 5eb5ff8b3..a6fdbe2cd 100644 --- a/rpc/src/v1/traits/parity_signing.rs +++ b/rpc/src/v1/traits/parity_signing.rs @@ -16,8 +16,8 @@ //! 
ParitySigning rpc interface. use jsonrpc_core::Error; +use jsonrpc_macros::Ready; -use v1::helpers::auto_args::{Wrap, WrapAsync, Ready}; use v1::types::{U256, H160, H256, Bytes, ConfirmationResponse, TransactionRequest, Either}; build_rpc_trait! { diff --git a/rpc/src/v1/traits/personal.rs b/rpc/src/v1/traits/personal.rs index edb1a9d79..a7cc996ea 100644 --- a/rpc/src/v1/traits/personal.rs +++ b/rpc/src/v1/traits/personal.rs @@ -17,7 +17,6 @@ //! Personal rpc interface. use jsonrpc_core::Error; -use v1::helpers::auto_args::Wrap; use v1::types::{U128, H160, H256, TransactionRequest}; build_rpc_trait! { diff --git a/rpc/src/v1/traits/rpc.rs b/rpc/src/v1/traits/rpc.rs index a03d67b55..9b44b560f 100644 --- a/rpc/src/v1/traits/rpc.rs +++ b/rpc/src/v1/traits/rpc.rs @@ -16,12 +16,10 @@ //! RPC interface. -use jsonrpc_core::Error; - -use v1::helpers::auto_args::Wrap; - use std::collections::BTreeMap; +use jsonrpc_core::Error; + build_rpc_trait! { /// RPC Interface. pub trait Rpc { @@ -33,4 +31,4 @@ build_rpc_trait! { #[rpc(name = "rpc_modules")] fn rpc_modules(&self) -> Result, Error>; } -} \ No newline at end of file +} diff --git a/rpc/src/v1/traits/signer.rs b/rpc/src/v1/traits/signer.rs index 5a18fe293..1426a6a6a 100644 --- a/rpc/src/v1/traits/signer.rs +++ b/rpc/src/v1/traits/signer.rs @@ -17,9 +17,7 @@ //! Parity Signer-related rpc interface. use jsonrpc_core::Error; -use v1::helpers::auto_args::Wrap; -use v1::types::{U256, Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse}; - +use v1::types::{U256, Bytes, TransactionModification, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken}; build_rpc_trait! { /// Signer extension for confirmations rpc interface. @@ -33,6 +31,10 @@ build_rpc_trait! { #[rpc(name = "signer_confirmRequest")] fn confirm_request(&self, U256, TransactionModification, String) -> Result; + /// Confirm specific request with token. + #[rpc(name = "signer_confirmRequestWithToken")] + fn confirm_request_with_token(&self, U256, TransactionModification, String) -> Result; + /// Confirm specific request with already signed data. #[rpc(name = "signer_confirmRequestRaw")] fn confirm_request_raw(&self, U256, Bytes) -> Result; diff --git a/rpc/src/v1/traits/traces.rs b/rpc/src/v1/traits/traces.rs index 0440b4dff..1d5fef5ec 100644 --- a/rpc/src/v1/traits/traces.rs +++ b/rpc/src/v1/traits/traces.rs @@ -15,43 +15,40 @@ // along with Parity. If not, see . //! Traces specific rpc interface. -use std::sync::Arc; -use jsonrpc_core::*; -/// Traces specific rpc interface. -pub trait Traces: Sized + Send + Sync + 'static { - /// Returns traces matching given filter. - fn filter(&self, _: Params) -> Result; +use jsonrpc_core::Error; +use jsonrpc_macros::Trailing; +use v1::types::{TraceFilter, LocalizedTrace, BlockNumber, Index, CallRequest, Bytes, TraceResults, H256}; - /// Returns transaction trace at given index. - fn trace(&self, _: Params) -> Result; +build_rpc_trait! { + /// Traces specific rpc interface. + pub trait Traces { + /// Returns traces matching given filter. + #[rpc(name = "trace_filter")] + fn filter(&self, TraceFilter) -> Result, Error>; - /// Returns all traces of given transaction. - fn transaction_traces(&self, _: Params) -> Result; + /// Returns transaction trace at given index. + #[rpc(name = "trace_get")] + fn trace(&self, H256, Vec) -> Result, Error>; - /// Returns all traces produced at given block. - fn block_traces(&self, _: Params) -> Result; + /// Returns all traces of given transaction. 
+ #[rpc(name = "trace_transaction")] + fn transaction_traces(&self, H256) -> Result, Error>; - /// Executes the given call and returns a number of possible traces for it. - fn call(&self, _: Params) -> Result; + /// Returns all traces produced at given block. + #[rpc(name = "trace_block")] + fn block_traces(&self, BlockNumber) -> Result, Error>; - /// Executes the given raw transaction and returns a number of possible traces for it. - fn raw_transaction(&self, _: Params) -> Result; + /// Executes the given call and returns a number of possible traces for it. + #[rpc(name = "trace_call")] + fn call(&self, CallRequest, Vec, Trailing) -> Result, Error>; - /// Executes the transaction with the given hash and returns a number of possible traces for it. - fn replay_transaction(&self, _: Params) -> Result; + /// Executes the given raw transaction and returns a number of possible traces for it. + #[rpc(name = "trace_rawTransaction")] + fn raw_transaction(&self, Bytes, Vec, Trailing) -> Result, Error>; - /// Should be used to convert object to io delegate. - fn to_delegate(self) -> IoDelegate { - let mut delegate = IoDelegate::new(Arc::new(self)); - delegate.add_method("trace_filter", Traces::filter); - delegate.add_method("trace_get", Traces::trace); - delegate.add_method("trace_transaction", Traces::transaction_traces); - delegate.add_method("trace_block", Traces::block_traces); - delegate.add_method("trace_call", Traces::call); - delegate.add_method("trace_rawTransaction", Traces::raw_transaction); - delegate.add_method("trace_replayTransaction", Traces::replay_transaction); - - delegate + /// Executes the transaction with the given hash and returns a number of possible traces for it. + #[rpc(name = "trace_replayTransaction")] + fn replay_transaction(&self, H256, Vec) -> Result, Error>; } } diff --git a/rpc/src/v1/traits/web3.rs b/rpc/src/v1/traits/web3.rs index efe26e307..c2f5f55e1 100644 --- a/rpc/src/v1/traits/web3.rs +++ b/rpc/src/v1/traits/web3.rs @@ -17,10 +17,8 @@ //! Web3 rpc interface. use jsonrpc_core::Error; -use v1::helpers::auto_args::Wrap; use v1::types::{H256, Bytes}; - build_rpc_trait! { /// Web3 rpc interface. 
pub trait Web3 { diff --git a/rpc/src/v1/types/confirmations.rs b/rpc/src/v1/types/confirmations.rs index fd81bf6e7..d7dc3f210 100644 --- a/rpc/src/v1/types/confirmations.rs +++ b/rpc/src/v1/types/confirmations.rs @@ -18,11 +18,13 @@ use std::fmt; use serde::{Serialize, Serializer}; +use util::log::Colour; + use v1::types::{U256, TransactionRequest, RichRawTransaction, H160, H256, H520, Bytes}; use v1::helpers; /// Confirmation waiting in a queue -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] pub struct ConfirmationRequest { /// Id of this confirmation pub id: U256, @@ -39,8 +41,25 @@ impl From for ConfirmationRequest { } } +impl fmt::Display for ConfirmationRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "#{}: {}", self.id, self.payload) + } +} + +impl fmt::Display for ConfirmationPayload { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ConfirmationPayload::SendTransaction(ref transaction) => write!(f, "{}", transaction), + ConfirmationPayload::SignTransaction(ref transaction) => write!(f, "(Sign only) {}", transaction), + ConfirmationPayload::Signature(ref sign) => write!(f, "{}", sign), + ConfirmationPayload::Decrypt(ref decrypt) => write!(f, "{}", decrypt), + } + } +} + /// Sign request -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] pub struct SignRequest { /// Address pub address: H160, @@ -57,8 +76,19 @@ impl From<(H160, H256)> for SignRequest { } } +impl fmt::Display for SignRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "sign 0x{:?} with {}", + self.hash, + Colour::White.bold().paint(format!("0x{:?}", self.address)), + ) + } +} + /// Decrypt request -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] pub struct DecryptRequest { /// Address pub address: H160, @@ -75,6 +105,16 @@ impl From<(H160, Bytes)> for DecryptRequest { } } +impl fmt::Display for DecryptRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "decrypt data with {}", + Colour::White.bold().paint(format!("0x{:?}", self.address)), + ) + } +} + /// Confirmation response for particular payload #[derive(Debug, Clone, PartialEq)] pub enum ConfirmationResponse { @@ -101,8 +141,17 @@ impl Serialize for ConfirmationResponse { } } +/// Confirmation response with additional token for further requests +#[derive(Debug, Clone, PartialEq, Serialize)] +pub struct ConfirmationResponseWithToken { + /// Actual response + pub result: ConfirmationResponse, + /// New token + pub token: String, +} + /// Confirmation payload, i.e. 
the thing to be confirmed -#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize)] +#[derive(Debug, Clone, Eq, PartialEq, Hash, Serialize, Deserialize)] pub enum ConfirmationPayload { /// Send Transaction #[serde(rename="sendTransaction")] @@ -136,7 +185,7 @@ impl From for ConfirmationPayload { } /// Possible modifications to the confirmed transaction sent by `Trusted Signer` -#[derive(Debug, PartialEq, Deserialize)] +#[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub struct TransactionModification { /// Modified gas price @@ -185,7 +234,7 @@ impl Serialize for Either where mod tests { use std::str::FromStr; use serde_json; - use v1::types::U256; + use v1::types::{U256, H256}; use v1::helpers; use super::*; @@ -299,5 +348,20 @@ mod tests { gas: None, }); } -} + #[test] + fn should_serialize_confirmation_response_with_token() { + // given + let response = ConfirmationResponseWithToken { + result: ConfirmationResponse::SendTransaction(H256::default()), + token: "test-token".into(), + }; + + // when + let res = serde_json::to_string(&response); + let expected = r#"{"result":"0x0000000000000000000000000000000000000000000000000000000000000000","token":"test-token"}"#; + + // then + assert_eq!(res.unwrap(), expected.to_owned()); + } +} diff --git a/rpc/src/v1/types/dapp_id.rs b/rpc/src/v1/types/dapp_id.rs index fbd016e8a..fd98d7c8c 100644 --- a/rpc/src/v1/types/dapp_id.rs +++ b/rpc/src/v1/types/dapp_id.rs @@ -26,6 +26,12 @@ impl Into for DappId { } } +impl From for DappId { + fn from(s: String) -> Self { + DappId(s) + } +} + #[cfg(test)] mod tests { diff --git a/rpc/src/v1/types/mod.rs.in b/rpc/src/v1/types/mod.rs.in index c5509bd57..76b45c136 100644 --- a/rpc/src/v1/types/mod.rs.in +++ b/rpc/src/v1/types/mod.rs.in @@ -39,7 +39,10 @@ pub use self::bytes::Bytes; pub use self::block::{RichBlock, Block, BlockTransactions}; pub use self::block_number::BlockNumber; pub use self::call_request::CallRequest; -pub use self::confirmations::{ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, TransactionModification, SignRequest, DecryptRequest, Either}; +pub use self::confirmations::{ + ConfirmationPayload, ConfirmationRequest, ConfirmationResponse, ConfirmationResponseWithToken, + TransactionModification, SignRequest, DecryptRequest, Either +}; pub use self::dapp_id::DappId; pub use self::filter::{Filter, FilterChanges}; pub use self::hash::{H64, H160, H256, H512, H520, H2048}; diff --git a/rpc/src/v1/types/transaction_request.rs b/rpc/src/v1/types/transaction_request.rs index 258346d56..c5613c5b2 100644 --- a/rpc/src/v1/types/transaction_request.rs +++ b/rpc/src/v1/types/transaction_request.rs @@ -18,6 +18,9 @@ use v1::types::{Bytes, H160, U256}; use v1::helpers; +use util::log::Colour; + +use std::fmt; /// Transaction request coming from RPC #[derive(Debug, Clone, Default, Eq, PartialEq, Hash, Serialize, Deserialize)] @@ -40,6 +43,43 @@ pub struct TransactionRequest { pub nonce: Option, } +pub fn format_ether(i: U256) -> String { + let mut string = format!("{}", i); + let idx = string.len() as isize - 18; + if idx <= 0 { + let mut prefix = String::from("0."); + for _ in 0..idx.abs() { + prefix.push('0'); + } + string = prefix + &string; + } else { + string.insert(idx as usize, '.'); + } + String::from(string.trim_right_matches('0') + .trim_right_matches('.')) +} + +impl fmt::Display for TransactionRequest { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let eth = self.value.unwrap_or(U256::from(0)); + match self.to { + Some(ref to) => write!( + 
f, + "{} ETH from {} to 0x{:?}", + Colour::White.bold().paint(format_ether(eth)), + Colour::White.bold().paint(format!("0x{:?}", self.from)), + to + ), + None => write!( + f, + "{} ETH from {} for contract creation", + Colour::White.bold().paint(format_ether(eth)), + Colour::White.bold().paint(format!("0x{:?}", self.from)), + ), + } + } +} + impl From for TransactionRequest { fn from(r: helpers::TransactionRequest) -> Self { TransactionRequest { @@ -191,5 +231,15 @@ mod tests { assert!(deserialized.is_err(), "Should be error because to is empty"); } -} + #[test] + fn test_format_ether() { + assert_eq!(&format_ether(U256::from(1000000000000000000u64)), "1"); + assert_eq!(&format_ether(U256::from(500000000000000000u64)), "0.5"); + assert_eq!(&format_ether(U256::from(50000000000000000u64)), "0.05"); + assert_eq!(&format_ether(U256::from(5000000000000000u64)), "0.005"); + assert_eq!(&format_ether(U256::from(2000000000000000000u64)), "2"); + assert_eq!(&format_ether(U256::from(2500000000000000000u64)), "2.5"); + assert_eq!(&format_ether(U256::from(10000000000000000000u64)), "10"); + } +} diff --git a/rpc/src/v1/types/uint.rs b/rpc/src/v1/types/uint.rs index 2dc9093c1..afa5c31dd 100644 --- a/rpc/src/v1/types/uint.rs +++ b/rpc/src/v1/types/uint.rs @@ -15,6 +15,7 @@ // along with Parity. If not, see . use std::str::FromStr; +use std::fmt; use serde; use util::{U256 as EthU256, U128 as EthU128, Uint}; @@ -46,6 +47,18 @@ macro_rules! impl_uint { } } + impl fmt::Display for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{}", self.0) + } + } + + impl fmt::LowerHex for $name { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:#x}", self.0) + } + } + impl serde::Serialize for $name { fn serialize(&self, serializer: &mut S) -> Result<(), S::Error> where S: serde::Serializer { serializer.serialize_str(&format!("0x{}", self.0.to_hex())) diff --git a/rpc_cli/Cargo.toml b/rpc_cli/Cargo.toml new file mode 100644 index 000000000..8169d3b71 --- /dev/null +++ b/rpc_cli/Cargo.toml @@ -0,0 +1,15 @@ +[package] +authors = ["Ethcore "] +description = "Parity Cli Tool" +homepage = "http://ethcore.io" +license = "GPL-3.0" +name = "rpc-cli" +version = "1.4.0" + +[dependencies] +futures = "0.1" +rpassword = "0.3.0" +ethcore-bigint = { path = "../util/bigint" } +ethcore-rpc = { path = "../rpc" } +parity-rpc-client = { path = "../rpc_client" } +ethcore-util = { path = "../util" } diff --git a/rpc_cli/src/lib.rs b/rpc_cli/src/lib.rs new file mode 100644 index 000000000..0ec20da61 --- /dev/null +++ b/rpc_cli/src/lib.rs @@ -0,0 +1,182 @@ +extern crate futures; + +extern crate ethcore_util as util; +extern crate ethcore_rpc as rpc; +extern crate ethcore_bigint as bigint; +extern crate rpassword; + +extern crate parity_rpc_client as client; + +use rpc::v1::types::{U256, ConfirmationRequest}; +use client::signer_client::SignerRpc; +use std::io::{Write, BufRead, BufReader, stdout, stdin}; +use std::path::PathBuf; +use std::fs::File; + +use futures::Future; + +fn sign_interactive( + signer: &mut SignerRpc, + password: &str, + request: ConfirmationRequest +) { + print!("\n{}\nSign this transaction? 
(y)es/(N)o/(r)eject: ", request); + let _ = stdout().flush(); + match BufReader::new(stdin()).lines().next() { + Some(Ok(line)) => { + match line.to_lowercase().chars().nth(0) { + Some('y') => { + match sign_transaction(signer, request.id, password) { + Ok(s) | Err(s) => println!("{}", s), + } + } + Some('r') => { + match reject_transaction(signer, request.id) { + Ok(s) | Err(s) => println!("{}", s), + } + } + _ => () + } + } + _ => println!("Could not read from stdin") + } +} + +fn sign_transactions( + signer: &mut SignerRpc, + password: String +) -> Result { + try!(signer.requests_to_confirm().map(|reqs| { + match reqs { + Ok(ref reqs) if reqs.is_empty() => { + Ok("No transactions in signing queue".to_owned()) + } + Ok(reqs) => { + for r in reqs { + sign_interactive(signer, &password, r) + } + Ok("".to_owned()) + } + Err(err) => { + Err(format!("error: {:?}", err)) + } + } + }).map_err(|err| { + format!("{:?}", err) + }).wait()) +} + +fn list_transactions(signer: &mut SignerRpc) -> Result { + try!(signer.requests_to_confirm().map(|reqs| { + match reqs { + Ok(ref reqs) if reqs.is_empty() => { + Ok("No transactions in signing queue".to_owned()) + } + Ok(ref reqs) => { + Ok(format!("Transaction queue:\n{}", reqs + .iter() + .map(|r| format!("{}", r)) + .collect::>() + .join("\n"))) + } + Err(err) => { + Err(format!("error: {:?}", err)) + } + } + }).map_err(|err| { + format!("{:?}", err) + }).wait()) +} + +fn sign_transaction( + signer: &mut SignerRpc, id: U256, password: &str +) -> Result { + try!(signer.confirm_request(id, None, None, password).map(|res| { + match res { + Ok(u) => Ok(format!("Signed transaction id: {:#x}", u)), + Err(e) => Err(format!("{:?}", e)), + } + }).map_err(|err| { + format!("{:?}", err) + }).wait()) +} + +fn reject_transaction( + signer: &mut SignerRpc, id: U256) -> Result +{ + try!(signer.reject_request(id).map(|res| { + match res { + Ok(true) => Ok(format!("Rejected transaction id {:#x}", id)), + Ok(false) => Err(format!("No such request")), + Err(e) => Err(format!("{:?}", e)), + } + }).map_err(|err| { + format!("{:?}", err) + }).wait()) +} + +// cmds + +pub fn cmd_signer_list( + signerport: u16, authfile: PathBuf +) -> Result { + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = try!(SignerRpc::new(addr, &authfile).map_err(|err| { + format!("{:?}", err) + })); + list_transactions(&mut signer) +} + +pub fn cmd_signer_reject( + id: Option, signerport: u16, authfile: PathBuf +) -> Result { + let id = try!(id.ok_or(format!("id required for signer reject"))); + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = try!(SignerRpc::new(addr, &authfile).map_err(|err| { + format!("{:?}", err) + })); + reject_transaction(&mut signer, U256::from(id)) +} + +pub fn cmd_signer_sign( + id: Option, + pwfile: Option, + signerport: u16, + authfile: PathBuf +) -> Result { + let password; + match pwfile { + Some(pwfile) => { + match File::open(pwfile) { + Ok(fd) => { + match BufReader::new(fd).lines().next() { + Some(Ok(line)) => password = line, + _ => return Err(format!("No password in file")) + } + }, + Err(e) => + return Err(format!("Could not open password file: {}", e)) + } + } + None => { + password = match rpassword::prompt_password_stdout("Password: ") { + Ok(p) => p, + Err(e) => return Err(format!("{}", e)), + } + } + } + + let addr = &format!("ws://127.0.0.1:{}", signerport); + let mut signer = try!(SignerRpc::new(addr, &authfile).map_err(|err| { + format!("{:?}", err) + })); + + match id { + Some(id) => { + sign_transaction(&mut 
signer, U256::from(id), &password) + }, + None => { + sign_transactions(&mut signer, password) + } + } +} diff --git a/rpc_client/Cargo.toml b/rpc_client/Cargo.toml new file mode 100644 index 000000000..9b93a1a5b --- /dev/null +++ b/rpc_client/Cargo.toml @@ -0,0 +1,23 @@ +[package] +authors = ["Ethcore "] +description = "Parity Rpc Client" +homepage = "http://ethcore.io" +license = "GPL-3.0" +name = "parity-rpc-client" +version = "1.4.0" + +[dependencies] +futures = "0.1" +jsonrpc-core = "3.0.2" +lazy_static = "0.2.1" +log = "0.3.6" +matches = "0.1.2" +rand = "0.3.14" +serde = "0.8" +serde_json = "0.8" +tempdir = "0.3.5" +url = "1.2.0" +ws = { git = "https://github.com/ethcore/ws-rs.git", branch = "mio-upstream-stable" } +ethcore-rpc = { path = "../rpc" } +ethcore-signer = { path = "../signer" } +ethcore-util = { path = "../util" } diff --git a/rpc_client/src/client.rs b/rpc_client/src/client.rs new file mode 100644 index 000000000..9ee9f5c9d --- /dev/null +++ b/rpc_client/src/client.rs @@ -0,0 +1,322 @@ +extern crate jsonrpc_core; + +use std::fmt::{Debug, Formatter, Error as FmtError}; +use std::io::{BufReader, BufRead}; +use std::sync::Arc; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::collections::BTreeMap; +use std::thread; +use std::time; + +use std::path::PathBuf; +use util::{Hashable, Mutex}; +use url::Url; +use std::fs::File; + +use ws::{ + self, + Request, + Handler, + Sender, + Handshake, + Error as WsError, + ErrorKind as WsErrorKind, + Message, + Result as WsResult, +}; + +use serde::Deserialize; +use serde_json::{ + self as json, + Value as JsonValue, + Error as JsonError, +}; + +use futures::{BoxFuture, Canceled, Complete, Future, oneshot, done}; + +use jsonrpc_core::{Id, Version, Params, Error as JsonRpcError}; +use jsonrpc_core::request::MethodCall; +use jsonrpc_core::response::{SyncOutput, Success, Failure}; + +/// The actual websocket connection handler, passed into the +/// event loop of ws-rs +struct RpcHandler { + pending: Pending, + // Option is used here as temporary storage until connection + // is setup and the values are moved into the new `Rpc` + complete: Option>>, + auth_code: String, + out: Option, +} + +impl RpcHandler { + fn new( + out: Sender, + auth_code: String, + complete: Complete> + ) -> Self { + RpcHandler { + out: Some(out), + auth_code: auth_code, + pending: Pending::new(), + complete: Some(complete), + } + } +} + +impl Handler for RpcHandler { + fn build_request(&mut self, url: &Url) -> WsResult { + match Request::from_url(url) { + Ok(mut r) => { + let timestamp = try!(time::UNIX_EPOCH.elapsed().map_err(|err| { + WsError::new(WsErrorKind::Internal, format!("{}", err)) + })); + let secs = timestamp.as_secs(); + let hashed = format!("{}:{}", self.auth_code, secs).sha3(); + let proto = format!("{:?}_{}", hashed, secs); + r.add_protocol(&proto); + Ok(r) + }, + Err(e) => + Err(WsError::new(WsErrorKind::Internal, format!("{}", e))), + } + } + fn on_error(&mut self, err: WsError) { + match self.complete.take() { + Some(c) => c.complete(Err(RpcError::WsError(err))), + None => println!("unexpected error: {}", err), + } + } + fn on_open(&mut self, _: Handshake) -> WsResult<()> { + match (self.complete.take(), self.out.take()) { + (Some(c), Some(out)) => { + c.complete(Ok(Rpc { + out: out, + counter: AtomicUsize::new(0), + pending: self.pending.clone(), + })); + Ok(()) + }, + _ => { + let msg = format!("on_open called twice"); + Err(WsError::new(WsErrorKind::Internal, msg)) + } + } + } + fn on_message(&mut self, msg: Message) -> WsResult<()> { + 
let ret: Result<JsonValue, JsonRpcError>; + let response_id; + let string = &msg.to_string(); + match json::from_str::<SyncOutput>(&string) { + Ok(SyncOutput::Success(Success { result, id: Id::Num(id), .. })) => + { + ret = Ok(result); + response_id = id as usize; + } + Ok(SyncOutput::Failure(Failure { error, id: Id::Num(id), .. })) => { + ret = Err(error); + response_id = id as usize; + } + Err(e) => { + warn!( + target: "rpc-client", + "received invalid message: {}\n {:?}", + string, + e + ); + return Ok(()) + }, + _ => { + warn!( + target: "rpc-client", + "received invalid message: {}", + string + ); + return Ok(()) + } + } + + match self.pending.remove(response_id) { + Some(c) => c.complete(ret.map_err(|err| { + RpcError::JsonRpc(err) + })), + None => warn!( + target: "rpc-client", + "warning: unexpected id: {}", + response_id + ), + } + Ok(()) + } +} + +/// Keeping track of issued requests to be matched up with responses +#[derive(Clone)] +struct Pending( + Arc<Mutex<BTreeMap<usize, Complete<Result<JsonValue, RpcError>>>>> +); + +impl Pending { + fn new() -> Self { + Pending(Arc::new(Mutex::new(BTreeMap::new()))) + } + fn insert(&mut self, k: usize, v: Complete<Result<JsonValue, RpcError>>) { + self.0.lock().insert(k, v); + } + fn remove( + &mut self, + k: usize + ) -> Option<Complete<Result<JsonValue, RpcError>>> { + self.0.lock().remove(&k) + } +} + +fn get_authcode(path: &PathBuf) -> Result<String, RpcError> { + if let Ok(fd) = File::open(path) { + if let Some(Ok(line)) = BufReader::new(fd).lines().next() { + let mut parts = line.split(';'); + let token = parts.next(); + + if let Some(code) = token { + return Ok(code.into()); + } + } + } + Err(RpcError::NoAuthCode) +} + +/// The handle to the connection +pub struct Rpc { + out: Sender, + counter: AtomicUsize, + pending: Pending, +} + +impl Rpc { + /// Blocking, returns a new initialized connection or RpcError + pub fn new(url: &str, authpath: &PathBuf) -> Result<Rpc, RpcError> { + let rpc = try!(Self::connect(url, authpath).map(|rpc| rpc).wait()); + rpc + } + /// Non-blocking, returns a future + pub fn connect( + url: &str, authpath: &PathBuf + ) -> BoxFuture<Result<Rpc, RpcError>, Canceled> { + let (c, p) = oneshot::<Result<Rpc, RpcError>>(); + match get_authcode(authpath) { + Err(e) => return done(Ok(Err(e))).boxed(), + Ok(code) => { + let url = String::from(url); + // The ws::connect takes a FnMut closure, which means c cannot + // be moved into it, since it's consumed on complete. + // Therefore we wrap it in an option and pick it out once. + let mut once = Some(c); + thread::spawn(move || { + let conn = ws::connect(url, |out| { + // this will panic if the closure is called twice, + // which it should never be. + let c = once.take() + .expect("connection closure called only once"); + RpcHandler::new(out, code.clone(), c) + }); + match conn { + Err(err) => { + // since ws::connect is only called once, it cannot + // both fail and succeed.
+ let c = once.take() + .expect("connection closure called only once"); + c.complete(Err(RpcError::WsError(err))); + }, + // c will complete on the `on_open` event in the Handler + _ => () + } + }); + p.boxed() + } + } + } + /// Non-blocking, returns a future of the request response + pub fn request( + &mut self, method: &'static str, params: Vec + ) -> BoxFuture, Canceled> + where T: Deserialize + Send + Sized { + + let (c, p) = oneshot::>(); + + let id = self.counter.fetch_add(1, Ordering::Relaxed); + self.pending.insert(id, c); + + let request = MethodCall { + jsonrpc: Version::V2, + method: method.to_owned(), + params: Some(Params::Array(params)), + id: Id::Num(id as u64), + }; + + let serialized = json::to_string(&request) + .expect("request is serializable"); + let _ = self.out.send(serialized); + + p.map(|result| { + match result { + Ok(json) => { + let t: T = try!(json::from_value(json)); + Ok(t) + }, + Err(err) => Err(err) + } + }).boxed() + } +} + +pub enum RpcError { + WrongVersion(String), + ParseError(JsonError), + MalformedResponse(String), + JsonRpc(JsonRpcError), + WsError(WsError), + Canceled(Canceled), + UnexpectedId, + NoAuthCode, +} + +impl Debug for RpcError { + fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { + match *self { + RpcError::WrongVersion(ref s) + => write!(f, "Expected version 2.0, got {}", s), + RpcError::ParseError(ref err) + => write!(f, "ParseError: {}", err), + RpcError::MalformedResponse(ref s) + => write!(f, "Malformed response: {}", s), + RpcError::JsonRpc(ref json) + => write!(f, "JsonRpc error: {:?}", json), + RpcError::WsError(ref s) + => write!(f, "Websocket error: {}", s), + RpcError::Canceled(ref s) + => write!(f, "Futures error: {:?}", s), + RpcError::UnexpectedId + => write!(f, "Unexpected response id"), + RpcError::NoAuthCode + => write!(f, "No authcodes available"), + } + } +} + +impl From for RpcError { + fn from(err: JsonError) -> RpcError { + RpcError::ParseError(err) + } +} + +impl From for RpcError { + fn from(err: WsError) -> RpcError { + RpcError::WsError(err) + } +} + +impl From for RpcError { + fn from(err: Canceled) -> RpcError { + RpcError::Canceled(err) + } +} diff --git a/rpc_client/src/lib.rs b/rpc_client/src/lib.rs new file mode 100644 index 000000000..3164480e5 --- /dev/null +++ b/rpc_client/src/lib.rs @@ -0,0 +1,73 @@ +pub mod client; +pub mod signer_client; + +extern crate ws; +extern crate ethcore_signer; +extern crate url; +extern crate futures; +extern crate ethcore_util as util; +extern crate ethcore_rpc as rpc; +extern crate serde; +extern crate serde_json; +extern crate rand; +extern crate tempdir; +extern crate jsonrpc_core; + +#[macro_use] +extern crate lazy_static; +#[macro_use] +extern crate matches; + +#[macro_use] +extern crate log; + +#[cfg(test)] +mod tests { + use futures::Future; + use std::path::PathBuf; + use client::{Rpc, RpcError}; + use ethcore_signer; + + #[test] + fn test_connection_refused() { + let (_srv, port, mut authcodes) = ethcore_signer::tests::serve(); + + let _ = authcodes.generate_new(); + authcodes.to_file(&authcodes.path).unwrap(); + + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port - 1), + authcodes.path.as_path()); + + let _ = connect.map(|conn| { + assert!(matches!(&conn, &Err(RpcError::WsError(_)))); + }).wait(); + } + + #[test] + fn test_authcode_fail() { + let (_srv, port, _) = ethcore_signer::tests::serve(); + let path = PathBuf::from("nonexist"); + + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), &path); + + let _ = connect.map(|conn| { 
+ assert!(matches!(&conn, &Err(RpcError::NoAuthCode))); + }).wait(); + } + + #[test] + fn test_authcode_correct() { + let (_srv, port, mut authcodes) = ethcore_signer::tests::serve(); + + let _ = authcodes.generate_new(); + authcodes.to_file(&authcodes.path).unwrap(); + + let connect = Rpc::connect(&format!("ws://127.0.0.1:{}", port), + authcodes.path.as_path()); + + let _ = connect.map(|conn| { + assert!(conn.is_ok()) + }).wait(); + } + +} diff --git a/rpc_client/src/signer_client.rs b/rpc_client/src/signer_client.rs new file mode 100644 index 000000000..8a2eccd5d --- /dev/null +++ b/rpc_client/src/signer_client.rs @@ -0,0 +1,43 @@ +use client::{Rpc, RpcError}; +use rpc::v1::types::{ConfirmationRequest, + TransactionModification, + U256}; +use serde_json::{Value as JsonValue, to_value}; +use std::path::PathBuf; +use futures::{BoxFuture, Canceled}; + +pub struct SignerRpc { + rpc: Rpc, +} + +impl SignerRpc { + pub fn new(url: &str, authfile: &PathBuf) -> Result { + Ok(SignerRpc { rpc: try!(Rpc::new(&url, authfile)) }) + } + pub fn requests_to_confirm(&mut self) -> + BoxFuture, RpcError>, Canceled> + { + self.rpc.request("signer_requestsToConfirm", vec![]) + } + pub fn confirm_request( + &mut self, + id: U256, + new_gas: Option, + new_gas_price: Option, + pwd: &str + ) -> BoxFuture, Canceled> + { + self.rpc.request("signer_confirmRequest", vec![ + to_value(&format!("{:#x}", id)), + to_value(&TransactionModification { gas_price: new_gas_price, gas: new_gas }), + to_value(&pwd), + ]) + } + pub fn reject_request(&mut self, id: U256) -> + BoxFuture, Canceled> + { + self.rpc.request("signer_rejectRequest", vec![ + JsonValue::String(format!("{:#x}", id)) + ]) + } +} diff --git a/signer/src/lib.rs b/signer/src/lib.rs index 3d73b0668..196fb4fae 100644 --- a/signer/src/lib.rs +++ b/signer/src/lib.rs @@ -52,13 +52,13 @@ extern crate ethcore_io as io; extern crate ethcore_rpc as rpc; extern crate jsonrpc_core; extern crate ws; -#[cfg(test)] + extern crate ethcore_devtools as devtools; mod authcode_store; mod ws_server; -#[cfg(test)] -mod tests; +/// Exported tests for use in signer RPC client testing +pub mod tests; pub use authcode_store::*; pub use ws_server::*; diff --git a/signer/src/tests/mod.rs b/signer/src/tests/mod.rs index 043b0f693..ab46a0e6f 100644 --- a/signer/src/tests/mod.rs +++ b/signer/src/tests/mod.rs @@ -15,20 +15,23 @@ // along with Parity. If not, see . 
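+//! Mock signer server and test helpers. Kept public (rather than #[cfg(test)]) so the external RPC client crate can reuse serve() and GuardedAuthCodes in its own tests.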
use std::ops::{Deref, DerefMut}; -use std::time; use std::sync::Arc; -use devtools::{http_client, RandomTempPath}; + +use devtools::http_client; +use devtools::RandomTempPath; + use rpc::ConfirmationsQueue; -use util::Hashable; use rand; use ServerBuilder; use Server; use AuthCodes; +/// Struct representing authcodes pub struct GuardedAuthCodes { authcodes: AuthCodes, - path: RandomTempPath, + /// The path to the mock authcodes + pub path: RandomTempPath, } impl Deref for GuardedAuthCodes { type Target = AuthCodes; @@ -42,6 +45,7 @@ impl DerefMut for GuardedAuthCodes { } } +/// Setup a mock signer for testsp pub fn serve() -> (Server, usize, GuardedAuthCodes) { let mut path = RandomTempPath::new(); path.panic_on_drop_failure = false; @@ -56,194 +60,202 @@ pub fn serve() -> (Server, usize, GuardedAuthCodes) { }) } +/// Test a single request to running server pub fn request(server: Server, request: &str) -> http_client::Response { http_client::request(server.addr(), request) } -#[test] -fn should_reject_invalid_host() { - // given - let server = serve().0; +#[cfg(test)] +mod testing { + use std::time; + use util::Hashable; + use devtools::http_client; + use super::{serve, request}; - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: test:8180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); + #[test] + fn should_reject_invalid_host() { + // given + let server = serve().0; - // then - assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); - assert!(response.body.contains("URL Blocked")); - http_client::assert_security_headers_present(&response.headers, None); + // when + let response = request(server, + "\ + GET / HTTP/1.1\r\n\ + Host: test:8180\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); + assert!(response.body.contains("URL Blocked")); + http_client::assert_security_headers_present(&response.headers, None); + } + + #[test] + fn should_allow_home_parity_host() { + // given + let server = serve().0; + + // when + let response = request(server, + "\ + GET http://home.parity/ HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + http_client::assert_security_headers_present(&response.headers, None); + } + + #[test] + fn should_serve_styles_even_on_disallowed_domain() { + // given + let server = serve().0; + + // when + let response = request(server, + "\ + GET /styles.css HTTP/1.1\r\n\ + Host: test:8180\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + http_client::assert_security_headers_present(&response.headers, None); + } + + #[test] + fn should_return_200_ok_for_connect_requests() { + // given + let server = serve().0; + + // when + let response = request(server, + "\ + CONNECT home.parity:8080 HTTP/1.1\r\n\ + Host: home.parity\r\n\ + Connection: close\r\n\ + \r\n\ + {} + " + ); + + // then + assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); + } + + #[test] + fn should_block_if_authorization_is_incorrect() { + // given + let (server, port, _) = serve(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Upgrade\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: wrong\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", port) + ); + + // then + assert_eq!(response.status, 
"HTTP/1.1 403 FORBIDDEN".to_owned()); + http_client::assert_security_headers_present(&response.headers, None); + } + + #[test] + fn should_allow_if_authorization_is_correct() { + // given + let (server, port, mut authcodes) = serve(); + let code = authcodes.generate_new().unwrap().replace("-", ""); + authcodes.to_file(&authcodes.path).unwrap(); + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + + // when + let response = request(server, + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol: {:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + // then + assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned()); + } + + #[test] + fn should_allow_initial_connection_but_only_once() { + // given + let (server, port, authcodes) = serve(); + let code = "initial"; + let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); + assert!(authcodes.is_empty()); + + // when + let response1 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + let response2 = http_client::request(server.addr(), + &format!("\ + GET / HTTP/1.1\r\n\ + Host: 127.0.0.1:{}\r\n\ + Connection: Close\r\n\ + Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ + Sec-WebSocket-Protocol:{:?}_{}\r\n\ + Sec-WebSocket-Version: 13\r\n\ + \r\n\ + {{}} + ", + port, + format!("{}:{}", code, timestamp).sha3(), + timestamp, + ) + ); + + + // then + assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned()); + assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); + http_client::assert_security_headers_present(&response2.headers, None); + } } - -#[test] -fn should_allow_home_parity_host() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - GET http://home.parity/ HTTP/1.1\r\n\ - Host: home.parity\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); -} - -#[test] -fn should_serve_styles_even_on_disallowed_domain() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - GET /styles.css HTTP/1.1\r\n\ - Host: test:8180\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); -} - -#[test] -fn should_return_200_ok_for_connect_requests() { - // given - let server = serve().0; - - // when - let response = request(server, - "\ - CONNECT home.parity:8080 HTTP/1.1\r\n\ - Host: home.parity\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); -} - -#[test] -fn should_block_if_authorization_is_incorrect() { - // given - let (server, port, _) = serve(); - - // when - let response = request(server, - &format!("\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:{}\r\n\ - Connection: Upgrade\r\n\ - Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ - Sec-WebSocket-Protocol: wrong\r\n\ - 
Sec-WebSocket-Version: 13\r\n\ - \r\n\ - {{}} - ", port) - ); - - // then - assert_eq!(response.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); - http_client::assert_security_headers_present(&response.headers, None); -} - -#[test] -fn should_allow_if_authorization_is_correct() { - // given - let (server, port, mut authcodes) = serve(); - let code = authcodes.generate_new().unwrap().replace("-", ""); - authcodes.to_file(&authcodes.path).unwrap(); - let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - - // when - let response = request(server, - &format!("\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:{}\r\n\ - Connection: Close\r\n\ - Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ - Sec-WebSocket-Protocol: {:?}_{}\r\n\ - Sec-WebSocket-Version: 13\r\n\ - \r\n\ - {{}} - ", - port, - format!("{}:{}", code, timestamp).sha3(), - timestamp, - ) - ); - - // then - assert_eq!(response.status, "HTTP/1.1 101 Switching Protocols".to_owned()); -} - -#[test] -fn should_allow_initial_connection_but_only_once() { - // given - let (server, port, authcodes) = serve(); - let code = "initial"; - let timestamp = time::UNIX_EPOCH.elapsed().unwrap().as_secs(); - assert!(authcodes.is_empty()); - - // when - let response1 = http_client::request(server.addr(), - &format!("\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:{}\r\n\ - Connection: Close\r\n\ - Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ - Sec-WebSocket-Protocol:{:?}_{}\r\n\ - Sec-WebSocket-Version: 13\r\n\ - \r\n\ - {{}} - ", - port, - format!("{}:{}", code, timestamp).sha3(), - timestamp, - ) - ); - let response2 = http_client::request(server.addr(), - &format!("\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:{}\r\n\ - Connection: Close\r\n\ - Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\ - Sec-WebSocket-Protocol:{:?}_{}\r\n\ - Sec-WebSocket-Version: 13\r\n\ - \r\n\ - {{}} - ", - port, - format!("{}:{}", code, timestamp).sha3(), - timestamp, - ) - ); - - - // then - assert_eq!(response1.status, "HTTP/1.1 101 Switching Protocols".to_owned()); - assert_eq!(response2.status, "HTTP/1.1 403 FORBIDDEN".to_owned()); - http_client::assert_security_headers_present(&response2.headers, None); -} - diff --git a/sync/src/api.rs b/sync/src/api.rs index 8f03fbac8..45a9c1eb7 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -35,7 +35,12 @@ use parking_lot::RwLock; use chain::{ETH_PACKET_COUNT, SNAPSHOT_SYNC_PACKET_COUNT}; use light::net::{LightProtocol, Params as LightParams, Capabilities, Handler as LightHandler, EventContext}; +/// Parity sync protocol pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par"; +/// Ethereum sync protocol +pub const ETH_PROTOCOL: ProtocolId = *b"eth"; +/// Ethereum light protocol +pub const LES_PROTOCOL: ProtocolId = *b"les"; /// Sync configuration #[derive(Debug, Clone, Copy)] @@ -64,8 +69,8 @@ impl Default for SyncConfig { max_download_ahead_blocks: 20000, download_old_blocks: true, network_id: 1, - subprotocol_name: *b"eth", - light_subprotocol_name: *b"les", + subprotocol_name: ETH_PROTOCOL, + light_subprotocol_name: LES_PROTOCOL, fork_block: None, warp_sync: false, serve_light: false, @@ -143,7 +148,7 @@ pub struct EthSync { /// Network service network: NetworkService, /// Main (eth/par) protocol handler - sync_handler: Arc, + eth_handler: Arc, /// Light (les) protocol handler light_proto: Option>, /// The main subprotocol name @@ -182,7 +187,7 @@ impl EthSync { let sync = Arc::new(EthSync { network: service, - sync_handler: Arc::new(SyncProtocolHandler { + eth_handler: Arc::new(SyncProtocolHandler { sync: 
RwLock::new(chain_sync), chain: params.chain, snapshot_service: params.snapshot_service, @@ -201,15 +206,15 @@ impl EthSync { impl SyncProvider for EthSync { /// Get sync status fn status(&self) -> SyncStatus { - self.sync_handler.sync.write().status() + self.eth_handler.sync.write().status() } /// Get sync peers fn peers(&self) -> Vec { // TODO: [rob] LES peers/peer info self.network.with_context_eval(self.subprotocol_name, |context| { - let sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay); - self.sync_handler.sync.write().peers(&sync_io) + let sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); + self.eth_handler.sync.write().peers(&sync_io) }).unwrap_or(Vec::new()) } @@ -218,7 +223,7 @@ impl SyncProvider for EthSync { } fn transactions_stats(&self) -> BTreeMap { - let sync = self.sync_handler.sync.read(); + let sync = self.eth_handler.sync.read(); sync.transactions_stats() .iter() .map(|(hash, stats)| (*hash, stats.into())) @@ -277,19 +282,21 @@ impl ChainNotify for EthSync { enacted: Vec, retracted: Vec, sealed: Vec, + proposed: Vec, _duration: u64) { use light::net::Announcement; self.network.with_context(self.subprotocol_name, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay); - self.sync_handler.sync.write().chain_new_blocks( + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); + self.eth_handler.sync.write().chain_new_blocks( &mut sync_io, &imported, &invalid, &enacted, &retracted, - &sealed); + &sealed, + &proposed); }); self.network.with_context(self.light_subprotocol_name, |context| { @@ -297,8 +304,8 @@ impl ChainNotify for EthSync { Some(lp) => lp, None => return, }; - - let chain_info = self.sync_handler.chain.chain_info(); + + let chain_info = self.eth_handler.chain.chain_info(); light_proto.make_announcement(context, Announcement { head_hash: chain_info.best_block_hash, head_num: chain_info.best_block_number, @@ -318,10 +325,10 @@ impl ChainNotify for EthSync { Err(err) => warn!("Error starting network: {}", err), _ => {}, } - self.network.register_protocol(self.sync_handler.clone(), self.subprotocol_name, ETH_PACKET_COUNT, &[62u8, 63u8]) + self.network.register_protocol(self.eth_handler.clone(), self.subprotocol_name, ETH_PACKET_COUNT, &[62u8, 63u8]) .unwrap_or_else(|e| warn!("Error registering ethereum protocol: {:?}", e)); // register the warp sync subprotocol - self.network.register_protocol(self.sync_handler.clone(), WARP_SYNC_PROTOCOL_ID, SNAPSHOT_SYNC_PACKET_COUNT, &[1u8]) + self.network.register_protocol(self.eth_handler.clone(), WARP_SYNC_PROTOCOL_ID, SNAPSHOT_SYNC_PACKET_COUNT, &[1u8, 2u8]) .unwrap_or_else(|e| warn!("Error registering snapshot sync protocol: {:?}", e)); // register the light protocol. 
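// For orientation, the consensus gossip path introduced below is, roughly:
// engine -> ChainNotify::broadcast on EthSync -> ChainSync::propagate_consensus_packet
// over the warp ("par") subprotocol to a random subset of peers that negotiated
// protocol version 2 -> CONSENSUS_DATA_PACKET (0x15) -> on_consensus_packet ->
// queue_consensus_message on the receiving client -> client io handler -> consensus engine.
// A comment-only sketch assembled from the hunks in this change (not a literal excerpt):
//
//     // sending side, inside EthSync::broadcast
//     self.eth_handler.sync.write().propagate_consensus_packet(&mut sync_io, message.clone());
//     // receiving side, dispatched for CONSENSUS_DATA_PACKET
//     io.chain().queue_consensus_message(r.as_raw().to_vec());
//     // the client's io handler finally hands the message to the engine, e.g.
//     // ClientIoMessage::NewMessage(ref msg) => client.engine().handle_message(msg)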
@@ -332,12 +339,19 @@ impl ChainNotify for EthSync { } fn stop(&self) { - self.sync_handler.snapshot_service.abort_restore(); + self.eth_handler.snapshot_service.abort_restore(); self.network.stop().unwrap_or_else(|e| warn!("Error stopping network: {:?}", e)); } + fn broadcast(&self, message: Vec<u8>) { + self.network.with_context(WARP_SYNC_PROTOCOL_ID, |context| { + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); + self.eth_handler.sync.write().propagate_consensus_packet(&mut sync_io, message.clone()); + }); + } + fn transactions_received(&self, hashes: Vec<H256>, peer_id: PeerId) { - let mut sync = self.sync_handler.sync.write(); + let mut sync = self.eth_handler.sync.write(); sync.transactions_received(hashes, peer_id); } } @@ -399,8 +413,8 @@ impl ManageNetwork for EthSync { fn stop_network(&self) { self.network.with_context(self.subprotocol_name, |context| { - let mut sync_io = NetSyncIo::new(context, &*self.sync_handler.chain, &*self.sync_handler.snapshot_service, &self.sync_handler.overlay); - self.sync_handler.sync.write().abort(&mut sync_io); + let mut sync_io = NetSyncIo::new(context, &*self.eth_handler.chain, &*self.eth_handler.snapshot_service, &self.eth_handler.overlay); + self.eth_handler.sync.write().abort(&mut sync_io); }); if let Some(light_proto) = self.light_proto.as_ref() { diff --git a/sync/src/chain.rs b/sync/src/chain.rs index 350a42d0e..d3638f35d 100644 --- a/sync/src/chain.rs +++ b/sync/src/chain.rs @@ -113,6 +113,7 @@ type PacketDecodeError = DecoderError; const PROTOCOL_VERSION_63: u8 = 63; const PROTOCOL_VERSION_62: u8 = 62; const PROTOCOL_VERSION_1: u8 = 1; +const PROTOCOL_VERSION_2: u8 = 2; const MAX_BODIES_TO_SEND: usize = 256; const MAX_HEADERS_TO_SEND: usize = 512; const MAX_NODE_DATA_TO_SEND: usize = 1024; @@ -149,8 +150,9 @@ const GET_SNAPSHOT_MANIFEST_PACKET: u8 = 0x11; const SNAPSHOT_MANIFEST_PACKET: u8 = 0x12; const GET_SNAPSHOT_DATA_PACKET: u8 = 0x13; const SNAPSHOT_DATA_PACKET: u8 = 0x14; +const CONSENSUS_DATA_PACKET: u8 = 0x15; -pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x15; +pub const SNAPSHOT_SYNC_PACKET_COUNT: u8 = 0x16; const MAX_SNAPSHOT_CHUNKS_DOWNLOAD_AHEAD: usize = 3; @@ -615,13 +617,15 @@ impl ChainSync { trace!(target: "sync", "Peer {} network id mismatch (ours: {}, theirs: {})", peer_id, self.network_id, peer.network_id); return Ok(()); } - if (warp_protocol && peer.protocol_version != PROTOCOL_VERSION_1) || (!warp_protocol && peer.protocol_version != PROTOCOL_VERSION_63 && peer.protocol_version != PROTOCOL_VERSION_62) { + if (warp_protocol && peer.protocol_version != PROTOCOL_VERSION_1 && peer.protocol_version != PROTOCOL_VERSION_2) || (!warp_protocol && peer.protocol_version != PROTOCOL_VERSION_63 && peer.protocol_version != PROTOCOL_VERSION_62) { io.disable_peer(peer_id); trace!(target: "sync", "Peer {} unsupported eth protocol ({})", peer_id, peer.protocol_version); return Ok(()); } self.peers.insert(peer_id.clone(), peer); + // Don't activate peer immediately when searching for a common block. + // Let the current sync round complete first.
self.active_peers.insert(peer_id.clone()); debug!(target: "sync", "Connected {}:{}", peer_id, io.peer_info(peer_id)); if let Some((fork_block, _)) = self.fork_block { @@ -1422,8 +1426,9 @@ impl ChainSync { /// Send Status message fn send_status(&mut self, io: &mut SyncIo, peer: PeerId) -> Result<(), NetworkError> { - let warp_protocol = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer) != 0; - let protocol = if warp_protocol { PROTOCOL_VERSION_1 } else { io.eth_protocol_version(peer) }; + let warp_protocol_version = io.protocol_version(&WARP_SYNC_PROTOCOL_ID, peer); + let warp_protocol = warp_protocol_version != 0; + let protocol = if warp_protocol { warp_protocol_version } else { PROTOCOL_VERSION_63 }; trace!(target: "sync", "Sending status to {}, protocol version {}", peer, protocol); let mut packet = RlpStream::new_list(if warp_protocol { 7 } else { 5 }); let chain = io.chain().chain_info(); @@ -1672,7 +1677,7 @@ impl ChainSync { GET_SNAPSHOT_DATA_PACKET => ChainSync::return_rlp(io, &rlp, peer, ChainSync::return_snapshot_data, |e| format!("Error sending snapshot data: {:?}", e)), - + CONSENSUS_DATA_PACKET => ChainSync::on_consensus_packet(io, peer, &rlp), _ => { sync.write().on_packet(io, peer, packet_id, data); Ok(()) @@ -1799,44 +1804,51 @@ impl ChainSync { } } + /// creates rlp from block bytes and total difficulty + fn create_block_rlp(bytes: &Bytes, total_difficulty: U256) -> Bytes { + let mut rlp_stream = RlpStream::new_list(2); + rlp_stream.append_raw(bytes, 1); + rlp_stream.append(&total_difficulty); + rlp_stream.out() + } + /// creates latest block rlp for the given client fn create_latest_block_rlp(chain: &BlockChainClient) -> Bytes { - let mut rlp_stream = RlpStream::new_list(2); - rlp_stream.append_raw(&chain.block(BlockId::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"), 1); - rlp_stream.append(&chain.chain_info().total_difficulty); - rlp_stream.out() + ChainSync::create_block_rlp( + &chain.block(BlockId::Hash(chain.chain_info().best_block_hash)).expect("Best block always exists"), + chain.chain_info().total_difficulty + ) } - /// creates latest block rlp for the given client + /// creates given hash block rlp for the given client fn create_new_block_rlp(chain: &BlockChainClient, hash: &H256) -> Bytes { - let mut rlp_stream = RlpStream::new_list(2); - rlp_stream.append_raw(&chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed"), 1); - rlp_stream.append(&chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.")); - rlp_stream.out() + ChainSync::create_block_rlp( + &chain.block(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed"), + chain.block_total_difficulty(BlockId::Hash(hash.clone())).expect("Block has just been sealed; qed.") + ) } - /// returns peer ids that have less blocks than our chain - fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo, io: &SyncIo) -> Vec { + /// returns peer ids that have different blocks than our chain + fn get_lagging_peers(&mut self, chain_info: &BlockChainInfo) -> Vec { let latest_hash = chain_info.best_block_hash; - self.peers.iter_mut().filter_map(|(&id, ref mut peer_info)| - match io.chain().block_status(BlockId::Hash(peer_info.latest_hash.clone())) { - BlockStatus::InChain => { - if peer_info.latest_hash != latest_hash { - Some(id) - } else { - None - } - }, - _ => None + self + .peers + .iter_mut() + .filter_map(|(&id, ref mut peer_info)| { + trace!(target: "sync", "Checking peer our best {} their best {}", 
latest_hash, peer_info.latest_hash); + if peer_info.latest_hash != latest_hash { + Some(id) + } else { + None + } }) .collect::>() } - fn select_random_lagging_peers(&mut self, peers: &[PeerId]) -> Vec { - use rand::Rng; + fn select_random_peers(peers: &[PeerId]) -> Vec { // take sqrt(x) peers let mut peers = peers.to_vec(); - let mut count = (self.peers.len() as f64).powf(0.5).round() as usize; + let mut count = (peers.len() as f64).powf(0.5).round() as usize; count = min(count, MAX_PEERS_PROPAGATION); count = max(count, MIN_PEERS_PROPAGATION); ::rand::thread_rng().shuffle(&mut peers); @@ -1844,16 +1856,20 @@ impl ChainSync { peers } - /// propagates latest block to lagging peers - fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, sealed: &[H256], peers: &[PeerId]) -> usize { + fn get_consensus_peers(&self) -> Vec { + self.peers.iter().filter_map(|(id, p)| if p.protocol_version == PROTOCOL_VERSION_2 { Some(*id) } else { None }).collect() + } + + /// propagates latest block to a set of peers + fn propagate_blocks(&mut self, chain_info: &BlockChainInfo, io: &mut SyncIo, blocks: &[H256], peers: &[PeerId]) -> usize { trace!(target: "sync", "Sending NewBlocks to {:?}", peers); let mut sent = 0; for peer_id in peers { - if sealed.is_empty() { + if blocks.is_empty() { let rlp = ChainSync::create_latest_block_rlp(io.chain()); self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); } else { - for h in sealed { + for h in blocks { let rlp = ChainSync::create_new_block_rlp(io.chain(), h); self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp); } @@ -1971,10 +1987,10 @@ impl ChainSync { fn propagate_latest_blocks(&mut self, io: &mut SyncIo, sealed: &[H256]) { let chain_info = io.chain().chain_info(); if (((chain_info.best_block_number as i64) - (self.last_sent_block_number as i64)).abs() as BlockNumber) < MAX_PEER_LAG_PROPAGATION { - let mut peers = self.get_lagging_peers(&chain_info, io); + let mut peers = self.get_lagging_peers(&chain_info); if sealed.is_empty() { let hashes = self.propagate_new_hashes(&chain_info, io, &peers); - peers = self.select_random_lagging_peers(&peers); + peers = ChainSync::select_random_peers(&peers); let blocks = self.propagate_blocks(&chain_info, io, sealed, &peers); if blocks != 0 || hashes != 0 { trace!(target: "sync", "Sent latest {} blocks and {} hashes to peers.", blocks, hashes); @@ -1989,6 +2005,21 @@ impl ChainSync { self.last_sent_block_number = chain_info.best_block_number; } + /// Distribute valid proposed blocks to subset of current peers. + fn propagate_proposed_blocks(&mut self, io: &mut SyncIo, proposed: &[Bytes]) { + let peers = self.get_consensus_peers(); + trace!(target: "sync", "Sending proposed blocks to {:?}", peers); + for block in proposed { + let rlp = ChainSync::create_block_rlp( + block, + io.chain().chain_info().total_difficulty + ); + for peer_id in &peers { + self.send_packet(io, *peer_id, NEW_BLOCK_PACKET, rlp.clone()); + } + } + } + /// Maintain other peers. 
Send out any new blocks and transactions pub fn maintain_sync(&mut self, io: &mut SyncIo) { self.maybe_start_snapshot_sync(io); @@ -1996,15 +2027,32 @@ impl ChainSync { } /// called when block is imported to chain - propagates the blocks and updates transactions sent to peers - pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256], sealed: &[H256]) { + pub fn chain_new_blocks(&mut self, io: &mut SyncIo, _imported: &[H256], invalid: &[H256], _enacted: &[H256], _retracted: &[H256], sealed: &[H256], proposed: &[Bytes]) { if io.is_chain_queue_empty() { self.propagate_latest_blocks(io, sealed); + self.propagate_proposed_blocks(io, proposed); } if !invalid.is_empty() { trace!(target: "sync", "Bad blocks in the queue, restarting"); self.restart(io); } } + + /// Called when peer sends us new consensus packet + fn on_consensus_packet(io: &mut SyncIo, peer_id: PeerId, r: &UntrustedRlp) -> Result<(), PacketDecodeError> { + trace!(target: "sync", "Received consensus packet from {:?}", peer_id); + io.chain().queue_consensus_message(r.as_raw().to_vec()); + Ok(()) + } + + /// Broadcast consensus message to peers. + pub fn propagate_consensus_packet(&mut self, io: &mut SyncIo, packet: Bytes) { + let lucky_peers = ChainSync::select_random_peers(&self.get_consensus_peers()); + trace!(target: "sync", "Sending consensus packet to {:?}", lucky_peers); + for peer_id in lucky_peers { + self.send_packet(io, peer_id, CONSENSUS_DATA_PACKET, packet.clone()); + } + } } #[cfg(test)] @@ -2067,9 +2115,9 @@ mod tests { #[test] fn return_receipts_empty() { let mut client = TestBlockChainClient::new(); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &mut queue, None); + let io = TestIo::new(&mut client, &ss, &queue, None); let result = ChainSync::return_receipts(&io, &UntrustedRlp::new(&[0xc0]), 0); @@ -2079,10 +2127,10 @@ mod tests { #[test] fn return_receipts() { let mut client = TestBlockChainClient::new(); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let sync = dummy_sync_with_peer(H256::new(), &client); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &queue, None); let mut receipt_list = RlpStream::new_list(4); receipt_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -2103,7 +2151,7 @@ mod tests { io.sender = Some(2usize); ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_RECEIPTS_PACKET, &receipts_request); - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); } #[test] @@ -2136,9 +2184,9 @@ mod tests { let headers: Vec<_> = blocks.iter().map(|b| Rlp::new(b).at(0).as_raw().to_vec()).collect(); let hashes: Vec<_> = headers.iter().map(|h| HeaderView::new(h).sha3()).collect(); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &mut queue, None); + let io = TestIo::new(&mut client, &ss, &queue, None); let unknown: H256 = H256::new(); let result = ChainSync::return_block_headers(&io, &UntrustedRlp::new(&make_hash_req(&unknown, 1, 0, false)), 0); @@ -2174,10 +2222,10 @@ mod tests { #[test] fn return_nodes() { let mut client = TestBlockChainClient::new(); - let mut queue = VecDeque::new(); + let queue = 
RwLock::new(VecDeque::new()); let sync = dummy_sync_with_peer(H256::new(), &client); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &queue, None); let mut node_list = RlpStream::new_list(3); node_list.append(&H256::from("0000000000000000000000000000000000000000000000005555555555555555")); @@ -2200,7 +2248,7 @@ mod tests { io.sender = Some(2usize); ChainSync::dispatch_packet(&RwLock::new(sync), &mut io, 0usize, super::GET_NODE_DATA_PACKET, &node_request); - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); } fn dummy_sync_with_peer(peer_latest_hash: H256, client: &BlockChainClient) -> ChainSync { @@ -2231,15 +2279,12 @@ mod tests { fn finds_lagging_peers() { let mut client = TestBlockChainClient::new(); client.add_blocks(100, EachBlockWith::Uncle); - let mut queue = VecDeque::new(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(10), &client); let chain_info = client.chain_info(); - let ss = TestSnapshotService::new(); - let io = TestIo::new(&mut client, &ss, &mut queue, None); - let lagging_peers = sync.get_lagging_peers(&chain_info, &io); + let lagging_peers = sync.get_lagging_peers(&chain_info); - assert_eq!(1, lagging_peers.len()) + assert_eq!(1, lagging_peers.len()); } #[test] @@ -2263,62 +2308,99 @@ mod tests { fn sends_new_hashes_to_lagging_peer() { let mut client = TestBlockChainClient::new(); client.add_blocks(100, EachBlockWith::Uncle); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &queue, None); - let peers = sync.get_lagging_peers(&chain_info, &io); + let peers = sync.get_lagging_peers(&chain_info); let peer_count = sync.propagate_new_hashes(&chain_info, &mut io, &peers); // 1 message should be send - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); // 1 peer should be updated assert_eq!(1, peer_count); // NEW_BLOCK_HASHES_PACKET - assert_eq!(0x01, io.queue[0].packet_id); + assert_eq!(0x01, io.packets[0].packet_id); } #[test] fn sends_latest_block_to_lagging_peer() { let mut client = TestBlockChainClient::new(); client.add_blocks(100, EachBlockWith::Uncle); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); - let peers = sync.get_lagging_peers(&chain_info, &io); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[], &peers); // 1 message should be send - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); // 1 peer should be updated assert_eq!(1, peer_count); // NEW_BLOCK_PACKET - assert_eq!(0x07, io.queue[0].packet_id); + assert_eq!(0x07, io.packets[0].packet_id); } #[test] fn sends_sealed_block() { let mut client = TestBlockChainClient::new(); client.add_blocks(100, EachBlockWith::Uncle); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let hash = client.block_hash(BlockId::Number(99)).unwrap(); let mut sync = 
dummy_sync_with_peer(client.block_hash_delta_minus(5), &client); let chain_info = client.chain_info(); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); - let peers = sync.get_lagging_peers(&chain_info, &io); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + let peers = sync.get_lagging_peers(&chain_info); let peer_count = sync.propagate_blocks(&chain_info, &mut io, &[hash.clone()], &peers); // 1 message should be send - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); // 1 peer should be updated assert_eq!(1, peer_count); // NEW_BLOCK_PACKET - assert_eq!(0x07, io.queue[0].packet_id); + assert_eq!(0x07, io.packets[0].packet_id); + } + + #[test] + fn sends_proposed_block() { + let mut client = TestBlockChainClient::new(); + client.add_blocks(2, EachBlockWith::Uncle); + let queue = RwLock::new(VecDeque::new()); + let block = client.block(BlockId::Latest).unwrap(); + let mut sync = ChainSync::new(SyncConfig::default(), &client); + sync.peers.insert(0, + PeerInfo { + // Messaging protocol + protocol_version: 2, + genesis: H256::zero(), + network_id: 0, + latest_hash: client.block_hash_delta_minus(1), + difficulty: None, + asking: PeerAsking::Nothing, + asking_blocks: Vec::new(), + asking_hash: None, + ask_time: 0, + last_sent_transactions: HashSet::new(), + expired: false, + confirmation: super::ForkConfirmation::Confirmed, + snapshot_number: None, + snapshot_hash: None, + asking_snapshot_data: None, + block_set: None, + }); + let ss = TestSnapshotService::new(); + let mut io = TestIo::new(&mut client, &ss, &queue, None); + sync.propagate_proposed_blocks(&mut io, &[block]); + + // 1 message should be sent + assert_eq!(1, io.packets.len()); + // NEW_BLOCK_PACKET + assert_eq!(0x07, io.packets[0].packet_id); } #[test] @@ -2327,25 +2409,25 @@ mod tests { client.add_blocks(100, EachBlockWith::Uncle); client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &queue, None); let peer_count = sync.propagate_new_transactions(&mut io); // Try to propagate same transactions for the second time let peer_count2 = sync.propagate_new_transactions(&mut io); // Even after new block transactions should not be propagated twice - sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]); + sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]); // Try to propagate same transactions for the third time let peer_count3 = sync.propagate_new_transactions(&mut io); // 1 message should be send - assert_eq!(1, io.queue.len()); + assert_eq!(1, io.packets.len()); // 1 peer should be updated but only once assert_eq!(1, peer_count); assert_eq!(0, peer_count2); assert_eq!(0, peer_count3); // TRANSACTIONS_PACKET - assert_eq!(0x02, io.queue[0].packet_id); + assert_eq!(0x02, io.packets[0].packet_id); } #[test] @@ -2354,21 +2436,21 @@ mod tests { client.add_blocks(100, EachBlockWith::Uncle); client.insert_transaction_to_queue(); let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client); - let mut queue = VecDeque::new(); + let queue = RwLock::new(VecDeque::new()); let ss = TestSnapshotService::new(); - let mut io = TestIo::new(&mut client, &ss, &mut queue, None); + let mut io = TestIo::new(&mut client, &ss, &queue, None); let peer_count = 
 		sync.propagate_new_transactions(&mut io);
 		io.chain.insert_transaction_to_queue();
 		// New block import should trigger propagation.
-		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[]);
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &[], &[], &[]);
 		// 2 message should be send
-		assert_eq!(2, io.queue.len());
+		assert_eq!(2, io.packets.len());
 		// 1 peer should receive the message
 		assert_eq!(1, peer_count);
 		// TRANSACTIONS_PACKET
-		assert_eq!(0x02, io.queue[0].packet_id);
-		assert_eq!(0x02, io.queue[1].packet_id);
+		assert_eq!(0x02, io.packets[0].packet_id);
+		assert_eq!(0x02, io.packets[1].packet_id);
 	}
 
 	#[test]
@@ -2377,31 +2459,34 @@ mod tests {
 		client.add_blocks(100, EachBlockWith::Uncle);
 		client.insert_transaction_to_queue();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
 		// should sent some
 		{
-			let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
 			let peer_count = sync.propagate_new_transactions(&mut io);
-			assert_eq!(1, io.queue.len());
+			assert_eq!(1, io.packets.len());
 			assert_eq!(1, peer_count);
 		}
 		// Insert some more
 		client.insert_transaction_to_queue();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
-		// Propagate new transactions
-		let peer_count2 = sync.propagate_new_transactions(&mut io);
-		// And now the peer should have all transactions
-		let peer_count3 = sync.propagate_new_transactions(&mut io);
+		let (peer_count2, peer_count3) = {
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
+			// Propagate new transactions
+			let peer_count2 = sync.propagate_new_transactions(&mut io);
+			// And now the peer should have all transactions
+			let peer_count3 = sync.propagate_new_transactions(&mut io);
+			(peer_count2, peer_count3)
+		};
 		// 2 message should be send (in total)
-		assert_eq!(2, io.queue.len());
+		assert_eq!(2, queue.read().len());
 		// 1 peer should be updated but only once after inserting new transaction
 		assert_eq!(1, peer_count2);
 		assert_eq!(0, peer_count3);
 		// TRANSACTIONS_PACKET
-		assert_eq!(0x02, io.queue[0].packet_id);
-		assert_eq!(0x02, io.queue[1].packet_id);
+		assert_eq!(0x02, queue.read()[0].packet_id);
+		assert_eq!(0x02, queue.read()[1].packet_id);
 	}
 
 	#[test]
@@ -2410,9 +2495,9 @@ mod tests {
 		client.add_blocks(100, EachBlockWith::Uncle);
 		client.insert_transaction_to_queue();
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(1), &client);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		sync.propagate_new_transactions(&mut io);
 		let stats = sync.transactions_stats();
@@ -2426,11 +2511,11 @@ mod tests {
 
 		let block_data = get_dummy_block(11, client.chain_info().best_block_hash);
 
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		//sync.have_common_block = true;
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		let block = UntrustedRlp::new(&block_data);
@@ -2446,10 +2531,10 @@ mod tests {
 
 		let block_data = get_dummy_blocks(11, client.chain_info().best_block_hash);
 
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		let block = UntrustedRlp::new(&block_data);
@@ -2462,10 +2547,10 @@ mod tests {
 	fn handles_peer_new_block_empty() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(10, EachBlockWith::Uncle);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		let empty_data = vec![];
 		let block = UntrustedRlp::new(&empty_data);
@@ -2479,10 +2564,10 @@ mod tests {
 	fn handles_peer_new_hashes() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(10, EachBlockWith::Uncle);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		let hashes_data = get_dummy_hashes();
 		let hashes_rlp = UntrustedRlp::new(&hashes_data);
@@ -2496,10 +2581,10 @@ mod tests {
 	fn handles_peer_new_hashes_empty() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(10, EachBlockWith::Uncle);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		let empty_hashes_data = vec![];
 		let hashes_rlp = UntrustedRlp::new(&empty_hashes_data);
@@ -2515,16 +2600,16 @@ mod tests {
 	fn hashes_rlp_mutually_acceptable() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(100, EachBlockWith::Uncle);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let chain_info = client.chain_info();
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
-		let peers = sync.get_lagging_peers(&chain_info, &io);
+		let peers = sync.get_lagging_peers(&chain_info);
 		sync.propagate_new_hashes(&chain_info, &mut io, &peers);
 
-		let data = &io.queue[0].data.clone();
+		let data = &io.packets[0].data.clone();
 		let result = sync.on_peer_new_hashes(&mut io, 0, &UntrustedRlp::new(data));
 		assert!(result.is_ok());
 	}
@@ -2535,16 +2620,16 @@ mod tests {
 	fn block_rlp_mutually_acceptable() {
 		let mut client = TestBlockChainClient::new();
 		client.add_blocks(100, EachBlockWith::Uncle);
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let mut sync = dummy_sync_with_peer(client.block_hash_delta_minus(5), &client);
 		let chain_info = client.chain_info();
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
-		let peers = sync.get_lagging_peers(&chain_info, &io);
+		let peers = sync.get_lagging_peers(&chain_info);
 		sync.propagate_blocks(&chain_info, &mut io, &[], &peers);
 
-		let data = &io.queue[0].data.clone();
+		let data = &io.packets[0].data.clone();
 		let result = sync.on_peer_new_block(&mut io, 0, &UntrustedRlp::new(data));
 		assert!(result.is_ok());
 	}
@@ -2572,11 +2657,11 @@ mod tests {
 		// when
 		{
-			let mut queue = VecDeque::new();
+			let queue = RwLock::new(VecDeque::new());
 			let ss = TestSnapshotService::new();
-			let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+			let mut io = TestIo::new(&mut client, &ss, &queue, None);
 			io.chain.miner.chain_new_blocks(io.chain, &[], &[], &[], &good_blocks);
-			sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
+			sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
 			assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
 			assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 1);
 		}
@@ -2587,11 +2672,11 @@ mod tests {
 			client.set_nonce(view.transactions()[0].sender().unwrap(), U256::from(1));
 		}
 		{
-			let mut queue = VecDeque::new();
+			let queue = RwLock::new(VecDeque::new());
 			let ss = TestSnapshotService::new();
-			let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+			let mut io = TestIo::new(&client, &ss, &queue, None);
 			io.chain.miner.chain_new_blocks(io.chain, &[], &[], &good_blocks, &retracted_blocks);
-			sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
+			sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
 		}
 
 		// then
@@ -2612,15 +2697,15 @@ mod tests {
 		let good_blocks = vec![client.block_hash_delta_minus(2)];
 		let retracted_blocks = vec![client.block_hash_delta_minus(1)];
 
-		let mut queue = VecDeque::new();
+		let queue = RwLock::new(VecDeque::new());
 		let ss = TestSnapshotService::new();
-		let mut io = TestIo::new(&mut client, &ss, &mut queue, None);
+		let mut io = TestIo::new(&mut client, &ss, &queue, None);
 
 		// when
-		sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &[], &[], &[], &good_blocks, &[], &[]);
 		assert_eq!(io.chain.miner.status().transactions_in_future_queue, 0);
 		assert_eq!(io.chain.miner.status().transactions_in_pending_queue, 0);
-		sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[]);
+		sync.chain_new_blocks(&mut io, &[], &[], &good_blocks, &retracted_blocks, &[], &[]);
 
 		// then
 		let status = io.chain.miner.status();
diff --git a/sync/src/tests/chain.rs b/sync/src/tests/chain.rs
index 3d0c17fff..6d1ffaf83 100644
--- a/sync/src/tests/chain.rs
+++ b/sync/src/tests/chain.rs
@@ -101,7 +101,7 @@ fn forked_with_misbehaving_peer() {
 	::env_logger::init().ok();
 	let mut net = TestNet::new(3);
 	// peer 0 is on a totally different chain with higher total difficulty
-	net.peer_mut(0).chain = TestBlockChainClient::new_with_extra_data(b"fork".to_vec());
+	net.peer_mut(0).chain = Arc::new(TestBlockChainClient::new_with_extra_data(b"fork".to_vec()));
 	net.peer(0).chain.add_blocks(50, EachBlockWith::Nothing);
 	net.peer(1).chain.add_blocks(10, EachBlockWith::Nothing);
 	net.peer(2).chain.add_blocks(10, EachBlockWith::Nothing);
diff --git a/sync/src/tests/consensus.rs b/sync/src/tests/consensus.rs
index b96997d1e..c1fa8fce7 100644
--- a/sync/src/tests/consensus.rs
+++ b/sync/src/tests/consensus.rs
@@ -15,7 +15,9 @@
 // along with Parity.  If not, see <http://www.gnu.org/licenses/>.
 
 use util::*;
-use ethcore::client::BlockChainClient;
+use io::{IoHandler, IoContext, IoChannel};
+use ethcore::client::{BlockChainClient, Client, MiningBlockChainClient};
+use ethcore::service::ClientIoMessage;
 use ethcore::spec::Spec;
 use ethcore::miner::MinerService;
 use ethcore::transaction::*;
@@ -24,55 +26,171 @@ use ethkey::KeyPair;
 use super::helpers::*;
 use SyncConfig;
 
-#[test]
-fn test_authority_round() {
-	::env_logger::init().ok();
+struct TestIoHandler {
+	client: Arc<Client>,
+}
 
-	let s1 = KeyPair::from_secret("1".sha3()).unwrap();
-	let s2 = KeyPair::from_secret("0".sha3()).unwrap();
-	let spec_factory = || {
-		let spec = Spec::new_test_round();
-		let account_provider = AccountProvider::transient_provider();
-		account_provider.insert_account(s1.secret().clone(), "").unwrap();
-		account_provider.insert_account(s2.secret().clone(), "").unwrap();
-		spec.engine.register_account_provider(Arc::new(account_provider));
-		spec
-	};
-	let mut net = TestNet::new_with_spec(2, SyncConfig::default(), spec_factory);
-	let mut net = &mut *net;
-	// Push transaction to both clients. Only one of them gets lucky to mine a block.
-	net.peer(0).chain.miner().set_author(s1.address());
-	net.peer(0).chain.engine().set_signer(s1.address(), "".to_owned());
-	net.peer(1).chain.miner().set_author(s2.address());
-	net.peer(1).chain.engine().set_signer(s2.address(), "".to_owned());
-	let tx1 = Transaction {
-		nonce: 0.into(),
+impl IoHandler<ClientIoMessage> for TestIoHandler {
+	fn message(&self, _io: &IoContext<ClientIoMessage>, net_message: &ClientIoMessage) {
+		match *net_message {
+			ClientIoMessage::UpdateSealing => self.client.update_sealing(),
+			ClientIoMessage::SubmitSeal(ref hash, ref seal) => self.client.submit_seal(*hash, seal.clone()),
+			ClientIoMessage::BroadcastMessage(ref message) => self.client.broadcast_consensus_message(message.clone()),
+			ClientIoMessage::NewMessage(ref message) => if let Err(e) = self.client.engine().handle_message(message) {
+				panic!("Invalid message received: {}", e);
+			},
+			_ => {} // ignore other messages
+		}
+	}
+}
+
+fn new_tx(secret: &H256, nonce: U256) -> SignedTransaction {
+	Transaction {
+		nonce: nonce.into(),
 		gas_price: 0.into(),
 		gas: 21000.into(),
 		action: Action::Call(Address::default()),
 		value: 0.into(),
 		data: Vec::new(),
-	}.sign(s1.secret(), None);
-	// exhange statuses
-	net.sync_steps(5);
-	net.peer(0).chain.miner().import_own_transaction(&net.peer(0).chain, tx1).unwrap();
+	}.sign(secret, None)
+}
+
+#[test]
+fn authority_round() {
+	let s0 = KeyPair::from_secret("1".sha3()).unwrap();
+	let s1 = KeyPair::from_secret("0".sha3()).unwrap();
+	let spec_factory = || {
+		let spec = Spec::new_test_round();
+		let account_provider = AccountProvider::transient_provider();
+		account_provider.insert_account(s0.secret().clone(), "").unwrap();
+		account_provider.insert_account(s1.secret().clone(), "").unwrap();
+		spec.engine.register_account_provider(Arc::new(account_provider));
+		spec
+	};
+	let mut net = TestNet::with_spec(2, SyncConfig::default(), spec_factory);
+	let mut net = &mut *net;
+	let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() });
+	let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() });
+	// Push transaction to both clients. Only one of them gets lucky to produce a block.
+	net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
+	net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
+	net.peer(0).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
+	net.peer(1).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
+	net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
+	net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
+	// exchange statuses
+	net.sync();
+	// Trigger block proposal
+	net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap();
+	// Sync a block
 	net.sync();
 	assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1);
 	assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1);
-	let tx2 = Transaction {
-		nonce: 0.into(),
-		gas_price: 0.into(),
-		gas: 21000.into(),
-		action: Action::Call(Address::default()),
-		value: 0.into(),
-		data: Vec::new(),
-	}.sign(s2.secret(), None);
-	net.peer(1).chain.miner().import_own_transaction(&net.peer(1).chain, tx2).unwrap();
+	net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap();
+	// Move to next proposer step
+	net.peer(0).chain.engine().step();
 	net.peer(1).chain.engine().step();
-	net.peer(1).chain.miner().update_sealing(&net.peer(1).chain);
 	net.sync();
 	assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2);
 	assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2);
+
+	// Fork the network
+	net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap();
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	assert_eq!(net.peer(0).chain.chain_info().best_block_number, 3);
+	net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap();
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	assert_eq!(net.peer(1).chain.chain_info().best_block_number, 3);
+	// Reorg to the correct one.
+	net.sync();
+	let ci0 = net.peer(0).chain.chain_info();
+	let ci1 = net.peer(1).chain.chain_info();
+	assert_eq!(ci0.best_block_number, 3);
+	assert_eq!(ci1.best_block_number, 3);
+	assert_eq!(ci0.best_block_hash, ci1.best_block_hash);
 }
 
+#[test]
+fn tendermint() {
+	let s0 = KeyPair::from_secret("1".sha3()).unwrap();
+	let s1 = KeyPair::from_secret("0".sha3()).unwrap();
+	let spec_factory = || {
+		let spec = Spec::new_test_tendermint();
+		let account_provider = AccountProvider::transient_provider();
+		account_provider.insert_account(s0.secret().clone(), "").unwrap();
+		account_provider.insert_account(s1.secret().clone(), "").unwrap();
+		spec.engine.register_account_provider(Arc::new(account_provider));
+		spec
+	};
+	let mut net = TestNet::with_spec(2, SyncConfig::default(), spec_factory);
+	let mut net = &mut *net;
+	let io_handler0: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(0).chain.clone() });
+	let io_handler1: Arc<IoHandler<ClientIoMessage>> = Arc::new(TestIoHandler { client: net.peer(1).chain.clone() });
+	// Push transaction to both clients. Only one of them issues a proposal.
+	net.peer(0).chain.miner().set_engine_signer(s0.address(), "".to_owned()).unwrap();
+	trace!(target: "poa", "Peer 0 is {}.", s0.address());
+	net.peer(1).chain.miner().set_engine_signer(s1.address(), "".to_owned()).unwrap();
+	trace!(target: "poa", "Peer 1 is {}.", s1.address());
+	net.peer(0).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
+	net.peer(1).chain.engine().register_message_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
+	net.peer(0).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler0)));
+	net.peer(1).chain.set_io_channel(IoChannel::to_handler(Arc::downgrade(&io_handler1)));
+	// Exhange statuses
+	net.sync();
+	// Propose
+	net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 0.into())).unwrap();
+	net.sync();
+	// Propose timeout, synchronous for now
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	// Prevote, precommit and commit
+	net.sync();
+
+	assert_eq!(net.peer(0).chain.chain_info().best_block_number, 1);
+	assert_eq!(net.peer(1).chain.chain_info().best_block_number, 1);
+
+	net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 0.into())).unwrap();
+	// Commit timeout
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	// Propose
+	net.sync();
+	// Propose timeout
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	// Prevote, precommit and commit
+	net.sync();
+	assert_eq!(net.peer(0).chain.chain_info().best_block_number, 2);
+	assert_eq!(net.peer(1).chain.chain_info().best_block_number, 2);
+
+	net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 1.into())).unwrap();
+	net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 1.into())).unwrap();
+	// Peers get disconnected.
+	// Commit
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	// Propose
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+net.peer(0).chain.miner().import_own_transaction(&*net.peer(0).chain, new_tx(s0.secret(), 2.into())).unwrap();
+	net.peer(1).chain.miner().import_own_transaction(&*net.peer(1).chain, new_tx(s1.secret(), 2.into())).unwrap();
+	// Send different prevotes
+	net.sync();
+	// Prevote timeout
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	// Precommit and commit
+	net.sync();
+	// Propose timeout
+	net.peer(0).chain.engine().step();
+	net.peer(1).chain.engine().step();
+	net.sync();
+	let ci0 = net.peer(0).chain.chain_info();
+	let ci1 = net.peer(1).chain.chain_info();
+	assert_eq!(ci0.best_block_number, 3);
+	assert_eq!(ci1.best_block_number, 3);
+	assert_eq!(ci0.best_block_hash, ci1.best_block_hash);
+}
diff --git a/sync/src/tests/helpers.rs b/sync/src/tests/helpers.rs
index af9118431..6ad9965cd 100644
--- a/sync/src/tests/helpers.rs
+++ b/sync/src/tests/helpers.rs
@@ -45,14 +45,15 @@ impl FlushingBlockChainClient for TestBlockChainClient {}
 pub struct TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 	pub chain: &'p C,
 	pub snapshot_service: &'p TestSnapshotService,
-	pub queue: &'p mut VecDeque<TestPacket>,
+	pub queue: &'p RwLock<VecDeque<TestPacket>>,
 	pub sender: Option<PeerId>,
 	pub to_disconnect: HashSet<PeerId>,
+	pub packets: Vec<TestPacket>,
 	overlay: RwLock<HashMap<BlockNumber, Bytes>>,
 }
 
 impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
-	pub fn new(chain: &'p C, ss: &'p TestSnapshotService, queue: &'p mut VecDeque<TestPacket>, sender: Option<PeerId>) -> TestIo<'p, C> {
+	pub fn new(chain: &'p C, ss: &'p TestSnapshotService, queue: &'p RwLock<VecDeque<TestPacket>>, sender: Option<PeerId>) -> TestIo<'p, C> {
 		TestIo {
 			chain: chain,
 			snapshot_service: ss,
@@ -60,10 +61,17 @@ impl<'p, C> TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 			sender: sender,
 			to_disconnect: HashSet::new(),
 			overlay: RwLock::new(HashMap::new()),
+			packets: Vec::new(),
 		}
 	}
 }
 
+impl<'p, C> Drop for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
+	fn drop(&mut self) {
+		self.queue.write().extend(self.packets.drain(..));
+	}
+}
+
 impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 	fn disable_peer(&mut self, peer_id: PeerId) {
 		self.disconnect_peer(peer_id);
@@ -78,7 +86,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 	}
 
 	fn respond(&mut self, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
-		self.queue.push_back(TestPacket {
+		self.packets.push(TestPacket {
 			data: data,
 			packet_id: packet_id,
 			recipient: self.sender.unwrap()
@@ -87,7 +95,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 	}
 
 	fn send(&mut self, peer_id: PeerId, packet_id: PacketId, data: Vec<u8>) -> Result<(), NetworkError> {
-		self.queue.push_back(TestPacket {
+		self.packets.push(TestPacket {
 			data: data,
 			packet_id: packet_id,
 			recipient: peer_id,
@@ -100,7 +108,7 @@ impl<'p, C> SyncIo for TestIo<'p, C> where C: FlushingBlockChainClient, C: 'p {
 	}
 
 	fn chain(&self) -> &BlockChainClient {
-		self.chain
+		&*self.chain
 	}
 
 	fn snapshot_service(&self) -> &SnapshotService {
@@ -131,7 +139,7 @@ pub struct TestPacket {
 }
 
 pub struct TestPeer<C> where C: FlushingBlockChainClient {
-	pub chain: C,
+	pub chain: Arc<C>,
 	pub snapshot_service: Arc<TestSnapshotService>,
 	pub sync: RwLock<ChainSync>,
 	pub queue: RwLock<VecDeque<TestPacket>>,
@@ -167,7 +175,7 @@ impl TestNet {
 			net.peers.push(Arc::new(TestPeer {
 				sync: RwLock::new(sync),
 				snapshot_service: ss,
-				chain: chain,
+				chain: Arc::new(chain),
 				queue: RwLock::new(VecDeque::new()),
 			}));
 		}
@@ -176,7 +184,7 @@ impl TestNet {
 }
 
 impl TestNet {
-	pub fn new_with_spec<F>(n: usize, config: SyncConfig, spec_factory: F) -> GuardedTempResult<TestNet<EthcoreClient>>
+	pub fn with_spec<F>(n: usize, config: SyncConfig, spec_factory: F) -> GuardedTempResult<TestNet<EthcoreClient>>
 		where F: Fn() -> Spec
 	{
 		let mut net = TestNet {
@@ -192,17 +200,17 @@ impl TestNet {
 		let db_config = DatabaseConfig::with_columns(NUM_COLUMNS);
 			let spec = spec_factory();
-			let client = Arc::try_unwrap(EthcoreClient::new(
+			let client = EthcoreClient::new(
 				ClientConfig::default(),
 				&spec,
 				client_dir.as_path(),
 				Arc::new(Miner::with_spec(&spec)),
 				IoChannel::disconnected(),
 				&db_config
-			).unwrap()).ok().unwrap();
+			).unwrap();
 			let ss = Arc::new(TestSnapshotService::new());
-			let sync = ChainSync::new(config.clone(), &client);
+			let sync = ChainSync::new(config.clone(), &*client);
 			let peer = Arc::new(TestPeer {
 				sync: RwLock::new(sync),
 				snapshot_service: ss,
@@ -229,33 +237,38 @@ impl TestNet where C: FlushingBlockChainClient {
 	}
 
 	pub fn start(&mut self) {
+		if self.started {
+			return;
+		}
 		for peer in 0..self.peers.len() {
 			for client in 0..self.peers.len() {
 				if peer != client {
 					let p = &self.peers[peer];
-					p.sync.write().update_targets(&p.chain);
-					p.sync.write().on_peer_connected(&mut TestIo::new(&p.chain, &p.snapshot_service, &mut p.queue.write(), Some(client as PeerId)), client as PeerId);
+					p.sync.write().update_targets(&*p.chain);
+					p.sync.write().on_peer_connected(&mut TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(client as PeerId)), client as PeerId);
 				}
 			}
 		}
+		self.started = true;
 	}
 
 	pub fn sync_step(&mut self) {
 		for peer in 0..self.peers.len() {
+			self.peers[peer].chain.flush();
 			let packet = self.peers[peer].queue.write().pop_front();
 			if let Some(packet) = packet {
 				let disconnecting = {
 					let p = &self.peers[packet.recipient];
-					let mut queue = p.queue.write();
 					trace!("--- {} -> {} ---", peer, packet.recipient);
 					let to_disconnect = {
-						let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(peer as PeerId));
+						let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(peer as PeerId));
 						ChainSync::dispatch_packet(&p.sync, &mut io, peer as PeerId, packet.packet_id, &packet.data);
-						io.to_disconnect
+						p.chain.flush();
+						io.to_disconnect.clone()
 					};
 					for d in &to_disconnect {
 						// notify this that disconnecting peers are disconnecting
-						let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(*d));
+						let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(*d));
 						p.sync.write().on_peer_aborting(&mut io, *d);
 						self.disconnect_events.push((peer, *d));
 					}
@@ -264,8 +277,7 @@ impl TestNet where C: FlushingBlockChainClient {
 			for d in &disconnecting {
 				// notify other peers that this peer is disconnecting
 				let p = &self.peers[*d];
-				let mut queue = p.queue.write();
-				let mut io = TestIo::new(&p.chain, &p.snapshot_service, &mut queue, Some(peer as PeerId));
+				let mut io = TestIo::new(&*p.chain, &p.snapshot_service, &p.queue, Some(peer as PeerId));
 				p.sync.write().on_peer_aborting(&mut io, peer as PeerId);
 			}
 		}
@@ -277,15 +289,14 @@ impl TestNet where C: FlushingBlockChainClient {
 	pub fn sync_step_peer(&mut self, peer_num: usize) {
 		let peer = self.peer(peer_num);
 		peer.chain.flush();
-		let mut queue = peer.queue.write();
-		peer.sync.write().maintain_peers(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
-		peer.sync.write().maintain_sync(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
-		peer.sync.write().propagate_new_transactions(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None));
+		peer.sync.write().maintain_peers(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
+		peer.sync.write().maintain_sync(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
+		peer.sync.write().propagate_new_transactions(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
 	}
 
 	pub fn restart_peer(&mut self, i: usize) {
 		let peer = self.peer(i);
-		peer.sync.write().restart(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut peer.queue.write(), None));
+		peer.sync.write().restart(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None));
 	}
 
 	pub fn sync(&mut self) -> u32 {
@@ -299,10 +310,7 @@ impl TestNet where C: FlushingBlockChainClient {
 	}
 
 	pub fn sync_steps(&mut self, count: usize) {
-		if !self.started {
-			self.start();
-			self.started = true;
-		}
+		self.start();
 		for _ in 0..count {
 			self.sync_step();
 		}
@@ -314,8 +322,7 @@ impl TestNet where C: FlushingBlockChainClient {
 
 	pub fn trigger_chain_new_blocks(&mut self, peer_id: usize) {
 		let peer = self.peer(peer_id);
-		let mut queue = peer.queue.write();
-		peer.sync.write().chain_new_blocks(&mut TestIo::new(&peer.chain, &peer.snapshot_service, &mut queue, None), &[], &[], &[], &[], &[]);
+		peer.sync.write().chain_new_blocks(&mut TestIo::new(&*peer.chain, &peer.snapshot_service, &peer.queue, None), &[], &[], &[], &[], &[], &[]);
 	}
 }
 
@@ -326,21 +333,26 @@ impl ChainNotify for TestPeer {
 		enacted: Vec<H256>,
 		retracted: Vec<H256>,
 		sealed: Vec<H256>,
+		proposed: Vec<Bytes>,
 		_duration: u64) {
-		let mut queue = self.queue.write();
-		let mut io = TestIo::new(&self.chain, &self.snapshot_service, &mut queue, None);
+		let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None);
 		self.sync.write().chain_new_blocks(
 			&mut io,
 			&imported,
 			&invalid,
 			&enacted,
 			&retracted,
-			&sealed);
+			&sealed,
+			&proposed);
 	}
 
 	fn start(&self) {}
 
 	fn stop(&self) {}
-}
+	fn broadcast(&self, message: Vec<u8>) {
+		let mut io = TestIo::new(&*self.chain, &self.snapshot_service, &self.queue, None);
+		self.sync.write().propagate_consensus_packet(&mut io, message.clone());
+	}
+}
diff --git a/util/io/src/service.rs b/util/io/src/service.rs
index d9650a94a..f41cd866b 100644
--- a/util/io/src/service.rs
+++ b/util/io/src/service.rs
@@ -329,11 +329,18 @@ impl Handler for IoManager where Message: Send + Clone + Sync
 	}
 }
 
+#[derive(Clone)]
+enum Handlers<Message> where Message: Send + Clone {
+	SharedCollection(Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>),
+	Single(Weak<IoHandler<Message>>),
+}
+
 /// Allows sending messages into the event loop. All the IO handlers will get the message
 /// in the `message` callback.
 pub struct IoChannel<Message> where Message: Send + Clone {
 	channel: Option<Sender<IoMessage<Message>>>,
-	handlers: Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>,
+	handlers: Handlers<Message>,
+
 }
 
 impl<Message> Clone for IoChannel<Message> where Message: Send + Clone + Sync + 'static {
@@ -348,19 +355,29 @@ impl IoChannel where Message: Send + Clone + Sync + 'static {
 	/// Send a message through the channel
 	pub fn send(&self, message: Message) -> Result<(), IoError> {
-		if let Some(ref channel) = self.channel {
-			try!(channel.send(IoMessage::UserMessage(message)));
+		match self.channel {
+			Some(ref channel) => try!(channel.send(IoMessage::UserMessage(message))),
+			None => try!(self.send_sync(message))
 		}
 		Ok(())
 	}
 
 	/// Send a message through the channel and handle it synchronously
 	pub fn send_sync(&self, message: Message) -> Result<(), IoError> {
-		if let Some(handlers) = self.handlers.upgrade() {
-			for id in 0 .. MAX_HANDLERS {
-				if let Some(h) = handlers.read().get(id) {
-					let handler = h.clone();
-					handler.message(&IoContext::new(self.clone(), id), &message);
+		match self.handlers {
+			Handlers::SharedCollection(ref handlers) => {
+				if let Some(handlers) = handlers.upgrade() {
+					for id in 0 .. MAX_HANDLERS {
+						if let Some(h) = handlers.read().get(id) {
+							let handler = h.clone();
+							handler.message(&IoContext::new(self.clone(), id), &message);
+						}
+					}
+				}
+			},
+			Handlers::Single(ref handler) => {
+				if let Some(handler) = handler.upgrade() {
+					handler.message(&IoContext::new(self.clone(), 0), &message);
 				}
 			}
 		}
@@ -378,14 +395,21 @@ impl IoChannel where Message: Send + Clone + Sync + 'static {
 	pub fn disconnected() -> IoChannel<Message> {
 		IoChannel {
 			channel: None,
-			handlers: Weak::default(),
+			handlers: Handlers::SharedCollection(Weak::default()),
 		}
 	}
+	/// Create a new synchronous channel to a given handler.
+	pub fn to_handler(handler: Weak<IoHandler<Message>>) -> IoChannel<Message> {
+		IoChannel {
+			channel: None,
+			handlers: Handlers::Single(handler),
+		}
+	}
 	fn new(channel: Sender<IoMessage<Message>>, handlers: Weak<RwLock<Slab<Arc<IoHandler<Message>>, HandlerId>>>) -> IoChannel<Message> {
 		IoChannel {
 			channel: Some(channel),
-			handlers: handlers,
+			handlers: Handlers::SharedCollection(handlers),
 		}
 	}
 }
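
A note on the `TestIo` change in `sync/src/tests/helpers.rs` above: instead of holding a mutable borrow of the shared packet queue for the whole lifetime of the IO object, `TestIo` now buffers outgoing packets in its own `packets` vector and only flushes them into the shared `RwLock<VecDeque<TestPacket>>` when it is dropped. The following is a minimal sketch of that pattern, not the Parity code itself: it uses `std::sync::RwLock` (Parity uses a lock that does not return a `Result`) and hypothetical `Packet`/`TestIo` stand-ins.

```rust
use std::collections::VecDeque;
use std::sync::RwLock;

// Illustrative stand-in for the real TestPacket (which also carries data and a recipient).
#[derive(Debug)]
struct Packet {
    packet_id: u8,
}

// Buffers packets locally and flushes them into the shared queue on drop,
// mirroring the `Drop for TestIo` implementation in the diff above.
struct TestIo<'a> {
    queue: &'a RwLock<VecDeque<Packet>>,
    packets: Vec<Packet>,
}

impl<'a> TestIo<'a> {
    fn new(queue: &'a RwLock<VecDeque<Packet>>) -> TestIo<'a> {
        TestIo { queue: queue, packets: Vec::new() }
    }

    fn send(&mut self, packet_id: u8) {
        self.packets.push(Packet { packet_id: packet_id });
    }
}

impl<'a> Drop for TestIo<'a> {
    fn drop(&mut self) {
        // Take the write lock only once, when the IO object goes out of scope.
        self.queue.write().unwrap().extend(self.packets.drain(..));
    }
}

fn main() {
    let queue = RwLock::new(VecDeque::new());
    {
        let mut io = TestIo::new(&queue);
        io.send(0x02);
        io.send(0x02);
    } // `io` is dropped here; its packets land in the shared queue.
    assert_eq!(queue.read().unwrap().len(), 2);
    assert_eq!(queue.read().unwrap()[0].packet_id, 0x02);
}
```

The upshot of the pattern is that several short-lived IO objects can be created against the same queue within one test step without any of them pinning a write lock for the duration of the step.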
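Similarly, the `util/io/src/service.rs` change replaces the channel's single weak reference to the shared handler table with a `Handlers` enum, so a channel can instead target exactly one handler and deliver messages to it synchronously via `IoChannel::to_handler`. Below is a rough, self-contained sketch of that shape only; the `Handler`, `Channel`, and `Printer` names are simplified stand-ins for the real `IoHandler`/`IoChannel` types, and `std::sync::RwLock` plus a plain `Vec` stand in for the handler slab.

```rust
use std::sync::{Arc, RwLock, Weak};

// Simplified stand-in for the real IoHandler trait.
trait Handler: Send + Sync {
    fn message(&self, msg: &str);
}

// A channel either targets the whole (weakly referenced) handler table
// or exactly one handler, as in the new `Handlers` enum.
enum Handlers {
    SharedCollection(Weak<RwLock<Vec<Arc<dyn Handler>>>>),
    Single(Weak<dyn Handler>),
}

struct Channel {
    handlers: Handlers,
}

impl Channel {
    // Analogue of `IoChannel::to_handler`: a synchronous channel bound to one handler.
    fn to_handler(handler: Weak<dyn Handler>) -> Channel {
        Channel { handlers: Handlers::Single(handler) }
    }

    fn to_collection(handlers: Weak<RwLock<Vec<Arc<dyn Handler>>>>) -> Channel {
        Channel { handlers: Handlers::SharedCollection(handlers) }
    }

    // Analogue of `send_sync`: deliver to whichever handlers are still alive.
    fn send_sync(&self, msg: &str) {
        match self.handlers {
            Handlers::SharedCollection(ref weak) => {
                if let Some(table) = weak.upgrade() {
                    for h in table.read().unwrap().iter() {
                        h.message(msg);
                    }
                }
            }
            Handlers::Single(ref weak) => {
                if let Some(h) = weak.upgrade() {
                    h.message(msg);
                }
            }
        }
    }
}

struct Printer;

impl Handler for Printer {
    fn message(&self, msg: &str) {
        println!("got: {}", msg);
    }
}

fn main() {
    let handler: Arc<dyn Handler> = Arc::new(Printer);
    // Weak reference: the channel does not keep the handler alive.
    let channel = Channel::to_handler(Arc::downgrade(&handler));
    channel.send_sync("hello");

    let table = Arc::new(RwLock::new(vec![handler.clone()]));
    let broadcast = Channel::to_collection(Arc::downgrade(&table));
    broadcast.send_sync("hello again");
}
```

Holding only weak references in both variants is what lets the consensus tests register per-peer handlers without creating reference cycles between the client, the engine, and the channel.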