diff --git a/Cargo.lock b/Cargo.lock index 8a70f35a2..03823d6e9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,7 +10,6 @@ dependencies = [ "docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", "ethcore 1.7.0", - "ethcore-dapps 1.7.0", "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", @@ -24,15 +23,17 @@ dependencies = [ "ethcore-signer 1.7.0", "ethcore-stratum 1.7.0", "ethcore-util 1.7.0", + "ethkey 0.2.0", "ethsync 1.7.0", "evmbin 0.1.0", "fdlimit 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", - "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "isatty 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "num_cpus 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", "number_prefix 0.2.5 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps 1.7.0", "parity-hash-fetch 1.7.0", "parity-ipfs-api 1.7.0", "parity-local-store 0.1.0", @@ -40,6 +41,7 @@ dependencies = [ "parity-rpc-client 1.4.0", "parity-updater 1.7.0", "path 0.1.0", + "pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "regex 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "rlp 0.1.0", "rpassword 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -309,6 +311,11 @@ dependencies = [ "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "difference" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" + [[package]] name = "docopt" version = "0.7.0" @@ -446,40 +453,6 @@ dependencies = [ "siphasher 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", ] 
-[[package]] -name = "ethcore-dapps" -version = "1.7.0" -dependencies = [ - "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", - "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", - "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", - "ethcore-devtools 1.7.0", - "ethcore-rpc 1.7.0", - "ethcore-util 1.7.0", - "fetch 0.1.0", - "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", - "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", - "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-hash-fetch 1.7.0", - "parity-reactor 0.1.0", - "parity-ui 1.7.0", - "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", - "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", - "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", - "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", - "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", - "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", - "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "ethcore-devtools" version = "1.7.0" @@ -561,6 +534,7 @@ name = 
"ethcore-light" version = "1.7.0" dependencies = [ "ethcore 1.7.0", + "ethcore-devtools 1.7.0", "ethcore-io 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -642,6 +616,7 @@ dependencies = [ "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", "order-stat 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "parity-reactor 0.1.0", @@ -661,6 +636,9 @@ dependencies = [ name = "ethcore-secretstore" version = "1.0.0" dependencies = [ + "byteorder 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", + "ethabi 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore 1.7.0", "ethcore-devtools 1.7.0", "ethcore-ipc 1.7.0", "ethcore-ipc-codegen 1.7.0", @@ -668,9 +646,19 @@ dependencies = [ "ethcore-util 1.7.0", "ethcrypto 0.1.0", "ethkey 0.2.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "futures-cpupool 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "hyper 0.10.5 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "native-contracts 0.1.0", "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + 
"tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1088,7 +1076,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "jsonrpc-core" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1100,7 +1088,7 @@ dependencies = [ [[package]] name = "jsonrpc-http-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "hyper 0.10.0-a.0 (git+https://github.com/paritytech/hyper)", "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1113,7 +1101,7 @@ dependencies = [ [[package]] name = "jsonrpc-ipc-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1125,17 +1113,31 @@ dependencies = [ [[package]] name = "jsonrpc-macros" version = "7.0.0" -source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "jsonrpc-minihttp-server" +version = "7.0.0" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" +dependencies = [ + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "jsonrpc-pubsub" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1145,7 +1147,7 @@ dependencies = [ [[package]] name = "jsonrpc-server-utils" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = 
"git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", @@ -1156,7 +1158,7 @@ dependencies = [ [[package]] name = "jsonrpc-tcp-server" version = "7.0.0" -source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#f4521e8a543145bec7936de0f875d6550e92c7f7" +source = "git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7#32c1c083139db50db6a5d532ccfc2004236dbfc3" dependencies = [ "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", "jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", @@ -1604,6 +1606,38 @@ dependencies = [ "stable_deref_trait 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "parity-dapps" +version = "1.7.0" +dependencies = [ + "base32 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)", + "clippy 0.0.103 (registry+https://github.com/rust-lang/crates.io-index)", + "env_logger 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", + "ethcore-devtools 1.7.0", + "ethcore-util 1.7.0", + "fetch 0.1.0", + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "jsonrpc-core 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)", + "linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "mime_guess 1.6.1 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-hash-fetch 1.7.0", + 
"parity-reactor 0.1.0", + "parity-ui 1.7.0", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc-serialize 0.3.19 (registry+https://github.com/rust-lang/crates.io-index)", + "serde 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_derive 0.9.6 (registry+https://github.com/rust-lang/crates.io-index)", + "serde_json 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "unicase 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "url 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)", + "zip 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "parity-dapps-glue" version = "1.7.0" @@ -1723,7 +1757,7 @@ dependencies = [ [[package]] name = "parity-ui-precompiled" version = "1.4.0" -source = "git+https://github.com/paritytech/js-precompiled.git#6028c355854797a5938c26f5d2b2faf10d8833d7" +source = "git+https://github.com/paritytech/js-precompiled.git#9bfc6f3dfca2c337c53084bedcc65c2b526927a1" dependencies = [ "parity-dapps-glue 1.7.0 (registry+https://github.com/rust-lang/crates.io-index)", ] @@ -1826,6 +1860,14 @@ name = "podio" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" +[[package]] +name = "pretty_assertions" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +dependencies = [ + "difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "primal" version = "0.2.3" @@ -2431,6 +2473,21 @@ dependencies = [ "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-minihttp" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-minihttp#8acbafae3e77e7f7eb516b441ec84695580221dd" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "httparse 1.1.2 
(registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "time 0.1.35 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-named-pipes" version = "0.1.0" @@ -2441,6 +2498,22 @@ dependencies = [ "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "tokio-proto" +version = "0.1.0" +source = "git+https://github.com/tomusdrw/tokio-proto#f6ee08cb594fa2fc1b4178eaaca0855d66e68fd3" +dependencies = [ + "futures 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", + "net2 0.2.23 (registry+https://github.com/rust-lang/crates.io-index)", + "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", + "slab 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", + "smallvec 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", + "take 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)", + "tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "tokio-proto" version = "0.1.0" @@ -2706,6 +2779,7 @@ dependencies = [ "checksum ctrlc 1.1.1 (git+https://github.com/paritytech/rust-ctrlc.git)" = "" "checksum daemonize 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "271ec51b7e0bee92f0d04601422c73eb76ececf197026711c97ad25038a010cf" "checksum deque 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "1614659040e711785ed8ea24219140654da1729f3ec8a47a9719d041112fe7bf" 
+"checksum difference 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b3304d19798a8e067e48d8e69b2c37f0b5e9b4e462504ad9e27e9f3fce02bba8" "checksum docopt 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ab32ea6e284d87987066f21a9e809a73c14720571ef34516f0890b3d355ccfd8" "checksum dtoa 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5edd69c67b2f8e0911629b7e6b8a34cb3956613cd7c6e6414966dee349c2db4f" "checksum either 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "3d2b503c86dad62aaf414ecf2b8c527439abedb3f8d812537f0b12bfd6f32a91" @@ -2739,6 +2813,7 @@ dependencies = [ "checksum jsonrpc-http-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-ipc-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-macros 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" +"checksum jsonrpc-minihttp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-pubsub 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-server-utils 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" "checksum jsonrpc-tcp-server 7.0.0 (git+https://github.com/paritytech/jsonrpc.git?branch=parity-1.7)" = "" @@ -2802,6 +2877,7 @@ dependencies = [ "checksum phf_shared 0.7.14 (registry+https://github.com/rust-lang/crates.io-index)" = "fee4d039930e4f45123c9b15976cf93a499847b6483dc09c42ea0ec4940f2aa6" "checksum pkg-config 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)" = "8cee804ecc7eaf201a4a207241472cc870e825206f6c031e3ee2a72fa425f2fa" "checksum podio 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e5422a1ee1bc57cc47ae717b0137314258138f38fd5f3cea083f43a9725383a0" +"checksum pretty_assertions 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = 
"2412f3332a07c7a2a50168988dcc184f32180a9758ad470390e5f55e089f6b6e" "checksum primal 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0e31b86efadeaeb1235452171a66689682783149a6249ff334a2c5d8218d00a4" "checksum primal-bit 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "464a91febc06166783d4f5ba3577b5ed8dda8e421012df80bfe48a971ed7be8f" "checksum primal-check 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "647c81b67bb9551a7b88d0bcd785ac35b7d0bf4b2f358683d7c2375d04daec51" @@ -2871,7 +2947,9 @@ dependencies = [ "checksum tokio-core 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "3d1be481b55126f02ef88ff86748086473cb537a949fc4a8f4be403a530ae54b" "checksum tokio-io 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6a278fde45f1be68e44995227d426aaa4841e0980bb0a21b981092f28c3c8473" "checksum tokio-line 0.1.0 (git+https://github.com/tokio-rs/tokio-line)" = "" +"checksum tokio-minihttp 0.1.0 (git+https://github.com/tomusdrw/tokio-minihttp)" = "" "checksum tokio-named-pipes 0.1.0 (git+https://github.com/alexcrichton/tokio-named-pipes)" = "" +"checksum tokio-proto 0.1.0 (git+https://github.com/tomusdrw/tokio-proto)" = "" "checksum tokio-proto 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7c0d6031f94d78d7b4d509d4a7c5e1cdf524a17e7b08d1c188a83cf720e69808" "checksum tokio-service 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "24da22d077e0f15f55162bdbdc661228c1581892f52074fb242678d015b45162" "checksum tokio-uds 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "ffc7b5fc8e19e220b29566d1750949224a518478eab9cebc8df60583242ca30a" diff --git a/Cargo.toml b/Cargo.toml index 737c21b09..09af66e09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,9 +23,9 @@ toml = "0.2" serde = "0.9" serde_json = "0.9" app_dirs = "1.1.1" +futures = "0.1" fdlimit = "0.1" ws2_32-sys = "0.2" -hyper = { default-features = false, git = "https://github.com/paritytech/hyper" } ctrlc 
= { git = "https://github.com/paritytech/rust-ctrlc.git" } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethsync = { path = "sync" } @@ -41,6 +41,7 @@ ethcore-ipc-hypervisor = { path = "ipc/hypervisor" } ethcore-light = { path = "ethcore/light" } ethcore-logger = { path = "logger" } ethcore-stratum = { path = "stratum" } +ethkey = { path = "ethkey" } evmbin = { path = "evmbin" } rlp = { path = "util/rlp" } rpc-cli = { path = "rpc_cli" } @@ -50,8 +51,9 @@ parity-ipfs-api = { path = "ipfs" } parity-updater = { path = "updater" } parity-reactor = { path = "util/reactor" } parity-local-store = { path = "local-store" } -ethcore-dapps = { path = "dapps", optional = true } path = { path = "util/path" } + +parity-dapps = { path = "dapps", optional = true } clippy = { version = "0.0.103", optional = true} ethcore-secretstore = { path = "secret_store", optional = true } @@ -60,6 +62,7 @@ rustc_version = "0.2" [dev-dependencies] ethcore-ipc-tests = { path = "ipc/tests" } +pretty_assertions = "0.1" [target.'cfg(windows)'.dependencies] winapi = "0.2" @@ -71,18 +74,18 @@ daemonize = "0.2" default = ["ui-precompiled"] ui = [ "dapps", - "ethcore-dapps/ui", + "parity-dapps/ui", "ethcore-signer/ui", ] ui-precompiled = [ "dapps", "ethcore-signer/ui-precompiled", - "ethcore-dapps/ui-precompiled", + "parity-dapps/ui-precompiled", ] -dapps = ["ethcore-dapps"] +dapps = ["parity-dapps"] ipc = ["ethcore/ipc", "ethsync/ipc"] jit = ["ethcore/jit"] -dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "ethcore-dapps/dev", "ethcore-signer/dev"] +dev = ["clippy", "ethcore/dev", "ethcore-util/dev", "ethsync/dev", "ethcore-rpc/dev", "parity-dapps/dev", "ethcore-signer/dev"] json-tests = ["ethcore/json-tests"] test-heavy = ["ethcore/test-heavy"] ethkey-cli = ["ethcore/ethkey-cli"] diff --git a/README.md b/README.md index d92b68c58..f668f3218 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,8 @@ # 
[Parity](https://ethcore.io/parity.html) ### Fast, light, and robust Ethereum implementation +### [Download latest release](https://github.com/paritytech/parity/releases) + [![build status](https://gitlab.ethcore.io/parity/parity/badges/master/build.svg)](https://gitlab.ethcore.io/parity/parity/commits/master) [![Coverage Status][coveralls-image]][coveralls-url] [![GPLv3][license-image]][license-url] ### Join the chat! @@ -22,7 +24,6 @@ Be sure to check out [our wiki][wiki-url] for more information. [doc-url]: https://paritytech.github.io/parity/ethcore/index.html [wiki-url]: https://github.com/paritytech/parity/wiki -**Parity requires Rust version 1.15.0 to build** ---- @@ -45,14 +46,14 @@ of RPC APIs. If you run into an issue while using parity, feel free to file one in this repository or hop on our [gitter chat room][gitter-url] to ask a question. We are glad to help! -Parity's current release is 1.5. You can download it at https://parity.io or follow the instructions +Parity's current release is 1.6. You can download it at https://github.com/paritytech/parity/releases or follow the instructions below to build from source. ---- ## Build dependencies -Parity is fully compatible with Stable Rust. +**Parity requires Rust version 1.16.0 to build** We recommend installing Rust through [rustup](https://www.rustup.rs/). 
If you don't already have rustup, you can install it like this: @@ -80,7 +81,7 @@ Once you have rustup, install parity or download and build from source ---- -## Quick install +## Quick build and install ```bash cargo install --git https://github.com/paritytech/parity.git parity diff --git a/dapps/Cargo.toml b/dapps/Cargo.toml index 07f136d78..429ed01f5 100644 --- a/dapps/Cargo.toml +++ b/dapps/Cargo.toml @@ -1,6 +1,6 @@ [package] description = "Parity Dapps crate" -name = "ethcore-dapps" +name = "parity-dapps" version = "1.7.0" license = "GPL-3.0" authors = ["Parity Technologies "] @@ -28,11 +28,8 @@ zip = { version = "0.1", default-features = false } jsonrpc-core = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } jsonrpc-http-server = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } -# TODO [ToDr] Temporary solution, server should be merged with RPC. -jsonrpc-server-utils = { git = "https://github.com/paritytech/jsonrpc.git", branch = "parity-1.7" } ethcore-devtools = { path = "../devtools" } -ethcore-rpc = { path = "../rpc" } ethcore-util = { path = "../util" } fetch = { path = "../util/fetch" } parity-hash-fetch = { path = "../hash-fetch" } @@ -42,7 +39,7 @@ parity-ui = { path = "./ui" } clippy = { version = "0.0.103", optional = true} [features] -dev = ["clippy", "ethcore-rpc/dev", "ethcore-util/dev"] +dev = ["clippy", "ethcore-util/dev"] ui = ["parity-ui/no-precompiled-js"] ui-precompiled = ["parity-ui/use-precompiled-js"] diff --git a/dapps/src/api/api.rs b/dapps/src/api/api.rs index e07bd4535..df3386358 100644 --- a/dapps/src/api/api.rs +++ b/dapps/src/api/api.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use std::sync::Arc; use unicase::UniCase; use hyper::{server, net, Decoder, Encoder, Next, Control}; use hyper::header; @@ -26,48 +25,49 @@ use apps::fetcher::Fetcher; use handlers::extract_url; use endpoint::{Endpoint, Endpoints, Handler, EndpointPath}; -use jsonrpc_http_server; -use jsonrpc_server_utils::cors; +use jsonrpc_http_server::{self, AccessControlAllowOrigin}; #[derive(Clone)] -pub struct RestApi { - cors_domains: Option>, - endpoints: Arc, - fetcher: Arc, +pub struct RestApi { + // TODO [ToDr] cors_domains should be handled by the server to avoid duplicated logic. + // RequestMiddleware should be able to tell that cors headers should be included. + cors_domains: Option>, + apps: Vec, + fetcher: F, } -impl RestApi { - pub fn new(cors_domains: Vec, endpoints: Arc, fetcher: Arc) -> Box { +impl RestApi { + pub fn new(cors_domains: Vec, endpoints: &Endpoints, fetcher: F) -> Box { Box::new(RestApi { cors_domains: Some(cors_domains), - endpoints: endpoints, + apps: Self::list_apps(endpoints), fetcher: fetcher, }) } - fn list_apps(&self) -> Vec { - self.endpoints.iter().filter_map(|(ref k, ref e)| { + fn list_apps(endpoints: &Endpoints) -> Vec { + endpoints.iter().filter_map(|(ref k, ref e)| { e.info().map(|ref info| App::from_info(k, info)) }).collect() } } -impl Endpoint for RestApi { +impl Endpoint for RestApi { fn to_async_handler(&self, path: EndpointPath, control: Control) -> Box { - Box::new(RestApiRouter::new(self.clone(), path, control)) + Box::new(RestApiRouter::new((*self).clone(), path, control)) } } -struct RestApiRouter { - api: RestApi, +struct RestApiRouter { + api: RestApi, cors_header: Option, path: Option, control: Option, handler: Box, } -impl RestApiRouter { - fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { +impl RestApiRouter { + fn new(api: RestApi, path: EndpointPath, control: Control) -> Self { RestApiRouter { path: Some(path), cors_header: None, @@ -114,7 +114,7 @@ impl RestApiRouter { } } -impl server::Handler 
for RestApiRouter { +impl server::Handler for RestApiRouter { fn on_request(&mut self, request: server::Request) -> Next { self.cors_header = jsonrpc_http_server::cors_header(&request, &self.api.cors_domains).into(); @@ -142,7 +142,7 @@ impl server::Handler for RestApiRouter { if let Some(ref hash) = hash { path.app_id = hash.clone().to_owned() } let handler = endpoint.and_then(|v| match v { - "apps" => Some(response::as_json(&self.api.list_apps())), + "apps" => Some(response::as_json(&self.api.apps)), "ping" => Some(response::ping()), "content" => self.resolve_content(hash, path, control), _ => None diff --git a/dapps/src/apps/fetcher/mod.rs b/dapps/src/apps/fetcher/mod.rs index c2607fe43..a824134cb 100644 --- a/dapps/src/apps/fetcher/mod.rs +++ b/dapps/src/apps/fetcher/mod.rs @@ -47,7 +47,8 @@ pub trait Fetcher: Send + Sync + 'static { fn to_async_handler(&self, path: EndpointPath, control: hyper::Control) -> Box; } -pub struct ContentFetcher { +#[derive(Clone)] +pub struct ContentFetcher { dapps_path: PathBuf, resolver: R, cache: Arc>, @@ -57,14 +58,14 @@ pub struct ContentFetcher Drop for ContentFetcher { +impl Drop for ContentFetcher { fn drop(&mut self) { // Clear cache path let _ = fs::remove_dir_all(&self.dapps_path); } } -impl ContentFetcher { +impl ContentFetcher { pub fn new(resolver: R, sync_status: Arc, embeddable_on: Option<(String, u16)>, remote: Remote, fetch: F) -> Self { let mut dapps_path = env::temp_dir(); @@ -97,7 +98,7 @@ impl ContentFetcher { } } -impl Fetcher for ContentFetcher { +impl Fetcher for ContentFetcher { fn contains(&self, content_id: &str) -> bool { { let mut cache = self.cache.lock(); @@ -233,6 +234,7 @@ mod tests { use page::LocalPageEndpoint; use super::{ContentFetcher, Fetcher}; + #[derive(Clone)] struct FakeResolver; impl URLHint for FakeResolver { fn resolve(&self, _id: Bytes) -> Option { diff --git a/dapps/src/endpoint.rs b/dapps/src/endpoint.rs index 648d82ff8..ea5825b74 100644 --- a/dapps/src/endpoint.rs +++ 
b/dapps/src/endpoint.rs @@ -16,9 +16,10 @@ //! URL Endpoint traits -use hyper::{self, server, net}; use std::collections::BTreeMap; +use hyper::{self, server, net}; + #[derive(Debug, PartialEq, Default, Clone)] pub struct EndpointPath { pub app_id: String, diff --git a/dapps/src/handlers/auth.rs b/dapps/src/handlers/auth.rs deleted file mode 100644 index db6018e0d..000000000 --- a/dapps/src/handlers/auth.rs +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! 
Authorization Handlers - -use hyper::{server, Decoder, Encoder, Next}; -use hyper::net::HttpStream; -use hyper::status::StatusCode; - -pub struct AuthRequiredHandler; - -impl server::Handler for AuthRequiredHandler { - fn on_request(&mut self, _request: server::Request) -> Next { - Next::write() - } - - fn on_request_readable(&mut self, _decoder: &mut Decoder) -> Next { - Next::write() - } - - fn on_response(&mut self, res: &mut server::Response) -> Next { - res.set_status(StatusCode::Unauthorized); - res.headers_mut().set_raw("WWW-Authenticate", vec![b"Basic realm=\"Parity\"".to_vec()]); - Next::write() - } - - fn on_response_writable(&mut self, _encoder: &mut Encoder) -> Next { - Next::end() - } -} - diff --git a/dapps/src/handlers/mod.rs b/dapps/src/handlers/mod.rs index cec7be631..3e2daf462 100644 --- a/dapps/src/handlers/mod.rs +++ b/dapps/src/handlers/mod.rs @@ -16,14 +16,12 @@ //! Hyper handlers implementations. -mod auth; mod content; mod echo; mod fetch; mod redirect; mod streaming; -pub use self::auth::AuthRequiredHandler; pub use self::content::ContentHandler; pub use self::echo::EchoHandler; pub use self::fetch::{ContentFetcherHandler, ContentValidator, FetchControl, ValidatorResponse}; diff --git a/dapps/src/lib.rs b/dapps/src/lib.rs index 252e1c3bb..60aba30a4 100644 --- a/dapps/src/lib.rs +++ b/dapps/src/lib.rs @@ -34,9 +34,7 @@ extern crate zip; extern crate jsonrpc_core; extern crate jsonrpc_http_server; -extern crate jsonrpc_server_utils; -extern crate ethcore_rpc; extern crate ethcore_util as util; extern crate fetch; extern crate parity_dapps_glue as parity_dapps; @@ -61,7 +59,6 @@ mod apps; mod page; mod router; mod handlers; -mod rpc; mod api; mod proxypac; mod url; @@ -69,23 +66,16 @@ mod web; #[cfg(test)] mod tests; -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::sync::Arc; -use std::net::SocketAddr; use std::collections::HashMap; -use jsonrpc_core::{Middleware, MetaIoHandler}; -use 
jsonrpc_http_server::tokio_core::reactor::Remote as TokioRemote; -pub use jsonrpc_http_server::{DomainsValidation, Host, AccessControlAllowOrigin}; -pub use jsonrpc_http_server::hyper; +use jsonrpc_http_server::{self as http, hyper, AccessControlAllowOrigin}; -use ethcore_rpc::Metadata; -use fetch::{Fetch, Client as FetchClient}; -use hash_fetch::urlhint::ContractClient; +use fetch::Fetch; use parity_reactor::Remote; -use router::auth::{Authorization, NoAuth, HttpBasicAuth}; -use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; +pub use hash_fetch::urlhint::ContractClient; /// Indicates sync status pub trait SyncStatus: Send + Sync { @@ -107,296 +97,92 @@ impl WebProxyTokens for F where F: Fn(String) -> bool + Send + Sync { fn is_web_proxy_token_valid(&self, token: &str) -> bool { self(token.to_owned()) } } -/// Webapps HTTP+RPC server build. -pub struct ServerBuilder { - dapps_path: PathBuf, - extra_dapps: Vec, - registrar: Arc, - sync_status: Arc, - web_proxy_tokens: Arc, - signer_address: Option<(String, u16)>, - allowed_hosts: Option>, - extra_cors: Option>, - remote: Remote, - fetch: Option, +/// Dapps server as `jsonrpc-http-server` request middleware. +pub struct Middleware { + router: router::Router>, } -impl ServerBuilder { - /// Construct new dapps server - pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { - ServerBuilder { - dapps_path: dapps_path.as_ref().to_owned(), - extra_dapps: vec![], - registrar: registrar, - sync_status: Arc::new(|| false), - web_proxy_tokens: Arc::new(|_| false), - signer_address: None, - allowed_hosts: Some(vec![]), - extra_cors: None, - remote: remote, - fetch: None, - } - } -} - -impl ServerBuilder { - /// Set a fetch client to use. 
- pub fn fetch(self, fetch: X) -> ServerBuilder { - ServerBuilder { - dapps_path: self.dapps_path, - extra_dapps: vec![], - registrar: self.registrar, - sync_status: self.sync_status, - web_proxy_tokens: self.web_proxy_tokens, - signer_address: self.signer_address, - allowed_hosts: self.allowed_hosts, - extra_cors: self.extra_cors, - remote: self.remote, - fetch: Some(fetch), - } - } - - /// Change default sync status. - pub fn sync_status(mut self, status: Arc) -> Self { - self.sync_status = status; - self - } - - /// Change default web proxy tokens validator. - pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { - self.web_proxy_tokens = tokens; - self - } - - /// Change default signer port. - pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { - self.signer_address = signer_address; - self - } - - /// Change allowed hosts. - /// `None` - All hosts are allowed - /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) - pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { - self.allowed_hosts = allowed_hosts.into(); - self - } - - /// Extra cors headers. - /// `None` - no additional CORS URLs - pub fn extra_cors_headers(mut self, cors: DomainsValidation) -> Self { - self.extra_cors = cors.into(); - self - } - - /// Change extra dapps paths (apart from `dapps_path`) - pub fn extra_dapps>(mut self, extra_dapps: &[P]) -> Self { - self.extra_dapps = extra_dapps.iter().map(|p| p.as_ref().to_owned()).collect(); - self - } - - /// Asynchronously start server with no authentication, - /// returns result with `Server` handle on success or an error. 
- pub fn start_unsecured_http>(self, addr: &SocketAddr, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - NoAuth, - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - /// Asynchronously start server with `HTTP Basic Authentication`, - /// return result with `Server` handle on success or an error. - pub fn start_basic_auth_http>(self, addr: &SocketAddr, username: &str, password: &str, handler: MetaIoHandler, tokio_remote: TokioRemote) -> Result { - let fetch = self.fetch_client()?; - Server::start_http( - addr, - self.allowed_hosts, - self.extra_cors, - HttpBasicAuth::single_user(username, password), - handler, - self.dapps_path, - self.extra_dapps, - self.signer_address, - self.registrar, - self.sync_status, - self.web_proxy_tokens, - self.remote, - tokio_remote, - fetch, - ) - } - - fn fetch_client(&self) -> Result { - match self.fetch.clone() { - Some(fetch) => Ok(fetch), - None => T::new().map_err(|_| ServerError::FetchInitialization), - } - } -} - -/// Webapps HTTP server. -pub struct Server { - server: Option, -} - -impl Server { - /// Returns a list of allowed hosts or `None` if all hosts are allowed. - fn allowed_hosts(hosts: Option>, bind_address: String) -> Option> { - let mut allowed = Vec::new(); - - match hosts { - Some(hosts) => allowed.extend_from_slice(&hosts), - None => return None, - } - - // Add localhost domain as valid too if listening on loopback interface. - allowed.push(bind_address.replace("127.0.0.1", "localhost").into()); - allowed.push(bind_address.into()); - Some(allowed) - } - - /// Returns a list of CORS domains for API endpoint. - fn cors_domains( +impl Middleware { + /// Creates new Dapps server middleware. 
+ pub fn new( + remote: Remote, signer_address: Option<(String, u16)>, - extra_cors: Option>, - ) -> Vec { - let basic_cors = match signer_address { - Some(signer_address) => [ - format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("http://{}", address(&signer_address)), - format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), - format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), - format!("https://{}", address(&signer_address)), - ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), - None => vec![], - }; - - match extra_cors { - None => basic_cors, - Some(extra_cors) => basic_cors.into_iter().chain(extra_cors).collect(), - } - } - - fn start_http>( - addr: &SocketAddr, - hosts: Option>, - extra_cors: Option>, - authorization: A, - handler: MetaIoHandler, dapps_path: PathBuf, extra_dapps: Vec, - signer_address: Option<(String, u16)>, registrar: Arc, sync_status: Arc, web_proxy_tokens: Arc, - remote: Remote, - tokio_remote: TokioRemote, fetch: F, - ) -> Result { - let authorization = Arc::new(authorization); - let content_fetcher = Arc::new(apps::fetcher::ContentFetcher::new( + ) -> Self { + let content_fetcher = apps::fetcher::ContentFetcher::new( hash_fetch::urlhint::URLHintContract::new(registrar), sync_status, signer_address.clone(), remote.clone(), fetch.clone(), - )); - let endpoints = Arc::new(apps::all_endpoints( + ); + let endpoints = apps::all_endpoints( dapps_path, extra_dapps, signer_address.clone(), web_proxy_tokens, remote.clone(), fetch.clone(), - )); - let cors_domains = Self::cors_domains(signer_address.clone(), extra_cors); + ); - let special = Arc::new({ + let cors_domains = cors_domains(signer_address.clone()); + + let special = { let mut special = HashMap::new(); - special.insert(router::SpecialEndpoint::Rpc, rpc::rpc(handler, tokio_remote, cors_domains.clone())); - special.insert(router::SpecialEndpoint::Utils, apps::utils()); 
+ special.insert(router::SpecialEndpoint::Rpc, None); + special.insert(router::SpecialEndpoint::Utils, Some(apps::utils())); special.insert( router::SpecialEndpoint::Api, - api::RestApi::new(cors_domains, endpoints.clone(), content_fetcher.clone()) + Some(api::RestApi::new(cors_domains.clone(), &endpoints, content_fetcher.clone())), ); special - }); - let hosts = Self::allowed_hosts(hosts, format!("{}", addr)); + }; - hyper::Server::http(addr)? - .handle(move |ctrl| router::Router::new( - ctrl, - signer_address.clone(), - content_fetcher.clone(), - endpoints.clone(), - special.clone(), - authorization.clone(), - hosts.clone(), - )) - .map(|(l, srv)| { + let router = router::Router::new( + signer_address, + content_fetcher, + endpoints, + special, + ); - ::std::thread::spawn(move || { - srv.run(); - }); - - Server { - server: Some(l), - } - }) - .map_err(ServerError::from) - } - - #[cfg(test)] - /// Returns address that this server is bound to. - pub fn addr(&self) -> &SocketAddr { - self.server.as_ref() - .expect("server is always Some at the start; it's consumed only when object is dropped; qed") - .addrs() - .first() - .expect("You cannot start the server without binding to at least one address; qed") - } -} - -impl Drop for Server { - fn drop(&mut self) { - self.server.take().unwrap().close() - } -} - -/// Webapp Server startup error -#[derive(Debug)] -pub enum ServerError { - /// Wrapped `std::io::Error` - IoError(std::io::Error), - /// Other `hyper` error - Other(hyper::error::Error), - /// Fetch service initialization error - FetchInitialization, -} - -impl From for ServerError { - fn from(err: hyper::error::Error) -> Self { - match err { - hyper::error::Error::Io(e) => ServerError::IoError(e), - e => ServerError::Other(e), + Middleware { + router: router, } } } +impl http::RequestMiddleware for Middleware { + fn on_request(&self, req: &hyper::server::Request, control: &hyper::Control) -> http::RequestMiddlewareAction { + self.router.on_request(req, control) 
+ } +} + +/// Returns a list of CORS domains for API endpoint. +fn cors_domains(signer_address: Option<(String, u16)>) -> Vec { + use self::apps::{HOME_PAGE, DAPPS_DOMAIN}; + + match signer_address { + Some(signer_address) => [ + format!("http://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("http://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("http://{}", address(&signer_address)), + format!("https://{}{}", HOME_PAGE, DAPPS_DOMAIN), + format!("https://{}{}:{}", HOME_PAGE, DAPPS_DOMAIN, signer_address.1), + format!("https://{}", address(&signer_address)), + ].into_iter().map(|val| AccessControlAllowOrigin::Value(val.into())).collect(), + None => vec![], + } +} + +fn address(address: &(String, u16)) -> String { + format!("{}:{}", address.0, address.1) +} + /// Random filename fn random_filename() -> String { use ::rand::Rng; @@ -404,39 +190,18 @@ fn random_filename() -> String { rng.gen_ascii_chars().take(12).collect() } -fn address(address: &(String, u16)) -> String { - format!("{}:{}", address.0, address.1) -} - #[cfg(test)] mod util_tests { - use super::Server; + use super::cors_domains; use jsonrpc_http_server::AccessControlAllowOrigin; - #[test] - fn should_return_allowed_hosts() { - // given - let bind_address = "127.0.0.1".to_owned(); - - // when - let all = Server::allowed_hosts(None, bind_address.clone()); - let address = Server::allowed_hosts(Some(Vec::new()), bind_address.clone()); - let some = Server::allowed_hosts(Some(vec!["ethcore.io".into()]), bind_address.clone()); - - // then - assert_eq!(all, None); - assert_eq!(address, Some(vec!["localhost".into(), "127.0.0.1".into()])); - assert_eq!(some, Some(vec!["ethcore.io".into(), "localhost".into(), "127.0.0.1".into()])); - } - #[test] fn should_return_cors_domains() { // given // when - let none = Server::cors_domains(None, None); - let some = Server::cors_domains(Some(("127.0.0.1".into(), 18180)), None); - let extra = Server::cors_domains(None, Some(vec!["all".into()])); + let none = 
cors_domains(None); + let some = cors_domains(Some(("127.0.0.1".into(), 18180))); // then assert_eq!(none, Vec::::new()); @@ -448,6 +213,5 @@ mod util_tests { "https://parity.web3.site:18180".into(), "https://127.0.0.1:18180".into(), ]); - assert_eq!(extra, vec![AccessControlAllowOrigin::Any]); } } diff --git a/dapps/src/router/mod.rs b/dapps/src/router.rs similarity index 71% rename from dapps/src/router/mod.rs rename to dapps/src/router.rs index 0b4e632a6..995565f26 100644 --- a/dapps/src/router/mod.rs +++ b/dapps/src/router.rs @@ -15,24 +15,20 @@ // along with Parity. If not, see . //! Router implementation -//! Processes request handling authorization and dispatching it to proper application. - -pub mod auth; -mod host_validation; +//! Dispatch requests to proper application. use address; use std::cmp; -use std::sync::Arc; use std::collections::HashMap; use url::{Url, Host}; -use hyper::{self, server, header, Next, Encoder, Decoder, Control, StatusCode}; +use hyper::{self, server, header, Control, StatusCode}; use hyper::net::HttpStream; -use jsonrpc_server_utils::hosts; +use jsonrpc_http_server as http; use apps::{self, DAPPS_DOMAIN}; use apps::fetcher::Fetcher; -use endpoint::{Endpoint, Endpoints, EndpointPath}; +use endpoint::{Endpoint, Endpoints, EndpointPath, Handler}; use handlers::{self, Redirection, ContentHandler}; /// Special endpoints are accessible on every domain (every dapp) @@ -44,51 +40,29 @@ pub enum SpecialEndpoint { None, } -pub struct Router { - control: Option, +pub struct Router { signer_address: Option<(String, u16)>, - endpoints: Arc, - fetch: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - handler: Box + Send>, + endpoints: Endpoints, + fetch: F, + special: HashMap>>, } -impl server::Handler for Router { - - fn on_request(&mut self, req: server::Request) -> Next { +impl http::RequestMiddleware for Router { + fn on_request(&self, req: &server::Request, control: &Control) -> http::RequestMiddlewareAction { // 
Choose proper handler depending on path / domain - let url = handlers::extract_url(&req); + let url = handlers::extract_url(req); let endpoint = extract_endpoint(&url); - let referer = extract_referer_endpoint(&req); + let referer = extract_referer_endpoint(req); let is_utils = endpoint.1 == SpecialEndpoint::Utils; + let is_dapps_domain = endpoint.0.as_ref().map(|endpoint| endpoint.using_dapps_domains).unwrap_or(false); + let is_origin_set = req.headers().get::().is_some(); let is_get_request = *req.method() == hyper::Method::Get; trace!(target: "dapps", "Routing request to {:?}. Details: {:?}", url, req); - // Validate Host header - trace!(target: "dapps", "Validating host headers against: {:?}", self.allowed_hosts); - let is_valid = is_utils || host_validation::is_valid(&req, &self.allowed_hosts); - if !is_valid { - debug!(target: "dapps", "Rejecting invalid host header."); - self.handler = host_validation::host_invalid_response(); - return self.handler.on_request(req); - } - - trace!(target: "dapps", "Checking authorization."); - // Check authorization - let auth = self.authorization.is_authorized(&req); - if let auth::Authorized::No(handler) = auth { - debug!(target: "dapps", "Authorization denied."); - self.handler = handler; - return self.handler.on_request(req); - } - - - let control = self.control.take().expect("on_request is called only once; control is always defined at start; qed"); + let control = control.clone(); debug!(target: "dapps", "Handling endpoint request: {:?}", endpoint); - self.handler = match (endpoint.0, endpoint.1, referer) { + let handler: Option> = match (endpoint.0, endpoint.1, referer) { // Handle invalid web requests that we can recover from (ref path, SpecialEndpoint::None, Some((ref referer, ref referer_url))) if referer.app_id == apps::WEB_PATH @@ -100,26 +74,27 @@ impl server::Handler for Router let len = cmp::min(referer_url.path.len(), 2); // /web// let base = referer_url.path[..len].join("/"); let requested = url.map(|u| 
u.path.join("/")).unwrap_or_default(); - Redirection::boxed(&format!("/{}/{}", base, requested)) + Some(Redirection::boxed(&format!("/{}/{}", base, requested))) }, // First check special endpoints (ref path, ref endpoint, _) if self.special.contains_key(endpoint) => { trace!(target: "dapps", "Resolving to special endpoint."); self.special.get(endpoint) .expect("special known to contain key; qed") - .to_async_handler(path.clone().unwrap_or_default(), control) + .as_ref() + .map(|special| special.to_async_handler(path.clone().unwrap_or_default(), control)) }, // Then delegate to dapp (Some(ref path), _, _) if self.endpoints.contains_key(&path.app_id) => { trace!(target: "dapps", "Resolving to local/builtin dapp."); - self.endpoints.get(&path.app_id) + Some(self.endpoints.get(&path.app_id) .expect("endpoints known to contain key; qed") - .to_async_handler(path.clone(), control) + .to_async_handler(path.clone(), control)) }, // Try to resolve and fetch the dapp (Some(ref path), _, _) if self.fetch.contains(&path.app_id) => { trace!(target: "dapps", "Resolving to fetchable content."); - self.fetch.to_async_handler(path.clone(), control) + Some(self.fetch.to_async_handler(path.clone(), control)) }, // NOTE [todr] /home is redirected to home page since some users may have the redirection cached // (in the past we used 301 instead of 302) @@ -128,82 +103,61 @@ impl server::Handler for Router // 404 for non-existent content (Some(ref path), _, _) if is_get_request && path.app_id != "home" => { trace!(target: "dapps", "Resolving to 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Requested content was not found.", None, self.signer_address.clone(), - )) + ))) }, // Redirect any other GET request to signer. 
_ if is_get_request => { if let Some(ref signer_address) = self.signer_address { trace!(target: "dapps", "Redirecting to signer interface."); - Redirection::boxed(&format!("http://{}", address(signer_address))) + Some(Redirection::boxed(&format!("http://{}", address(signer_address)))) } else { trace!(target: "dapps", "Signer disabled, returning 404."); - Box::new(ContentHandler::error( + Some(Box::new(ContentHandler::error( StatusCode::NotFound, "404 Not Found", "Your homepage is not available when Trusted Signer is disabled.", Some("You can still access dapps by writing a correct address, though. Re-enable Signer to get your homepage back."), self.signer_address.clone(), - )) + ))) } }, // RPC by default _ => { trace!(target: "dapps", "Resolving to RPC call."); - self.special.get(&SpecialEndpoint::Rpc) - .expect("RPC endpoint always stored; qed") - .to_async_handler(EndpointPath::default(), control) + None } }; - // Delegate on_request to proper handler - self.handler.on_request(req) - } - - /// This event occurs each time the `Request` is ready to be read from. - fn on_request_readable(&mut self, decoder: &mut Decoder) -> Next { - self.handler.on_request_readable(decoder) - } - - /// This event occurs after the first time this handled signals `Next::write()`. - fn on_response(&mut self, response: &mut server::Response) -> Next { - self.handler.on_response(response) - } - - /// This event occurs each time the `Response` is ready to be written to. 
- fn on_response_writable(&mut self, encoder: &mut Encoder) -> Next { - self.handler.on_response_writable(encoder) + match handler { + Some(handler) => http::RequestMiddlewareAction::Respond { + should_validate_hosts: !(is_utils || is_dapps_domain), + handler: handler, + }, + None => http::RequestMiddlewareAction::Proceed { + should_continue_on_invalid_cors: !is_origin_set, + }, + } } } -impl Router { +impl Router { pub fn new( - control: Control, signer_address: Option<(String, u16)>, - content_fetcher: Arc, - endpoints: Arc, - special: Arc>>, - authorization: Arc, - allowed_hosts: Option>, - ) -> Self { - - let handler = special.get(&SpecialEndpoint::Utils) - .expect("Utils endpoint always stored; qed") - .to_handler(EndpointPath::default()); + content_fetcher: F, + endpoints: Endpoints, + special: HashMap>>, + ) -> Self { Router { - control: Some(control), signer_address: signer_address, endpoints: endpoints, fetch: content_fetcher, special: special, - authorization: authorization, - allowed_hosts: allowed_hosts, - handler: handler, } } } diff --git a/dapps/src/router/auth.rs b/dapps/src/router/auth.rs deleted file mode 100644 index 007ebb96d..000000000 --- a/dapps/src/router/auth.rs +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . - -//! 
HTTP Authorization implementations - -use std::collections::HashMap; -use hyper::{server, net, header, status}; -use endpoint::Handler; -use handlers::{AuthRequiredHandler, ContentHandler}; - -/// Authorization result -pub enum Authorized { - /// Authorization was successful. - Yes, - /// Unsuccessful authorization. Handler for further work is returned. - No(Box), -} - -/// Authorization interface -pub trait Authorization : Send + Sync { - /// Checks if authorization is valid. - fn is_authorized(&self, req: &server::Request)-> Authorized; -} - -/// HTTP Basic Authorization handler -pub struct HttpBasicAuth { - users: HashMap, -} - -/// No-authorization implementation (authorization disabled) -pub struct NoAuth; - -impl Authorization for NoAuth { - fn is_authorized(&self, _req: &server::Request)-> Authorized { - Authorized::Yes - } -} - -impl Authorization for HttpBasicAuth { - fn is_authorized(&self, req: &server::Request) -> Authorized { - let auth = self.check_auth(&req); - - match auth { - Access::Denied => { - Authorized::No(Box::new(ContentHandler::error( - status::StatusCode::Unauthorized, - "Unauthorized", - "You need to provide valid credentials to access this page.", - None, - None, - ))) - }, - Access::AuthRequired => { - Authorized::No(Box::new(AuthRequiredHandler)) - }, - Access::Granted => { - Authorized::Yes - }, - } - } -} - -#[derive(Debug)] -enum Access { - Granted, - Denied, - AuthRequired, -} - -impl HttpBasicAuth { - /// Creates `HttpBasicAuth` instance with only one user. 
- pub fn single_user(username: &str, password: &str) -> Self { - let mut users = HashMap::new(); - users.insert(username.to_owned(), password.to_owned()); - HttpBasicAuth { - users: users - } - } - - fn is_authorized(&self, username: &str, password: &str) -> bool { - self.users.get(&username.to_owned()).map_or(false, |pass| pass == password) - } - - fn check_auth(&self, req: &server::Request) -> Access { - match req.headers().get::>() { - Some(&header::Authorization( - header::Basic { ref username, password: Some(ref password) } - )) if self.is_authorized(username, password) => Access::Granted, - Some(_) => Access::Denied, - None => Access::AuthRequired, - } - } -} diff --git a/dapps/src/router/host_validation.rs b/dapps/src/router/host_validation.rs deleted file mode 100644 index e5fcedd94..000000000 --- a/dapps/src/router/host_validation.rs +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- - -use apps::DAPPS_DOMAIN; -use hyper::{server, header, StatusCode}; -use hyper::net::HttpStream; - -use handlers::ContentHandler; -use jsonrpc_http_server; -use jsonrpc_server_utils::hosts; - -pub fn is_valid(req: &server::Request, allowed_hosts: &Option>) -> bool { - let header_valid = jsonrpc_http_server::is_host_allowed(req, allowed_hosts); - match (header_valid, req.headers().get::()) { - (true, _) => true, - (_, Some(host)) => host.hostname.ends_with(DAPPS_DOMAIN), - _ => false, - } -} - -pub fn host_invalid_response() -> Box + Send> { - Box::new(ContentHandler::error(StatusCode::Forbidden, - "Current Host Is Disallowed", - "You are trying to access your node using incorrect address.", - Some("Use allowed URL or specify different hosts CLI options."), - None, - )) -} diff --git a/dapps/src/rpc.rs b/dapps/src/rpc.rs index 6ddb31db0..b743408dc 100644 --- a/dapps/src/rpc.rs +++ b/dapps/src/rpc.rs @@ -66,14 +66,14 @@ impl> Endpoint for RpcEndpoint { #[derive(Default)] struct NoopMiddleware; impl http::RequestMiddleware for NoopMiddleware { - fn on_request(&self, request: &http::hyper::server::Request) -> http::RequestMiddlewareAction { + fn on_request(&self, request: &http::hyper::server::Request, _control: &http::hyper::Control) -> http::RequestMiddlewareAction { http::RequestMiddlewareAction::Proceed { should_continue_on_invalid_cors: request.headers().get::().is_none(), } } } -struct MetadataExtractor; +pub struct MetadataExtractor; impl HttpMetaExtractor for MetadataExtractor { fn read_metadata(&self, request: &http::hyper::server::Request) -> Metadata { let dapp_id = request.headers().get::() diff --git a/dapps/src/tests/api.rs b/dapps/src/tests/api.rs index 73467e854..043814377 100644 --- a/dapps/src/tests/api.rs +++ b/dapps/src/tests/api.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use tests::helpers::{serve, serve_with_registrar, serve_extra_cors, request, assert_security_headers}; +use tests::helpers::{serve, serve_with_registrar, request, assert_security_headers}; #[test] fn should_return_error() { @@ -195,26 +195,3 @@ fn should_return_signer_port_cors_headers_for_home_parity_with_port() { response.assert_status("HTTP/1.1 200 OK"); response.assert_header("Access-Control-Allow-Origin", "http://parity.web3.site:18180"); } - -#[test] -fn should_return_extra_cors_headers() { - // given - let server = serve_extra_cors(Some(vec!["all".to_owned()])); - - // when - let response = request(server, - "\ - POST /api/ping HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: http://somedomain.io\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - response.assert_status("HTTP/1.1 200 OK"); - response.assert_header("Access-Control-Allow-Origin", "http://somedomain.io"); -} - diff --git a/dapps/src/tests/authorization.rs b/dapps/src/tests/authorization.rs deleted file mode 100644 index 346f8f2fb..000000000 --- a/dapps/src/tests/authorization.rs +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015-2017 Parity Technologies (UK) Ltd. -// This file is part of Parity. - -// Parity is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Parity is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Parity. If not, see . 
- -use tests::helpers::{serve_with_auth, request, assert_security_headers_for_embed}; - -#[test] -fn should_require_authorization() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert_eq!(response.headers.get(0).unwrap(), "WWW-Authenticate: Basic realm=\"Parity\""); -} - -#[test] -fn should_reject_on_invalid_auth() { - // given - let server = serve_with_auth("test", "test"); - - // when - let response = request(server, - "\ - GET / HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 401 Unauthorized".to_owned()); - assert!(response.body.contains("Unauthorized"), response.body); - assert_eq!(response.headers_raw.contains("WWW-Authenticate"), false); -} - -#[test] -fn should_allow_on_valid_auth() { - // given - let server = serve_with_auth("Aladdin", "OpenSesame"); - - // when - let response = request(server, - "\ - GET /ui/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l\r\n - \r\n\ - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert_security_headers_for_embed(&response.headers); -} diff --git a/dapps/src/tests/helpers/mod.rs b/dapps/src/tests/helpers/mod.rs index 036933995..e6c032549 100644 --- a/dapps/src/tests/helpers/mod.rs +++ b/dapps/src/tests/helpers/mod.rs @@ -16,18 +16,20 @@ use std::env; use std::str; -use std::ops::Deref; +use std::net::SocketAddr; +use std::path::{Path, PathBuf}; use std::sync::Arc; use env_logger::LogBuilder; -use ethcore_rpc::Metadata; -use jsonrpc_core::MetaIoHandler; +use jsonrpc_core::IoHandler; +use jsonrpc_http_server::{self as http, Host, 
DomainsValidation}; -use ServerBuilder; -use Server; -use fetch::Fetch; use devtools::http_client; +use hash_fetch::urlhint::ContractClient; +use fetch::{Fetch, Client as FetchClient}; use parity_reactor::{EventLoop, Remote}; +use {Middleware, SyncStatus, WebProxyTokens}; + mod registrar; mod fetch; @@ -50,7 +52,7 @@ pub struct ServerLoop { pub event_loop: EventLoop, } -impl Deref for ServerLoop { +impl ::std::ops::Deref for ServerLoop { type Target = Server; fn deref(&self) -> &Self::Target { @@ -58,7 +60,7 @@ impl Deref for ServerLoop { } } -pub fn init_server(process: F, io: MetaIoHandler, remote: Remote) -> (ServerLoop, Arc) where +pub fn init_server(process: F, io: IoHandler, remote: Remote) -> (ServerLoop, Arc) where F: FnOnce(ServerBuilder) -> ServerBuilder, B: Fetch, { @@ -74,33 +76,15 @@ pub fn init_server(process: F, io: MetaIoHandler, remote: Remote &dapps_path, registrar.clone(), remote, )) .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io, event_loop.raw_remote()).unwrap(); + .start_unsecured_http(&"127.0.0.1:0".parse().unwrap(), io).unwrap(); ( ServerLoop { server: server, event_loop: event_loop }, registrar, ) } -pub fn serve_with_auth(user: &str, pass: &str) -> ServerLoop { - init_logger(); - let registrar = Arc::new(FakeRegistrar::new()); - let mut dapps_path = env::temp_dir(); - dapps_path.push("non-existent-dir-to-prevent-fs-files-from-loading"); - - let event_loop = EventLoop::spawn(); - let io = MetaIoHandler::default(); - let server = ServerBuilder::new(&dapps_path, registrar, event_loop.remote()) - .signer_address(Some(("127.0.0.1".into(), SIGNER_PORT))) - .allowed_hosts(None.into()) - .start_basic_auth_http(&"127.0.0.1:0".parse().unwrap(), user, pass, io, event_loop.raw_remote()).unwrap(); - ServerLoop { - server: server, - event_loop: event_loop, - } -} - -pub fn serve_with_rpc(io: MetaIoHandler) -> ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), io, 
Remote::new_sync()).0 +pub fn serve_with_rpc(io: IoHandler) -> ServerLoop { + init_server(|builder| builder, io, Remote::new_sync()).0 } pub fn serve_hosts(hosts: Option>) -> ServerLoop { @@ -108,20 +92,13 @@ pub fn serve_hosts(hosts: Option>) -> ServerLoop { init_server(|builder| builder.allowed_hosts(hosts.into()), Default::default(), Remote::new_sync()).0 } -pub fn serve_extra_cors(extra_cors: Option>) -> ServerLoop { - let extra_cors = extra_cors.map(|cors| cors.into_iter().map(Into::into).collect()); - init_server(|builder| builder.allowed_hosts(None.into()).extra_cors_headers(extra_cors.into()), Default::default(), Remote::new_sync()).0 -} - pub fn serve_with_registrar() -> (ServerLoop, Arc) { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()) + init_server(|builder| builder, Default::default(), Remote::new_sync()) } pub fn serve_with_registrar_and_sync() -> (ServerLoop, Arc) { init_server(|builder| { - builder - .sync_status(Arc::new(|| true)) - .allowed_hosts(None.into()) + builder.sync_status(Arc::new(|| true)) }, Default::default(), Remote::new_sync()) } @@ -133,7 +110,7 @@ pub fn serve_with_registrar_and_fetch_and_threads(multi_threaded: bool) -> (Serv let fetch = FakeFetch::default(); let f = fetch.clone(); let (server, reg) = init_server(move |builder| { - builder.allowed_hosts(None.into()).fetch(f.clone()) + builder.fetch(f.clone()) }, Default::default(), if multi_threaded { Remote::new_thread_per_future() } else { Remote::new_sync() }); (server, fetch, reg) @@ -144,7 +121,6 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { let f = fetch.clone(); let (server, _) = init_server(move |builder| { builder - .allowed_hosts(None.into()) .fetch(f.clone()) .web_proxy_tokens(Arc::new(move |token| &token == web_token)) }, Default::default(), Remote::new_sync()); @@ -153,7 +129,7 @@ pub fn serve_with_fetch(web_token: &'static str) -> (ServerLoop, FakeFetch) { } pub fn serve() -> 
ServerLoop { - init_server(|builder| builder.allowed_hosts(None.into()), Default::default(), Remote::new_sync()).0 + init_server(|builder| builder, Default::default(), Remote::new_sync()).0 } pub fn request(server: ServerLoop, request: &str) -> http_client::Response { @@ -166,3 +142,157 @@ pub fn assert_security_headers(headers: &[String]) { pub fn assert_security_headers_for_embed(headers: &[String]) { http_client::assert_security_headers_present(headers, Some(SIGNER_PORT)) } + + +/// Webapps HTTP+RPC server build. +pub struct ServerBuilder { + dapps_path: PathBuf, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + signer_address: Option<(String, u16)>, + allowed_hosts: DomainsValidation, + remote: Remote, + fetch: Option, +} + +impl ServerBuilder { + /// Construct new dapps server + pub fn new>(dapps_path: P, registrar: Arc, remote: Remote) -> Self { + ServerBuilder { + dapps_path: dapps_path.as_ref().to_owned(), + registrar: registrar, + sync_status: Arc::new(|| false), + web_proxy_tokens: Arc::new(|_| false), + signer_address: None, + allowed_hosts: DomainsValidation::Disabled, + remote: remote, + fetch: None, + } + } +} + +impl ServerBuilder { + /// Set a fetch client to use. + pub fn fetch(self, fetch: X) -> ServerBuilder { + ServerBuilder { + dapps_path: self.dapps_path, + registrar: self.registrar, + sync_status: self.sync_status, + web_proxy_tokens: self.web_proxy_tokens, + signer_address: self.signer_address, + allowed_hosts: self.allowed_hosts, + remote: self.remote, + fetch: Some(fetch), + } + } + + /// Change default sync status. + pub fn sync_status(mut self, status: Arc) -> Self { + self.sync_status = status; + self + } + + /// Change default web proxy tokens validator. + pub fn web_proxy_tokens(mut self, tokens: Arc) -> Self { + self.web_proxy_tokens = tokens; + self + } + + /// Change default signer port. 
+ pub fn signer_address(mut self, signer_address: Option<(String, u16)>) -> Self { + self.signer_address = signer_address; + self + } + + /// Change allowed hosts. + /// `None` - All hosts are allowed + /// `Some(whitelist)` - Allow only whitelisted hosts (+ listen address) + pub fn allowed_hosts(mut self, allowed_hosts: DomainsValidation) -> Self { + self.allowed_hosts = allowed_hosts; + self + } + + /// Asynchronously start server with no authentication, + /// returns result with `Server` handle on success or an error. + pub fn start_unsecured_http(self, addr: &SocketAddr, io: IoHandler) -> Result { + let fetch = self.fetch_client(); + Server::start_http( + addr, + io, + self.allowed_hosts, + self.signer_address, + self.dapps_path, + vec![], + self.registrar, + self.sync_status, + self.web_proxy_tokens, + self.remote, + fetch, + ) + } + + fn fetch_client(&self) -> T { + match self.fetch.clone() { + Some(fetch) => fetch, + None => T::new().unwrap(), + } + } +} + + +/// Webapps HTTP server. +pub struct Server { + server: Option, +} + +impl Server { + fn start_http( + addr: &SocketAddr, + io: IoHandler, + allowed_hosts: DomainsValidation, + signer_address: Option<(String, u16)>, + dapps_path: PathBuf, + extra_dapps: Vec, + registrar: Arc, + sync_status: Arc, + web_proxy_tokens: Arc, + remote: Remote, + fetch: F, + ) -> Result { + let middleware = Middleware::new( + remote, + signer_address, + dapps_path, + extra_dapps, + registrar, + sync_status, + web_proxy_tokens, + fetch, + ); + http::ServerBuilder::new(io) + .request_middleware(middleware) + .allowed_hosts(allowed_hosts) + .cors(http::DomainsValidation::Disabled) + .start_http(addr) + .map(|server| Server { + server: Some(server), + }) + } + + /// Returns address that this server is bound to. 
+ pub fn addr(&self) -> &SocketAddr { + self.server.as_ref() + .expect("server is always Some at the start; it's consumed only when object is dropped; qed") + .addrs() + .first() + .expect("You cannot start the server without binding to at least one address; qed") + } +} + +impl Drop for Server { + fn drop(&mut self) { + self.server.take().unwrap().close() + } +} + diff --git a/dapps/src/tests/mod.rs b/dapps/src/tests/mod.rs index ced211d53..089318483 100644 --- a/dapps/src/tests/mod.rs +++ b/dapps/src/tests/mod.rs @@ -19,7 +19,6 @@ mod helpers; mod api; -mod authorization; mod fetch; mod redirection; mod rpc; diff --git a/dapps/src/tests/rpc.rs b/dapps/src/tests/rpc.rs index 2cc4ccb24..0cfc2c5a8 100644 --- a/dapps/src/tests/rpc.rs +++ b/dapps/src/tests/rpc.rs @@ -14,16 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use futures::{future, Future}; -use ethcore_rpc::{Metadata, Origin}; -use jsonrpc_core::{MetaIoHandler, Value}; +use jsonrpc_core::{IoHandler, Value}; use tests::helpers::{serve_with_rpc, request}; #[test] fn should_serve_rpc() { // given - let mut io = MetaIoHandler::default(); + let mut io = IoHandler::default(); io.add_method("rpc_test", |_| { Ok(Value::String("Hello World!".into())) }); @@ -49,70 +47,3 @@ fn should_serve_rpc() { response.assert_status("HTTP/1.1 200 OK"); assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); } - -#[test] -fn should_extract_metadata() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("".into())); - assert_eq!(meta.dapp_id(), "".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ 
HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - X-Parity-Origin: https://this.should.be.ignored\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} - -#[test] -fn should_extract_metadata_from_custom_header() { - // given - let mut io = MetaIoHandler::default(); - io.add_method_with_meta("rpc_test", |_params, meta: Metadata| { - assert_eq!(meta.origin, Origin::Dapps("https://parity.io/".into())); - assert_eq!(meta.dapp_id(), "https://parity.io/".into()); - future::ok(Value::String("Hello World!".into())).boxed() - }); - let server = serve_with_rpc(io); - - // when - let req = r#"{"jsonrpc":"2.0","id":1,"method":"rpc_test","params":[]}"#; - let response = request(server, &format!( - "\ - POST /rpc/ HTTP/1.1\r\n\ - Host: 127.0.0.1:8080\r\n\ - Connection: close\r\n\ - Origin: null\r\n\ - X-Parity-Origin: https://parity.io/\r\n\ - Content-Type: application/json\r\n\ - Content-Length: {}\r\n\ - \r\n\ - {}\r\n\ - ", - req.as_bytes().len(), - req, - )); - - // then - response.assert_status("HTTP/1.1 200 OK"); - assert_eq!(response.body, "31\n{\"jsonrpc\":\"2.0\",\"result\":\"Hello World!\",\"id\":1}\n\n0\n\n".to_owned()); -} diff --git a/dapps/src/tests/validation.rs b/dapps/src/tests/validation.rs index afeb7b5ef..fb68cf5ed 100644 --- a/dapps/src/tests/validation.rs +++ b/dapps/src/tests/validation.rs @@ -34,7 +34,7 @@ fn should_reject_invalid_host() { // then assert_eq!(response.status, "HTTP/1.1 403 Forbidden".to_owned()); - assert!(response.body.contains("Current Host Is Disallowed"), response.body); + assert!(response.body.contains("Provided Host header is not whitelisted."), response.body); } #[test] @@ -97,31 +97,3 @@ fn should_allow_parity_utils_even_on_invalid_domain() { // then 
assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); } - -#[test] -fn should_not_return_cors_headers_for_rpc() { - // given - let server = serve_hosts(Some(vec!["localhost:8080".into()])); - - // when - let response = request(server, - "\ - POST /rpc HTTP/1.1\r\n\ - Host: localhost:8080\r\n\ - Origin: null\r\n\ - Content-Type: application/json\r\n\ - Connection: close\r\n\ - \r\n\ - {} - " - ); - - // then - assert_eq!(response.status, "HTTP/1.1 200 OK".to_owned()); - assert!( - !response.headers_raw.contains("Access-Control-Allow-Origin"), - "CORS headers were not expected: {:?}", - response.headers - ); -} - diff --git a/docker/ubuntu/Dockerfile b/docker/ubuntu/Dockerfile index 475555be9..0ee84e1c5 100644 --- a/docker/ubuntu/Dockerfile +++ b/docker/ubuntu/Dockerfile @@ -8,7 +8,10 @@ RUN apt-get update && \ curl \ git \ file \ - binutils + binutils \ + libssl-dev \ + pkg-config \ + libudev-dev # install rustup RUN curl https://sh.rustup.rs -sSf | sh -s -- -y diff --git a/ethcore/light/Cargo.toml b/ethcore/light/Cargo.toml index 6f95d8a0e..78210904e 100644 --- a/ethcore/light/Cargo.toml +++ b/ethcore/light/Cargo.toml @@ -17,6 +17,7 @@ ethcore-util = { path = "../../util" } ethcore-network = { path = "../../util/network" } ethcore-io = { path = "../../util/io" } ethcore-ipc = { path = "../../ipc/rpc", optional = true } +ethcore-devtools = { path = "../../devtools" } rlp = { path = "../../util/rlp" } time = "0.1" smallvec = "0.3.1" diff --git a/ethcore/light/src/client/header_chain.rs b/ethcore/light/src/client/header_chain.rs index 9dcd25888..d4ea8d107 100644 --- a/ethcore/light/src/client/header_chain.rs +++ b/ethcore/light/src/client/header_chain.rs @@ -23,9 +23,9 @@ //! This is separate from the `BlockChain` for two reasons: //! - It stores only headers (and a pruned subset of them) //! - To allow for flexibility in the database layout once that's incorporated. -// TODO: use DB instead of memory. 
DB Layout: just the contents of `candidates`/`headers` -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; +use std::sync::Arc; use cht; @@ -34,7 +34,10 @@ use ethcore::error::BlockError; use ethcore::encoded; use ethcore::header::Header; use ethcore::ids::BlockId; -use util::{H256, U256, HeapSizeOf, Mutex, RwLock}; + +use rlp::{Encodable, Decodable, DecoderError, RlpStream, Rlp, UntrustedRlp}; +use util::{H256, U256, HeapSizeOf, RwLock}; +use util::kvdb::{DBTransaction, KeyValueDB}; use smallvec::SmallVec; @@ -43,6 +46,9 @@ use smallvec::SmallVec; /// relevant to any blocks we've got in memory. const HISTORY: u64 = 2048; +/// The best block key. Maps to an RLP list: [best_era, last_era] +const CURRENT_KEY: &'static [u8] = &*b"best_and_latest"; + /// Information about a block. #[derive(Debug, Clone)] pub struct BlockDescriptor { @@ -75,42 +81,142 @@ impl HeapSizeOf for Entry { } } +impl Encodable for Entry { + fn rlp_append(&self, s: &mut RlpStream) { + s.begin_list(self.candidates.len()); + + for candidate in &self.candidates { + s.begin_list(3) + .append(&candidate.hash) + .append(&candidate.parent_hash) + .append(&candidate.total_difficulty); + } + } +} + +impl Decodable for Entry { + fn decode(rlp: &UntrustedRlp) -> Result { + + let mut candidates = SmallVec::<[Candidate; 3]>::new(); + + for item in rlp.iter() { + candidates.push(Candidate { + hash: item.val_at(0)?, + parent_hash: item.val_at(1)?, + total_difficulty: item.val_at(2)?, + }) + } + + if candidates.is_empty() { return Err(DecoderError::Custom("Empty candidates vector submitted.")) } + + // rely on the invariant that the canonical entry is always first. 
+ let canon_hash = candidates[0].hash; + Ok(Entry { + candidates: candidates, + canonical_hash: canon_hash, + }) + } +} + +fn cht_key(number: u64) -> String { + format!("{:08x}_canonical", number) +} + +fn era_key(number: u64) -> String { + format!("candidates_{}", number) +} + +/// Pending changes from `insert` to be applied after the database write has finished. +pub struct PendingChanges { + best_block: Option, // new best block. +} + /// Header chain. See module docs for more details. pub struct HeaderChain { genesis_header: encoded::Header, // special-case the genesis. candidates: RwLock>, - headers: RwLock>, best_block: RwLock, - cht_roots: Mutex>, + db: Arc, + col: Option, } impl HeaderChain { - /// Create a new header chain given this genesis block. - pub fn new(genesis: &[u8]) -> Self { + /// Create a new header chain given this genesis block and database to read from. + pub fn new(db: Arc, col: Option, genesis: &[u8]) -> Result { use ethcore::views::HeaderView; - let g_view = HeaderView::new(genesis); + let chain = if let Some(current) = db.get(col, CURRENT_KEY)? { + let (best_number, highest_number) = { + let rlp = Rlp::new(¤t); + (rlp.val_at(0), rlp.val_at(1)) + }; - HeaderChain { - genesis_header: encoded::Header::new(genesis.to_owned()), - best_block: RwLock::new(BlockDescriptor { - hash: g_view.hash(), - number: 0, - total_difficulty: g_view.difficulty(), - }), - candidates: RwLock::new(BTreeMap::new()), - headers: RwLock::new(HashMap::new()), - cht_roots: Mutex::new(Vec::new()), - } + let mut cur_number = highest_number; + let mut candidates = BTreeMap::new(); + + // load all era entries and referenced headers within them. + while let Some(entry) = db.get(col, era_key(cur_number).as_bytes())? 
{ + let entry: Entry = ::rlp::decode(&entry); + trace!(target: "chain", "loaded header chain entry for era {} with {} candidates", + cur_number, entry.candidates.len()); + + candidates.insert(cur_number, entry); + + cur_number -= 1; + } + + // fill best block block descriptor. + let best_block = { + let era = match candidates.get(&best_number) { + Some(era) => era, + None => return Err(format!("Database corrupt: highest block referenced but no data.")), + }; + + let best = &era.candidates[0]; + BlockDescriptor { + hash: best.hash, + number: best_number, + total_difficulty: best.total_difficulty, + } + }; + + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(best_block), + candidates: RwLock::new(candidates), + db: db, + col: col, + } + } else { + let g_view = HeaderView::new(genesis); + HeaderChain { + genesis_header: encoded::Header::new(genesis.to_owned()), + best_block: RwLock::new(BlockDescriptor { + hash: g_view.hash(), + number: 0, + total_difficulty: g_view.difficulty(), + }), + candidates: RwLock::new(BTreeMap::new()), + db: db, + col: col, + } + }; + + Ok(chain) } /// Insert a pre-verified header. /// /// This blindly trusts that the data given to it is sensible. - pub fn insert(&self, header: Header) -> Result<(), BlockError> { + /// Returns a set of pending changes to be applied with `apply_pending` + /// before the next call to insert and after the transaction has been written. + pub fn insert(&self, transaction: &mut DBTransaction, header: Header) -> Result { let hash = header.hash(); let number = header.number(); let parent_hash = *header.parent_hash(); + let mut pending = PendingChanges { + best_block: None, + }; // hold candidates the whole time to guard import order. let mut candidates = self.candidates.write(); @@ -128,20 +234,41 @@ impl HeaderChain { let total_difficulty = parent_td + *header.difficulty(); - // insert headers and candidates entries. 
- candidates.entry(number).or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }) - .candidates.push(Candidate { + // insert headers and candidates entries and write era to disk. + { + let cur_era = candidates.entry(number) + .or_insert_with(|| Entry { candidates: SmallVec::new(), canonical_hash: hash }); + cur_era.candidates.push(Candidate { hash: hash, parent_hash: parent_hash, total_difficulty: total_difficulty, - }); + }); - let raw = ::rlp::encode(&header).to_vec(); - self.headers.write().insert(hash, encoded::Header::new(raw)); + // fix ordering of era before writing. + if total_difficulty > cur_era.candidates[0].total_difficulty { + let cur_pos = cur_era.candidates.len() - 1; + cur_era.candidates.swap(cur_pos, 0); + cur_era.canonical_hash = hash; + } + + transaction.put(self.col, era_key(number).as_bytes(), &::rlp::encode(&*cur_era)) + } + + let raw = ::rlp::encode(&header); + transaction.put(self.col, &hash[..], &*raw); + + let (best_num, is_new_best) = { + let cur_best = self.best_block.read(); + if cur_best.total_difficulty < total_difficulty { + (number, true) + } else { + (cur_best.number, false) + } + }; // reorganize ancestors so canonical entries are first in their // respective candidates vectors. - if self.best_block.read().total_difficulty < total_difficulty { + if is_new_best { let mut canon_hash = hash; for (&height, entry) in candidates.iter_mut().rev().skip_while(|&(height, _)| *height > number) { if height != number && entry.canonical_hash == canon_hash { break; } @@ -160,23 +287,26 @@ impl HeaderChain { // what about reorgs > cht::SIZE + HISTORY? // resetting to the last block of a given CHT should be possible. 
canon_hash = entry.candidates[0].parent_hash; + + // write altered era to disk + if height != number { + let rlp_era = ::rlp::encode(&*entry); + transaction.put(self.col, era_key(height).as_bytes(), &rlp_era); + } } trace!(target: "chain", "New best block: ({}, {}), TD {}", number, hash, total_difficulty); - *self.best_block.write() = BlockDescriptor { + pending.best_block = Some(BlockDescriptor { hash: hash, number: number, total_difficulty: total_difficulty, - }; + }); // produce next CHT root if it's time. let earliest_era = *candidates.keys().next().expect("at least one era just created; qed"); if earliest_era + HISTORY + cht::SIZE <= number { let cht_num = cht::block_to_cht_number(earliest_era) .expect("fails only for number == 0; genesis never imported; qed"); - debug_assert_eq!(cht_num as usize, self.cht_roots.lock().len()); - - let mut headers = self.headers.write(); let cht_root = { let mut i = earliest_era; @@ -186,10 +316,12 @@ impl HeaderChain { let iter = || { let era_entry = candidates.remove(&i) .expect("all eras are sequential with no gaps; qed"); + transaction.delete(self.col, era_key(i).as_bytes()); + i += 1; for ancient in &era_entry.candidates { - headers.remove(&ancient.hash); + transaction.delete(self.col, &ancient.hash); } let canon = &era_entry.candidates[0]; @@ -199,28 +331,56 @@ impl HeaderChain { .expect("fails only when too few items; this is checked; qed") }; + // write the CHT root to the database. debug!(target: "chain", "Produced CHT {} root: {:?}", cht_num, cht_root); - - self.cht_roots.lock().push(cht_root); + transaction.put(self.col, cht_key(cht_num).as_bytes(), &::rlp::encode(&cht_root)); } } - Ok(()) + // write the best and latest eras to the database. 
+ { + let latest_num = *candidates.iter().rev().next().expect("at least one era just inserted; qed").0; + let mut stream = RlpStream::new_list(2); + stream.append(&best_num).append(&latest_num); + transaction.put(self.col, CURRENT_KEY, &stream.out()) + } + Ok(pending) + } + + /// Apply pending changes from a previous `insert` operation. + /// Must be done before the next `insert` call. + pub fn apply_pending(&self, pending: PendingChanges) { + if let Some(best_block) = pending.best_block { + *self.best_block.write() = best_block; + } } /// Get a block header. In the case of query by number, only canonical blocks /// will be returned. pub fn block_header(&self, id: BlockId) -> Option { + let load_from_db = |hash: H256| { + match self.db.get(self.col, &hash) { + Ok(val) => val.map(|x| x.to_vec()).map(encoded::Header::new), + Err(e) => { + warn!(target: "chain", "Failed to read from database: {}", e); + None + } + } + }; + match id { BlockId::Earliest | BlockId::Number(0) => Some(self.genesis_header.clone()), - BlockId::Hash(hash) => self.headers.read().get(&hash).cloned(), + BlockId::Hash(hash) => load_from_db(hash), BlockId::Number(num) => { if self.best_block.read().number < num { return None } self.candidates.read().get(&num).map(|entry| entry.canonical_hash) - .and_then(|hash| self.headers.read().get(&hash).cloned()) + .and_then(load_from_db) } BlockId::Latest | BlockId::Pending => { + // hold candidates hear to prevent deletion of the header + // as we read it. + let _candidates = self.candidates.read(); let hash = { let best = self.best_block.read(); if best.number == 0 { @@ -230,7 +390,7 @@ impl HeaderChain { best.hash }; - self.headers.read().get(&hash).cloned() + load_from_db(hash) } } } @@ -257,7 +417,13 @@ impl HeaderChain { /// This is because it's assumed that the genesis hash is known, /// so including it within a CHT would be redundant. 
pub fn cht_root(&self, n: usize) -> Option { - self.cht_roots.lock().get(n).map(|h| h.clone()) + match self.db.get(self.col, cht_key(n as u64).as_bytes()) { + Ok(val) => val.map(|x| ::rlp::decode(&x)), + Err(e) => { + warn!(target: "chain", "Error reading from database: {}", e); + None + } + } } /// Get the genesis hash. @@ -287,7 +453,7 @@ impl HeaderChain { /// Get block status. pub fn status(&self, hash: &H256) -> BlockStatus { - match self.headers.read().contains_key(hash) { + match self.db.get(self.col, &*hash).ok().map_or(false, |x| x.is_some()) { true => BlockStatus::InChain, false => BlockStatus::Unknown, } @@ -296,9 +462,7 @@ impl HeaderChain { impl HeapSizeOf for HeaderChain { fn heap_size_of_children(&self) -> usize { - self.candidates.read().heap_size_of_children() + - self.headers.read().heap_size_of_children() + - self.cht_roots.lock().heap_size_of_children() + self.candidates.read().heap_size_of_children() } } @@ -324,16 +488,23 @@ impl<'a> Iterator for AncestryIter<'a> { #[cfg(test)] mod tests { use super::HeaderChain; + use std::sync::Arc; + use ethcore::ids::BlockId; use ethcore::header::Header; use ethcore::spec::Spec; + fn make_db() -> Arc<::util::KeyValueDB> { + Arc::new(::util::kvdb::in_memory(0)) + } + #[test] fn basic_chain() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -345,7 +516,10 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -361,7 +535,8 @@ mod tests { let spec = 
Spec::new_test(); let genesis_header = spec.genesis_header(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let db = make_db(); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); let mut parent_hash = genesis_header.hash(); let mut rolling_timestamp = genesis_header.timestamp(); @@ -373,7 +548,10 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -389,7 +567,10 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * i.into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 10; } @@ -410,7 +591,10 @@ mod tests { header.set_difficulty(*genesis_header.difficulty() * (i * i).into()); parent_hash = header.hash(); - chain.insert(header).unwrap(); + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); rolling_timestamp += 11; } @@ -432,11 +616,101 @@ mod tests { fn earliest_is_latest() { let spec = Spec::new_test(); let genesis_header = spec.genesis_header(); + let db = make_db(); - let chain = HeaderChain::new(&::rlp::encode(&genesis_header)); + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); assert!(chain.block_header(BlockId::Earliest).is_some()); assert!(chain.block_header(BlockId::Latest).is_some()); assert!(chain.block_header(BlockId::Pending).is_some()); } + + #[test] + fn restore_from_db() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = 
HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + for i in 1..10000 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + } + + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert!(chain.block_header(BlockId::Number(10)).is_none()); + assert!(chain.block_header(BlockId::Number(9000)).is_some()); + assert!(chain.cht_root(2).is_some()); + assert!(chain.cht_root(3).is_none()); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 9999); + } + + #[test] + fn restore_higher_non_canonical() { + let spec = Spec::new_test(); + let genesis_header = spec.genesis_header(); + let db = make_db(); + + { + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + let mut parent_hash = genesis_header.hash(); + let mut rolling_timestamp = genesis_header.timestamp(); + + // push 100 low-difficulty blocks. + for i in 1..101 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + // push fewer high-difficulty blocks. 
+ for i in 1..11 { + let mut header = Header::new(); + header.set_parent_hash(parent_hash); + header.set_number(i); + header.set_timestamp(rolling_timestamp); + header.set_difficulty(*genesis_header.difficulty() * i.into() * 1000.into()); + parent_hash = header.hash(); + + let mut tx = db.transaction(); + let pending = chain.insert(&mut tx, header).unwrap(); + db.write(tx).unwrap(); + chain.apply_pending(pending); + + rolling_timestamp += 10; + } + + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + } + + // after restoration, non-canonical eras should still be loaded. + let chain = HeaderChain::new(db.clone(), None, &::rlp::encode(&genesis_header)).unwrap(); + assert_eq!(chain.block_header(BlockId::Latest).unwrap().number(), 10); + assert!(chain.candidates.read().get(&100).is_some()) + } } diff --git a/ethcore/light/src/client/mod.rs b/ethcore/light/src/client/mod.rs index f340b5bcf..79a5f097a 100644 --- a/ethcore/light/src/client/mod.rs +++ b/ethcore/light/src/client/mod.rs @@ -32,6 +32,7 @@ use ethcore::encoded; use io::IoChannel; use util::{H256, Mutex, RwLock}; +use util::kvdb::{KeyValueDB, CompactionProfile}; use self::header_chain::{AncestryIter, HeaderChain}; @@ -45,6 +46,14 @@ mod service; pub struct Config { /// Verification queue config. pub queue: queue::Config, + /// Chain column in database. + pub chain_column: Option, + /// Database cache size. `None` => rocksdb default. + pub db_cache_size: Option, + /// State db compaction profile + pub db_compaction: CompactionProfile, + /// Should db have WAL enabled? + pub db_wal: bool, } /// Trait for interacting with the header chain abstractly. @@ -113,18 +122,30 @@ pub struct Client { chain: HeaderChain, report: RwLock, import_lock: Mutex<()>, + db: Arc, } impl Client { /// Create a new `Client`. 
- pub fn new(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { - Client { + pub fn new(config: Config, db: Arc, chain_col: Option, spec: &Spec, io_channel: IoChannel) -> Result { + let gh = ::rlp::encode(&spec.genesis_header()); + + Ok(Client { queue: HeaderQueue::new(config.queue, spec.engine.clone(), io_channel, true), engine: spec.engine.clone(), - chain: HeaderChain::new(&::rlp::encode(&spec.genesis_header())), + chain: HeaderChain::new(db.clone(), chain_col, &gh)?, report: RwLock::new(ClientReport::default()), import_lock: Mutex::new(()), - } + db: db, + }) + } + + /// Create a new `Client` backed purely in-memory. + /// This will ignore all database options in the configuration. + pub fn in_memory(config: Config, spec: &Spec, io_channel: IoChannel) -> Self { + let db = ::util::kvdb::in_memory(0); + + Client::new(config, Arc::new(db), None, spec, io_channel).expect("New DB creation infallible; qed") } /// Import a header to the queue for additional verification. @@ -208,15 +229,23 @@ impl Client { for verified_header in self.queue.drain(MAX) { let (num, hash) = (verified_header.number(), verified_header.hash()); - match self.chain.insert(verified_header) { - Ok(()) => { + let mut tx = self.db.transaction(); + let pending = match self.chain.insert(&mut tx, verified_header) { + Ok(pending) => { good.push(hash); self.report.write().blocks_imported += 1; + pending } Err(e) => { debug!(target: "client", "Error importing header {:?}: {}", (num, hash), e); bad.push(hash); + break; } + }; + self.db.write_buffered(tx); + self.chain.apply_pending(pending); + if let Err(e) = self.db.flush() { + panic!("Database flush failed: {}. Check disk health and space.", e); } } diff --git a/ethcore/light/src/client/service.rs b/ethcore/light/src/client/service.rs index fe7caee94..55795d870 100644 --- a/ethcore/light/src/client/service.rs +++ b/ethcore/light/src/client/service.rs @@ -17,33 +17,80 @@ //! Minimal IO service for light client. //! 
Just handles block import messages and passes them to the client. +use std::fmt; +use std::path::Path; use std::sync::Arc; +use ethcore::db; use ethcore::service::ClientIoMessage; use ethcore::spec::Spec; use io::{IoContext, IoError, IoHandler, IoService}; +use util::kvdb::{Database, DatabaseConfig}; use super::{Client, Config as ClientConfig}; +/// Errors on service initialization. +#[derive(Debug)] +pub enum Error { + /// Database error. + Database(String), + /// I/O service error. + Io(IoError), +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Database(ref msg) => write!(f, "Database error: {}", msg), + Error::Io(ref err) => write!(f, "I/O service error: {}", err), + } + } +} + /// Light client service. pub struct Service { client: Arc, - _io_service: IoService, + io_service: IoService, } impl Service { /// Start the service: initialize I/O workers and client itself. - pub fn start(config: ClientConfig, spec: &Spec) -> Result { - let io_service = try!(IoService::::start()); - let client = Arc::new(Client::new(config, spec, io_service.channel())); - try!(io_service.register_handler(Arc::new(ImportBlocks(client.clone())))); + pub fn start(config: ClientConfig, spec: &Spec, path: &Path) -> Result { + // initialize database. + let mut db_config = DatabaseConfig::with_columns(db::NUM_COLUMNS); + // give all rocksdb cache to the header chain column. 
+ if let Some(size) = config.db_cache_size { + db_config.set_cache(db::COL_LIGHT_CHAIN, size); + } + + db_config.compaction = config.db_compaction; + db_config.wal = config.db_wal; + + let db = Arc::new(Database::open( + &db_config, + &path.to_str().expect("DB path could not be converted to string.") + ).map_err(Error::Database)?); + + let io_service = IoService::::start().map_err(Error::Io)?; + let client = Arc::new(Client::new(config, + db, + db::COL_LIGHT_CHAIN, + spec, + io_service.channel(), + ).map_err(Error::Database)?); + io_service.register_handler(Arc::new(ImportBlocks(client.clone()))).map_err(Error::Io)?; Ok(Service { client: client, - _io_service: io_service, + io_service: io_service, }) } + /// Register an I/O handler on the service. + pub fn register_handler(&self, handler: Arc + Send>) -> Result<(), IoError> { + self.io_service.register_handler(handler) + } + /// Get a handle to the client. pub fn client(&self) -> &Arc { &self.client @@ -63,11 +110,13 @@ impl IoHandler for ImportBlocks { #[cfg(test)] mod tests { use super::Service; + use devtools::RandomTempPath; use ethcore::spec::Spec; #[test] fn it_works() { let spec = Spec::new_test(); - Service::start(Default::default(), &spec).unwrap(); + let temp_path = RandomTempPath::new(); + Service::start(Default::default(), &spec, temp_path.as_path()).unwrap(); } } diff --git a/ethcore/light/src/lib.rs b/ethcore/light/src/lib.rs index ada58d8de..82b6ea126 100644 --- a/ethcore/light/src/lib.rs +++ b/ethcore/light/src/lib.rs @@ -55,6 +55,7 @@ pub mod remote { mod types; +pub use self::cache::Cache; pub use self::provider::Provider; pub use self::transaction_queue::TransactionQueue; pub use types::request as request; @@ -76,3 +77,6 @@ extern crate stats; #[cfg(feature = "ipc")] extern crate ethcore_ipc as ipc; + +#[cfg(test)] +extern crate ethcore_devtools as devtools; diff --git a/ethcore/light/src/net/context.rs b/ethcore/light/src/net/context.rs index 9eafead57..64ddd19a3 100644 --- 
a/ethcore/light/src/net/context.rs +++ b/ethcore/light/src/net/context.rs @@ -61,10 +61,12 @@ impl<'a> IoContext for NetworkContext<'a> { } fn disconnect_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disconnect of peer {}", peer); NetworkContext::disconnect_peer(self, peer); } fn disable_peer(&self, peer: PeerId) { + trace!(target: "pip", "Initiating disable of peer {}", peer); NetworkContext::disable_peer(self, peer); } diff --git a/ethcore/light/src/net/mod.rs b/ethcore/light/src/net/mod.rs index 667e07cb4..e32e92145 100644 --- a/ethcore/light/src/net/mod.rs +++ b/ethcore/light/src/net/mod.rs @@ -27,7 +27,7 @@ use util::hash::H256; use util::{DBValue, Mutex, RwLock, U256}; use time::{Duration, SteadyTime}; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::fmt; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -61,6 +61,9 @@ const TIMEOUT_INTERVAL_MS: u64 = 1000; const TICK_TIMEOUT: TimerToken = 1; const TICK_TIMEOUT_INTERVAL_MS: u64 = 5000; +const PROPAGATE_TIMEOUT: TimerToken = 2; +const PROPAGATE_TIMEOUT_INTERVAL_MS: u64 = 5000; + // minimum interval between updates. const UPDATE_INTERVAL_MS: i64 = 5000; @@ -132,6 +135,7 @@ pub struct Peer { last_update: SteadyTime, pending_requests: RequestSet, failed_requests: Vec, + propagated_transactions: HashSet, } /// A light protocol event handler. @@ -303,12 +307,18 @@ impl LightProtocol { match peer.remote_flow { None => Err(Error::NotServer), Some((ref mut creds, ref params)) => { - // check that enough credits are available. - let mut temp_creds: Credits = creds.clone(); - for request in requests.requests() { - temp_creds.deduct_cost(params.compute_cost(request))?; + // apply recharge to credits if there's no pending requests. + if peer.pending_requests.is_empty() { + params.recharge(creds); } - *creds = temp_creds; + + // compute and deduct cost. 
+ let pre_creds = creds.current(); + let cost = params.compute_cost_multi(requests.requests()); + creds.deduct_cost(cost)?; + + trace!(target: "pip", "requesting from peer {}. Cost: {}; Available: {}", + peer_id, cost, pre_creds); let req_id = ReqId(self.req_id.fetch_add(1, Ordering::SeqCst)); io.send(*peer_id, packet::REQUEST, { @@ -318,7 +328,7 @@ impl LightProtocol { }); // begin timeout. - peer.pending_requests.insert(req_id, requests, SteadyTime::now()); + peer.pending_requests.insert(req_id, requests, cost, SteadyTime::now()); Ok(req_id) } } @@ -401,20 +411,25 @@ impl LightProtocol { let req_id = ReqId(raw.val_at(0)?); let cur_credits: U256 = raw.val_at(1)?; - trace!(target: "pip", "pre-verifying response from peer {}", peer); + trace!(target: "pip", "pre-verifying response for {} from peer {}", req_id, peer); let peers = self.peers.read(); let res = match peers.get(peer) { Some(peer_info) => { let mut peer_info = peer_info.lock(); let req_info = peer_info.pending_requests.remove(&req_id, SteadyTime::now()); + let cumulative_cost = peer_info.pending_requests.cumulative_cost(); let flow_info = peer_info.remote_flow.as_mut(); match (req_info, flow_info) { (Some(_), Some(flow_info)) => { let &mut (ref mut c, ref mut flow) = flow_info; - let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); - c.update_to(actual_credits); + + // only update if the cumulative cost of the request set is zero. + if cumulative_cost == 0.into() { + let actual_credits = ::std::cmp::min(cur_credits, *flow.limit()); + c.update_to(actual_credits); + } Ok(()) } @@ -488,6 +503,47 @@ impl LightProtocol { } } + // propagate transactions to relay peers. 
+ // if we aren't on the mainnet, we just propagate to all relay peers + fn propagate_transactions(&self, io: &IoContext) { + if self.capabilities.read().tx_relay { return } + + let ready_transactions = self.provider.ready_transactions(); + if ready_transactions.is_empty() { return } + + trace!(target: "pip", "propagate transactions: {} ready", ready_transactions.len()); + + let all_transaction_hashes: HashSet<_> = ready_transactions.iter().map(|tx| tx.hash()).collect(); + let mut buf = Vec::new(); + + let peers = self.peers.read(); + for (peer_id, peer_info) in peers.iter() { + let mut peer_info = peer_info.lock(); + if !peer_info.capabilities.tx_relay { continue } + + let prop_filter = &mut peer_info.propagated_transactions; + *prop_filter = &*prop_filter & &all_transaction_hashes; + + // fill the buffer with all non-propagated transactions. + let to_propagate = ready_transactions.iter() + .filter(|tx| prop_filter.insert(tx.hash())) + .map(|tx| &tx.transaction); + + buf.extend(to_propagate); + + // propagate to the given peer. + if buf.is_empty() { continue } + io.send(*peer_id, packet::SEND_TRANSACTIONS, { + let mut stream = RlpStream::new_list(buf.len()); + for pending_tx in buf.drain(..) { + stream.append(pending_tx); + } + + stream.out() + }) + } + } + /// called when a peer connects. 
pub fn on_connect(&self, peer: &PeerId, io: &IoContext) { let proto_version = match io.protocol_version(*peer).ok_or(Error::WrongNetwork) { @@ -520,6 +576,7 @@ impl LightProtocol { last_update: SteadyTime::now(), }); + trace!(target: "pip", "Sending status to peer {}", peer); io.send(*peer, packet::STATUS, status_packet); } @@ -601,6 +658,7 @@ impl LightProtocol { last_update: pending.last_update, pending_requests: RequestSet::default(), failed_requests: Vec::new(), + propagated_transactions: HashSet::new(), })); for handler in &self.handlers { @@ -683,6 +741,8 @@ impl LightProtocol { trace!(target: "pip", "Received requests (id: {}) from peer {}", req_id, peer_id); // deserialize requests, check costs and request validity. + self.flow_params.recharge(&mut peer.local_credits); + peer.local_credits.deduct_cost(self.flow_params.base_cost())?; for request_rlp in raw.at(1)?.iter().take(MAX_REQUESTS) { let request: Request = request_rlp.as_val()?; @@ -709,6 +769,7 @@ impl LightProtocol { }); trace!(target: "pip", "Responded to {}/{} requests in packet {}", responses.len(), num_requests, req_id); + trace!(target: "pip", "Peer {} has {} credits remaining.", peer_id, peer.local_credits.current()); io.respond(packet::RESPONSE, { let mut stream = RlpStream::new_list(3); @@ -782,6 +843,8 @@ impl NetworkProtocolHandler for LightProtocol { .expect("Error registering sync timer."); io.register_timer(TICK_TIMEOUT, TICK_TIMEOUT_INTERVAL_MS) .expect("Error registering sync timer."); + io.register_timer(PROPAGATE_TIMEOUT, PROPAGATE_TIMEOUT_INTERVAL_MS) + .expect("Error registering sync timer."); } fn read(&self, io: &NetworkContext, peer: &PeerId, packet_id: u8, data: &[u8]) { @@ -800,6 +863,7 @@ impl NetworkProtocolHandler for LightProtocol { match timer { TIMEOUT => self.timeout_check(io), TICK_TIMEOUT => self.tick_handlers(io), + PROPAGATE_TIMEOUT => self.propagate_transactions(io), _ => warn!(target: "pip", "received timeout on unknown token {}", timer), } } diff --git 
a/ethcore/light/src/net/request_set.rs b/ethcore/light/src/net/request_set.rs index a2391ef6f..094fa1894 100644 --- a/ethcore/light/src/net/request_set.rs +++ b/ethcore/light/src/net/request_set.rs @@ -27,22 +27,29 @@ use std::iter::FromIterator; use request::Request; use request::Requests; use net::{timeout, ReqId}; +use util::U256; use time::{Duration, SteadyTime}; +// Request set entry: requests + cost. +#[derive(Debug)] +struct Entry(Requests, U256); + /// Request set. #[derive(Debug)] pub struct RequestSet { counter: u64, + cumulative_cost: U256, base: Option, ids: HashMap, - reqs: BTreeMap, + reqs: BTreeMap, } impl Default for RequestSet { fn default() -> Self { RequestSet { counter: 0, + cumulative_cost: 0.into(), base: None, ids: HashMap::new(), reqs: BTreeMap::new(), @@ -52,10 +59,12 @@ impl Default for RequestSet { impl RequestSet { /// Push requests onto the stack. - pub fn insert(&mut self, req_id: ReqId, req: Requests, now: SteadyTime) { + pub fn insert(&mut self, req_id: ReqId, req: Requests, cost: U256, now: SteadyTime) { let counter = self.counter; + self.cumulative_cost = self.cumulative_cost + cost; + self.ids.insert(req_id, counter); - self.reqs.insert(counter, req); + self.reqs.insert(counter, Entry(req, cost)); if self.reqs.keys().next().map_or(true, |x| *x == counter) { self.base = Some(now); @@ -71,7 +80,7 @@ impl RequestSet { None => return None, }; - let req = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); + let Entry(req, cost) = self.reqs.remove(&id).expect("entry in `ids` implies entry in `reqs`; qed"); match self.reqs.keys().next() { Some(k) if *k > id => self.base = Some(now), @@ -79,6 +88,7 @@ impl RequestSet { _ => {} } + self.cumulative_cost = self.cumulative_cost - cost; Some(req) } @@ -93,7 +103,7 @@ impl RequestSet { let first_req = self.reqs.values().next() .expect("base existing implies `reqs` non-empty; qed"); - base + compute_timeout(&first_req) <= now + base + compute_timeout(&first_req.0) <= 
now } /// Collect all pending request ids. @@ -108,6 +118,9 @@ impl RequestSet { /// Whether the set is empty. pub fn is_empty(&self) -> bool { self.len() == 0 } + + /// The cumulative cost of all requests in the set. + pub fn cumulative_cost(&self) -> U256 { self.cumulative_cost } } // helper to calculate timeout for a specific set of requests. @@ -141,8 +154,8 @@ mod tests { let the_req = RequestBuilder::default().build(); let req_time = compute_timeout(&the_req); - req_set.insert(ReqId(0), the_req.clone(), test_begin); - req_set.insert(ReqId(1), the_req, test_begin + Duration::seconds(1)); + req_set.insert(ReqId(0), the_req.clone(), 0.into(), test_begin); + req_set.insert(ReqId(1), the_req, 0.into(), test_begin + Duration::seconds(1)); assert_eq!(req_set.base, Some(test_begin)); @@ -153,4 +166,22 @@ mod tests { assert!(!req_set.check_timeout(test_end)); assert!(req_set.check_timeout(test_end + Duration::seconds(1))); } + + #[test] + fn cumulative_cost() { + let the_req = RequestBuilder::default().build(); + let test_begin = SteadyTime::now(); + let test_end = test_begin + Duration::seconds(1); + let mut req_set = RequestSet::default(); + + for i in 0..5 { + req_set.insert(ReqId(i), the_req.clone(), 1.into(), test_begin); + assert_eq!(req_set.cumulative_cost, (i + 1).into()); + } + + for i in (0..5).rev() { + assert!(req_set.remove(&ReqId(i), test_end).is_some()); + assert_eq!(req_set.cumulative_cost, i.into()); + } + } } diff --git a/ethcore/light/src/net/tests/mod.rs b/ethcore/light/src/net/tests/mod.rs index e2081534c..6dc5fbe7e 100644 --- a/ethcore/light/src/net/tests/mod.rs +++ b/ethcore/light/src/net/tests/mod.rs @@ -600,8 +600,8 @@ fn id_guard() { let mut pending_requests = RequestSet::default(); - pending_requests.insert(req_id_1, req.clone(), ::time::SteadyTime::now()); - pending_requests.insert(req_id_2, req, ::time::SteadyTime::now()); + pending_requests.insert(req_id_1, req.clone(), 0.into(), ::time::SteadyTime::now()); + 
pending_requests.insert(req_id_2, req, 1.into(), ::time::SteadyTime::now()); proto.peers.write().insert(peer_id, ::util::Mutex::new(Peer { local_credits: flow_params.create_credits(), @@ -612,6 +612,7 @@ fn id_guard() { last_update: ::time::SteadyTime::now(), pending_requests: pending_requests, failed_requests: Vec::new(), + propagated_transactions: Default::default(), })); // first, malformed responses. diff --git a/ethcore/light/src/on_demand/mod.rs b/ethcore/light/src/on_demand/mod.rs index 8d451c88e..a7c1ba2c4 100644 --- a/ethcore/light/src/on_demand/mod.rs +++ b/ethcore/light/src/on_demand/mod.rs @@ -37,7 +37,7 @@ use rlp::RlpStream; use util::{Bytes, RwLock, Mutex, U256, H256}; use util::sha3::{SHA3_NULL_RLP, SHA3_EMPTY_LIST_RLP}; -use net::{Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; +use net::{self, Handler, Status, Capabilities, Announcement, EventContext, BasicContext, ReqId}; use cache::Cache; use request::{self as basic_request, Request as NetworkRequest, Response as NetworkResponse}; @@ -57,15 +57,15 @@ impl Peer { self.capabilities.serve_headers && self.status.head_num > req.num(), Pending::HeaderByHash(_, _) => self.capabilities.serve_headers, Pending::Block(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::BlockReceipts(ref req, _) => - self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x >= req.0.number()), + self.capabilities.serve_chain_since.as_ref().map_or(false, |x| *x <= req.0.number()), Pending::Account(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), Pending::Code(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.block_id.1), + 
self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.block_id.1), Pending::TxProof(ref req, _) => - self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x >= req.header.number()), + self.capabilities.serve_state_since.as_ref().map_or(false, |x| *x <= req.header.number()), } } } @@ -210,7 +210,7 @@ impl OnDemand { /// it as easily. pub fn header_by_hash(&self, ctx: &BasicContext, req: request::HeaderByHash) -> Receiver { let (sender, receiver) = oneshot::channel(); - match self.cache.lock().block_header(&req.0) { + match { self.cache.lock().block_header(&req.0) } { Some(hdr) => sender.send(hdr).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::HeaderByHash(req, sender)), } @@ -232,11 +232,13 @@ impl OnDemand { sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_body(&req.hash) { + match { self.cache.lock().block_body(&req.hash) } { Some(body) => { let mut stream = RlpStream::new_list(3); + let body = body.rlp(); stream.append_raw(&req.header.into_inner(), 1); - stream.append_raw(&body.into_inner(), 2); + stream.append_raw(&body.at(0).as_raw(), 1); + stream.append_raw(&body.at(1).as_raw(), 1); sender.send(encoded::Block::new(stream.out())).expect(RECEIVER_IN_SCOPE); } @@ -255,7 +257,7 @@ impl OnDemand { if req.0.receipts_root() == SHA3_NULL_RLP { sender.send(Vec::new()).expect(RECEIVER_IN_SCOPE); } else { - match self.cache.lock().block_receipts(&req.0.hash()) { + match { self.cache.lock().block_receipts(&req.0.hash()) } { Some(receipts) => sender.send(receipts).expect(RECEIVER_IN_SCOPE), None => self.dispatch(ctx, Pending::BlockReceipts(req, sender)), } @@ -303,23 +305,26 @@ impl OnDemand { let complete = builder.build(); + let kind = complete.requests()[0].kind(); for (id, peer) in self.peers.read().iter() { if !peer.can_handle(&pending) { continue } match ctx.request_from(*id, complete.clone()) { Ok(req_id) => { - trace!(target: "on_demand", "Assigning 
request to peer {}", id); + trace!(target: "on_demand", "{}: Assigned {:?} to peer {}", + req_id, kind, id); + self.pending_requests.write().insert( req_id, pending, ); return } + Err(net::Error::NoCredits) => {} Err(e) => trace!(target: "on_demand", "Failed to make request of peer {}: {:?}", id, e), } } - trace!(target: "on_demand", "No suitable peer for request"); self.orphaned_requests.write().push(pending); } @@ -353,6 +358,7 @@ impl OnDemand { let to_dispatch = ::std::mem::replace(&mut *self.orphaned_requests.write(), Vec::new()); + trace!(target: "on_demand", "Attempting to dispatch {} orphaned requests.", to_dispatch.len()); for mut orphaned in to_dispatch { let hung_up = match orphaned { Pending::HeaderProof(_, ref mut sender) => match *sender { @@ -397,10 +403,12 @@ impl Handler for OnDemand { } fn on_announcement(&self, ctx: &EventContext, announcement: &Announcement) { - let mut peers = self.peers.write(); - if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { - peer.status.update_from(&announcement); - peer.capabilities.update_from(&announcement); + { + let mut peers = self.peers.write(); + if let Some(ref mut peer) = peers.get_mut(&ctx.peer()) { + peer.status.update_from(&announcement); + peer.capabilities.update_from(&announcement); + } } self.dispatch_orphaned(ctx.as_basic()); @@ -422,6 +430,8 @@ impl Handler for OnDemand { } }; + trace!(target: "on_demand", "Handling response for request {}, kind={:?}", req_id, response.kind()); + // handle the response appropriately for the request. // all branches which do not return early lead to disabling of the peer // due to misbehavior. 
@@ -441,7 +451,7 @@ impl Handler for OnDemand { } return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -454,7 +464,7 @@ impl Handler for OnDemand { let _ = sender.send(header); return } - Err(e) => warn!("Error handling response for header request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for header request: {:?}", e), } } } @@ -467,7 +477,7 @@ impl Handler for OnDemand { let _ = sender.send(block); return } - Err(e) => warn!("Error handling response for block request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for block request: {:?}", e), } } } @@ -480,7 +490,7 @@ impl Handler for OnDemand { let _ = sender.send(receipts); return } - Err(e) => warn!("Error handling response for receipts request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for receipts request: {:?}", e), } } } @@ -493,7 +503,7 @@ impl Handler for OnDemand { let _ = sender.send(maybe_account); return } - Err(e) => warn!("Error handling response for state request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for state request: {:?}", e), } } } @@ -504,7 +514,7 @@ impl Handler for OnDemand { let _ = sender.send(response.code.clone()); return } - Err(e) => warn!("Error handling response for code request: {:?}", e), + Err(e) => warn!(target: "on_demand", "Error handling response for code request: {:?}", e), } } } @@ -519,7 +529,7 @@ impl Handler for OnDemand { let _ = sender.send(Err(err)); return } - ProvedExecution::BadProof => warn!("Error handling response for transaction proof request"), + ProvedExecution::BadProof => warn!(target: "on_demand", "Error handling response for transaction proof request"), } } } diff --git a/ethcore/light/src/on_demand/request.rs b/ethcore/light/src/on_demand/request.rs index cda1d6feb..8a37ddf7b 100644 --- 
a/ethcore/light/src/on_demand/request.rs +++ b/ethcore/light/src/on_demand/request.rs @@ -151,7 +151,8 @@ impl Body { // concatenate the header and the body. let mut stream = RlpStream::new_list(3); stream.append_raw(self.header.rlp().as_raw(), 1); - stream.append_raw(&body.rlp().as_raw(), 2); + stream.append_raw(body.rlp().at(0).as_raw(), 1); + stream.append_raw(body.rlp().at(1).as_raw(), 1); Ok(encoded::Block::new(stream.out())) } @@ -243,12 +244,14 @@ impl TransactionProof { pub fn check_response(&self, state_items: &[DBValue]) -> ProvedExecution { let root = self.header.state_root(); + let mut env_info = self.env_info.clone(); + env_info.gas_limit = self.tx.gas.clone(); state::check_proof( state_items, root, &self.tx, &*self.engine, - &self.env_info, + &env_info, ) } } diff --git a/ethcore/light/src/types/request/mod.rs b/ethcore/light/src/types/request/mod.rs index f7d0b7df6..3099f8fed 100644 --- a/ethcore/light/src/types/request/mod.rs +++ b/ethcore/light/src/types/request/mod.rs @@ -244,7 +244,8 @@ pub enum CompleteRequest { } impl Request { - fn kind(&self) -> Kind { + /// Get the request kind. + pub fn kind(&self) -> Kind { match *self { Request::Headers(_) => Kind::Headers, Request::HeaderProof(_) => Kind::HeaderProof, @@ -435,7 +436,8 @@ impl Response { } } - fn kind(&self) -> Kind { + /// Inspect the kind of this response. 
+ pub fn kind(&self) -> Kind { match *self { Response::Headers(_) => Kind::Headers, Response::HeaderProof(_) => Kind::HeaderProof, @@ -726,7 +728,6 @@ pub mod header_proof { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { proof: rlp.list_at(0)?, hash: rlp.val_at(1)?, @@ -737,12 +738,10 @@ pub mod header_proof { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(3).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.hash).append(&self.td); + s.begin_list(3) + .append_list::,_>(&self.proof[..]) + .append(&self.hash) + .append(&self.td); } } } @@ -826,7 +825,6 @@ pub mod block_receipts { impl Decodable for Response { fn decode(rlp: &UntrustedRlp) -> Result { - Ok(Response { receipts: rlp.as_list()?, }) @@ -923,8 +921,8 @@ pub mod block_body { use ethcore::transaction::UnverifiedTransaction; // check body validity. - let _: Vec = rlp.list_at(0)?; - let _: Vec = rlp.list_at(1)?; + let _: Vec = rlp.list_at(0)?; + let _: Vec = rlp.list_at(1)?; Ok(Response { body: encoded::Body::new(rlp.as_raw().to_owned()), @@ -1063,12 +1061,9 @@ pub mod account { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(5).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - - s.append(&self.nonce) + s.begin_list(5) + .append_list::,_>(&self.proof[..]) + .append(&self.nonce) .append(&self.balance) .append(&self.code_hash) .append(&self.storage_root); @@ -1207,11 +1202,9 @@ pub mod storage { impl Encodable for Response { fn rlp_append(&self, s: &mut RlpStream) { - s.begin_list(2).begin_list(self.proof.len()); - for item in &self.proof { - s.append_list(&item); - } - s.append(&self.value); + s.begin_list(2) + .append_list::,_>(&self.proof[..]) + .append(&self.value); } } } @@ -1486,9 +1479,16 @@ mod tests { fn check_roundtrip(val: T) where T: ::rlp::Encodable + ::rlp::Decodable + PartialEq + 
::std::fmt::Debug { + // check as single value. let bytes = ::rlp::encode(&val); let new_val: T = ::rlp::decode(&bytes); assert_eq!(val, new_val); + + // check as list containing single value. + let list = [val]; + let bytes = ::rlp::encode_list(&list); + let new_list: Vec = ::rlp::decode_list(&bytes); + assert_eq!(&list, &new_list[..]); } #[test] @@ -1540,7 +1540,7 @@ mod tests { let full_req = Request::HeaderProof(req.clone()); let res = HeaderProofResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], hash: Default::default(), td: 100.into(), }; @@ -1572,6 +1572,7 @@ mod tests { #[test] fn body_roundtrip() { + use ethcore::transaction::{Transaction, UnverifiedTransaction}; let req = IncompleteBodyRequest { hash: Field::Scalar(Default::default()), }; @@ -1579,8 +1580,12 @@ mod tests { let full_req = Request::Body(req.clone()); let res = BodyResponse { body: { + let header = ::ethcore::header::Header::default(); + let tx = UnverifiedTransaction::from(Transaction::default().fake_sign(Default::default())); let mut stream = RlpStream::new_list(2); - stream.begin_list(0).begin_list(0); + stream.begin_list(2).append(&tx).append(&tx) + .begin_list(1).append(&header); + ::ethcore::encoded::Body::new(stream.out()) }, }; @@ -1601,7 +1606,7 @@ mod tests { let full_req = Request::Account(req.clone()); let res = AccountResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], nonce: 100.into(), balance: 123456.into(), code_hash: Default::default(), @@ -1625,7 +1630,7 @@ mod tests { let full_req = Request::Storage(req.clone()); let res = StorageResponse { - proof: Vec::new(), + proof: vec![vec![1, 2, 3], vec![4, 5, 6]], value: H256::default(), }; let full_res = Response::Storage(res.clone()); @@ -1707,4 +1712,31 @@ mod tests { assert_eq!(rlp.val_at::(0).unwrap(), 100usize); assert_eq!(rlp.list_at::(1).unwrap(), reqs); } + + #[test] + fn responses_vec() { + let mut stream = RlpStream::new_list(2); + stream.begin_list(0).begin_list(0); + 
+ let body = ::ethcore::encoded::Body::new(stream.out()); + let reqs = vec![ + Response::Headers(HeadersResponse { headers: vec![] }), + Response::HeaderProof(HeaderProofResponse { proof: vec![], hash: Default::default(), td: 100.into()}), + Response::Receipts(ReceiptsResponse { receipts: vec![Default::default()] }), + Response::Body(BodyResponse { body: body }), + Response::Account(AccountResponse { + proof: vec![], + nonce: 100.into(), + balance: 123.into(), + code_hash: Default::default(), + storage_root: Default::default() + }), + Response::Storage(StorageResponse { proof: vec![], value: H256::default() }), + Response::Code(CodeResponse { code: vec![1, 2, 3, 4, 5] }), + Response::Execution(ExecutionResponse { items: vec![] }), + ]; + + let raw = ::rlp::encode_list(&reqs); + assert_eq!(::rlp::decode_list::(&raw), reqs); + } } diff --git a/ethcore/native_contracts/build.rs b/ethcore/native_contracts/build.rs index a8488617a..91eaa86cd 100644 --- a/ethcore/native_contracts/build.rs +++ b/ethcore/native_contracts/build.rs @@ -23,6 +23,7 @@ use std::io::Write; // TODO: `include!` these from files where they're pretty-printed? 
const REGISTRY_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"canReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"bytes32"}],"name":"setData","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"}],"name":"confirmReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserve","outputs":[{"name":"success","type":"bool"}],"payable":true,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"}],"name":"drop","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_amount","type":"uint256"}],"name":"setFee","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_to","type":"address"}],"name":"transfer","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getData","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"reserved","outputs":[{"name":"reserved","type":"bool"}]
,"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"drain","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"proposeReverse","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"hasReverse","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"fee","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getOwner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_name","type":"bytes32"}],"name":"getReverse","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_data","type":"address"}],"name":"reverse","outputs":[{"name":"","type":"string"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"uint256"}],"name":"setUint","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"string"},{"name":"_who","type":"address"}],"name":"confirmReverseAs","outputs":[{"name":"success","type":"bool"}],"payable":false,"type":"function"},{"constant":false,"inputs":[],"name":"removeReverse","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_name","type":"bytes32"},{"name":"_key","type":"string"},{"name":"_value","type":"address"}],"name":"setAddress","outputs":[{"name":"success","type":"boo
l"}],"payable":false,"type":"function"}]"#; const SERVICE_TRANSACTION_ABI: &'static str = r#"[{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setOwner","outputs":[],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"certify","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getAddress","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_who","type":"address"}],"name":"revoke","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"owner","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[],"name":"delegate","outputs":[{"name":"","type":"address"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"getUint","outputs":[{"name":"","type":"uint256"}],"payable":false,"type":"function"},{"constant":false,"inputs":[{"name":"_new","type":"address"}],"name":"setDelegate","outputs":[],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"}],"name":"certified","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"},{"constant":true,"inputs":[{"name":"_who","type":"address"},{"name":"_field","type":"string"}],"name":"get","outputs":[{"name":"","type":"bytes32"}],"payable":false,"type":"function"}]"#; +const SECRETSTORE_ACL_STORAGE_ABI: &'static str = r#"[{"constant":true,"inputs":[{"name":"user","type":"address"},{"name":"document","type":"bytes32"}],"name":"checkPermissions","outputs":[{"name":"","type":"bool"}],"payable":false,"type":"function"}]"#; fn build_file(name: &str, abi: &str, filename: &str) { let code = ::native_contract_generator::generate_module(name, abi).unwrap(); @@ -37,4 +38,5 @@ fn build_file(name: 
&str, abi: &str, filename: &str) { fn main() { build_file("Registry", REGISTRY_ABI, "registry.rs"); build_file("ServiceTransactionChecker", SERVICE_TRANSACTION_ABI, "service_transaction.rs"); + build_file("SecretStoreAclStorage", SECRETSTORE_ACL_STORAGE_ABI, "secretstore_acl_storage.rs"); } diff --git a/ethcore/native_contracts/src/lib.rs b/ethcore/native_contracts/src/lib.rs index 55c6446b7..e894a636f 100644 --- a/ethcore/native_contracts/src/lib.rs +++ b/ethcore/native_contracts/src/lib.rs @@ -25,6 +25,8 @@ extern crate ethcore_util as util; mod registry; mod service_transaction; +mod secretstore_acl_storage; pub use self::registry::Registry; pub use self::service_transaction::ServiceTransactionChecker; +pub use self::secretstore_acl_storage::SecretStoreAclStorage; diff --git a/ethcore/src/migrations/v11.rs b/ethcore/native_contracts/src/secretstore_acl_storage.rs similarity index 75% rename from ethcore/src/migrations/v11.rs rename to ethcore/native_contracts/src/secretstore_acl_storage.rs index e33de6170..3ebfcfb75 100644 --- a/ethcore/src/migrations/v11.rs +++ b/ethcore/native_contracts/src/secretstore_acl_storage.rs @@ -14,13 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -//! Adds a seventh column for node information. +#![allow(unused_mut, unused_variables, unused_imports)] -use util::migration::ChangeColumns; +//! Secret store ACL storage contract. +// TODO: testing. -/// The migration from v10 to v11. 
-pub const TO_V11: ChangeColumns = ChangeColumns { - pre_columns: Some(6), - post_columns: Some(7), - version: 11, -}; +include!(concat!(env!("OUT_DIR"), "/secretstore_acl_storage.rs")); diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index 3accc777f..4bd29d100 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -395,7 +395,7 @@ impl Client { if header.number() < self.engine().params().validate_receipts_transition && header.receipts_root() != locked_block.block().header().receipts_root() { locked_block = locked_block.strip_receipts(); } - + // Final Verification if let Err(e) = self.verifier.verify_block_final(header, locked_block.block().header()) { warn!(target: "client", "Stage 4 block verification failed for #{} ({})\nError: {:?}", header.number(), header.hash(), e); @@ -1627,10 +1627,12 @@ impl ::client::ProvingBlockChainClient for Client { } fn prove_transaction(&self, transaction: SignedTransaction, id: BlockId) -> Option> { - let (state, env_info) = match (self.state_at(id), self.env_info(id)) { + let (state, mut env_info) = match (self.state_at(id), self.env_info(id)) { (Some(s), Some(e)) => (s, e), _ => return None, }; + + env_info.gas_limit = transaction.gas.clone(); let mut jdb = self.state_db.lock().journal_db().boxed_clone(); let backend = state::backend::Proving::new(jdb.as_hashdb_mut()); diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 5c7cf9471..b58ae83cb 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -26,7 +26,7 @@ use verification::{VerifierType, QueueConfig}; use util::{journaldb, CompactionProfile}; /// Client state db compaction profile -#[derive(Debug, PartialEq)] +#[derive(Debug, PartialEq, Clone)] pub enum DatabaseCompactionProfile { /// Try to determine compaction profile automatically Auto, diff --git a/ethcore/src/db.rs b/ethcore/src/db.rs index 4e8da714d..bccb8e943 100644 --- a/ethcore/src/db.rs +++ b/ethcore/src/db.rs @@ 
-38,8 +38,10 @@ pub const COL_TRACE: Option = Some(4); pub const COL_ACCOUNT_BLOOM: Option = Some(5); /// Column for general information from the local node which can persist. pub const COL_NODE_INFO: Option = Some(6); +/// Column for the light client chain. +pub const COL_LIGHT_CHAIN: Option = Some(7); /// Number of columns in DB -pub const NUM_COLUMNS: Option = Some(7); +pub const NUM_COLUMNS: Option = Some(8); /// Modes for updating caches. #[derive(Clone, Copy)] diff --git a/ethcore/src/migrations/mod.rs b/ethcore/src/migrations/mod.rs index 6cc4a13a8..76b10fd19 100644 --- a/ethcore/src/migrations/mod.rs +++ b/ethcore/src/migrations/mod.rs @@ -16,6 +16,8 @@ //! Database migrations. +use util::migration::ChangeColumns; + pub mod state; pub mod blocks; pub mod extras; @@ -27,5 +29,18 @@ pub use self::v9::Extract; mod v10; pub use self::v10::ToV10; -mod v11; -pub use self::v11::TO_V11; +/// The migration from v10 to v11. +/// Adds a column for node info. +pub const TO_V11: ChangeColumns = ChangeColumns { + pre_columns: Some(6), + post_columns: Some(7), + version: 11, +}; + +/// The migration from v11 to v12. +/// Adds a column for light chain storage. 
+pub const TO_V12: ChangeColumns = ChangeColumns { + pre_columns: Some(7), + post_columns: Some(8), + version: 12, +}; diff --git a/ethcrypto/src/lib.rs b/ethcrypto/src/lib.rs index a4d426b54..9c1352087 100644 --- a/ethcrypto/src/lib.rs +++ b/ethcrypto/src/lib.rs @@ -78,6 +78,12 @@ impl fmt::Display for Error { } } +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + impl From for Error { fn from(e: SecpError) -> Self { Error::Secp(e) diff --git a/ethkey/src/keypair.rs b/ethkey/src/keypair.rs index b25664cd7..f883c4738 100644 --- a/ethkey/src/keypair.rs +++ b/ethkey/src/keypair.rs @@ -27,6 +27,7 @@ pub fn public_to_address(public: &Public) -> Address { result } +#[derive(Clone)] /// secp256k1 key pair pub struct KeyPair { secret: Secret, diff --git a/hash-fetch/src/urlhint.rs b/hash-fetch/src/urlhint.rs index 1588b5482..579c83845 100644 --- a/hash-fetch/src/urlhint.rs +++ b/hash-fetch/src/urlhint.rs @@ -92,12 +92,13 @@ pub enum URLHintResult { } /// URLHint Contract interface -pub trait URLHint { +pub trait URLHint: Send + Sync { /// Resolves given id to registrar entry. 
fn resolve(&self, id: Bytes) -> Option; } /// `URLHintContract` API +#[derive(Clone)] pub struct URLHintContract { urlhint: Contract, registrar: Contract, diff --git a/ipfs/src/lib.rs b/ipfs/src/lib.rs index df03b6cd7..eeac2431b 100644 --- a/ipfs/src/lib.rs +++ b/ipfs/src/lib.rs @@ -32,12 +32,13 @@ use std::sync::Arc; use std::net::{SocketAddr, IpAddr}; use error::ServerError; use route::Out; -use http::hyper::server::{Listening, Handler, Request, Response}; +use http::hyper::server::{Handler, Request, Response}; use http::hyper::net::HttpStream; use http::hyper::header::{self, Vary, ContentLength, ContentType}; use http::hyper::{Next, Encoder, Decoder, Method, RequestUri, StatusCode}; use ethcore::client::BlockChainClient; +pub use http::hyper::server::Listening; pub use http::{AccessControlAllowOrigin, Host, DomainsValidation}; /// Request/response handler diff --git a/js/package.json b/js/package.json index 62803cc70..6e8b84f6d 100644 --- a/js/package.json +++ b/js/package.json @@ -1,6 +1,6 @@ { "name": "parity.js", - "version": "1.7.43", + "version": "1.7.46", "main": "release/index.js", "jsnext:main": "src/index.js", "author": "Parity Team ", @@ -176,7 +176,7 @@ "geopattern": "1.2.3", "isomorphic-fetch": "2.2.1", "js-sha3": "0.5.5", - "keythereum": "0.4.3", + "keythereum": "0.4.6", "lodash": "4.17.2", "loglevel": "1.4.1", "marked": "0.3.6", diff --git a/js/scripts/test.js b/js/scripts/test.js index f5bfb0835..e426642db 100644 --- a/js/scripts/test.js +++ b/js/scripts/test.js @@ -1,2 +1 @@ -// test script 9 -// trigger rebuild on master 15 Mar 2017, 11:19 +// test script 10 diff --git a/js/src/api/local/accounts/account.js b/js/src/api/local/accounts/account.js index da9de1359..94e923f45 100644 --- a/js/src/api/local/accounts/account.js +++ b/js/src/api/local/accounts/account.js @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-import { keythereum } from '../ethkey'; +import { createKeyObject, decryptPrivateKey } from '../ethkey'; export default class Account { constructor (persist, data) { @@ -31,12 +31,14 @@ export default class Account { } isValidPassword (password) { - try { - keythereum.recover(Buffer.from(password), this._keyObject); - return true; - } catch (e) { - return false; - } + return decryptPrivateKey(this._keyObject, password) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + return true; + }); } get address () { @@ -68,21 +70,23 @@ export default class Account { } decryptPrivateKey (password) { - return keythereum.recover(Buffer.from(password), this._keyObject); + return decryptPrivateKey(this._keyObject, password); + } + + changePassword (key, password) { + return createKeyObject(key, password).then((keyObject) => { + this._keyObject = keyObject; + + this._persist(); + }); } static fromPrivateKey (persist, key, password) { - const iv = keythereum.crypto.randomBytes(16); - const salt = keythereum.crypto.randomBytes(32); + return createKeyObject(key, password).then((keyObject) => { + const account = new Account(persist, { keyObject }); - // Keythereum will fail if `password` is an empty string - password = Buffer.from(password); - - const keyObject = keythereum.dump(password, key, salt, iv); - - const account = new Account(persist, { keyObject }); - - return account; + return account; + }); } toJSON () { diff --git a/js/src/api/local/accounts/accounts.js b/js/src/api/local/accounts/accounts.js index 576addcb1..e7e5cc397 100644 --- a/js/src/api/local/accounts/accounts.js +++ b/js/src/api/local/accounts/accounts.js @@ -38,14 +38,23 @@ export default class Accounts { create (secret, password) { const privateKey = Buffer.from(secret.slice(2), 'hex'); - const account = Account.fromPrivateKey(this.persist, privateKey, password); - this._store.push(account); - this.lastAddress = account.address; + return Account + .fromPrivateKey(this.persist, privateKey, 
password) + .then((account) => { + const { address } = account; - this.persist(); + if (this._store.find((account) => account.address === address)) { + throw new Error(`Account ${address} already exists!`); + } - return account.address; + this._store.push(account); + this.lastAddress = address; + + this.persist(); + + return account.address; + }); } set lastAddress (value) { @@ -73,28 +82,41 @@ export default class Accounts { remove (address, password) { address = address.toLowerCase(); + const account = this.get(address); + + if (!account) { + return false; + } + + return account + .isValidPassword(password) + .then((isValid) => { + if (!isValid) { + return false; + } + + if (address === this.lastAddress) { + this.lastAddress = NULL_ADDRESS; + } + + this.removeUnsafe(address); + + return true; + }); + } + + removeUnsafe (address) { + address = address.toLowerCase(); + const index = this._store.findIndex((account) => account.address === address); if (index === -1) { - return false; - } - - const account = this._store[index]; - - if (!account.isValidPassword(password)) { - console.log('invalid password'); - return false; - } - - if (address === this.lastAddress) { - this.lastAddress = NULL_ADDRESS; + return; } this._store.splice(index, 1); this.persist(); - - return true; } mapArray (mapper) { diff --git a/js/src/api/local/ethkey/dummy.js b/js/src/api/local/ethkey/dummy.js new file mode 100644 index 000000000..38f7c84de --- /dev/null +++ b/js/src/api/local/ethkey/dummy.js @@ -0,0 +1,19 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +export default function () { + // empty file included while building parity.js (don't include local keygen) +} diff --git a/js/src/api/local/ethkey/index.js b/js/src/api/local/ethkey/index.js index ac2efa72e..a6967da25 100644 --- a/js/src/api/local/ethkey/index.js +++ b/js/src/api/local/ethkey/index.js @@ -14,31 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -// Allow a web worker in the browser, with a fallback for Node.js -const hasWebWorkers = typeof Worker !== 'undefined'; -const KeyWorker = hasWebWorkers ? require('worker-loader!./worker') - : require('./worker').KeyWorker; +import workerPool from './workerPool'; -// Local accounts should never be used outside of the browser -export let keythereum = null; +export function createKeyObject (key, password) { + return workerPool.getWorker().action('createKeyObject', { key, password }) + .then((obj) => JSON.parse(obj)); +} -if (hasWebWorkers) { - require('keythereum/dist/keythereum'); +export function decryptPrivateKey (keyObject, password) { + return workerPool + .getWorker() + .action('decryptPrivateKey', { keyObject, password }) + .then((privateKey) => { + if (privateKey) { + return Buffer.from(privateKey); + } - keythereum = window.keythereum; + return null; + }); } export function phraseToAddress (phrase) { - return phraseToWallet(phrase).then((wallet) => wallet.address); + return phraseToWallet(phrase) + .then((wallet) => wallet.address); } export function phraseToWallet (phrase) { - return new Promise((resolve, reject) => { - const worker = new KeyWorker(); - - worker.postMessage(phrase); - 
worker.onmessage = ({ data }) => { - resolve(data); - }; - }); + return workerPool.getWorker().action('phraseToWallet', phrase); +} + +export function verifySecret (secret) { + return workerPool.getWorker().action('verifySecret', secret); } diff --git a/js/src/api/local/ethkey/worker.js b/js/src/api/local/ethkey/worker.js index a472ee29a..00f4a0bed 100644 --- a/js/src/api/local/ethkey/worker.js +++ b/js/src/api/local/ethkey/worker.js @@ -14,58 +14,104 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -import { keccak_256 as keccak256 } from 'js-sha3'; import secp256k1 from 'secp256k1/js'; +import { keccak_256 as keccak256 } from 'js-sha3'; +import { bytesToHex } from '~/api/util/format'; + +const isWorker = typeof self !== 'undefined'; // Stay compatible between environments -if (typeof self !== 'object') { +if (!isWorker) { const scope = typeof global === 'undefined' ? window : global; scope.self = scope; } -function bytesToHex (bytes) { - return '0x' + Array.from(bytes).map(n => ('0' + n.toString(16)).slice(-2)).join(''); +// keythereum should never be used outside of the browser +let keythereum = null; + +if (isWorker) { + require('keythereum/dist/keythereum'); + + keythereum = self.keythereum; } -// Logic ported from /ethkey/src/brain.rs -function phraseToWallet (phrase) { - let secret = keccak256.array(phrase); - - for (let i = 0; i < 16384; i++) { - secret = keccak256.array(secret); +function route ({ action, payload }) { + if (action in actions) { + return actions[action](payload); } - while (true) { - secret = keccak256.array(secret); + return null; +} - const secretBuf = Buffer.from(secret); +const actions = { + phraseToWallet (phrase) { + let secret = keccak256.array(phrase); - if (secp256k1.privateKeyVerify(secretBuf)) { - // No compression, slice out last 64 bytes - const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); - const address = keccak256.array(publicBuf).slice(12); + 
for (let i = 0; i < 16384; i++) { + secret = keccak256.array(secret); + } - if (address[0] !== 0) { - continue; + while (true) { + secret = keccak256.array(secret); + + const secretBuf = Buffer.from(secret); + + if (secp256k1.privateKeyVerify(secretBuf)) { + // No compression, slice out last 64 bytes + const publicBuf = secp256k1.publicKeyCreate(secretBuf, false).slice(-64); + const address = keccak256.array(publicBuf).slice(12); + + if (address[0] !== 0) { + continue; + } + + const wallet = { + secret: bytesToHex(secretBuf), + public: bytesToHex(publicBuf), + address: bytesToHex(address) + }; + + return wallet; } + } + }, - const wallet = { - secret: bytesToHex(secretBuf), - public: bytesToHex(publicBuf), - address: bytesToHex(address) - }; + verifySecret (secret) { + const key = Buffer.from(secret.slice(2), 'hex'); - return wallet; + return secp256k1.privateKeyVerify(key); + }, + + createKeyObject ({ key, password }) { + key = Buffer.from(key); + password = Buffer.from(password); + + const iv = keythereum.crypto.randomBytes(16); + const salt = keythereum.crypto.randomBytes(32); + const keyObject = keythereum.dump(password, key, salt, iv); + + return JSON.stringify(keyObject); + }, + + decryptPrivateKey ({ keyObject, password }) { + password = Buffer.from(password); + + try { + const key = keythereum.recover(password, keyObject); + + // Convert to array to safely send from the worker + return Array.from(key); + } catch (e) { + return null; } } -} +}; self.onmessage = function ({ data }) { - const wallet = phraseToWallet(data); + const result = route(data); - postMessage(wallet); - close(); + postMessage(result); }; // Emulate a web worker in Node.js @@ -73,9 +119,9 @@ class KeyWorker { postMessage (data) { // Force async setTimeout(() => { - const wallet = phraseToWallet(data); + const result = route(data); - this.onmessage({ data: wallet }); + this.onmessage({ data: result }); }, 0); } diff --git a/js/src/api/local/ethkey/workerPool.js 
b/js/src/api/local/ethkey/workerPool.js new file mode 100644 index 000000000..ff5315898 --- /dev/null +++ b/js/src/api/local/ethkey/workerPool.js @@ -0,0 +1,61 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +// Allow a web worker in the browser, with a fallback for Node.js +const hasWebWorkers = typeof Worker !== 'undefined'; +const KeyWorker = hasWebWorkers ? 
require('worker-loader!./worker') + : require('./worker').KeyWorker; + +class WorkerContainer { + busy = false; + _worker = new KeyWorker(); + + action (action, payload) { + if (this.busy) { + throw new Error('Cannot issue an action on a busy worker!'); + } + + this.busy = true; + + return new Promise((resolve, reject) => { + this._worker.postMessage({ action, payload }); + this._worker.onmessage = ({ data }) => { + this.busy = false; + resolve(data); + }; + }); + } +} + +class WorkerPool { + pool = []; + + getWorker () { + let container = this.pool.find((container) => !container.busy); + + if (container) { + return container; + } + + container = new WorkerContainer(); + + this.pool.push(container); + + return container; + } +} + +export default new WorkerPool(); diff --git a/js/src/api/local/middleware.js b/js/src/api/local/middleware.js index d5997c60a..36a8cd2cf 100644 --- a/js/src/api/local/middleware.js +++ b/js/src/api/local/middleware.js @@ -19,7 +19,7 @@ import accounts from './accounts'; import transactions from './transactions'; import { Middleware } from '../transport'; import { inNumber16 } from '../format/input'; -import { phraseToWallet, phraseToAddress } from './ethkey'; +import { phraseToWallet, phraseToAddress, verifySecret } from './ethkey'; import { randomPhrase } from '@parity/wordlist'; export default class LocalAccountsMiddleware extends Middleware { @@ -57,6 +57,22 @@ export default class LocalAccountsMiddleware extends Middleware { }); }); + register('parity_changePassword', ([address, oldPassword, newPassword]) => { + const account = accounts.get(address); + + return account + .decryptPrivateKey(oldPassword) + .then((privateKey) => { + if (!privateKey) { + return false; + } + + account.changePassword(privateKey, newPassword); + + return true; + }); + }); + register('parity_checkRequest', ([id]) => { return transactions.hash(id) || Promise.resolve(null); }); @@ -84,6 +100,17 @@ export default class LocalAccountsMiddleware extends Middleware 
{ }); }); + register('parity_newAccountFromSecret', ([secret, password]) => { + return verifySecret(secret) + .then((isValid) => { + if (!isValid) { + throw new Error('Invalid secret key'); + } + + return accounts.create(secret, password); + }); + }); + register('parity_setAccountMeta', ([address, meta]) => { accounts.get(address).meta = meta; @@ -127,6 +154,12 @@ export default class LocalAccountsMiddleware extends Middleware { return accounts.remove(address, password); }); + register('parity_testPassword', ([address, password]) => { + const account = accounts.get(address); + + return account.isValidPassword(password); + }); + register('signer_confirmRequest', ([id, modify, password]) => { const { gasPrice, @@ -137,30 +170,33 @@ export default class LocalAccountsMiddleware extends Middleware { data } = Object.assign(transactions.get(id), modify); - return this - .rpcRequest('parity_nextNonce', [from]) - .then((nonce) => { - const tx = new EthereumTx({ - nonce, - to, - data, - gasLimit: inNumber16(gasLimit), - gasPrice: inNumber16(gasPrice), - value: inNumber16(value) - }); - const account = accounts.get(from); + const account = accounts.get(from); - tx.sign(account.decryptPrivateKey(password)); - - const serializedTx = `0x${tx.serialize().toString('hex')}`; - - return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); - }) - .then((hash) => { - transactions.confirm(id, hash); - - return {}; + return Promise.all([ + this.rpcRequest('parity_nextNonce', [from]), + account.decryptPrivateKey(password) + ]) + .then(([nonce, privateKey]) => { + const tx = new EthereumTx({ + nonce, + to, + data, + gasLimit: inNumber16(gasLimit), + gasPrice: inNumber16(gasPrice), + value: inNumber16(value) }); + + tx.sign(privateKey); + + const serializedTx = `0x${tx.serialize().toString('hex')}`; + + return this.rpcRequest('eth_sendRawTransaction', [serializedTx]); + }) + .then((hash) => { + transactions.confirm(id, hash); + + return {}; + }); }); register('signer_rejectRequest', 
([id]) => { diff --git a/js/src/api/transport/jsonRpcBase.js b/js/src/api/transport/jsonRpcBase.js index 46df718a7..573204c3e 100644 --- a/js/src/api/transport/jsonRpcBase.js +++ b/js/src/api/transport/jsonRpcBase.js @@ -80,12 +80,16 @@ export default class JsonRpcBase extends EventEmitter { const res = middleware.handle(method, params); if (res != null) { - const result = this._wrapSuccessResult(res); - const json = this.encode(method, params); + // If `res` isn't a promise, we need to wrap it + return Promise.resolve(res) + .then((res) => { + const result = this._wrapSuccessResult(res); + const json = this.encode(method, params); - Logging.send(method, params, { json, result }); + Logging.send(method, params, { json, result }); - return res; + return res; + }); } } diff --git a/js/src/api/util/format.js b/js/src/api/util/format.js index c7594b692..61fc9d32c 100644 --- a/js/src/api/util/format.js +++ b/js/src/api/util/format.js @@ -17,7 +17,7 @@ import { range } from 'lodash'; export function bytesToHex (bytes) { - return '0x' + bytes.map((b) => ('0' + b.toString(16)).slice(-2)).join(''); + return '0x' + Buffer.from(bytes).toString('hex'); } export function cleanupValue (value, type) { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.js b/js/src/modals/CreateAccount/NewAccount/newAccount.js index 04f2f272a..9c6be9f6e 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.js @@ -23,6 +23,7 @@ import { RadioButton, RadioButtonGroup } from 'material-ui/RadioButton'; import { Form, Input, IdentityIcon } from '~/ui'; import PasswordStrength from '~/ui/Form/PasswordStrength'; import { RefreshIcon } from '~/ui/Icons'; +import Loading from '~/ui/Loading'; import ChangeVault from '../ChangeVault'; import styles from '../createAccount.css'; @@ -170,7 +171,9 @@ export default class CreateAccount extends Component { const { accounts } = this.state; if (!accounts) { - return null; + return ( + + 
); } const identities = Object @@ -205,6 +208,14 @@ export default class CreateAccount extends Component { createIdentities = () => { const { createStore } = this.props; + this.setState({ + accounts: null, + selectedAddress: '' + }); + + createStore.setAddress(''); + createStore.setPhrase(''); + return createStore .createIdentities() .then((accounts) => { diff --git a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js index 87c7ba3fc..935fe5b80 100644 --- a/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js +++ b/js/src/modals/CreateAccount/NewAccount/newAccount.spec.js @@ -58,12 +58,12 @@ describe('modals/CreateAccount/NewAccount', () => { return instance.componentWillMount(); }); - it('creates initial accounts', () => { - expect(Object.keys(instance.state.accounts).length).to.equal(7); + it('resets the accounts', () => { + expect(instance.state.accounts).to.be.null; }); - it('sets the initial selected value', () => { - expect(instance.state.selectedAddress).to.equal(Object.keys(instance.state.accounts)[0]); + it('resets the initial selected value', () => { + expect(instance.state.selectedAddress).to.equal(''); }); }); }); diff --git a/js/src/modals/CreateAccount/store.js b/js/src/modals/CreateAccount/store.js index 52dddac80..9bc60d9af 100644 --- a/js/src/modals/CreateAccount/store.js +++ b/js/src/modals/CreateAccount/store.js @@ -69,7 +69,7 @@ export default class Store { return !(this.nameError || this.walletFileError); case 'fromNew': - return !(this.nameError || this.passwordRepeatError); + return !(this.nameError || this.passwordRepeatError) && this.hasAddress; case 'fromPhrase': return !(this.nameError || this.passwordRepeatError); @@ -85,6 +85,10 @@ export default class Store { } } + @computed get hasAddress () { + return !!(this.address); + } + @computed get passwordRepeatError () { return this.password === this.passwordRepeat ? 
null diff --git a/js/src/modals/CreateAccount/store.spec.js b/js/src/modals/CreateAccount/store.spec.js index b02f013b6..9d7bc10a2 100644 --- a/js/src/modals/CreateAccount/store.spec.js +++ b/js/src/modals/CreateAccount/store.spec.js @@ -329,6 +329,7 @@ describe('modals/CreateAccount/Store', () => { describe('createType === fromNew', () => { beforeEach(() => { store.setCreateType('fromNew'); + store.setAddress('0x0000000000000000000000000000000000000000'); }); it('returns true on no errors', () => { @@ -337,11 +338,13 @@ describe('modals/CreateAccount/Store', () => { it('returns false on nameError', () => { store.setName(''); + expect(store.canCreate).to.be.false; }); it('returns false on passwordRepeatError', () => { store.setPassword('testing'); + expect(store.canCreate).to.be.false; }); }); diff --git a/js/src/util/qrscan.js b/js/src/util/qrscan.js index eabc95409..f3cf2f9e9 100644 --- a/js/src/util/qrscan.js +++ b/js/src/util/qrscan.js @@ -92,9 +92,9 @@ export function generateQr (from, tx, hash, rlp) { account: from.substr(2), hash: hash.substr(2), details: { - gasPrice: inNumber10(inHex(tx.gasPrice.toString('hex'))), - gas: inNumber10(inHex(tx.gasLimit.toString('hex'))), - nonce: inNumber10(inHex(tx.nonce.toString('hex'))), + gasPrice: inNumber10(inHex(tx.gasPrice.toString('hex') || '0')), + gas: inNumber10(inHex(tx.gasLimit.toString('hex') || '0')), + nonce: inNumber10(inHex(tx.nonce.toString('hex') || '0')), to: inAddress(tx.to.toString('hex')), value: inHex(tx.value.toString('hex') || '0') } diff --git a/js/webpack/npm.js b/js/webpack/npm.js index b1f41d805..b526b2f0f 100644 --- a/js/webpack/npm.js +++ b/js/webpack/npm.js @@ -24,9 +24,11 @@ const ENV = process.env.NODE_ENV || 'development'; const isProd = ENV === 'production'; const LIBRARY = process.env.LIBRARY; + if (!LIBRARY) { process.exit(-1); } + const SRC = LIBRARY.toLowerCase(); const OUTPUT_PATH = path.join(__dirname, '../.npmjs', SRC); @@ -63,12 +65,18 @@ module.exports = { 
'babel-loader?cacheDirectory=true' ], exclude: /node_modules/ + }, + { + test: /\.js$/, + include: /node_modules\/(ethereumjs-tx|@parity\/wordlist)/, + use: 'babel-loader' } ] }, resolve: { alias: { + 'secp256k1/js': path.resolve(__dirname, '../src/api/local/ethkey/dummy.js'), '~': path.resolve(__dirname, '../src') }, modules: [ @@ -85,15 +93,12 @@ module.exports = { to: 'package.json', transform: function (content, path) { const json = JSON.parse(content.toString()); - json.version = packageJson.version; - // Add tests dependencies to Dev Deps json.devDependencies.chai = packageJson.devDependencies.chai; json.devDependencies.mocha = packageJson.devDependencies.mocha; json.devDependencies.nock = packageJson.devDependencies.nock; - - // Add test script json.scripts.test = 'mocha \'test/*.spec.js\''; + json.version = packageJson.version; return new Buffer(JSON.stringify(json, null, ' '), 'utf-8'); } diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 94c79cb15..56cb60fc5 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -39,7 +39,7 @@ warp = true allow_ips = "all" snapshot_peers = 0 max_pending_peers = 64 -serve_light = true +no_serve_light = false reserved_only = false reserved_peers = "./path_to_file" diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 65b1cfea4..02f0838cc 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -94,6 +94,7 @@ usage! { flag_chain: String = "foundation", or |c: &Config| otry!(c.parity).chain.clone(), flag_keys_path: String = "$BASE/keys", or |c: &Config| otry!(c.parity).keys_path.clone(), flag_identity: String = "", or |c: &Config| otry!(c.parity).identity.clone(), + flag_light: bool = false, or |c: &Config| otry!(c.parity).light, // -- Account Options flag_unlock: Option = None, @@ -149,6 +150,8 @@ usage! 
{ flag_reserved_only: bool = false, or |c: &Config| otry!(c.network).reserved_only.clone(), flag_no_ancient_blocks: bool = false, or |_| None, + flag_no_serve_light: bool = false, + or |c: &Config| otry!(c.network).no_serve_light.clone(), // -- API and Console Options // RPC @@ -164,6 +167,8 @@ usage! { or |c: &Config| otry!(c.rpc).apis.as_ref().map(|vec| vec.join(",")), flag_jsonrpc_hosts: String = "none", or |c: &Config| otry!(c.rpc).hosts.as_ref().map(|vec| vec.join(",")), + flag_jsonrpc_threads: Option = None, + or |c: &Config| otry!(c.rpc).threads.map(Some), // IPC flag_no_ipc: bool = false, @@ -176,21 +181,8 @@ usage! { // DAPPS flag_no_dapps: bool = false, or |c: &Config| otry!(c.dapps).disable.clone(), - flag_dapps_port: u16 = 8080u16, - or |c: &Config| otry!(c.dapps).port.clone(), - flag_dapps_interface: String = "local", - or |c: &Config| otry!(c.dapps).interface.clone(), - flag_dapps_hosts: String = "none", - or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| vec.join(",")), - flag_dapps_cors: Option = None, - or |c: &Config| otry!(c.dapps).cors.clone().map(Some), flag_dapps_path: String = "$BASE/dapps", or |c: &Config| otry!(c.dapps).path.clone(), - flag_dapps_user: Option = None, - or |c: &Config| otry!(c.dapps).user.clone().map(Some), - flag_dapps_pass: Option = None, - or |c: &Config| otry!(c.dapps).pass.clone().map(Some), - flag_dapps_apis_all: bool = false, or |_| None, // Secret Store flag_no_secretstore: bool = false, @@ -330,6 +322,22 @@ usage! 
{ or |c: &Config| otry!(c.misc).log_file.clone().map(Some), flag_no_color: bool = false, or |c: &Config| otry!(c.misc).color.map(|c| !c).clone(), + + + // -- Legacy Options supported in configs + flag_dapps_port: Option = None, + or |c: &Config| otry!(c.dapps).port.clone().map(Some), + flag_dapps_interface: Option = None, + or |c: &Config| otry!(c.dapps).interface.clone().map(Some), + flag_dapps_hosts: Option = None, + or |c: &Config| otry!(c.dapps).hosts.as_ref().map(|vec| Some(vec.join(","))), + flag_dapps_cors: Option = None, + or |c: &Config| otry!(c.dapps).cors.clone().map(Some), + flag_dapps_user: Option = None, + or |c: &Config| otry!(c.dapps).user.clone().map(Some), + flag_dapps_pass: Option = None, + or |c: &Config| otry!(c.dapps).pass.clone().map(Some), + flag_dapps_apis_all: Option = None, or |_| None, } { // Values with optional default value. @@ -374,6 +382,7 @@ struct Operating { db_path: Option, keys_path: Option, identity: Option, + light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -409,6 +418,7 @@ struct Network { node_key: Option, reserved_peers: Option, reserved_only: Option, + no_serve_light: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -419,6 +429,7 @@ struct Rpc { cors: Option, apis: Option>, hosts: Option>, + threads: Option, } #[derive(Default, Debug, PartialEq, RustcDecodable)] @@ -633,6 +644,7 @@ mod tests { flag_db_path: Some("$HOME/.parity/chains".into()), flag_keys_path: "$HOME/.parity/keys".into(), flag_identity: "".into(), + flag_light: false, // -- Account Options flag_unlock: Some("0xdeadbeefcafe0000000000000000000000000000".into()), @@ -663,6 +675,7 @@ mod tests { flag_reserved_peers: Some("./path_to_file".into()), flag_reserved_only: false, flag_no_ancient_blocks: false, + flag_no_serve_light: false, // -- API and Console Options // RPC @@ -672,6 +685,7 @@ mod tests { flag_jsonrpc_cors: Some("null".into()), flag_jsonrpc_apis: "web3,eth,net,parity,traces,rpc".into(), flag_jsonrpc_hosts: 
"none".into(), + flag_jsonrpc_threads: None, // IPC flag_no_ipc: false, @@ -679,15 +693,8 @@ mod tests { flag_ipc_apis: "web3,eth,net,parity,parity_accounts,personal,traces,rpc".into(), // DAPPS - flag_no_dapps: false, - flag_dapps_port: 8080u16, - flag_dapps_interface: "local".into(), - flag_dapps_hosts: "none".into(), - flag_dapps_cors: None, flag_dapps_path: "$HOME/.parity/dapps".into(), - flag_dapps_user: Some("test_user".into()), - flag_dapps_pass: Some("test_pass".into()), - flag_dapps_apis_all: false, + flag_no_dapps: false, flag_no_secretstore: false, flag_secretstore_port: 8082u16, @@ -792,6 +799,14 @@ mod tests { flag_extradata: None, flag_cache: None, flag_warp: Some(true), + // Legacy-Dapps + flag_dapps_port: Some(8080), + flag_dapps_interface: Some("local".into()), + flag_dapps_hosts: Some("none".into()), + flag_dapps_cors: None, + flag_dapps_user: Some("test_user".into()), + flag_dapps_pass: Some("test_pass".into()), + flag_dapps_apis_all: None, // -- Miscellaneous Options flag_version: false, @@ -836,6 +851,7 @@ mod tests { db_path: None, keys_path: None, identity: None, + light: None, }), account: Some(Account { unlock: Some(vec!["0x1".into(), "0x2".into(), "0x3".into()]), @@ -865,6 +881,7 @@ mod tests { node_key: None, reserved_peers: Some("./path/to/reserved_peers".into()), reserved_only: Some(true), + no_serve_light: None, }), rpc: Some(Rpc { disable: Some(true), @@ -873,6 +890,7 @@ mod tests { cors: None, apis: None, hosts: None, + threads: None, }), ipc: Some(Ipc { disable: None, diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 4c1abafbe..1e5f3c0fb 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -70,6 +70,11 @@ Operating Options: --keys-path PATH Specify the path for JSON key files to be found (default: {flag_keys_path}). --identity NAME Specify your node's name. (default: {flag_identity}) + --light Experimental: run in light client mode. 
Light clients + synchronize a bare minimum of data and fetch necessary + data on-demand from the network. Much lower in storage, + potentially higher in bandwidth. Has no effect with + subcommands (default: {flag_light}). Account Options: --unlock ACCOUNTS Unlock ACCOUNTS for the duration of the execution. @@ -129,6 +134,7 @@ Networking Options: --max-pending-peers NUM Allow up to NUM pending connections. (default: {flag_max_pending_peers}) --no-ancient-blocks Disable downloading old blocks after snapshot restoration or warp sync. (default: {flag_no_ancient_blocks}) + --no-serve-light Disable serving of light peers. (default: {flag_no_serve_light}) API and Console Options: --no-jsonrpc Disable the JSON-RPC API server. (default: {flag_no_jsonrpc}) @@ -149,6 +155,8 @@ API and Console Options: is additional security against some attack vectors. Special options: "all", "none", (default: {flag_jsonrpc_hosts}). + --jsonrpc-threads THREADS Enables experimental faster implementation of JSON-RPC server. + Requires Dapps server to be disabled using --no-dapps. (default: {flag_jsonrpc_threads:?}) --no-ipc Disable JSON-RPC over IPC service. (default: {flag_no_ipc}) --ipc-path PATH Specify custom path for JSON-RPC over IPC service @@ -157,29 +165,8 @@ API and Console Options: IPC (default: {flag_ipc_apis}). --no-dapps Disable the Dapps server (e.g. status page). (default: {flag_no_dapps}) - --dapps-port PORT Specify the port portion of the Dapps server - (default: {flag_dapps_port}). - --dapps-interface IP Specify the hostname portion of the Dapps - server, IP should be an interface's IP address, - or local (default: {flag_dapps_interface}). - --dapps-hosts HOSTS List of allowed Host header values. This option will - validate the Host header sent by the browser, it - is additional security against some attack - vectors. Special options: "all", "none", - (default: {flag_dapps_hosts}). - --dapps-cors URL Specify CORS headers for Dapps server APIs. 
- (default: {flag_dapps_cors:?}) - --dapps-user USERNAME Specify username for Dapps server. It will be - used in HTTP Basic Authentication Scheme. - If --dapps-pass is not specified you will be - asked for password on startup. (default: {flag_dapps_user:?}) - --dapps-pass PASSWORD Specify password for Dapps server. Use only in - conjunction with --dapps-user. (default: {flag_dapps_pass:?}) --dapps-path PATH Specify directory where dapps should be installed. (default: {flag_dapps_path}) - --dapps-apis-all Expose all possible RPC APIs on Dapps port. - WARNING: INSECURE. Used only for development. - (default: {flag_dapps_apis_all}) --ipfs-api Enable IPFS-compatible HTTP API. (default: {flag_ipfs_api}) --ipfs-api-port PORT Configure on which port the IPFS HTTP API should listen. (default: {flag_ipfs_api_port}) @@ -392,6 +379,13 @@ Legacy Options: --jsonrpc-off Equivalent to --no-jsonrpc. -w --webapp Does nothing; dapps server is on by default now. --dapps-off Equivalent to --no-dapps. + --dapps-user USERNAME Dapps server authentication has been removed. (default: {flag_dapps_user:?}) + --dapps-pass PASSWORD Dapps server authentication has been removed. (default: {flag_dapps_pass:?}) + --dapps-apis-all Dapps server is merged with RPC server. Use --jsonrpc-apis. (default: {flag_dapps_apis_all:?}) + --dapps-cors URL Dapps server is merged with RPC server. Use --jsonrpc-cors. (default: {flag_dapps_cors:?}) + --dapps-hosts HOSTS Dapps server is merged with RPC server. Use --jsonrpc-hosts. (default: {flag_dapps_hosts:?}) + --dapps-interface IP Dapps server is merged with RPC server. Use --jsonrpc-interface. (default: {flag_dapps_interface:?}) + --dapps-port PORT Dapps server is merged with RPC server. Use --jsonrpc-port. (default: {flag_dapps_port:?}) --rpc Does nothing; JSON-RPC is on by default now. --warp Does nothing; Warp sync is on by default. (default: {flag_warp}) --rpcaddr IP Equivalent to --jsonrpc-interface IP. 
diff --git a/parity/configuration.rs b/parity/configuration.rs index 5dd11bd90..f585dc22e 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -132,12 +132,17 @@ impl Configuration { let warp_sync = !self.args.flag_no_warp && fat_db != Switch::On && tracing != Switch::On && pruning != Pruning::Specific(Algorithm::Archive); let geth_compatibility = self.args.flag_geth; let ui_address = self.ui_port().map(|port| (self.ui_interface(), port)); - let dapps_conf = self.dapps_config(); + let mut dapps_conf = self.dapps_config(); let ipfs_conf = self.ipfs_config(); let signer_conf = self.signer_config(); let secretstore_conf = self.secretstore_config(); let format = self.format()?; + if self.args.flag_jsonrpc_threads.is_some() && dapps_conf.enabled { + dapps_conf.enabled = false; + writeln!(&mut stderr(), "Warning: Disabling Dapps server because fast RPC server was enabled.").expect("Error writing to stderr.") + } + let cmd = if self.args.flag_version { Cmd::Version } else if self.args.cmd_signer { @@ -377,6 +382,8 @@ impl Configuration { check_seal: !self.args.flag_no_seal_check, download_old_blocks: !self.args.flag_no_ancient_blocks, verifier_settings: verifier_settings, + serve_light: !self.args.flag_no_serve_light, + light: self.args.flag_light, }; Cmd::Run(run_cmd) }; @@ -554,19 +561,12 @@ impl Configuration { fn dapps_config(&self) -> DappsConfiguration { DappsConfiguration { enabled: self.dapps_enabled(), - interface: self.dapps_interface(), - port: self.args.flag_dapps_port, - hosts: self.dapps_hosts(), - cors: self.dapps_cors(), - user: self.args.flag_dapps_user.clone(), - pass: self.args.flag_dapps_pass.clone(), dapps_path: PathBuf::from(self.directories().dapps), extra_dapps: if self.args.cmd_dapp { self.args.arg_path.iter().map(|path| PathBuf::from(path)).collect() } else { vec![] }, - all_apis: self.args.flag_dapps_apis_all, } } @@ -746,14 +746,10 @@ impl Configuration { Self::cors(self.args.flag_ipfs_api_cors.as_ref()) } - fn 
dapps_cors(&self) -> Option> { - Self::cors(self.args.flag_dapps_cors.as_ref()) - } - fn hosts(hosts: &str) -> Option> { match hosts { "none" => return Some(Vec::new()), - "all" => return None, + "*" | "all" | "any" => return None, _ => {} } let hosts = hosts.split(',').map(Into::into).collect(); @@ -764,10 +760,6 @@ impl Configuration { Self::hosts(&self.args.flag_jsonrpc_hosts) } - fn dapps_hosts(&self) -> Option> { - Self::hosts(&self.args.flag_dapps_hosts) - } - fn ipfs_hosts(&self) -> Option> { Self::hosts(&self.args.flag_ipfs_api_hosts) } @@ -793,12 +785,17 @@ impl Configuration { fn http_config(&self) -> Result { let conf = HttpConfiguration { - enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + enabled: self.rpc_enabled(), interface: self.rpc_interface(), port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), apis: self.rpc_apis().parse()?, hosts: self.rpc_hosts(), cors: self.rpc_cors(), + threads: match self.args.flag_jsonrpc_threads { + Some(threads) if threads > 0 => Some(threads), + None => None, + _ => return Err("--jsonrpc-threads number needs to be positive.".into()), + } }; Ok(conf) @@ -809,7 +806,7 @@ impl Configuration { name: self.args.flag_identity.clone(), chain: self.chain(), network_port: self.args.flag_port, - rpc_enabled: !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc, + rpc_enabled: self.rpc_enabled(), rpc_interface: self.args.flag_rpcaddr.clone().unwrap_or(self.args.flag_jsonrpc_interface.clone()), rpc_port: self.args.flag_rpcport.unwrap_or(self.args.flag_jsonrpc_port), } @@ -916,13 +913,6 @@ impl Configuration { Self::interface(&self.network_settings().rpc_interface) } - fn dapps_interface(&self) -> String { - match self.args.flag_dapps_interface.as_str() { - "local" => "127.0.0.1", - x => x, - }.into() - } - fn ipfs_interface(&self) -> String { Self::interface(&self.args.flag_ipfs_api_interface) } @@ -938,8 +928,12 @@ impl Configuration { Self::interface(&self.args.flag_stratum_interface) } + 
fn rpc_enabled(&self) -> bool { + !self.args.flag_jsonrpc_off && !self.args.flag_no_jsonrpc + } + fn dapps_enabled(&self) -> bool { - !self.args.flag_dapps_off && !self.args.flag_no_dapps && cfg!(feature = "dapps") + !self.args.flag_dapps_off && !self.args.flag_no_dapps && self.rpc_enabled() && cfg!(feature = "dapps") } fn secretstore_enabled(&self) -> bool { @@ -1209,6 +1203,8 @@ mod tests { check_seal: true, download_old_blocks: true, verifier_settings: Default::default(), + serve_light: true, + light: false, }; expected.secretstore_conf.enabled = cfg!(feature = "secretstore"); assert_eq!(conf.into_command().unwrap().cmd, Cmd::Run(expected)); @@ -1317,23 +1313,6 @@ mod tests { assert_eq!(conf3.rpc_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); } - #[test] - fn should_parse_dapps_hosts() { - // given - - // when - let conf0 = parse(&["parity"]); - let conf1 = parse(&["parity", "--dapps-hosts", "none"]); - let conf2 = parse(&["parity", "--dapps-hosts", "all"]); - let conf3 = parse(&["parity", "--dapps-hosts", "ethcore.io,something.io"]); - - // then - assert_eq!(conf0.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf1.dapps_hosts(), Some(Vec::new())); - assert_eq!(conf2.dapps_hosts(), None); - assert_eq!(conf3.dapps_hosts(), Some(vec!["ethcore.io".into(), "something.io".into()])); - } - #[test] fn should_parse_ipfs_hosts() { // given diff --git a/parity/dapps.rs b/parity/dapps.rs index bbd5f4960..4cdd1e550 100644 --- a/parity/dapps.rs +++ b/parity/dapps.rs @@ -18,26 +18,20 @@ use std::path::PathBuf; use std::sync::Arc; use dir::default_data_path; -use ethcore::client::Client; -use ethcore_rpc::informant::RpcStats; -use ethsync::SyncProvider; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use ethcore::transaction::{Transaction, Action}; use hash_fetch::fetch::Client as FetchClient; +use hash_fetch::urlhint::ContractClient; use helpers::replace_home; -use rpc_apis::{self, SignerService}; +use rpc_apis::SignerService; use 
parity_reactor; +use util::{Bytes, Address, U256}; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { pub enabled: bool, - pub interface: String, - pub port: u16, - pub hosts: Option>, - pub cors: Option>, - pub user: Option, - pub pass: Option, pub dapps_path: PathBuf, pub extra_dapps: Vec, - pub all_apis: bool, } impl Default for Configuration { @@ -45,80 +39,94 @@ impl Default for Configuration { let data_dir = default_data_path(); Configuration { enabled: true, - interface: "127.0.0.1".into(), - port: 8080, - hosts: Some(Vec::new()), - cors: None, - user: None, - pass: None, dapps_path: replace_home(&data_dir, "$BASE/dapps").into(), extra_dapps: vec![], - all_apis: false, } } } -pub struct Dependencies { - pub apis: Arc, +/// Registrar implementation of the full client. +pub struct FullRegistrar { + /// Handle to the full client. pub client: Arc, - pub sync: Arc, +} + +impl ContractClient for FullRegistrar { + fn registrar(&self) -> Result { + self.client.additional_params().get("registrar") + .ok_or_else(|| "Registrar not defined.".into()) + .and_then(|registrar| { + registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) + }) + } + + fn call(&self, address: Address, data: Bytes) -> Result { + let from = Address::default(); + let transaction = Transaction { + nonce: self.client.latest_nonce(&from), + action: Action::Call(address), + gas: U256::from(50_000_000), + gas_price: U256::default(), + value: U256::default(), + data: data, + }.fake_sign(from); + + self.client.call(&transaction, BlockId::Latest, Default::default()) + .map_err(|e| format!("{:?}", e)) + .map(|executed| { + executed.output + }) + } +} + +// TODO: light client implementation forwarding to OnDemand and waiting for future +// to resolve. 
+pub struct Dependencies { + pub sync_status: Arc<::parity_dapps::SyncStatus>, + pub contract_client: Arc, pub remote: parity_reactor::TokioRemote, pub fetch: FetchClient, pub signer: Arc, - pub stats: Arc, } -pub fn new(configuration: Configuration, deps: Dependencies) -> Result, String> { +pub fn new(configuration: Configuration, deps: Dependencies) + -> Result, String> +{ if !configuration.enabled { return Ok(None); } - let url = format!("{}:{}", configuration.interface, configuration.port); - let addr = url.parse().map_err(|_| format!("Invalid Webapps listen host/port given: {}", url))?; - - let auth = configuration.user.as_ref().map(|username| { - let password = configuration.pass.as_ref().map_or_else(|| { - use rpassword::read_password; - println!("Type password for WebApps server (user: {}): ", username); - let pass = read_password().unwrap(); - println!("OK, got it. Starting server..."); - pass - }, |pass| pass.to_owned()); - (username.to_owned(), password) - }); - - Ok(Some(setup_dapps_server( + dapps_middleware( deps, configuration.dapps_path, configuration.extra_dapps, - &addr, - configuration.hosts, - configuration.cors, - auth, - configuration.all_apis, - )?)) + ).map(Some) } -pub use self::server::WebappServer; -pub use self::server::setup_dapps_server; +pub use self::server::Middleware; +pub use self::server::dapps_middleware; #[cfg(not(feature = "dapps"))] mod server { use super::Dependencies; - use std::net::SocketAddr; use std::path::PathBuf; + use ethcore_rpc::{hyper, RequestMiddleware, RequestMiddlewareAction}; - pub struct WebappServer; - pub fn setup_dapps_server( + pub struct Middleware; + + impl RequestMiddleware for Middleware { + fn on_request( + &self, req: &hyper::server::Request, control: &hyper::Control + ) -> RequestMiddlewareAction { + unreachable!() + } + } + + pub fn dapps_middleware( _deps: Dependencies, _dapps_path: PathBuf, _extra_dapps: Vec, - _url: &SocketAddr, - _allowed_hosts: Option>, - _cors: Option>, - _auth: 
Option<(String, String)>, - _all_apis: bool, - ) -> Result { + ) -> Result { Err("Your Parity version has been compiled without WebApps support.".into()) } } @@ -128,109 +136,31 @@ mod server { use super::Dependencies; use std::path::PathBuf; use std::sync::Arc; - use std::net::SocketAddr; - use std::io; - use util::{Bytes, Address, U256}; - use ansi_term::Colour; - use ethcore::transaction::{Transaction, Action}; - use ethcore::client::{Client, BlockChainClient, BlockId}; - use ethcore_dapps::{AccessControlAllowOrigin, Host}; - use ethcore_rpc::is_major_importing; - use hash_fetch::urlhint::ContractClient; + use hash_fetch::fetch::Client as FetchClient; + use parity_dapps; use parity_reactor; - use rpc_apis; - pub use ethcore_dapps::Server as WebappServer; + pub type Middleware = parity_dapps::Middleware; - pub fn setup_dapps_server( + pub fn dapps_middleware( deps: Dependencies, dapps_path: PathBuf, extra_dapps: Vec, - url: &SocketAddr, - allowed_hosts: Option>, - cors: Option>, - auth: Option<(String, String)>, - all_apis: bool, - ) -> Result { - use ethcore_dapps as dapps; - - let server = dapps::ServerBuilder::new( - &dapps_path, - Arc::new(Registrar { client: deps.client.clone() }), - parity_reactor::Remote::new(deps.remote.clone()), - ); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let cors: Option> = cors.map(|cors| cors.into_iter().map(AccessControlAllowOrigin::from).collect()); - - let sync = deps.sync.clone(); - let client = deps.client.clone(); + ) -> Result { let signer = deps.signer.clone(); - let server = server - .fetch(deps.fetch.clone()) - .sync_status(Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info()))) - .web_proxy_tokens(Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token))) - .extra_dapps(&extra_dapps) - .signer_address(deps.signer.address()) - .allowed_hosts(allowed_hosts.into()) - .extra_cors_headers(cors.into()); + let parity_remote 
= parity_reactor::Remote::new(deps.remote.clone()); + let web_proxy_tokens = Arc::new(move |token| signer.is_valid_web_proxy_access_token(&token)); - let api_set = if all_apis { - warn!("{}", Colour::Red.bold().paint("*** INSECURE *** Running Dapps with all APIs exposed.")); - info!("If you do not intend this, exit now."); - rpc_apis::ApiSet::SafeContext - } else { - rpc_apis::ApiSet::UnsafeContext - }; - let apis = rpc_apis::setup_rpc(deps.stats, deps.apis.clone(), api_set); - let start_result = match auth { - None => { - server.start_unsecured_http(url, apis, deps.remote) - }, - Some((username, password)) => { - server.start_basic_auth_http(url, &username, &password, apis, deps.remote) - }, - }; - - match start_result { - Err(dapps::ServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("WebApps address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --dapps-port and --dapps-interface options.", url)), - _ => Err(format!("WebApps io error: {}", err)), - }, - Err(e) => Err(format!("WebApps error: {:?}", e)), - Ok(server) => Ok(server), - } - } - - struct Registrar { - client: Arc, - } - - impl ContractClient for Registrar { - fn registrar(&self) -> Result { - self.client.additional_params().get("registrar") - .ok_or_else(|| "Registrar not defined.".into()) - .and_then(|registrar| { - registrar.parse().map_err(|e| format!("Invalid registrar address: {:?}", e)) - }) - } - - fn call(&self, address: Address, data: Bytes) -> Result { - let from = Address::default(); - let transaction = Transaction { - nonce: self.client.latest_nonce(&from), - action: Action::Call(address), - gas: U256::from(50_000_000), - gas_price: U256::default(), - value: U256::default(), - data: data, - }.fake_sign(from); - - self.client.call(&transaction, BlockId::Latest, Default::default()) - .map_err(|e| format!("{:?}", e)) - .map(|executed| { - executed.output - }) - } + 
Ok(parity_dapps::Middleware::new( + parity_remote, + deps.signer.address(), + dapps_path, + extra_dapps, + deps.contract_client, + deps.sync_status, + web_proxy_tokens, + deps.fetch.clone(), + )) } } diff --git a/parity/deprecated.rs b/parity/deprecated.rs index 97c6ffe4a..820181efa 100644 --- a/parity/deprecated.rs +++ b/parity/deprecated.rs @@ -21,94 +21,89 @@ use cli::Args; pub enum Deprecated { DoesNothing(&'static str), Replaced(&'static str, &'static str), + Removed(&'static str), } impl fmt::Display for Deprecated { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { match *self { - Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default", s), - Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead", old, new), + Deprecated::DoesNothing(s) => write!(f, "Option '{}' does nothing. It's on by default.", s), + Deprecated::Replaced(old, new) => write!(f, "Option '{}' is deprecated. Please use '{}' instead.", old, new), + Deprecated::Removed(s) => write!(f, "Option '{}' has been removed and is no longer supported.", s) } } } -impl Deprecated { - fn jsonrpc() -> Self { - Deprecated::DoesNothing("--jsonrpc") - } - - fn rpc() -> Self { - Deprecated::DoesNothing("--rpc") - } - - fn jsonrpc_off() -> Self { - Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc") - } - - fn webapp() -> Self { - Deprecated::DoesNothing("--webapp") - } - - fn dapps_off() -> Self { - Deprecated::Replaced("--dapps-off", "--no-dapps") - } - - fn ipcdisable() -> Self { - Deprecated::Replaced("--ipcdisable", "--no-ipc") - } - - fn ipc_off() -> Self { - Deprecated::Replaced("--ipc-off", "--no-ipc") - } - - fn etherbase() -> Self { - Deprecated::Replaced("--etherbase", "--author") - } - - fn extradata() -> Self { - Deprecated::Replaced("--extradata", "--extra-data") - } -} - pub fn find_deprecated(args: &Args) -> Vec { let mut result = vec![]; if args.flag_jsonrpc { - result.push(Deprecated::jsonrpc()); + 
result.push(Deprecated::DoesNothing("--jsonrpc")); } if args.flag_rpc { - result.push(Deprecated::rpc()); + result.push(Deprecated::DoesNothing("--rpc")); } if args.flag_jsonrpc_off { - result.push(Deprecated::jsonrpc_off()); + result.push(Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc")); } if args.flag_webapp { - result.push(Deprecated::webapp()) + result.push(Deprecated::DoesNothing("--webapp")); } if args.flag_dapps_off { - result.push(Deprecated::dapps_off()); + result.push(Deprecated::Replaced("--dapps-off", "--no-dapps")); } if args.flag_ipcdisable { - result.push(Deprecated::ipcdisable()); + result.push(Deprecated::Replaced("--ipcdisable", "--no-ipc")); } if args.flag_ipc_off { - result.push(Deprecated::ipc_off()); + result.push(Deprecated::Replaced("--ipc-off", "--no-ipc")); } if args.flag_etherbase.is_some() { - result.push(Deprecated::etherbase()); + result.push(Deprecated::Replaced("--etherbase", "--author")); } if args.flag_extradata.is_some() { - result.push(Deprecated::extradata()); + result.push(Deprecated::Replaced("--extradata", "--extra-data")); } + // Removed in 1.7 + if args.flag_dapps_port.is_some() { + result.push(Deprecated::Replaced("--dapps-port", "--jsonrpc-port")); + } + + if args.flag_dapps_interface.is_some() { + result.push(Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface")); + } + + if args.flag_dapps_hosts.is_some() { + result.push(Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts")); + } + + if args.flag_dapps_cors.is_some() { + result.push(Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors")); + } + + if args.flag_dapps_user.is_some() { + result.push(Deprecated::Removed("--dapps-user")); + } + + if args.flag_dapps_pass.is_some() { + result.push(Deprecated::Removed("--dapps-pass")); + } + + if args.flag_dapps_apis_all.is_some() { + result.push(Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis")); + } + + // Removed in 1.8 + result } @@ -131,17 +126,31 @@ mod tests { args.flag_ipc_off = true; 
args.flag_etherbase = Some(Default::default()); args.flag_extradata = Some(Default::default()); + args.flag_dapps_port = Some(Default::default()); + args.flag_dapps_interface = Some(Default::default()); + args.flag_dapps_hosts = Some(Default::default()); + args.flag_dapps_cors = Some(Default::default()); + args.flag_dapps_user = Some(Default::default()); + args.flag_dapps_pass = Some(Default::default()); + args.flag_dapps_apis_all = Some(Default::default()); args }), vec![ - Deprecated::jsonrpc(), - Deprecated::rpc(), - Deprecated::jsonrpc_off(), - Deprecated::webapp(), - Deprecated::dapps_off(), - Deprecated::ipcdisable(), - Deprecated::ipc_off(), - Deprecated::etherbase(), - Deprecated::extradata(), + Deprecated::DoesNothing("--jsonrpc"), + Deprecated::DoesNothing("--rpc"), + Deprecated::Replaced("--jsonrpc-off", "--no-jsonrpc"), + Deprecated::DoesNothing("--webapp"), + Deprecated::Replaced("--dapps-off", "--no-dapps"), + Deprecated::Replaced("--ipcdisable", "--no-ipc"), + Deprecated::Replaced("--ipc-off", "--no-ipc"), + Deprecated::Replaced("--etherbase", "--author"), + Deprecated::Replaced("--extradata", "--extra-data"), + Deprecated::Replaced("--dapps-port", "--jsonrpc-port"), + Deprecated::Replaced("--dapps-interface", "--jsonrpc-interface"), + Deprecated::Replaced("--dapps-hosts", "--jsonrpc-hosts"), + Deprecated::Replaced("--dapps-cors", "--jsonrpc-cors"), + Deprecated::Removed("--dapps-user"), + Deprecated::Removed("--dapps-pass"), + Deprecated::Replaced("--dapps-apis-all", "--jsonrpc-apis"), ]); } } diff --git a/parity/ipfs.rs b/parity/ipfs.rs index 760868f91..45c3f7062 100644 --- a/parity/ipfs.rs +++ b/parity/ipfs.rs @@ -15,10 +15,9 @@ // along with Parity. If not, see . 
use std::sync::Arc; -use parity_ipfs_api::{self, AccessControlAllowOrigin, Host}; +use parity_ipfs_api::{self, AccessControlAllowOrigin, Host, Listening}; use parity_ipfs_api::error::ServerError; use ethcore::client::BlockChainClient; -use hyper::server::Listening; #[derive(Debug, PartialEq, Clone)] pub struct Configuration { diff --git a/parity/light_helpers/mod.rs b/parity/light_helpers/mod.rs new file mode 100644 index 000000000..488f970c2 --- /dev/null +++ b/parity/light_helpers/mod.rs @@ -0,0 +1,21 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Utilities and helpers for the light client. + +mod queue_cull; + +pub use self::queue_cull::QueueCull; diff --git a/parity/light_helpers/queue_cull.rs b/parity/light_helpers/queue_cull.rs new file mode 100644 index 000000000..10865d485 --- /dev/null +++ b/parity/light_helpers/queue_cull.rs @@ -0,0 +1,99 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Service for culling the light client's transaction queue. + +use std::sync::Arc; +use std::time::Duration; + +use ethcore::service::ClientIoMessage; +use ethsync::LightSync; +use io::{IoContext, IoHandler, TimerToken}; + +use light::client::Client; +use light::on_demand::{request, OnDemand}; +use light::TransactionQueue; + +use futures::{future, stream, Future, Stream}; + +use parity_reactor::Remote; + +use util::RwLock; + +// Attepmt to cull once every 10 minutes. +const TOKEN: TimerToken = 1; +const TIMEOUT_MS: u64 = 1000 * 60 * 10; + +// But make each attempt last only 9 minutes +const PURGE_TIMEOUT_MS: u64 = 1000 * 60 * 9; + +/// Periodically culls the transaction queue of mined transactions. +pub struct QueueCull { + /// A handle to the client, for getting the latest block header. + pub client: Arc, + /// A handle to the sync service. + pub sync: Arc, + /// The on-demand request service. + pub on_demand: Arc, + /// The transaction queue. + pub txq: Arc>, + /// Event loop remote. 
+ pub remote: Remote, +} + +impl IoHandler for QueueCull { + fn initialize(&self, io: &IoContext) { + io.register_timer(TOKEN, TIMEOUT_MS).expect("Error registering timer"); + } + + fn timeout(&self, _io: &IoContext, timer: TimerToken) { + if timer != TOKEN { return } + + let senders = self.txq.read().queued_senders(); + if senders.is_empty() { return } + + let (sync, on_demand, txq) = (self.sync.clone(), self.on_demand.clone(), self.txq.clone()); + let best_header = self.client.best_block_header(); + let start_nonce = self.client.engine().account_start_nonce(); + + info!(target: "cull", "Attempting to cull queued transactions from {} senders.", senders.len()); + self.remote.spawn_with_timeout(move || { + let maybe_fetching = sync.with_context(move |ctx| { + // fetch the nonce of each sender in the queue. + let nonce_futures = senders.iter() + .map(|&address| request::Account { header: best_header.clone(), address: address }) + .map(|request| on_demand.account(ctx, request)) + .map(move |fut| fut.map(move |x| x.map(|acc| acc.nonce).unwrap_or(start_nonce))) + .zip(senders.iter()) + .map(|(fut, &addr)| fut.map(move |nonce| (addr, nonce))); + + // as they come in, update each sender to the new nonce. + stream::futures_unordered(nonce_futures) + .fold(txq, |txq, (address, nonce)| { + txq.write().cull(address, nonce); + future::ok(txq) + }) + .map(|_| ()) // finally, discard the txq handle and log errors. 
+ .map_err(|_| debug!(target: "cull", "OnDemand prematurely closed channel.")) + }); + + match maybe_fetching { + Some(fut) => fut.boxed(), + None => future::ok(()).boxed(), + } + }, Duration::from_millis(PURGE_TIMEOUT_MS), || {}) + } +} diff --git a/parity/main.rs b/parity/main.rs index 2044b3ee0..0d55055da 100644 --- a/parity/main.rs +++ b/parity/main.rs @@ -28,7 +28,7 @@ extern crate ctrlc; extern crate docopt; extern crate env_logger; extern crate fdlimit; -extern crate hyper; +extern crate futures; extern crate isatty; extern crate jsonrpc_core; extern crate num_cpus; @@ -54,6 +54,7 @@ extern crate ethcore_logger; extern crate ethcore_rpc; extern crate ethcore_signer; extern crate ethcore_util as util; +extern crate ethkey; extern crate ethsync; extern crate parity_hash_fetch as hash_fetch; extern crate parity_ipfs_api; @@ -73,7 +74,11 @@ extern crate ethcore_stratum; extern crate ethcore_secretstore; #[cfg(feature = "dapps")] -extern crate ethcore_dapps; +extern crate parity_dapps; + +#[cfg(test)] +#[macro_use] +extern crate pretty_assertions; #[cfg(windows)] extern crate ws2_32; #[cfg(windows)] extern crate winapi; @@ -101,6 +106,7 @@ mod deprecated; mod dir; mod helpers; mod informant; +mod light_helpers; mod migration; mod modules; mod params; diff --git a/parity/migration.rs b/parity/migration.rs index 445724325..c4e5f5ac6 100644 --- a/parity/migration.rs +++ b/parity/migration.rs @@ -30,7 +30,7 @@ use ethcore::migrations::Extract; /// Database is assumed to be at default version, when no version file is found. const DEFAULT_VERSION: u32 = 5; /// Current version of database models. -const CURRENT_VERSION: u32 = 11; +const CURRENT_VERSION: u32 = 12; /// First version of the consolidated database. const CONSOLIDATION_VERSION: u32 = 9; /// Defines how many items are migrated to the new version of database at once. 
@@ -147,6 +147,7 @@ fn consolidated_database_migrations(compaction_profile: &CompactionProfile) -> R let mut manager = MigrationManager::new(default_migration_settings(compaction_profile)); manager.add_migration(migrations::ToV10::new()).map_err(|_| Error::MigrationImpossible)?; manager.add_migration(migrations::TO_V11).map_err(|_| Error::MigrationImpossible)?; + manager.add_migration(migrations::TO_V12).map_err(|_| Error::MigrationImpossible)?; Ok(manager) } diff --git a/parity/rpc.rs b/parity/rpc.rs index a435f24db..70a91c851 100644 --- a/parity/rpc.rs +++ b/parity/rpc.rs @@ -14,24 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::fmt; +use std::{io, fmt}; use std::sync::Arc; -use std::net::SocketAddr; -use std::io; +use dapps; use dir::default_data_path; -use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use ethcore_rpc::informant::{RpcStats, Middleware}; +use ethcore_rpc::{self as rpc, HttpServerError, Metadata, Origin, AccessControlAllowOrigin, Host}; use helpers::parity_ipc_path; -use hyper; use jsonrpc_core::MetaIoHandler; -use rpc_apis; -use rpc_apis::ApiSet; use parity_reactor::TokioRemote; +use rpc_apis::{self, ApiSet}; -pub use ethcore_rpc::{IpcServer, HttpServer}; +pub use ethcore_rpc::{IpcServer, HttpServer, RequestMiddleware}; -#[derive(Debug, PartialEq)] +#[derive(Debug, Clone, PartialEq)] pub struct HttpConfiguration { pub enabled: bool, pub interface: String, @@ -39,6 +36,7 @@ pub struct HttpConfiguration { pub apis: ApiSet, pub cors: Option>, pub hosts: Option>, + pub threads: Option, } impl Default for HttpConfiguration { @@ -50,6 +48,7 @@ impl Default for HttpConfiguration { apis: ApiSet::UnsafeContext, cors: None, hosts: Some(Vec::new()), + threads: None, } } } @@ -82,20 +81,24 @@ impl fmt::Display for IpcConfiguration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct Dependencies { + pub apis: Arc, pub remote: 
TokioRemote, pub stats: Arc, } pub struct RpcExtractor; -impl rpc::HttpMetaExtractor for RpcExtractor { - fn read_metadata(&self, req: &hyper::server::Request) -> Metadata { - let origin = req.headers().get::() - .map(|origin| format!("{}://{}", origin.scheme, origin.host)) - .unwrap_or_else(|| "unknown".into()); +impl rpc::HttpMetaExtractor for RpcExtractor { + type Metadata = Metadata; + + fn read_metadata(&self, origin: String, dapps_origin: Option) -> Metadata { let mut metadata = Metadata::default(); - metadata.origin = Origin::Rpc(origin); + + metadata.origin = match (origin.as_str(), dapps_origin) { + ("null", Some(dapp)) => Origin::Dapps(dapp.into()), + _ => Origin::Rpc(origin), + }; + metadata } } @@ -109,52 +112,101 @@ impl rpc::IpcMetaExtractor for RpcExtractor { } } -pub fn new_http(conf: HttpConfiguration, deps: &Dependencies) -> Result, String> { +fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler> + where D: rpc_apis::Dependencies +{ + rpc_apis::setup_rpc(deps.stats.clone(), &*deps.apis, apis) +} + +pub fn new_http( + conf: HttpConfiguration, + deps: &Dependencies, + middleware: Option +) -> Result, String> { if !conf.enabled { return Ok(None); } let url = format!("{}:{}", conf.interface, conf.port); let addr = url.parse().map_err(|_| format!("Invalid JSONRPC listen host/port given: {}", url))?; - Ok(Some(setup_http_rpc_server(deps, &addr, conf.cors, conf.hosts, conf.apis)?)) -} + let handler = setup_apis(conf.apis, deps); + let remote = deps.remote.clone(); -fn setup_apis(apis: ApiSet, deps: &Dependencies) -> MetaIoHandler { - rpc_apis::setup_rpc(deps.stats.clone(), deps.apis.clone(), apis) -} + let cors_domains: Option> = conf.cors.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); + let allowed_hosts: Option> = conf.hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); + + let start_result = rpc::start_http( + &addr, + cors_domains.into(), + allowed_hosts.into(), + handler, + remote, + 
RpcExtractor, + match (conf.threads, middleware) { + (Some(threads), None) => rpc::HttpSettings::Threads(threads), + (None, middleware) => rpc::HttpSettings::Dapps(middleware), + (Some(_), Some(_)) => { + return Err("Dapps and fast multi-threaded RPC server cannot be enabled at the same time.".into()) + }, + } + ); -pub fn setup_http_rpc_server( - dependencies: &Dependencies, - url: &SocketAddr, - cors_domains: Option>, - allowed_hosts: Option>, - apis: ApiSet -) -> Result { - let handler = setup_apis(apis, dependencies); - let remote = dependencies.remote.clone(); - let cors_domains: Option> = cors_domains.map(|domains| domains.into_iter().map(AccessControlAllowOrigin::from).collect()); - let allowed_hosts: Option> = allowed_hosts.map(|hosts| hosts.into_iter().map(Host::from).collect()); - let start_result = rpc::start_http(url, cors_domains.into(), allowed_hosts.into(), handler, remote, RpcExtractor); match start_result { - Err(HttpServerError::IoError(err)) => match err.kind() { - io::ErrorKind::AddrInUse => Err(format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url)), + Ok(server) => Ok(Some(server)), + Err(HttpServerError::Io(err)) => match err.kind() { + io::ErrorKind::AddrInUse => Err( + format!("RPC address {} is already in use, make sure that another instance of an Ethereum client is not running or change the address using the --jsonrpc-port and --jsonrpc-interface options.", url) + ), _ => Err(format!("RPC io error: {}", err)), }, Err(e) => Err(format!("RPC error: {:?}", e)), - Ok(server) => Ok(server), } } -pub fn new_ipc(conf: IpcConfiguration, deps: &Dependencies) -> Result, String> { - if !conf.enabled { return Ok(None); } - Ok(Some(setup_ipc_rpc_server(deps, &conf.socket_addr, conf.apis)?)) -} - -pub fn setup_ipc_rpc_server(dependencies: &Dependencies, addr: &str, apis: ApiSet) -> Result { - let handler = 
setup_apis(apis, dependencies); +pub fn new_ipc( + conf: IpcConfiguration, + dependencies: &Dependencies +) -> Result, String> { + if !conf.enabled { + return Ok(None); + } + let handler = setup_apis(conf.apis, dependencies); let remote = dependencies.remote.clone(); - match rpc::start_ipc(addr, handler, remote, RpcExtractor) { + match rpc::start_ipc(&conf.socket_addr, handler, remote, RpcExtractor) { + Ok(server) => Ok(Some(server)), Err(io_error) => Err(format!("RPC io error: {}", io_error)), - Ok(server) => Ok(server) + } +} + +#[cfg(test)] +mod tests { + use super::RpcExtractor; + use ethcore_rpc::{HttpMetaExtractor, Origin}; + + #[test] + fn should_extract_rpc_origin() { + // given + let extractor = RpcExtractor; + + // when + let meta = extractor.read_metadata("http://parity.io".into(), None); + let meta1 = extractor.read_metadata("http://parity.io".into(), Some("ignored".into())); + + // then + assert_eq!(meta.origin, Origin::Rpc("http://parity.io".into())); + assert_eq!(meta1.origin, Origin::Rpc("http://parity.io".into())); + } + + #[test] + fn should_dapps_origin() { + // given + let extractor = RpcExtractor; + let dapp = "https://wallet.ethereum.org".to_owned(); + + // when + let meta = extractor.read_metadata("null".into(), Some(dapp.clone())); + + // then + assert_eq!(meta.origin, Origin::Dapps(dapp.into())); } } diff --git a/parity/rpc_apis.rs b/parity/rpc_apis.rs index e168f029c..ea1eabc61 100644 --- a/parity/rpc_apis.rs +++ b/parity/rpc_apis.rs @@ -27,12 +27,14 @@ use ethcore::client::Client; use ethcore::miner::{Miner, ExternalMiner}; use ethcore::snapshot::SnapshotService; use ethcore_rpc::{Metadata, NetworkSettings}; -use ethcore_rpc::informant::{Middleware, RpcStats, ClientNotifier}; -use ethcore_rpc::dispatch::FullDispatcher; -use ethsync::{ManageNetwork, SyncProvider}; +use ethcore_rpc::informant::{ActivityNotifier, Middleware, RpcStats, ClientNotifier}; +use ethcore_rpc::dispatch::{FullDispatcher, LightDispatcher}; +use 
ethsync::{ManageNetwork, SyncProvider, LightSync}; use hash_fetch::fetch::Client as FetchClient; use jsonrpc_core::{MetaIoHandler}; +use light::{TransactionQueue as LightTransactionQueue, Cache as LightDataCache}; use updater::Updater; +use util::{Mutex, RwLock}; use ethcore_logger::RotatingLogger; #[derive(Debug, PartialEq, Clone, Eq, Hash)] @@ -81,7 +83,7 @@ impl FromStr for Api { } } -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum ApiSet { SafeContext, UnsafeContext, @@ -112,25 +114,6 @@ impl FromStr for ApiSet { } } -pub struct Dependencies { - pub signer_service: Arc, - pub client: Arc, - pub snapshot: Arc, - pub sync: Arc, - pub net: Arc, - pub secret_store: Option>, - pub miner: Arc, - pub external_miner: Arc, - pub logger: Arc, - pub settings: Arc, - pub net_service: Arc, - pub updater: Arc, - pub geth_compatibility: bool, - pub dapps_interface: Option, - pub dapps_port: Option, - pub fetch: FetchClient, -} - fn to_modules(apis: &[Api]) -> BTreeMap { let mut modules = BTreeMap::new(); for api in apis { @@ -151,6 +134,274 @@ fn to_modules(apis: &[Api]) -> BTreeMap { modules } +/// RPC dependencies can be used to initialize RPC endpoints from APIs. +pub trait Dependencies { + type Notifier: ActivityNotifier; + + /// Create the activity notifier. + fn activity_notifier(&self) -> Self::Notifier; + + /// Extend the given I/O handler with endpoints for each API. + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]); +} + +/// RPC dependencies for a full node. 
+pub struct FullDependencies { + pub signer_service: Arc, + pub client: Arc, + pub snapshot: Arc, + pub sync: Arc, + pub net: Arc, + pub secret_store: Option>, + pub miner: Arc, + pub external_miner: Arc, + pub logger: Arc, + pub settings: Arc, + pub net_service: Arc, + pub updater: Arc, + pub geth_compatibility: bool, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, +} + +impl Dependencies for FullDependencies { + type Notifier = ClientNotifier; + + fn activity_notifier(&self) -> ClientNotifier { + ClientNotifier { + client: self.client.clone(), + } + } + + fn extend_with_set(&self, handler: &mut MetaIoHandler, apis: &[Api]) { + use ethcore_rpc::v1::*; + + macro_rules! add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) + } else { + $handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) + } + } + } + } + + let dispatcher = FullDispatcher::new(Arc::downgrade(&self.client), Arc::downgrade(&self.miner)); + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(NetClient::new(&self.sync).to_delegate()); + }, + Api::Eth => { + let client = EthClient::new( + &self.client, + &self.snapshot, + &self.sync, + &self.secret_store, + &self.miner, + &self.external_miner, + EthClientOptions { + pending_nonce_from_queue: self.geth_compatibility, + allow_pending_receipt_query: !self.geth_compatibility, + send_block_number_in_get_work: !self.geth_compatibility, + } + ); + handler.extend_with(client.to_delegate()); + + let filter_client = EthFilterClient::new(&self.client, &self.miner); + 
handler.extend_with(filter_client.to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + }, + Api::Personal => { + handler.extend_with(PersonalClient::new(&self.secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + handler.extend_with(SignerClient::new(&self.secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(ParityClient::new( + &self.client, + &self.miner, + &self.sync, + &self.updater, + &self.net_service, + &self.secret_store, + self.logger.clone(), + self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + handler.extend_with(ParityAccountsClient::new(&self.secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(ParitySetClient::new( + &self.client, + &self.miner, + &self.updater, + &self.net_service, + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(TracesClient::new(&self.client, &self.miner).to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + +/// Light client notifier. Doesn't do anything yet, but might in the future. +pub struct LightClientNotifier; + +impl ActivityNotifier for LightClientNotifier { + fn active(&self) {} +} + +/// RPC dependencies for a light client. 
+pub struct LightDependencies { + pub signer_service: Arc, + pub client: Arc<::light::client::Client>, + pub sync: Arc, + pub net: Arc, + pub secret_store: Arc, + pub logger: Arc, + pub settings: Arc, + pub on_demand: Arc<::light::on_demand::OnDemand>, + pub cache: Arc>, + pub transaction_queue: Arc>, + pub dapps_interface: Option, + pub dapps_port: Option, + pub fetch: FetchClient, + pub geth_compatibility: bool, +} + +impl Dependencies for LightDependencies { + type Notifier = LightClientNotifier; + + fn activity_notifier(&self) -> Self::Notifier { LightClientNotifier } + fn extend_with_set(&self, handler: &mut MetaIoHandler>, apis: &[Api]) { + use ethcore_rpc::v1::*; + + let dispatcher = LightDispatcher::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.cache.clone(), + self.transaction_queue.clone(), + ); + + macro_rules! add_signing_methods { + ($namespace:ident, $handler:expr, $deps:expr) => { + { + let deps = &$deps; + let dispatcher = dispatcher.clone(); + let secret_store = Some(deps.secret_store.clone()); + if deps.signer_service.is_enabled() { + $handler.extend_with($namespace::to_delegate( + SigningQueueClient::new(&deps.signer_service, dispatcher, &secret_store) + )) + } else { + $handler.extend_with( + $namespace::to_delegate(SigningUnsafeClient::new(&secret_store, dispatcher)) + ) + } + } + } + } + + for api in apis { + match *api { + Api::Web3 => { + handler.extend_with(Web3Client::new().to_delegate()); + }, + Api::Net => { + handler.extend_with(light::NetClient::new(self.sync.clone()).to_delegate()); + }, + Api::Eth => { + let client = light::EthClient::new( + self.sync.clone(), + self.client.clone(), + self.on_demand.clone(), + self.transaction_queue.clone(), + self.secret_store.clone(), + self.cache.clone(), + ); + handler.extend_with(client.to_delegate()); + + // TODO: filters. 
+ add_signing_methods!(EthSigning, handler, self); + }, + Api::Personal => { + let secret_store = Some(self.secret_store.clone()); + handler.extend_with(PersonalClient::new(&secret_store, dispatcher.clone(), self.geth_compatibility).to_delegate()); + }, + Api::Signer => { + let secret_store = Some(self.secret_store.clone()); + handler.extend_with(SignerClient::new(&secret_store, dispatcher.clone(), &self.signer_service).to_delegate()); + }, + Api::Parity => { + let signer = match self.signer_service.is_enabled() { + true => Some(self.signer_service.clone()), + false => None, + }; + handler.extend_with(light::ParityClient::new( + Arc::new(dispatcher.clone()), + self.secret_store.clone(), + self.logger.clone(), + self.settings.clone(), + signer, + self.dapps_interface.clone(), + self.dapps_port, + ).to_delegate()); + + add_signing_methods!(EthSigning, handler, self); + add_signing_methods!(ParitySigning, handler, self); + }, + Api::ParityAccounts => { + let secret_store = Some(self.secret_store.clone()); + handler.extend_with(ParityAccountsClient::new(&secret_store).to_delegate()); + }, + Api::ParitySet => { + handler.extend_with(light::ParitySetClient::new( + self.sync.clone(), + self.fetch.clone(), + ).to_delegate()) + }, + Api::Traces => { + handler.extend_with(light::TracesClient.to_delegate()) + }, + Api::Rpc => { + let modules = to_modules(&apis); + handler.extend_with(RpcClient::new(modules).to_delegate()); + } + } + } + } +} + impl ApiSet { pub fn list_apis(&self) -> HashSet { let mut safe_list = vec![Api::Web3, Api::Net, Api::Eth, Api::Parity, Api::Traces, Api::Rpc] @@ -172,110 +423,12 @@ impl ApiSet { } } -macro_rules! 
add_signing_methods { - ($namespace:ident, $handler:expr, $deps:expr) => { - { - let handler = &mut $handler; - let deps = &$deps; - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); - if deps.signer_service.is_enabled() { - handler.extend_with($namespace::to_delegate(SigningQueueClient::new(&deps.signer_service, dispatcher, &deps.secret_store))) - } else { - handler.extend_with($namespace::to_delegate(SigningUnsafeClient::new(&deps.secret_store, dispatcher))) - } - } - } -} - -pub fn setup_rpc(stats: Arc, deps: Arc, apis: ApiSet) -> MetaIoHandler { - use ethcore_rpc::v1::*; - - let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, ClientNotifier { - client: deps.client.clone(), - })); - +pub fn setup_rpc(stats: Arc, deps: &D, apis: ApiSet) -> MetaIoHandler> { + let mut handler = MetaIoHandler::with_middleware(Middleware::new(stats, deps.activity_notifier())); // it's turned into vector, cause ont of the cases requires &[] let apis = apis.list_apis().into_iter().collect::>(); - let dispatcher = FullDispatcher::new(Arc::downgrade(&deps.client), Arc::downgrade(&deps.miner)); + deps.extend_with_set(&mut handler, &apis[..]); - for api in &apis { - match *api { - Api::Web3 => { - handler.extend_with(Web3Client::new().to_delegate()); - }, - Api::Net => { - handler.extend_with(NetClient::new(&deps.sync).to_delegate()); - }, - Api::Eth => { - let client = EthClient::new( - &deps.client, - &deps.snapshot, - &deps.sync, - &deps.secret_store, - &deps.miner, - &deps.external_miner, - EthClientOptions { - pending_nonce_from_queue: deps.geth_compatibility, - allow_pending_receipt_query: !deps.geth_compatibility, - send_block_number_in_get_work: !deps.geth_compatibility, - } - ); - handler.extend_with(client.to_delegate()); - - let filter_client = EthFilterClient::new(&deps.client, &deps.miner); - handler.extend_with(filter_client.to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - }, - 
Api::Personal => { - handler.extend_with(PersonalClient::new(&deps.secret_store, dispatcher.clone(), deps.geth_compatibility).to_delegate()); - }, - Api::Signer => { - handler.extend_with(SignerClient::new(&deps.secret_store, dispatcher.clone(), &deps.signer_service).to_delegate()); - }, - Api::Parity => { - let signer = match deps.signer_service.is_enabled() { - true => Some(deps.signer_service.clone()), - false => None, - }; - handler.extend_with(ParityClient::new( - &deps.client, - &deps.miner, - &deps.sync, - &deps.updater, - &deps.net_service, - &deps.secret_store, - deps.logger.clone(), - deps.settings.clone(), - signer, - deps.dapps_interface.clone(), - deps.dapps_port, - ).to_delegate()); - - add_signing_methods!(EthSigning, handler, deps); - add_signing_methods!(ParitySigning, handler, deps); - }, - Api::ParityAccounts => { - handler.extend_with(ParityAccountsClient::new(&deps.secret_store).to_delegate()); - }, - Api::ParitySet => { - handler.extend_with(ParitySetClient::new( - &deps.client, - &deps.miner, - &deps.updater, - &deps.net_service, - deps.fetch.clone(), - ).to_delegate()) - }, - Api::Traces => { - handler.extend_with(TracesClient::new(&deps.client, &deps.miner).to_delegate()) - }, - Api::Rpc => { - let modules = to_modules(&apis); - handler.extend_with(RpcClient::new(modules).to_delegate()); - } - } - } handler } diff --git a/parity/run.rs b/parity/run.rs index c438c25a5..1ad124dbe 100644 --- a/parity/run.rs +++ b/parity/run.rs @@ -30,13 +30,13 @@ use ethcore::account_provider::{AccountProvider, AccountProviderSettings}; use ethcore::miner::{Miner, MinerService, ExternalMiner, MinerOptions}; use ethcore::snapshot; use ethcore::verification::queue::VerifierSettings; +use light::Cache as LightDataCache; use ethsync::SyncConfig; use informant::Informant; use updater::{UpdatePolicy, Updater}; use parity_reactor::EventLoop; use hash_fetch::fetch::{Fetch, Client as FetchClient}; -use rpc::{HttpConfiguration, IpcConfiguration}; use params::{ SpecType, 
Pruning, AccountsConfig, GasPricerConfig, MinerExtras, Switch, tracing_switch_to_bool, fatdb_switch_to_bool, mode_switch_to_bool @@ -61,6 +61,10 @@ const SNAPSHOT_PERIOD: u64 = 10000; // how many blocks to wait before starting a periodic snapshot. const SNAPSHOT_HISTORY: u64 = 100; +// Number of minutes before a given gas price corpus should expire. +// Light client only. +const GAS_CORPUS_EXPIRATION_MINUTES: i64 = 60 * 6; + // Pops along with error messages when a password is missing or invalid. const VERIFY_PASSWORD_HINT: &'static str = "Make sure valid password is present in files passed using `--password` or in the configuration file."; @@ -76,8 +80,8 @@ pub struct RunCmd { pub daemon: Option, pub logger_config: LogConfig, pub miner_options: MinerOptions, - pub http_conf: HttpConfiguration, - pub ipc_conf: IpcConfiguration, + pub http_conf: rpc::HttpConfiguration, + pub ipc_conf: rpc::IpcConfiguration, pub net_conf: NetworkConfiguration, pub network_id: Option, pub warp_sync: bool, @@ -108,13 +112,11 @@ pub struct RunCmd { pub check_seal: bool, pub download_old_blocks: bool, pub verifier_settings: VerifierSettings, + pub serve_light: bool, + pub light: bool, } -pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configuration) -> Result<(), String> { - if !dapps_conf.enabled { - return Err("Cannot use UI command with Dapps turned off.".into()) - } - +pub fn open_ui(signer_conf: &signer::Configuration) -> Result<(), String> { if !signer_conf.enabled { return Err("Cannot use UI command with UI turned off.".into()) } @@ -127,12 +129,12 @@ pub fn open_ui(dapps_conf: &dapps::Configuration, signer_conf: &signer::Configur Ok(()) } -pub fn open_dapp(dapps_conf: &dapps::Configuration, dapp: &str) -> Result<(), String> { +pub fn open_dapp(dapps_conf: &dapps::Configuration, rpc_conf: &rpc::HttpConfiguration, dapp: &str) -> Result<(), String> { if !dapps_conf.enabled { return Err("Cannot use DAPP command with Dapps turned off.".into()) } - let url = 
format!("http://{}:{}/{}/", dapps_conf.interface, dapps_conf.port, dapp); + let url = format!("http://{}:{}/{}/", rpc_conf.interface, rpc_conf.port, dapp); url::open(&url); Ok(()) } @@ -153,21 +155,191 @@ impl ::local_store::NodeInfo for FullNodeInfo { } } +// helper for light execution. +fn execute_light(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { + use light::client as light_client; + use ethsync::{LightSyncParams, LightSync, ManageNetwork}; + use util::RwLock; + + let panic_handler = PanicHandler::new_in_arc(); + + // load spec + let spec = cmd.spec.spec()?; + + // load genesis hash + let genesis_hash = spec.genesis_header().hash(); + + // database paths + let db_dirs = cmd.dirs.database(genesis_hash, cmd.spec.legacy_fork_name(), spec.data_dir.clone()); + + // user defaults path + let user_defaults_path = db_dirs.user_defaults_path(); + + // load user defaults + let user_defaults = UserDefaults::load(&user_defaults_path)?; + + // select pruning algorithm + let algorithm = cmd.pruning.to_algorithm(&user_defaults); + + let compaction = cmd.compaction.compaction_profile(db_dirs.db_root_path().as_path()); + + // execute upgrades + execute_upgrades(&cmd.dirs.base, &db_dirs, algorithm, compaction.clone())?; + + // create dirs used by parity + cmd.dirs.create_dirs(cmd.dapps_conf.enabled, cmd.signer_conf.enabled, cmd.secretstore_conf.enabled)?; + + info!("Starting {}", Colour::White.bold().paint(version())); + info!("Running in experimental {} mode.", Colour::Blue.bold().paint("Light Client")); + + // start client and create transaction queue. 
+ let mut config = light_client::Config { + queue: Default::default(), + chain_column: ::ethcore::db::COL_LIGHT_CHAIN, + db_cache_size: Some(cmd.cache_config.blockchain() as usize * 1024 * 1024), + db_compaction: compaction, + db_wal: cmd.wal, + }; + + config.queue.max_mem_use = cmd.cache_config.queue() as usize * 1024 * 1024; + config.queue.verifier_settings = cmd.verifier_settings; + + let service = light_client::Service::start(config, &spec, &db_dirs.client_path(algorithm)) + .map_err(|e| format!("Error starting light client: {}", e))?; + let txq = Arc::new(RwLock::new(::light::transaction_queue::TransactionQueue::default())); + let provider = ::light::provider::LightProvider::new(service.client().clone(), txq.clone()); + + // start network. + // set up bootnodes + let mut net_conf = cmd.net_conf; + if !cmd.custom_bootnodes { + net_conf.boot_nodes = spec.nodes.clone(); + } + + // TODO: configurable cache size. + let cache = LightDataCache::new(Default::default(), ::time::Duration::minutes(GAS_CORPUS_EXPIRATION_MINUTES)); + let cache = Arc::new(::util::Mutex::new(cache)); + + // start on_demand service. + let on_demand = Arc::new(::light::on_demand::OnDemand::new(cache.clone())); + + // set network path. + net_conf.net_config_path = Some(db_dirs.network_path().to_string_lossy().into_owned()); + let sync_params = LightSyncParams { + network_config: net_conf.into_basic().map_err(|e| format!("Failed to produce network config: {}", e))?, + client: Arc::new(provider), + network_id: cmd.network_id.unwrap_or(spec.network_id()), + subprotocol_name: ::ethsync::LIGHT_PROTOCOL, + handlers: vec![on_demand.clone()], + }; + let light_sync = LightSync::new(sync_params).map_err(|e| format!("Error starting network: {}", e))?; + let light_sync = Arc::new(light_sync); + + // spin up event loop + let event_loop = EventLoop::spawn(); + + // queue cull service. 
+ let queue_cull = Arc::new(::light_helpers::QueueCull { + client: service.client().clone(), + sync: light_sync.clone(), + on_demand: on_demand.clone(), + txq: txq.clone(), + remote: event_loop.remote(), + }); + + service.register_handler(queue_cull).map_err(|e| format!("Error attaching service: {:?}", e))?; + + // start the network. + light_sync.start_network(); + + // fetch service + let fetch = FetchClient::new().map_err(|e| format!("Error starting fetch client: {:?}", e))?; + let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; + + // prepare account provider + let account_provider = Arc::new(prepare_account_provider(&cmd.spec, &cmd.dirs, &spec.data_dir, cmd.acc_conf, &passwords)?); + let rpc_stats = Arc::new(informant::RpcStats::default()); + let signer_path = cmd.signer_conf.signer_path.clone(); + + // start RPCs + let deps_for_rpc_apis = Arc::new(rpc_apis::LightDependencies { + signer_service: Arc::new(rpc_apis::SignerService::new(move || { + signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) + }, cmd.ui_address)), + client: service.client().clone(), + sync: light_sync.clone(), + net: light_sync.clone(), + secret_store: account_provider, + logger: logger, + settings: Arc::new(cmd.net_settings), + on_demand: on_demand, + cache: cache, + transaction_queue: txq, + dapps_interface: match cmd.dapps_conf.enabled { + true => Some(cmd.http_conf.interface.clone()), + false => None, + }, + dapps_port: match cmd.dapps_conf.enabled { + true => Some(cmd.http_conf.port), + false => None, + }, + fetch: fetch, + geth_compatibility: cmd.geth_compatibility, + }); + + let dependencies = rpc::Dependencies { + apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + stats: rpc_stats.clone(), + }; + + // start rpc servers + let _http_server = rpc::new_http(cmd.http_conf, &dependencies, None)?; + let _ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; + + // the signer server + let signer_deps = signer::Dependencies { 
+ apis: deps_for_rpc_apis.clone(), + remote: event_loop.raw_remote(), + rpc_stats: rpc_stats.clone(), + }; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let _signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; + + // TODO: Dapps + + // minimal informant thread. Just prints block number every 5 seconds. + // TODO: integrate with informant.rs + let informant_client = service.client().clone(); + ::std::thread::spawn(move || loop { + info!("#{}", informant_client.best_block_header().number()); + ::std::thread::sleep(::std::time::Duration::from_secs(5)); + }); + + // wait for ctrl-c. + Ok(wait_for_exit(panic_handler, None, None, can_restart)) +} + pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> Result<(bool, Option), String> { if cmd.ui && cmd.dapps_conf.enabled { // Check if Parity is already running - let addr = format!("{}:{}", cmd.dapps_conf.interface, cmd.dapps_conf.port); + let addr = format!("{}:{}", cmd.signer_conf.interface, cmd.signer_conf.port); if !TcpListener::bind(&addr as &str).is_ok() { - return open_ui(&cmd.dapps_conf, &cmd.signer_conf).map(|_| (false, None)); + return open_ui(&cmd.signer_conf).map(|_| (false, None)); } } + // increase max number of open files + raise_fd_limit(); + + // run as light client. 
+ if cmd.light { + return execute_light(cmd, can_restart, logger); + } + // set up panic handler let panic_handler = PanicHandler::new_in_arc(); - // increase max number of open files - raise_fd_limit(); - // load spec let spec = cmd.spec.spec()?; @@ -249,6 +421,7 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R sync_config.fork_block = spec.fork_block(); sync_config.warp_sync = cmd.warp_sync; sync_config.download_old_blocks = cmd.download_old_blocks; + sync_config.serve_light = cmd.serve_light; let passwords = passwords_from_files(&cmd.acc_conf.password_files)?; @@ -412,7 +585,8 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R true => None, false => Some(account_provider.clone()) }; - let deps_for_rpc_apis = Arc::new(rpc_apis::Dependencies { + + let deps_for_rpc_apis = Arc::new(rpc_apis::FullDependencies { signer_service: Arc::new(rpc_apis::SignerService::new(move || { signer::generate_new_token(signer_path.clone()).map_err(|e| format!("{:?}", e)) }, cmd.ui_address)), @@ -429,11 +603,11 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R updater: updater.clone(), geth_compatibility: cmd.geth_compatibility, dapps_interface: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.interface.clone()), + true => Some(cmd.http_conf.interface.clone()), false => None, }, dapps_port: match cmd.dapps_conf.enabled { - true => Some(cmd.dapps_conf.port), + true => Some(cmd.http_conf.port), false => None, }, fetch: fetch.clone(), @@ -445,21 +619,24 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R stats: rpc_stats.clone(), }; - // start rpc servers - let http_server = rpc::new_http(cmd.http_conf, &dependencies)?; - let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; - // the dapps server - let dapps_deps = dapps::Dependencies { - apis: deps_for_rpc_apis.clone(), - client: client.clone(), - sync: sync_provider.clone(), - remote: event_loop.raw_remote(), - fetch: fetch.clone(), - signer: 
deps_for_rpc_apis.signer_service.clone(), - stats: rpc_stats.clone(), + let dapps_deps = { + let (sync, client) = (sync_provider.clone(), client.clone()); + let contract_client = Arc::new(::dapps::FullRegistrar { client: client.clone() }); + + dapps::Dependencies { + sync_status: Arc::new(move || is_major_importing(Some(sync.status().state), client.queue_info())), + contract_client: contract_client, + remote: event_loop.raw_remote(), + fetch: fetch.clone(), + signer: deps_for_rpc_apis.signer_service.clone(), + } }; - let dapps_server = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + let dapps_middleware = dapps::new(cmd.dapps_conf.clone(), dapps_deps)?; + + // start rpc servers + let http_server = rpc::new_http(cmd.http_conf.clone(), &dependencies, dapps_middleware)?; + let ipc_server = rpc::new_ipc(cmd.ipc_conf, &dependencies)?; // the signer server let signer_deps = signer::Dependencies { @@ -467,10 +644,13 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R remote: event_loop.raw_remote(), rpc_stats: rpc_stats.clone(), }; - let signer_server = signer::start(cmd.signer_conf.clone(), signer_deps)?; + let signing_queue = deps_for_rpc_apis.signer_service.queue(); + let signer_server = signer::start(cmd.signer_conf.clone(), signing_queue, signer_deps)?; // secret store key server - let secretstore_deps = secretstore::Dependencies { }; + let secretstore_deps = secretstore::Dependencies { + client: client.clone(), + }; let secretstore_key_server = secretstore::start(cmd.secretstore_conf.clone(), secretstore_deps); // the ipfs server @@ -524,18 +704,18 @@ pub fn execute(cmd: RunCmd, can_restart: bool, logger: Arc) -> R // start ui if cmd.ui { - open_ui(&cmd.dapps_conf, &cmd.signer_conf)?; + open_ui(&cmd.signer_conf)?; } if let Some(dapp) = cmd.dapp { - open_dapp(&cmd.dapps_conf, &dapp)?; + open_dapp(&cmd.dapps_conf, &cmd.http_conf, &dapp)?; } // Handle exit let restart = wait_for_exit(panic_handler, Some(updater), Some(client), can_restart); // drop 
this stuff as soon as exit detected. - drop((http_server, ipc_server, dapps_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); + drop((http_server, ipc_server, signer_server, secretstore_key_server, ipfs_server, event_loop)); info!("Finishing work, please wait..."); diff --git a/parity/secretstore.rs b/parity/secretstore.rs index 79a209504..d31614193 100644 --- a/parity/secretstore.rs +++ b/parity/secretstore.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::sync::Arc; use dir::default_data_path; +use ethcore::client::Client; use helpers::replace_home; #[derive(Debug, PartialEq, Clone)] @@ -30,10 +32,10 @@ pub struct Configuration { pub data_path: String, } -#[derive(Debug, PartialEq, Clone)] /// Secret store dependencies pub struct Dependencies { - // the only dependency will be BlockChainClient + /// Blockchain client. + pub client: Arc, } #[cfg(not(feature = "secretstore"))] @@ -53,6 +55,7 @@ mod server { #[cfg(feature="secretstore")] mod server { + use ethkey; use ethcore_secretstore; use super::{Configuration, Dependencies}; @@ -63,14 +66,39 @@ mod server { impl KeyServer { /// Create new key server - pub fn new(conf: Configuration, _deps: Dependencies) -> Result { + pub fn new(conf: Configuration, deps: Dependencies) -> Result { + let key_pairs = vec![ + ethkey::KeyPair::from_secret("6c26a76e9b31048d170873a791401c7e799a11f0cefc0171cc31a49800967509".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("7e94018b3731afdb3b4e6f4c3e179475640166da12e1d1b0c7d80729b1a5b452".parse().unwrap()).unwrap(), + ethkey::KeyPair::from_secret("5ab6ed2a52c33142380032c39a03a86b12eacb3fa4b53bc16d84f51318156f8c".parse().unwrap()).unwrap(), + ]; let conf = ethcore_secretstore::ServiceConfiguration { - listener_addr: conf.interface, - listener_port: conf.port, - data_path: conf.data_path, + listener_address: ethcore_secretstore::NodeAddress { + address: 
conf.interface.clone(), + port: conf.port, + }, + data_path: conf.data_path.clone(), + // TODO: this is test configuration. how it will be configured in production? + cluster_config: ethcore_secretstore::ClusterConfiguration { + threads: 4, + self_private: (***key_pairs[(conf.port - 8082) as usize].secret()).into(), + listener_address: ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: conf.port + 10, + }, + nodes: key_pairs.iter().enumerate().map(|(i, kp)| (kp.public().clone(), + ethcore_secretstore::NodeAddress { + address: conf.interface.clone(), + port: 8082 + 10 + (i as u16), + })).collect(), + allow_connecting_to_higher_nodes: true, + encryption_config: ethcore_secretstore::EncryptionConfiguration { + key_check_timeout_ms: 1000, + }, + } }; - let key_server = ethcore_secretstore::start(conf) + let key_server = ethcore_secretstore::start(deps.client, conf) .map_err(Into::::into)?; Ok(KeyServer { diff --git a/parity/signer.rs b/parity/signer.rs index 664a0e6e4..29429311e 100644 --- a/parity/signer.rs +++ b/parity/signer.rs @@ -23,7 +23,7 @@ pub use ethcore_signer::Server as SignerServer; use ansi_term::Colour; use dir::default_data_path; use ethcore_rpc::informant::RpcStats; -use ethcore_rpc; +use ethcore_rpc::{self, ConfirmationsQueue}; use ethcore_signer as signer; use helpers::replace_home; use parity_reactor::TokioRemote; @@ -55,8 +55,8 @@ impl Default for Configuration { } } -pub struct Dependencies { - pub apis: Arc, +pub struct Dependencies { + pub apis: Arc, pub remote: TokioRemote, pub rpc_stats: Arc, } @@ -77,11 +77,15 @@ impl signer::MetaExtractor for StandardExtractor { } } -pub fn start(conf: Configuration, deps: Dependencies) -> Result, String> { +pub fn start( + conf: Configuration, + queue: Arc, + deps: Dependencies, +) -> Result, String> { if !conf.enabled { Ok(None) } else { - Ok(Some(do_start(conf, deps)?)) + Ok(Some(do_start(conf, queue, deps)?)) } } @@ -125,14 +129,18 @@ pub fn generate_new_token(path: String) -> 
io::Result { Ok(code) } -fn do_start(conf: Configuration, deps: Dependencies) -> Result { +fn do_start( + conf: Configuration, + queue: Arc, + deps: Dependencies +) -> Result { let addr = format!("{}:{}", conf.interface, conf.port) .parse() .map_err(|_| format!("Invalid port specified: {}", conf.port))?; let start_result = { let server = signer::ServerBuilder::new( - deps.apis.signer_service.queue(), + queue, codes_path(conf.signer_path), ); if conf.skip_origin_validation { @@ -141,7 +149,7 @@ fn do_start(conf: Configuration, deps: Dependencies) -> Result for HttpServerError { + fn from(e: http::Error) -> Self { + use self::HttpServerError::*; + match e { + http::Error::Io(io) => Io(io), + http::Error::Other(hyper) => Hyper(hyper), + } + } +} + +impl From for HttpServerError { + fn from(e: minihttp::Error) -> Self { + use self::HttpServerError::*; + match e { + minihttp::Error::Io(io) => Io(io), + } + } +} + +/// HTTP RPC server impl-independent metadata extractor +pub trait HttpMetaExtractor: Send + Sync + 'static { + /// Type of Metadata + type Metadata: jsonrpc_core::Metadata; + /// Extracts metadata from given params. + fn read_metadata(&self, origin: String, dapps_origin: Option) -> Self::Metadata; +} + +/// HTTP server implementation-specific settings. +pub enum HttpSettings { + /// Enable fast minihttp server with given number of threads. + Threads(usize), + /// Enable standard server with optional dapps middleware. + Dapps(Option), +} + /// Start http server asynchronously and returns result with `Server` handle on success or an error. 
-pub fn start_http( +pub fn start_http( addr: &SocketAddr, cors_domains: http::DomainsValidation, allowed_hosts: http::DomainsValidation, handler: H, remote: tokio_core::reactor::Remote, extractor: T, + settings: HttpSettings, ) -> Result where M: jsonrpc_core::Metadata, S: jsonrpc_core::Middleware, H: Into>, - T: HttpMetaExtractor, + T: HttpMetaExtractor, + R: RequestMiddleware, { - http::ServerBuilder::new(handler) - .event_loop_remote(remote) - .meta_extractor(extractor) - .cors(cors_domains.into()) - .allowed_hosts(allowed_hosts.into()) - .start_http(addr) + Ok(match settings { + HttpSettings::Dapps(middleware) => { + let mut builder = http::ServerBuilder::new(handler) + .event_loop_remote(remote) + .meta_extractor(metadata::HyperMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()); + + if let Some(dapps) = middleware { + builder = builder.request_middleware(dapps) + } + builder.start_http(addr) + .map(HttpServer::Hyper)? + }, + HttpSettings::Threads(threads) => { + minihttp::ServerBuilder::new(handler) + .threads(threads) + .meta_extractor(metadata::MiniMetaExtractor::new(extractor)) + .cors(cors_domains.into()) + .allowed_hosts(allowed_hosts.into()) + .start_http(addr) + .map(HttpServer::Mini)? + }, + }) } /// Start ipc server asynchronously and returns result with `Server` handle on success or an error. diff --git a/rpc/src/metadata.rs b/rpc/src/metadata.rs new file mode 100644 index 000000000..af3a5d183 --- /dev/null +++ b/rpc/src/metadata.rs @@ -0,0 +1,74 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use jsonrpc_core; +use http; +use hyper; +use minihttp; +use HttpMetaExtractor; + +pub struct HyperMetaExtractor { + extractor: T, +} + +impl HyperMetaExtractor { + pub fn new(extractor: T) -> Self { + HyperMetaExtractor { + extractor: extractor, + } + } +} + +impl http::MetaExtractor for HyperMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &hyper::server::Request) -> M { + let origin = req.headers().get::() + .map(|origin| format!("{}://{}", origin.scheme, origin.host)) + .unwrap_or_else(|| "unknown".into()); + let dapps_origin = req.headers().get_raw("x-parity-origin") + .and_then(|raw| raw.one()) + .map(|raw| String::from_utf8_lossy(raw).into_owned()); + self.extractor.read_metadata(origin, dapps_origin) + } +} + +pub struct MiniMetaExtractor { + extractor: T, +} + +impl MiniMetaExtractor { + pub fn new(extractor: T) -> Self { + MiniMetaExtractor { + extractor: extractor, + } + } +} + +impl minihttp::MetaExtractor for MiniMetaExtractor where + T: HttpMetaExtractor, + M: jsonrpc_core::Metadata, +{ + fn read_metadata(&self, req: &minihttp::Req) -> M { + let origin = req.header("origin") + .unwrap_or_else(|| "unknown") + .to_owned(); + let dapps_origin = req.header("x-parity-origin").map(|h| h.to_owned()); + + self.extractor.read_metadata(origin, dapps_origin) + } +} diff --git a/rpc/src/v1/helpers/dispatch.rs b/rpc/src/v1/helpers/dispatch.rs index 8a99a7239..e1b298b9f 100644 --- a/rpc/src/v1/helpers/dispatch.rs +++ b/rpc/src/v1/helpers/dispatch.rs @@ -207,7 +207,6 @@ pub fn fetch_gas_price_corpus( } /// Dispatcher for light clients -- fetches default gas 
price, next nonce, etc. from network. -/// Light client `ETH` RPC. #[derive(Clone)] pub struct LightDispatcher { /// Sync service. diff --git a/rpc/src/v1/impls/light/eth.rs b/rpc/src/v1/impls/light/eth.rs index 6f283af02..fc8731bc2 100644 --- a/rpc/src/v1/impls/light/eth.rs +++ b/rpc/src/v1/impls/light/eth.rs @@ -108,7 +108,22 @@ impl Eth for EthClient { } fn syncing(&self) -> Result { - rpc_unimplemented!() + if self.sync.is_major_importing() { + let chain_info = self.client.chain_info(); + let current_block = U256::from(chain_info.best_block_number); + let highest_block = self.sync.highest_block().map(U256::from) + .unwrap_or_else(|| current_block.clone()); + + Ok(SyncStatus::Info(SyncInfo { + starting_block: U256::from(self.sync.start_block()).into(), + current_block: current_block.into(), + highest_block: highest_block.into(), + warp_chunks_amount: None, + warp_chunks_processed: None, + })) + } else { + Ok(SyncStatus::None) + } } fn author(&self, _meta: Self::Metadata) -> BoxFuture { diff --git a/rpc/src/v1/impls/light/mod.rs b/rpc/src/v1/impls/light/mod.rs index 8c2e6d240..38ba2438e 100644 --- a/rpc/src/v1/impls/light/mod.rs +++ b/rpc/src/v1/impls/light/mod.rs @@ -23,7 +23,10 @@ pub mod eth; pub mod parity; pub mod parity_set; pub mod trace; +pub mod net; pub use self::eth::EthClient; pub use self::parity::ParityClient; pub use self::parity_set::ParitySetClient; +pub use self::net::NetClient; +pub use self::trace::TracesClient; diff --git a/rpc/src/v1/impls/light/net.rs b/rpc/src/v1/impls/light/net.rs new file mode 100644 index 000000000..4f0ede48f --- /dev/null +++ b/rpc/src/v1/impls/light/net.rs @@ -0,0 +1,49 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +//! Net rpc implementation. +use std::sync::Arc; +use jsonrpc_core::Error; +use ethsync::LightSyncProvider; +use v1::traits::Net; + +/// Net rpc implementation. +pub struct NetClient { + sync: Arc +} + +impl NetClient where S: LightSyncProvider { + /// Creates new NetClient. + pub fn new(sync: Arc) -> Self { + NetClient { + sync: sync, + } + } +} + +impl Net for NetClient where S: LightSyncProvider { + fn version(&self) -> Result { + Ok(format!("{}", self.sync.network_id()).to_owned()) + } + + fn peer_count(&self) -> Result { + Ok(format!("0x{:x}", self.sync.peer_numbers().connected as u64).to_owned()) + } + + fn is_listening(&self) -> Result { + Ok(true) + } +} diff --git a/rpc/src/v1/impls/net.rs b/rpc/src/v1/impls/net.rs index 5588805ab..399b2201a 100644 --- a/rpc/src/v1/impls/net.rs +++ b/rpc/src/v1/impls/net.rs @@ -21,7 +21,7 @@ use ethsync::SyncProvider; use v1::traits::Net; /// Net rpc implementation. 
-pub struct NetClient where S: SyncProvider { +pub struct NetClient { sync: Weak } diff --git a/rpc/src/v1/tests/helpers/sync_provider.rs b/rpc/src/v1/tests/helpers/sync_provider.rs index fe2ae3f59..83c7db015 100644 --- a/rpc/src/v1/tests/helpers/sync_provider.rs +++ b/rpc/src/v1/tests/helpers/sync_provider.rs @@ -83,7 +83,7 @@ impl SyncProvider for TestSyncProvider { difficulty: Some(40.into()), head: 50.into(), }), - les_info: None, + pip_info: None, }, PeerInfo { id: None, @@ -96,7 +96,7 @@ impl SyncProvider for TestSyncProvider { difficulty: None, head: 60.into() }), - les_info: None, + pip_info: None, } ] } diff --git a/rpc/src/v1/tests/mocked/parity.rs b/rpc/src/v1/tests/mocked/parity.rs index d47c810f1..8de64c25c 100644 --- a/rpc/src/v1/tests/mocked/parity.rs +++ b/rpc/src/v1/tests/mocked/parity.rs @@ -304,7 +304,7 @@ fn rpc_parity_net_peers() { let io = deps.default_client(); let request = r#"{"jsonrpc": "2.0", "method": "parity_netPeers", "params":[], "id": 1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"les":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"les":null}}]},"id":1}"#; + let response = 
r#"{"jsonrpc":"2.0","result":{"active":0,"connected":120,"max":50,"peers":[{"caps":["eth/62","eth/63"],"id":"node1","name":"Parity/1","network":{"localAddress":"127.0.0.1:8888","remoteAddress":"127.0.0.1:7777"},"protocols":{"eth":{"difficulty":"0x28","head":"0000000000000000000000000000000000000000000000000000000000000032","version":62},"pip":null}},{"caps":["eth/63","eth/64"],"id":null,"name":"Parity/2","network":{"localAddress":"127.0.0.1:3333","remoteAddress":"Handshake"},"protocols":{"eth":{"difficulty":null,"head":"000000000000000000000000000000000000000000000000000000000000003c","version":64},"pip":null}}]},"id":1}"#; assert_eq!(io.handle_request_sync(request), Some(response.to_owned())); } diff --git a/rpc/src/v1/types/mod.rs b/rpc/src/v1/types/mod.rs index d76c92deb..7d0ae0541 100644 --- a/rpc/src/v1/types/mod.rs +++ b/rpc/src/v1/types/mod.rs @@ -65,7 +65,7 @@ pub use self::receipt::Receipt; pub use self::rpc_settings::RpcSettings; pub use self::sync::{ SyncStatus, SyncInfo, Peers, PeerInfo, PeerNetworkInfo, PeerProtocolsInfo, - TransactionStats, ChainStatus, EthProtocolInfo, LesProtocolInfo, + TransactionStats, ChainStatus, EthProtocolInfo, PipProtocolInfo, }; pub use self::trace::{LocalizedTrace, TraceResults}; pub use self::trace_filter::TraceFilter; diff --git a/rpc/src/v1/types/sync.rs b/rpc/src/v1/types/sync.rs index d83a3a64c..813fe8cb3 100644 --- a/rpc/src/v1/types/sync.rs +++ b/rpc/src/v1/types/sync.rs @@ -83,8 +83,8 @@ pub struct PeerNetworkInfo { pub struct PeerProtocolsInfo { /// Ethereum protocol information pub eth: Option, - /// LES protocol information. - pub les: Option, + /// PIP protocol information. 
+ pub pip: Option, } /// Peer Ethereum protocol information @@ -108,10 +108,10 @@ impl From for EthProtocolInfo { } } -/// Peer LES protocol information +/// Peer PIP protocol information #[derive(Default, Debug, Serialize)] -pub struct LesProtocolInfo { - /// Negotiated LES protocol version +pub struct PipProtocolInfo { + /// Negotiated PIP protocol version pub version: u32, /// Peer total difficulty pub difficulty: U256, @@ -119,9 +119,9 @@ pub struct LesProtocolInfo { pub head: String, } -impl From for LesProtocolInfo { - fn from(info: ethsync::LesProtocolInfo) -> Self { - LesProtocolInfo { +impl From for PipProtocolInfo { + fn from(info: ethsync::PipProtocolInfo) -> Self { + PipProtocolInfo { version: info.version, difficulty: info.difficulty.into(), head: info.head.hex(), @@ -171,7 +171,7 @@ impl From for PeerInfo { }, protocols: PeerProtocolsInfo { eth: p.eth_info.map(Into::into), - les: p.les_info.map(Into::into), + pip: p.pip_info.map(Into::into), }, } } diff --git a/scripts/cov.sh b/scripts/cov.sh index 13ab792c7..13d042905 100755 --- a/scripts/cov.sh +++ b/scripts/cov.sh @@ -32,7 +32,6 @@ $HOME/.cargo,\ $HOME/.multirust,\ rocksdb,\ secp256k1,\ -src/tests,\ util/json-tests,\ util/src/network/tests,\ ethcore/src/evm/tests,\ diff --git a/scripts/targets.sh b/scripts/targets.sh index 505875336..040485d85 100644 --- a/scripts/targets.sh +++ b/scripts/targets.sh @@ -5,7 +5,7 @@ export TARGETS=" -p ethash \ -p ethcore \ -p ethcore-bigint\ - -p ethcore-dapps \ + -p parity-dapps \ -p ethcore-rpc \ -p ethcore-signer \ -p ethcore-util \ diff --git a/secret_store/Cargo.toml b/secret_store/Cargo.toml index eff7c1ef0..539f15f1f 100644 --- a/secret_store/Cargo.toml +++ b/secret_store/Cargo.toml @@ -10,16 +10,29 @@ build = "build.rs" ethcore-ipc-codegen = { path = "../ipc/codegen" } [dependencies] +byteorder = "1.0" log = "0.3" parking_lot = "0.4" hyper = { version = "0.10", default-features = false } +serde = "0.9" +serde_json = "0.9" +serde_derive = "0.9" +futures = 
"0.1" +futures-cpupool = "0.1" +rustc-serialize = "0.3" +tokio-core = "0.1" +tokio-service = "0.1" +tokio-proto = "0.1" url = "1.0" +ethabi = "1.0.0" +ethcore = { path = "../ethcore" } ethcore-devtools = { path = "../devtools" } ethcore-util = { path = "../util" } ethcore-ipc = { path = "../ipc/rpc" } ethcore-ipc-nano = { path = "../ipc/nano" } ethcrypto = { path = "../ethcrypto" } ethkey = { path = "../ethkey" } +native-contracts = { path = "../ethcore/native_contracts" } [profile.release] debug = true diff --git a/secret_store/src/acl_storage.rs b/secret_store/src/acl_storage.rs index 47ec3d44a..fea45c920 100644 --- a/secret_store/src/acl_storage.rs +++ b/secret_store/src/acl_storage.rs @@ -14,38 +14,92 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::collections::{HashMap, HashSet}; -use parking_lot::RwLock; +use std::sync::Arc; +use futures::{future, Future}; +use parking_lot::Mutex; +use ethkey::public_to_address; +use ethcore::client::{Client, BlockChainClient, BlockId}; +use native_contracts::SecretStoreAclStorage; use types::all::{Error, DocumentAddress, Public}; +const ACL_CHECKER_CONTRACT_REGISTRY_NAME: &'static str = "secretstore_acl_checker"; + /// ACL storage of Secret Store pub trait AclStorage: Send + Sync { /// Check if requestor with `public` key can access document with hash `document` fn check(&self, public: &Public, document: &DocumentAddress) -> Result; } -/// Dummy ACL storage implementation -#[derive(Default, Debug)] -pub struct DummyAclStorage { - prohibited: RwLock>>, +/// On-chain ACL storage implementation. +pub struct OnChainAclStorage { + /// Blockchain client. + client: Arc, + /// On-chain contract. 
+ contract: Mutex>, } -impl DummyAclStorage { - #[cfg(test)] - /// Prohibit given requestor access to given document - pub fn prohibit(&self, public: Public, document: DocumentAddress) { - self.prohibited.write() - .entry(public) - .or_insert_with(Default::default) - .insert(document); +impl OnChainAclStorage { + pub fn new(client: Arc) -> Self { + OnChainAclStorage { + client: client, + contract: Mutex::new(None), + } } } -impl AclStorage for DummyAclStorage { +impl AclStorage for OnChainAclStorage { fn check(&self, public: &Public, document: &DocumentAddress) -> Result { - Ok(self.prohibited.read() - .get(public) - .map(|docs| !docs.contains(document)) - .unwrap_or(true)) + let mut contract = self.contract.lock(); + if !contract.is_some() { + *contract = self.client.registry_address(ACL_CHECKER_CONTRACT_REGISTRY_NAME.to_owned()) + .and_then(|contract_addr| { + trace!(target: "secretstore", "Configuring for ACL checker contract from {}", contract_addr); + + Some(SecretStoreAclStorage::new(contract_addr)) + }) + } + if let Some(ref contract) = *contract { + let address = public_to_address(&public); + let do_call = |a, d| future::done(self.client.call_contract(BlockId::Latest, a, d)); + contract.check_permissions(do_call, address, document.clone()) + .map_err(|err| Error::Internal(err)) + .wait() + } else { + Err(Error::Internal("ACL checker contract is not configured".to_owned())) + } + } +} + +#[cfg(test)] +pub mod tests { + use std::collections::{HashMap, HashSet}; + use parking_lot::RwLock; + use types::all::{Error, DocumentAddress, Public}; + use super::AclStorage; + + #[derive(Default, Debug)] + /// Dummy ACL storage implementation + pub struct DummyAclStorage { + prohibited: RwLock>>, + } + + impl DummyAclStorage { + #[cfg(test)] + /// Prohibit given requestor access to given document + pub fn prohibit(&self, public: Public, document: DocumentAddress) { + self.prohibited.write() + .entry(public) + .or_insert_with(Default::default) + .insert(document); + } + } 
+ + impl AclStorage for DummyAclStorage { + fn check(&self, public: &Public, document: &DocumentAddress) -> Result { + Ok(self.prohibited.read() + .get(public) + .map(|docs| !docs.contains(document)) + .unwrap_or(true)) + } } } diff --git a/secret_store/src/http_listener.rs b/secret_store/src/http_listener.rs index 92799d221..79fe71330 100644 --- a/secret_store/src/http_listener.rs +++ b/secret_store/src/http_listener.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -use std::str::FromStr; use std::sync::Arc; use hyper::header; use hyper::uri::RequestUri; @@ -39,7 +38,9 @@ pub struct KeyServerHttpListener { enum Request { /// Invalid request Invalid, - /// Request encryption key of given document for given requestor + /// Generate encryption key. + GenerateDocumentKey(DocumentAddress, RequestSignature, usize), + /// Request encryption key of given document for given requestor. GetDocumentKey(DocumentAddress, RequestSignature), } @@ -63,9 +64,9 @@ impl KeyServerHttpListener where T: KeyServer + 'static { handler: shared_handler.clone(), }; - let listener_addr: &str = &format!("{}:{}", config.listener_addr, config.listener_port); - let http_server = HttpServer::http(&listener_addr).unwrap(); - let http_server = http_server.handle(handler).unwrap(); + let listener_addr: &str = &format!("{}:{}", config.listener_address.address, config.listener_address.port); + let http_server = HttpServer::http(&listener_addr).expect("cannot start HttpServer"); + let http_server = http_server.handle(handler).expect("cannot start HttpServer"); let listener = KeyServerHttpListener { _http_server: http_server, handler: shared_handler, @@ -75,6 +76,10 @@ impl KeyServerHttpListener where T: KeyServer + 'static { } impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + 
self.handler.key_server.generate_document_key(signature, document, threshold) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { self.handler.key_server.document_key(signature, document) } @@ -82,95 +87,103 @@ impl KeyServer for KeyServerHttpListener where T: KeyServer + 'static { impl HttpHandler for KeyServerHttpHandler where T: KeyServer + 'static { fn handle(&self, req: HttpRequest, mut res: HttpResponse) { - if req.method != HttpMethod::Get { - warn!(target: "secretstore", "Ignoring {}-request {}", req.method, req.uri); - *res.status_mut() = HttpStatusCode::NotFound; - return; - } - if req.headers.has::() { warn!(target: "secretstore", "Ignoring {}-request {} with Origin header", req.method, req.uri); *res.status_mut() = HttpStatusCode::NotFound; return; } - match req.uri { - RequestUri::AbsolutePath(ref path) => match parse_request(&path) { - Request::GetDocumentKey(document, signature) => { - let document_key = self.handler.key_server.document_key(&signature, &document) + let req_method = req.method.clone(); + let req_uri = req.uri.clone(); + match &req_uri { + &RequestUri::AbsolutePath(ref path) => match parse_request(&req_method, &path) { + Request::GenerateDocumentKey(document, signature, threshold) => { + return_document_key(req, res, self.handler.key_server.generate_document_key(&signature, &document, threshold) .map_err(|err| { - warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req.uri, err); + warn!(target: "secretstore", "GenerateDocumentKey request {} has failed with: {}", req_uri, err); err - }); - match document_key { - Ok(document_key) => { - let document_key = document_key.to_hex().into_bytes(); - res.headers_mut().set(header::ContentType::plaintext()); - if let Err(err) = res.send(&document_key) { - // nothing to do, but log error - warn!(target: "secretstore", "GetDocumentKey request {} response has failed with: {}", req.uri, err); - } - }, - Err(Error::BadSignature) 
=> *res.status_mut() = HttpStatusCode::BadRequest, - Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, - Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, - Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - Err(Error::Internal(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, - } + })); + }, + Request::GetDocumentKey(document, signature) => { + return_document_key(req, res, self.handler.key_server.document_key(&signature, &document) + .map_err(|err| { + warn!(target: "secretstore", "GetDocumentKey request {} has failed with: {}", req_uri, err); + err + })); }, Request::Invalid => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); *res.status_mut() = HttpStatusCode::BadRequest; }, }, _ => { - warn!(target: "secretstore", "Ignoring invalid {}-request {}", req.method, req.uri); + warn!(target: "secretstore", "Ignoring invalid {}-request {}", req_method, req_uri); *res.status_mut() = HttpStatusCode::NotFound; }, }; } } -fn parse_request(uri_path: &str) -> Request { +fn return_document_key(req: HttpRequest, mut res: HttpResponse, document_key: Result) { + match document_key { + Ok(document_key) => { + let document_key = document_key.to_hex().into_bytes(); + res.headers_mut().set(header::ContentType::plaintext()); + if let Err(err) = res.send(&document_key) { + // nothing to do, but to log an error + warn!(target: "secretstore", "response to request {} has failed with: {}", req.uri, err); + } + }, + Err(Error::BadSignature) => *res.status_mut() = HttpStatusCode::BadRequest, + Err(Error::AccessDenied) => *res.status_mut() = HttpStatusCode::Forbidden, + Err(Error::DocumentNotFound) => *res.status_mut() = HttpStatusCode::NotFound, + Err(Error::Database(_)) => *res.status_mut() = HttpStatusCode::InternalServerError, + Err(Error::Internal(_)) => 
*res.status_mut() = HttpStatusCode::InternalServerError, + } +} + +fn parse_request(method: &HttpMethod, uri_path: &str) -> Request { let uri_path = match percent_decode(uri_path.as_bytes()).decode_utf8() { Ok(path) => path, Err(_) => return Request::Invalid, }; let path: Vec = uri_path.trim_left_matches('/').split('/').map(Into::into).collect(); - if path.len() != 2 || path[0].is_empty() || path[1].is_empty() { + if path.len() < 2 || path[0].is_empty() || path[1].is_empty() { return Request::Invalid; } - let document = DocumentAddress::from_str(&path[0]); - let signature = RequestSignature::from_str(&path[1]); - match (document, signature) { - (Ok(document), Ok(signature)) => Request::GetDocumentKey(document, signature), + let args_len = path.len(); + let document = path[0].parse(); + let signature = path[1].parse(); + let threshold = (if args_len > 2 { &path[2] } else { "" }).parse(); + match (args_len, method, document, signature, threshold) { + (3, &HttpMethod::Post, Ok(document), Ok(signature), Ok(threshold)) => Request::GenerateDocumentKey(document, signature, threshold), + (2, &HttpMethod::Get, Ok(document), Ok(signature), _) => Request::GetDocumentKey(document, signature), _ => Request::Invalid, } } #[cfg(test)] mod tests { - use std::str::FromStr; - use super::super::RequestSignature; + use hyper::method::Method as HttpMethod; use super::{parse_request, Request}; #[test] fn parse_request_successful() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), 
Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); - assert_eq!(parse_request("/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); + assert_eq!(parse_request(&HttpMethod::Get, "/%30000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01"), Request::GetDocumentKey("0000000000000000000000000000000000000000000000000000000000000001".into(), - RequestSignature::from_str("a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01").unwrap())); + "a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01".parse().unwrap())); } #[test] fn parse_request_failed() { - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); - assert_eq!(parse_request("/a/b"), Request::Invalid); - assert_eq!(parse_request("/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); + 
assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/a/b"), Request::Invalid); + assert_eq!(parse_request(&HttpMethod::Get, "/0000000000000000000000000000000000000000000000000000000000000001/a199fb39e11eefb61c78a4074a53c0d4424600a3e74aad4fb9d93a26c30d067e1d4d29936de0c73f19827394a1dd049480a0d581aee7ae7546968da7d3d1c2fd01/0000000000000000000000000000000000000000000000000000000000000002"), Request::Invalid); } } diff --git a/secret_store/src/key_server.rs b/secret_store/src/key_server.rs index 32ac48031..598f06338 100644 --- a/secret_store/src/key_server.rs +++ b/secret_store/src/key_server.rs @@ -14,42 +14,78 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use std::thread; +use std::sync::Arc; +use std::sync::mpsc; +use futures::{self, Future}; +use parking_lot::Mutex; +use tokio_core::reactor::Core; use ethcrypto; use ethkey; use super::acl_storage::AclStorage; use super::key_storage::KeyStorage; +use key_server_cluster::ClusterCore; use traits::KeyServer; -use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey}; +use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey, ClusterConfiguration}; +use key_server_cluster::{ClusterClient, ClusterConfiguration as NetClusterConfiguration}; /// Secret store key server implementation -pub struct KeyServerImpl { - acl_storage: T, - key_storage: U, +pub struct KeyServerImpl { + data: Arc>, } -impl KeyServerImpl where T: AclStorage, U: KeyStorage { +/// Secret store key server data. 
+pub struct KeyServerCore { + close: Option>, + handle: Option>, + cluster: Option>, +} + +impl KeyServerImpl { /// Create new key server instance - pub fn new(acl_storage: T, key_storage: U) -> Self { - KeyServerImpl { - acl_storage: acl_storage, - key_storage: key_storage, - } + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + Ok(KeyServerImpl { + data: Arc::new(Mutex::new(KeyServerCore::new(config, acl_storage, key_storage)?)), + }) + } + + #[cfg(test)] + /// Get cluster client reference. + pub fn cluster(&self) -> Arc { + self.data.lock().cluster.clone() + .expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") } } -impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage { +impl KeyServer for KeyServerImpl { + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result { + // recover requestor' public key from signature + let public = ethkey::recover(signature, document) + .map_err(|_| Error::BadSignature)?; + + // generate document key + let data = self.data.lock(); + let encryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_encryption_session(document.clone(), threshold)?; + let document_key = encryption_session.wait()?; + + // encrypt document key with requestor public key + let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) + .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; + Ok(document_key) + } + fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result { // recover requestor' public key from signature let public = ethkey::recover(signature, document) .map_err(|_| Error::BadSignature)?; - // check that requestor has access to the document - if !self.acl_storage.check(&public, document)? 
{ - return Err(Error::AccessDenied); - } + // decrypt document key + let data = self.data.lock(); + let decryption_session = data.cluster.as_ref().expect("cluster can be None in test cfg only; test cfg is for correct tests; qed") + .new_decryption_session(document.clone(), signature.clone())?; + let document_key = decryption_session.wait()?; - // read unencrypted document key - let document_key = self.key_storage.get(document)?; // encrypt document key with requestor public key let document_key = ethcrypto::ecies::encrypt_single_message(&public, &document_key) .map_err(|err| Error::Internal(format!("Error encrypting document key: {}", err)))?; @@ -57,68 +93,132 @@ impl KeyServer for KeyServerImpl where T: AclStorage, U: KeyStorage } } +impl KeyServerCore { + pub fn new(config: &ClusterConfiguration, acl_storage: Arc, key_storage: Arc) -> Result { + let config = NetClusterConfiguration { + threads: config.threads, + self_key_pair: ethkey::KeyPair::from_secret_slice(&config.self_private)?, + listen_address: (config.listener_address.address.clone(), config.listener_address.port), + nodes: config.nodes.iter() + .map(|(node_id, node_address)| (node_id.clone(), (node_address.address.clone(), node_address.port))) + .collect(), + allow_connecting_to_higher_nodes: config.allow_connecting_to_higher_nodes, + encryption_config: config.encryption_config.clone(), + acl_storage: acl_storage, + key_storage: key_storage, + }; + + let (stop, stopped) = futures::oneshot(); + let (tx, rx) = mpsc::channel(); + let handle = thread::spawn(move || { + let mut el = match Core::new() { + Ok(el) => el, + Err(e) => { + tx.send(Err(Error::Internal(format!("error initializing event loop: {}", e)))).expect("Rx is blocking upper thread."); + return; + }, + }; + + let cluster = ClusterCore::new(el.handle(), config); + let cluster_client = cluster.and_then(|c| c.run().map(|_| c.client())); + tx.send(cluster_client.map_err(Into::into)).expect("Rx is blocking upper thread."); + let _ = 
el.run(futures::empty().select(stopped)); + }); + let cluster = rx.recv().map_err(|e| Error::Internal(format!("error initializing event loop: {}", e)))??; + + Ok(KeyServerCore { + close: Some(stop), + handle: Some(handle), + cluster: Some(cluster), + }) + } +} + +impl Drop for KeyServerCore { + fn drop(&mut self) { + self.close.take().map(|v| v.send(())); + self.handle.take().map(|h| h.join()); + } +} + #[cfg(test)] mod tests { - use std::str::FromStr; + use std::time; + use std::sync::Arc; use ethcrypto; - use ethkey::{self, Secret}; - use acl_storage::DummyAclStorage; - use key_storage::KeyStorage; + use ethkey::{self, Random, Generator}; + use acl_storage::tests::DummyAclStorage; use key_storage::tests::DummyKeyStorage; - use super::super::{Error, RequestSignature, DocumentAddress}; + use types::all::{ClusterConfiguration, NodeAddress, EncryptionConfiguration, DocumentEncryptedKey, DocumentKey}; + use super::super::{RequestSignature, DocumentAddress}; use super::{KeyServer, KeyServerImpl}; const DOCUMENT1: &'static str = "0000000000000000000000000000000000000000000000000000000000000001"; - const DOCUMENT2: &'static str = "0000000000000000000000000000000000000000000000000000000000000002"; - const KEY1: &'static str = "key1"; const PRIVATE1: &'static str = "03055e18a8434dcc9061cc1b81c4ef84dc7cf4574d755e52cdcf0c8898b25b11"; - const PUBLIC2: &'static str = "dfe62f56bb05fbd85b485bac749f3410309e24b352bac082468ce151e9ddb94fa7b5b730027fe1c7c5f3d5927621d269f91aceb5caa3c7fe944677a22f88a318"; - const PRIVATE2: &'static str = "0eb3816f4f705fa0fd952fb27b71b8c0606f09f4743b5b65cbc375bd569632f2"; - - fn create_key_server() -> KeyServerImpl { - let acl_storage = DummyAclStorage::default(); - let key_storage = DummyKeyStorage::default(); - key_storage.insert(DOCUMENT1.into(), KEY1.into()).unwrap(); - acl_storage.prohibit(PUBLIC2.into(), DOCUMENT1.into()); - KeyServerImpl::new(acl_storage, key_storage) - } fn make_signature(secret: &str, document: &'static str) -> RequestSignature 
{ - let secret = Secret::from_str(secret).unwrap(); + let secret = secret.parse().unwrap(); let document: DocumentAddress = document.into(); ethkey::sign(&secret, &document).unwrap() } - #[test] - fn document_key_succeeds() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); - let document_key = ethcrypto::ecies::decrypt_single_message(&Secret::from_str(PRIVATE1).unwrap(), &document_key); - assert_eq!(document_key, Ok(KEY1.into())); + fn decrypt_document_key(secret: &str, document_key: DocumentEncryptedKey) -> DocumentKey { + let secret = secret.parse().unwrap(); + ethcrypto::ecies::decrypt_single_message(&secret, &document_key).unwrap() } #[test] - fn document_key_fails_when_bad_signature() { - let key_server = create_key_server(); - let signature = RequestSignature::default(); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::BadSignature)); - } + fn document_key_generation_and_retrievement_works_over_network() { + //::util::log::init_log(); - #[test] - fn document_key_fails_when_acl_check_fails() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE2, DOCUMENT1); - let document_key = key_server.document_key(&signature, &DOCUMENT1.into()); - assert_eq!(document_key, Err(Error::AccessDenied)); - } + let num_nodes = 3; + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let configs: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_private: (***key_pairs[i].secret()).into(), + listener_address: NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (i as u16), + }, + nodes: key_pairs.iter().enumerate().map(|(j, kp)| (kp.public().clone(), + NodeAddress { + address: "127.0.0.1".into(), + port: 6060 + (j as u16), + })).collect(), + allow_connecting_to_higher_nodes: false, + 
encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + }).collect(); + let key_servers: Vec<_> = configs.into_iter().map(|cfg| + KeyServerImpl::new(&cfg, Arc::new(DummyAclStorage::default()), Arc::new(DummyKeyStorage::default())).unwrap() + ).collect(); - #[test] - fn document_key_fails_when_document_not_found() { - let key_server = create_key_server(); - let signature = make_signature(PRIVATE1, DOCUMENT2); - let document_key = key_server.document_key(&signature, &DOCUMENT2.into()); - assert_eq!(document_key, Err(Error::DocumentNotFound)); + // wait until connections are established + let start = time::Instant::now(); + loop { + if key_servers.iter().all(|ks| ks.cluster().cluster_state().connected.len() == num_nodes - 1) { + break; + } + if time::Instant::now() - start > time::Duration::from_millis(30000) { + panic!("connections are not established in 30000ms"); + } + } + + let test_cases = [0, 1, 2]; + for threshold in &test_cases { + // generate document key + // TODO: it is an error that we can regenerate key for the same DOCUMENT + let signature = make_signature(PRIVATE1, DOCUMENT1); + let generated_key = key_servers[0].generate_document_key(&signature, &DOCUMENT1.into(), *threshold).unwrap(); + let generated_key = decrypt_document_key(PRIVATE1, generated_key); + + // now let's try to retrieve key back + for key_server in key_servers.iter() { + let retrieved_key = key_server.document_key(&signature, &DOCUMENT1.into()).unwrap(); + let retrieved_key = decrypt_document_key(PRIVATE1, retrieved_key); + assert_eq!(retrieved_key, generated_key); + } + } } } diff --git a/secret_store/src/key_server_cluster/cluster.rs b/secret_store/src/key_server_cluster/cluster.rs index 5f6c99808..388a79aef 100644 --- a/secret_store/src/key_server_cluster/cluster.rs +++ b/secret_store/src/key_server_cluster/cluster.rs @@ -14,11 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use key_server_cluster::{Error, NodeId}; -use key_server_cluster::message::Message; +use std::io; +use std::time; +use std::sync::Arc; +use std::collections::{BTreeMap, BTreeSet, VecDeque}; +use std::collections::btree_map::Entry; +use std::net::{SocketAddr, IpAddr}; +use futures::{finished, failed, Future, Stream, BoxFuture}; +use futures_cpupool::CpuPool; +use parking_lot::{RwLock, Mutex}; +use tokio_core::io::IoFuture; +use tokio_core::reactor::{Handle, Remote, Timeout, Interval}; +use tokio_core::net::{TcpListener, TcpStream}; +use ethkey::{Secret, KeyPair, Signature, Random, Generator}; +use key_server_cluster::{Error, NodeId, SessionId, EncryptionConfiguration, AclStorage, KeyStorage}; +use key_server_cluster::message::{self, Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; +use key_server_cluster::decryption_session::{SessionImpl as DecryptionSessionImpl, DecryptionSessionId, + SessionParams as DecryptionSessionParams, Session as DecryptionSession}; +use key_server_cluster::encryption_session::{SessionImpl as EncryptionSessionImpl, SessionState as EncryptionSessionState, + SessionParams as EncryptionSessionParams, Session as EncryptionSession}; +use key_server_cluster::io::{DeadlineStatus, ReadMessage, SharedTcpStream, read_encrypted_message, WriteMessage, write_encrypted_message}; +use key_server_cluster::net::{accept_connection as net_accept_connection, connect as net_connect, Connection as NetConnection}; + +pub type BoxedEmptyFuture = BoxFuture<(), ()>; + +/// Cluster interface for external clients. +pub trait ClusterClient: Send + Sync { + /// Get cluster state. + fn cluster_state(&self) -> ClusterState; + /// Start new encryption session. + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error>; + /// Start new decryption session. 
+ fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error>; +} /// Cluster access for single encryption/decryption participant. -pub trait Cluster { +pub trait Cluster: Send + Sync { /// Broadcast message to all other nodes. fn broadcast(&self, message: Message) -> Result<(), Error>; /// Send message to given node. @@ -27,13 +58,841 @@ pub trait Cluster { fn blacklist(&self, node: &NodeId); } +#[derive(Clone)] +/// Cluster initialization parameters. +pub struct ClusterConfiguration { + /// Number of threads reserved by cluster. + pub threads: usize, + /// Allow connecting to 'higher' nodes. + pub allow_connecting_to_higher_nodes: bool, + /// KeyPair this node holds. + pub self_key_pair: KeyPair, + /// Interface to listen to. + pub listen_address: (String, u16), + /// Cluster nodes. + pub nodes: BTreeMap, + /// Encryption session configuration. + pub encryption_config: EncryptionConfiguration, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, +} + +/// Cluster state. +pub struct ClusterState { + /// Nodes, to which connections are established. + pub connected: BTreeSet, +} + +/// Network cluster implementation. +pub struct ClusterCore { + /// Handle to the event loop. + handle: Handle, + /// Listen address. + listen_address: SocketAddr, + /// Cluster data. + data: Arc, +} + +/// Network cluster client interface implementation. +pub struct ClusterClientImpl { + /// Cluster data. + data: Arc, +} + +/// Network cluster view. It is a communication channel, required in single session. +pub struct ClusterView { + core: Arc>, +} + +/// Cross-thread shareable cluster data. +pub struct ClusterData { + /// Cluster configuration. + config: ClusterConfiguration, + /// Handle to the event loop. + handle: Remote, + /// Handle to the cpu thread pool. + pool: CpuPool, + /// KeyPair this node holds. + self_key_pair: KeyPair, + /// Connections data. 
+ connections: ClusterConnections, + /// Active sessions data. + sessions: ClusterSessions, +} + +/// Connections that are forming the cluster. +pub struct ClusterConnections { + /// Self node id. + pub self_node_id: NodeId, + /// All known other key servers. + pub nodes: BTreeMap, + /// Active connections to key servers. + pub connections: RwLock>>, +} + +/// Active sessions on this cluster. +pub struct ClusterSessions { + /// Self node id. + pub self_node_id: NodeId, + /// Reference to key storage + pub key_storage: Arc, + /// Reference to ACL storage + pub acl_storage: Arc, + /// Active encryption sessions. + pub encryption_sessions: RwLock>, + /// Active decryption sessions. + pub decryption_sessions: RwLock>, +} + +/// Encryption session and its message queue. +pub struct QueuedEncryptionSession { + /// Encryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, EncryptionMessage)>, +} + +/// Decryption session and its message queue. +pub struct QueuedDecryptionSession { + /// Decryption session. + pub session: Arc, + /// Messages queue. + pub queue: VecDeque<(NodeId, DecryptionMessage)>, +} + +/// Cluster view core. +struct ClusterViewCore { + /// Cluster reference. + cluster: Arc, + /// Subset of nodes, required for this session. + nodes: BTreeSet, +} + +/// Connection to single node. +pub struct Connection { + /// Node id. + node_id: NodeId, + /// Node address. + node_address: SocketAddr, + /// Is inbound connection? + is_inbound: bool, + /// Tcp stream. + stream: SharedTcpStream, + /// Connection key. + key: Secret, + /// Last message time. 
+ last_message_time: Mutex, +} + +impl ClusterCore { + pub fn new(handle: Handle, config: ClusterConfiguration) -> Result, Error> { + let listen_address = make_socket_address(&config.listen_address.0, config.listen_address.1)?; + let connections = ClusterConnections::new(&config)?; + let sessions = ClusterSessions::new(&config); + let data = ClusterData::new(&handle, config, connections, sessions); + + Ok(Arc::new(ClusterCore { + handle: handle, + listen_address: listen_address, + data: data, + })) + } + + /// Create new client interface. + pub fn client(&self) -> Arc { + Arc::new(ClusterClientImpl::new(self.data.clone())) + } + + #[cfg(test)] + /// Get cluster configuration. + pub fn config(&self) -> &ClusterConfiguration { + &self.data.config + } + + #[cfg(test)] + /// Get connection to given node. + pub fn connection(&self, node: &NodeId) -> Option> { + self.data.connection(node) + } + + /// Run cluster + pub fn run(&self) -> Result<(), Error> { + // try to connect to every other peer + ClusterCore::connect_disconnected_nodes(self.data.clone()); + + // schedule maintain procedures + ClusterCore::schedule_maintain(&self.handle, self.data.clone()); + + // start listening for incoming connections + self.handle.spawn(ClusterCore::listen(&self.handle, self.data.clone(), self.listen_address.clone())?); + + Ok(()) + } + + /// Connect to peer. + fn connect(data: Arc, node_address: SocketAddr) { + data.handle.clone().spawn(move |handle| { + data.pool.clone().spawn(ClusterCore::connect_future(handle, data, node_address)) + }) + } + + /// Connect to socket using given context and handle. 
+ fn connect_future(handle: &Handle, data: Arc, node_address: SocketAddr) -> BoxedEmptyFuture { + let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); + net_connect(&node_address, handle, data.self_key_pair.clone(), disconnected_nodes) + .then(move |result| ClusterCore::process_connection_result(data, false, result)) + .then(|_| finished(())) + .boxed() + } + + /// Start listening for incoming connections. + fn listen(handle: &Handle, data: Arc, listen_address: SocketAddr) -> Result { + Ok(TcpListener::bind(&listen_address, &handle)? + .incoming() + .and_then(move |(stream, node_address)| { + ClusterCore::accept_connection(data.clone(), stream, node_address); + Ok(()) + }) + .for_each(|_| Ok(())) + .then(|_| finished(())) + .boxed()) + } + + /// Accept connection. + fn accept_connection(data: Arc, stream: TcpStream, node_address: SocketAddr) { + data.handle.clone().spawn(move |handle| { + data.pool.clone().spawn(ClusterCore::accept_connection_future(handle, data, stream, node_address)) + }) + } + + /// Accept connection future. + fn accept_connection_future(handle: &Handle, data: Arc, stream: TcpStream, node_address: SocketAddr) -> BoxedEmptyFuture { + let disconnected_nodes = data.connections.disconnected_nodes().keys().cloned().collect(); + net_accept_connection(node_address, stream, handle, data.self_key_pair.clone(), disconnected_nodes) + .then(move |result| ClusterCore::process_connection_result(data, true, result)) + .then(|_| finished(())) + .boxed() + } + + /// Schedule mainatain procedures. 
+ fn schedule_maintain(handle: &Handle, data: Arc) { + // TODO: per-session timeouts (node can respond to messages, but ignore sessions messages) + let (d1, d2, d3) = (data.clone(), data.clone(), data.clone()); + let interval: BoxedEmptyFuture = Interval::new(time::Duration::new(10, 0), handle) + .expect("failed to create interval") + .and_then(move |_| Ok(trace!(target: "secretstore_net", "{}: executing maintain procedures", d1.self_key_pair.public()))) + .and_then(move |_| Ok(ClusterCore::keep_alive(d2.clone()))) + .and_then(move |_| Ok(ClusterCore::connect_disconnected_nodes(d3.clone()))) + .for_each(|_| Ok(())) + .then(|_| finished(())) + .boxed(); + + data.spawn(interval); + } + + /// Called for every incomming mesage. + fn process_connection_messages(data: Arc, connection: Arc) -> IoFuture> { + connection + .read_message() + .then(move |result| + match result { + Ok((_, Ok(message))) => { + ClusterCore::process_connection_message(data.clone(), connection.clone(), message); + // continue serving connection + data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); + finished(Ok(())).boxed() + }, + Ok((_, Err(err))) => { + warn!(target: "secretstore_net", "{}: protocol error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + // continue serving connection + data.spawn(ClusterCore::process_connection_messages(data.clone(), connection)); + finished(Err(err)).boxed() + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: network error {} when reading message from node {}", data.self_key_pair.public(), err, connection.node_id()); + // close connection + data.connections.remove(connection.node_id(), connection.is_inbound()); + failed(err).boxed() + }, + } + ).boxed() + } + + /// Send keepalive messages to every othe node. 
+ fn keep_alive(data: Arc) { + for connection in data.connections.active_connections() { + let last_message_diff = time::Instant::now() - connection.last_message_time(); + if last_message_diff > time::Duration::from_secs(60) { + data.connections.remove(connection.node_id(), connection.is_inbound()); + data.sessions.on_connection_timeout(connection.node_id()); + } + else if last_message_diff > time::Duration::from_secs(30) { + data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAlive(message::KeepAlive {})))); + } + } + } + + /// Try to connect to every disconnected node. + fn connect_disconnected_nodes(data: Arc) { + for (node_id, node_address) in data.connections.disconnected_nodes() { + if data.config.allow_connecting_to_higher_nodes || data.self_key_pair.public() < &node_id { + ClusterCore::connect(data.clone(), node_address); + } + } + } + + /// Process connection future result. + fn process_connection_result(data: Arc, is_inbound: bool, result: Result>, io::Error>) -> IoFuture> { + match result { + Ok(DeadlineStatus::Meet(Ok(connection))) => { + let connection = Connection::new(is_inbound, connection); + if data.connections.insert(connection.clone()) { + ClusterCore::process_connection_messages(data.clone(), connection) + } else { + finished(Ok(())).boxed() + } + }, + Ok(DeadlineStatus::Meet(Err(_))) => { + finished(Ok(())).boxed() + }, + Ok(DeadlineStatus::Timeout) => { + finished(Ok(())).boxed() + }, + Err(_) => { + // network error + finished(Ok(())).boxed() + }, + } + } + + /// Process single message from the connection. 
+ fn process_connection_message(data: Arc, connection: Arc, message: Message) { + connection.set_last_message_time(time::Instant::now()); + trace!(target: "secretstore_net", "{}: processing message {} from {}", data.self_key_pair.public(), message, connection.node_id()); + match message { + Message::Encryption(message) => ClusterCore::process_encryption_message(data, connection, message), + Message::Decryption(message) => ClusterCore::process_decryption_message(data, connection, message), + Message::Cluster(message) => ClusterCore::process_cluster_message(data, connection, message), + } + } + + /// Process single encryption message from the connection. + fn process_encryption_message(data: Arc, connection: Arc, mut message: EncryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let key_check_timeout_ms = data.config.encryption_config.key_check_timeout_ms; + loop { + let result = match message { + EncryptionMessage::InitializeSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + let session_id: SessionId = message.session.clone().into(); + data.sessions.new_encryption_session(sender.clone(), session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + EncryptionMessage::ConfirmInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + EncryptionMessage::CompleteInitialization(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complete_initialization(sender.clone(), message)), + EncryptionMessage::KeysDissemination(ref message) => 
data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + // TODO: move this logic to session (or session connector) + let is_in_key_check_state = s.state() == EncryptionSessionState::KeyCheck; + let result = s.on_keys_dissemination(sender.clone(), message); + if !is_in_key_check_state && s.state() == EncryptionSessionState::KeyCheck { + let session = s.clone(); + let d = data.clone(); + data.handle.spawn(move |handle| + Timeout::new(time::Duration::new(key_check_timeout_ms / 1000, 0), handle) + .expect("failed to create timeout") + .and_then(move |_| { + if let Err(error) = session.start_key_generation_phase() { + session.on_session_error(d.self_key_pair.public().clone(), &message::SessionError { + session: session.id().clone().into(), + error: error.into(), + }); + } + Ok(()) + }) + .then(|_| finished(())) + ); + } + + result + }), + EncryptionMessage::Complaint(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint(sender.clone(), message)), + EncryptionMessage::ComplaintResponse(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_complaint_response(sender.clone(), message)), + EncryptionMessage::PublicKeyShare(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_public_key_share(sender.clone(), message)), + EncryptionMessage::SessionError(ref message) => { + if let Some(s) = data.sessions.encryption_session(&*message.session) { + data.sessions.remove_encryption_session(s.id()); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + EncryptionMessage::SessionCompleted(ref message) => data.sessions.encryption_session(&*message.session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| { + let result = s.on_session_completed(sender.clone(), message); + if result.is_ok() && s.state() == 
EncryptionSessionState::Finished { + data.sessions.remove_encryption_session(s.id()); + } + + result + }), + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_encryption_message(&session_id, sender, message, is_queued_message); + break; + }, + Err(err) => { + warn!(target: "secretstore_net", "{}: error {} when processing message {} from node {}", data.self_key_pair.public(), err, message, sender); + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Encryption(EncryptionMessage::SessionError(message::SessionError { + session: session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_encryption_session(&session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_encryption_message(&session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single decryption message from the connection. 
+ fn process_decryption_message(data: Arc, connection: Arc, mut message: DecryptionMessage) { + let mut sender = connection.node_id().clone(); + let mut is_queued_message = false; + let session_id = message.session_id().clone(); + let sub_session_id = message.sub_session_id().clone(); + loop { + let result = match message { + DecryptionMessage::InitializeDecryptionSession(ref message) => { + let mut connected_nodes = data.connections.connected_nodes(); + connected_nodes.insert(data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(data.clone(), connected_nodes)); + data.sessions.new_decryption_session(sender.clone(), session_id.clone(), sub_session_id.clone(), cluster) + .and_then(|s| s.on_initialize_session(sender.clone(), message)) + }, + DecryptionMessage::ConfirmDecryptionInitialization(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_confirm_initialization(sender.clone(), message)), + DecryptionMessage::RequestPartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption_requested(sender.clone(), message)), + DecryptionMessage::PartialDecryption(ref message) => data.sessions.decryption_session(&*message.session, &*message.sub_session) + .ok_or(Error::InvalidSessionId) + .and_then(|s| s.on_partial_decryption(sender.clone(), message)), + DecryptionMessage::DecryptionSessionError(ref message) => { + if let Some(s) = data.sessions.decryption_session(&*message.session, &*message.sub_session) { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + s.on_session_error(sender.clone(), message); + } + Ok(()) + }, + }; + + match result { + Err(Error::TooEarlyForRequest) => { + data.sessions.enqueue_decryption_message(&session_id, &sub_session_id, sender, message, is_queued_message); + break; + }, + Err(err) => 
{ + if let Some(connection) = data.connections.get(&sender) { + data.spawn(connection.send_message(Message::Decryption(DecryptionMessage::DecryptionSessionError(message::DecryptionSessionError { + session: session_id.clone().into(), + sub_session: sub_session_id.clone().into(), + error: format!("{:?}", err), + })))); + } + + if err != Error::InvalidSessionId { + data.sessions.remove_decryption_session(&session_id, &sub_session_id); + } + break; + }, + _ => { + match data.sessions.dequeue_decryption_message(&session_id, &sub_session_id) { + Some((msg_sender, msg)) => { + is_queued_message = true; + sender = msg_sender; + message = msg; + }, + None => break, + } + }, + } + } + } + + /// Process single cluster message from the connection. + fn process_cluster_message(data: Arc, connection: Arc, message: ClusterMessage) { + match message { + ClusterMessage::KeepAlive(_) => data.spawn(connection.send_message(Message::Cluster(ClusterMessage::KeepAliveResponse(message::KeepAliveResponse {})))), + ClusterMessage::KeepAliveResponse(_) => (), + _ => warn!(target: "secretstore_net", "{}: received unexpected message {} from node {} at {}", data.self_key_pair.public(), message, connection.node_id(), connection.node_address()), + } + } +} + +impl ClusterConnections { + pub fn new(config: &ClusterConfiguration) -> Result { + let mut connections = ClusterConnections { + self_node_id: config.self_key_pair.public().clone(), + nodes: BTreeMap::new(), + connections: RwLock::new(BTreeMap::new()), + }; + + for (node_id, &(ref node_addr, node_port)) in config.nodes.iter().filter(|&(node_id, _)| node_id != config.self_key_pair.public()) { + let socket_address = make_socket_address(&node_addr, node_port)?; + connections.nodes.insert(node_id.clone(), socket_address); + } + + Ok(connections) + } + + pub fn cluster_state(&self) -> ClusterState { + ClusterState { + connected: self.connections.read().keys().cloned().collect(), + } + } + + pub fn get(&self, node: &NodeId) -> Option> { + 
self.connections.read().get(node).cloned() + } + + pub fn insert(&self, connection: Arc) -> bool { + let mut connections = self.connections.write(); + if connections.contains_key(connection.node_id()) { + // we have already connected to the same node + // the agreement is that node with lower id must establish connection to node with higher id + if (&self.self_node_id < connection.node_id() && connection.is_inbound()) + || (&self.self_node_id > connection.node_id() && !connection.is_inbound()) { + return false; + } + } + trace!(target: "secretstore_net", "{}: inserting connection to {} at {}", self.self_node_id, connection.node_id(), connection.node_address()); + connections.insert(connection.node_id().clone(), connection); + true + } + + pub fn remove(&self, node: &NodeId, is_inbound: bool) { + let mut connections = self.connections.write(); + if let Entry::Occupied(entry) = connections.entry(node.clone()) { + if entry.get().is_inbound() != is_inbound { + return; + } + + trace!(target: "secretstore_net", "{}: removing connection to {} at {}", self.self_node_id, entry.get().node_id(), entry.get().node_address()); + entry.remove_entry(); + } + } + + pub fn connected_nodes(&self) -> BTreeSet { + self.connections.read().keys().cloned().collect() + } + + pub fn active_connections(&self)-> Vec> { + self.connections.read().values().cloned().collect() + } + + pub fn disconnected_nodes(&self) -> BTreeMap { + let connections = self.connections.read(); + self.nodes.iter() + .filter(|&(node_id, _)| !connections.contains_key(node_id)) + .map(|(node_id, node_address)| (node_id.clone(), node_address.clone())) + .collect() + } +} + +impl ClusterSessions { + pub fn new(config: &ClusterConfiguration) -> Self { + ClusterSessions { + self_node_id: config.self_key_pair.public().clone(), + acl_storage: config.acl_storage.clone(), + key_storage: config.key_storage.clone(), + encryption_sessions: RwLock::new(BTreeMap::new()), + decryption_sessions: RwLock::new(BTreeMap::new()), + } + } + 
+ pub fn new_encryption_session(&self, _master: NodeId, session_id: SessionId, cluster: Arc) -> Result, Error> { + let mut encryption_sessions = self.encryption_sessions.write(); + if encryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(EncryptionSessionImpl::new(EncryptionSessionParams { + id: session_id.clone(), + self_node_id: self.self_node_id.clone(), + key_storage: self.key_storage.clone(), + cluster: cluster, + })); + let encryption_session = QueuedEncryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + encryption_sessions.insert(session_id, encryption_session); + Ok(session) + } + + pub fn remove_encryption_session(&self, session_id: &SessionId) { + self.encryption_sessions.write().remove(session_id); + } + + pub fn encryption_session(&self, session_id: &SessionId) -> Option> { + self.encryption_sessions.read().get(session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_encryption_message(&self, session_id: &SessionId, sender: NodeId, message: EncryptionMessage, is_queued_message: bool) { + self.encryption_sessions.write().get_mut(session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_encryption_message(&self, session_id: &SessionId) -> Option<(NodeId, EncryptionMessage)> { + self.encryption_sessions.write().get_mut(session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn new_decryption_session(&self, _master: NodeId, session_id: SessionId, sub_session_id: Secret, cluster: Arc) -> Result, Error> { + let mut decryption_sessions = self.decryption_sessions.write(); + let session_id = DecryptionSessionId::new(session_id, sub_session_id); + if decryption_sessions.contains_key(&session_id) { + return Err(Error::DuplicateSessionId); + } + + let session = Arc::new(DecryptionSessionImpl::new(DecryptionSessionParams { + id: 
session_id.id.clone(), + access_key: session_id.access_key.clone(), + self_node_id: self.self_node_id.clone(), + encrypted_data: self.key_storage.get(&session_id.id).map_err(|e| Error::KeyStorage(e.into()))?, + acl_storage: self.acl_storage.clone(), + cluster: cluster, + })?); + let decryption_session = QueuedDecryptionSession { + session: session.clone(), + queue: VecDeque::new() + }; + decryption_sessions.insert(session_id, decryption_session); + Ok(session) + } + + pub fn remove_decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().remove(&session_id); + } + + pub fn decryption_session(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.read().get(&session_id).map(|s| s.session.clone()) + } + + pub fn enqueue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret, sender: NodeId, message: DecryptionMessage, is_queued_message: bool) { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .map(|session| if is_queued_message { session.queue.push_front((sender, message)) } + else { session.queue.push_back((sender, message)) }); + } + + pub fn dequeue_decryption_message(&self, session_id: &SessionId, sub_session_id: &Secret) -> Option<(NodeId, DecryptionMessage)> { + let session_id = DecryptionSessionId::new(session_id.clone(), sub_session_id.clone()); + self.decryption_sessions.write().get_mut(&session_id) + .and_then(|session| session.queue.pop_front()) + } + + pub fn on_connection_timeout(&self, node_id: &NodeId) { + for encryption_session in self.encryption_sessions.read().values() { + encryption_session.session.on_session_timeout(node_id); + } + for decryption_session in 
self.decryption_sessions.read().values() { + decryption_session.session.on_session_timeout(node_id); + } + } +} + +impl ClusterData { + pub fn new(handle: &Handle, config: ClusterConfiguration, connections: ClusterConnections, sessions: ClusterSessions) -> Arc { + Arc::new(ClusterData { + handle: handle.remote().clone(), + pool: CpuPool::new(config.threads), + self_key_pair: config.self_key_pair.clone(), + connections: connections, + sessions: sessions, + config: config, + }) + } + + /// Get connection to given node. + pub fn connection(&self, node: &NodeId) -> Option> { + self.connections.get(node) + } + + /// Spawns a future using thread pool and schedules execution of it with event loop handle. + pub fn spawn(&self, f: F) where F: Future + Send + 'static, F::Item: Send + 'static, F::Error: Send + 'static { + let pool_work = self.pool.spawn(f); + self.handle.spawn(move |_handle| { + pool_work.then(|_| finished(())) + }) + } +} + +impl Connection { + pub fn new(is_inbound: bool, connection: NetConnection) -> Arc { + Arc::new(Connection { + node_id: connection.node_id, + node_address: connection.address, + is_inbound: is_inbound, + stream: connection.stream, + key: connection.key, + last_message_time: Mutex::new(time::Instant::now()), + }) + } + + pub fn is_inbound(&self) -> bool { + self.is_inbound + } + + pub fn node_id(&self) -> &NodeId { + &self.node_id + } + + pub fn last_message_time(&self) -> time::Instant { + *self.last_message_time.lock() + } + + pub fn set_last_message_time(&self, last_message_time: time::Instant) { + *self.last_message_time.lock() = last_message_time; + } + + pub fn node_address(&self) -> &SocketAddr { + &self.node_address + } + + pub fn send_message(&self, message: Message) -> WriteMessage { + write_encrypted_message(self.stream.clone(), &self.key, message) + } + + pub fn read_message(&self) -> ReadMessage { + read_encrypted_message(self.stream.clone(), self.key.clone()) + } +} + +impl ClusterView { + pub fn new(cluster: Arc, nodes: 
BTreeSet) -> Self { + ClusterView { + core: Arc::new(Mutex::new(ClusterViewCore { + cluster: cluster, + nodes: nodes, + })), + } + } +} + +impl Cluster for ClusterView { + fn broadcast(&self, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + for node in core.nodes.iter().filter(|n| *n != core.cluster.self_key_pair.public()) { + let connection = core.cluster.connection(node).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message.clone())) + } + Ok(()) + } + + fn send(&self, to: &NodeId, message: Message) -> Result<(), Error> { + let core = self.core.lock(); + let connection = core.cluster.connection(to).ok_or(Error::NodeDisconnected)?; + core.cluster.spawn(connection.send_message(message)); + Ok(()) + } + + fn blacklist(&self, _node: &NodeId) { + // TODO: unimplemented!() + } +} + +impl ClusterClientImpl { + pub fn new(data: Arc) -> Self { + ClusterClientImpl { + data: data, + } + } +} + +impl ClusterClient for ClusterClientImpl { + fn cluster_state(&self) -> ClusterState { + self.data.connections.cluster_state() + } + + fn new_encryption_session(&self, session_id: SessionId, threshold: usize) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); + let session = self.data.sessions.new_encryption_session(self.data.self_key_pair.public().clone(), session_id, cluster)?; + session.initialize(threshold, connected_nodes)?; + Ok(session) + } + + fn new_decryption_session(&self, session_id: SessionId, requestor_signature: Signature) -> Result, Error> { + let mut connected_nodes = self.data.connections.connected_nodes(); + connected_nodes.insert(self.data.self_key_pair.public().clone()); + + let access_key = Random.generate()?.secret().clone(); + let cluster = Arc::new(ClusterView::new(self.data.clone(), connected_nodes.clone())); 
+ let session = self.data.sessions.new_decryption_session(self.data.self_key_pair.public().clone(), session_id, access_key, cluster)?; + session.initialize(requestor_signature)?; + Ok(session) + } +} + +fn make_socket_address(address: &str, port: u16) -> Result { + let ip_address: IpAddr = address.parse().map_err(|_| Error::InvalidNodeAddress)?; + Ok(SocketAddr::new(ip_address, port)) +} + #[cfg(test)] pub mod tests { + use std::sync::Arc; + use std::time; use std::collections::VecDeque; use parking_lot::Mutex; - use key_server_cluster::{NodeId, Error}; + use tokio_core::reactor::Core; + use ethkey::{Random, Generator}; + use key_server_cluster::{NodeId, Error, EncryptionConfiguration, DummyAclStorage, DummyKeyStorage}; use key_server_cluster::message::Message; - use key_server_cluster::cluster::Cluster; + use key_server_cluster::cluster::{Cluster, ClusterCore, ClusterConfiguration}; #[derive(Debug)] pub struct DummyCluster { @@ -87,4 +946,61 @@ pub mod tests { fn blacklist(&self, _node: &NodeId) { } } + + pub fn loop_until(core: &mut Core, timeout: time::Duration, predicate: F) where F: Fn() -> bool { + let start = time::Instant::now(); + loop { + core.turn(Some(time::Duration::from_millis(1))); + if predicate() { + break; + } + + if time::Instant::now() - start > timeout { + panic!("no result in {:?}", timeout); + } + } + } + + pub fn all_connections_established(cluster: &Arc) -> bool { + cluster.config().nodes.keys() + .filter(|p| *p != cluster.config().self_key_pair.public()) + .all(|p| cluster.connection(p).is_some()) + } + + pub fn make_clusters(core: &Core, ports_begin: u16, num_nodes: usize) -> Vec> { + let key_pairs: Vec<_> = (0..num_nodes).map(|_| Random.generate().unwrap()).collect(); + let cluster_params: Vec<_> = (0..num_nodes).map(|i| ClusterConfiguration { + threads: 1, + self_key_pair: key_pairs[i].clone(), + listen_address: ("127.0.0.1".to_owned(), ports_begin + i as u16), + nodes: key_pairs.iter().enumerate() + .map(|(j, kp)| (kp.public().clone(), 
("127.0.0.1".into(), ports_begin + j as u16))) + .collect(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + key_storage: Arc::new(DummyKeyStorage::default()), + acl_storage: Arc::new(DummyAclStorage::default()), + }).collect(); + let clusters: Vec<_> = cluster_params.into_iter().enumerate() + .map(|(_, params)| ClusterCore::new(core.handle(), params).unwrap()) + .collect(); + + clusters + } + + pub fn run_clusters(clusters: &[Arc]) { + for cluster in clusters { + cluster.run().unwrap(); + } + } + + #[test] + fn cluster_connects_to_other_nodes() { + let mut core = Core::new().unwrap(); + let clusters = make_clusters(&core, 6010, 3); + run_clusters(&clusters); + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + } } diff --git a/secret_store/src/key_server_cluster/decryption_session.rs b/secret_store/src/key_server_cluster/decryption_session.rs index d4160851e..652ed5c5a 100644 --- a/secret_store/src/key_server_cluster/decryption_session.rs +++ b/secret_store/src/key_server_cluster/decryption_session.rs @@ -14,15 +14,22 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::cmp::{Ord, PartialOrd, Ordering}; use std::collections::{BTreeSet, BTreeMap}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Mutex, Condvar}; use ethkey::{self, Secret, Public, Signature}; -use key_server_cluster::{Error, AclStorage, EncryptedData, NodeId, SessionId}; +use key_server_cluster::{Error, AclStorage, DocumentKeyShare, NodeId, SessionId}; use key_server_cluster::cluster::Cluster; use key_server_cluster::math; -use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmDecryptionInitialization, - RequestPartialDecryption, PartialDecryption}; +use key_server_cluster::message::{Message, DecryptionMessage, InitializeDecryptionSession, ConfirmDecryptionInitialization, + RequestPartialDecryption, PartialDecryption, DecryptionSessionError}; + +/// Decryption session API. +pub trait Session: Send + Sync + 'static { + /// Wait until session is completed. Returns distributely restored secret key. + fn wait(&self) -> Result; +} /// Distributed decryption session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -32,7 +39,7 @@ use key_server_cluster::message::{Message, InitializeDecryptionSession, ConfirmD /// 2) ACL check: all nodes which have received the request are querying ACL-contract to check if requestor has access to the document /// 3) partial decryption: every node which has succussfully checked access for the requestor do a partial decryption /// 4) decryption: master node receives all partial decryptions of the secret and restores the secret -pub struct Session { +pub struct SessionImpl { /// Encryption session id. id: SessionId, /// Decryption session access key. @@ -40,25 +47,36 @@ pub struct Session { /// Public identifier of this node. self_node_id: NodeId, /// Encrypted data. - encrypted_data: EncryptedData, + encrypted_data: DocumentKeyShare, /// ACL storate to check access to the resource. 
acl_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } -/// Session creation parameters -pub struct SessionParams { - /// Session identifier. +/// Decryption session Id. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DecryptionSessionId { + /// Encryption session id. pub id: SessionId, - /// Session access key. + /// Decryption session access key. + pub access_key: Secret, +} + +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// SessionImpl access key. pub access_key: Secret, /// Id of node, on which this session is running. pub self_node_id: Public, - /// Encrypted data (result of running encryption_session::Session). - pub encrypted_data: EncryptedData, + /// Encrypted data (result of running encryption_session::SessionImpl). + pub encrypted_data: DocumentKeyShare, /// ACL storage. pub acl_storage: Arc, /// Cluster @@ -91,16 +109,11 @@ struct SessionData { /// === Values, filled during final decryption === /// Decrypted secret - decrypted_secret: Option, -} - -#[derive(Debug)] -struct NodeData { - /// Node-generated shadow point. - shadow_point: Option, + decrypted_secret: Option>, } #[derive(Debug, Clone, PartialEq)] +/// Decryption session data. pub enum SessionState { /// Every node starts in this state. WaitingForInitialization, @@ -116,18 +129,19 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new decryption session. 
pub fn new(params: SessionParams) -> Result { check_encrypted_data(¶ms.self_node_id, ¶ms.encrypted_data)?; - Ok(Session { + Ok(SessionImpl { id: params.id, access_key: params.access_key, self_node_id: params.self_node_id, encrypted_data: params.encrypted_data, acl_storage: params.acl_storage, cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -146,19 +160,22 @@ impl Session { &self.self_node_id } + #[cfg(test)] /// Get this session access key. pub fn access_key(&self) -> &Secret { &self.access_key } + #[cfg(test)] /// Get current session state. pub fn state(&self) -> SessionState { self.data.lock().state.clone() } + #[cfg(test)] /// Get decrypted secret pub fn decrypted_secret(&self) -> Option { - self.data.lock().decrypted_secret.clone() + self.data.lock().decrypted_secret.clone().and_then(|r| r.ok()) } /// Initialize decryption session. @@ -188,17 +205,22 @@ impl Session { // not enough nodes => pass initialization message to all other nodes SessionState::WaitingForInitializationConfirm => { for node in self.encrypted_data.id_numbers.keys().filter(|n| *n != self.node()) { - self.cluster.send(node, Message::InitializeDecryptionSession(InitializeDecryptionSession { - session: self.id.clone(), - sub_session: self.access_key.clone(), - requestor_signature: requestor_signature.clone(), - }))?; + self.cluster.send(node, Message::Decryption(DecryptionMessage::InitializeDecryptionSession(InitializeDecryptionSession { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + requestor_signature: requestor_signature.clone().into(), + })))?; } }, // we can decrypt data on our own - SessionState::WaitingForPartialDecryption => unimplemented!(), + SessionState::WaitingForPartialDecryption => { + data.confirmed_nodes.insert(self.node().clone()); + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), 
self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data)?; + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + }, // we can not decrypt data - SessionState::Failed => (), + SessionState::Failed => self.completed.notify_all(), // cannot reach other states _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm state above; qed"), } @@ -207,9 +229,9 @@ impl Session { } /// When session initialization message is received. - pub fn on_initialize_session(&self, sender: NodeId, message: InitializeDecryptionSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeDecryptionSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -230,17 +252,17 @@ impl Session { // respond to master node data.master = Some(sender.clone()); - self.cluster.send(&sender, Message::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { - session: self.id.clone(), - sub_session: self.access_key.clone(), + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(ConfirmDecryptionInitialization { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), is_confirmed: is_requestor_allowed_to_read, - })) + }))) } /// When session initialization confirmation message is reeived. 
- pub fn on_confirm_initialization(&self, sender: NodeId, message: ConfirmDecryptionInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmDecryptionInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -260,37 +282,22 @@ impl Session { // we do not yet have enough nodes for decryption SessionState::WaitingForInitializationConfirm => Ok(()), // we have enough nodes for decryption - SessionState::WaitingForPartialDecryption => { - let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); - for node in data.confirmed_nodes.iter().filter(|n| n != &self.node()) { - self.cluster.send(node, Message::RequestPartialDecryption(RequestPartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - nodes: confirmed_nodes.clone(), - }))?; - } - - assert!(data.confirmed_nodes.remove(self.node())); - - let shadow_point = { - let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &data.confirmed_nodes, &self.access_key, &self.encrypted_data)? 
- }; - data.shadow_points.insert(self.node().clone(), shadow_point); - + SessionState::WaitingForPartialDecryption => + SessionImpl::start_waiting_for_partial_decryption(self.node().clone(), self.id.clone(), self.access_key.clone(), &self.cluster, &self.encrypted_data, &mut *data), + // we can not have enough nodes for decryption + SessionState::Failed => { + self.completed.notify_all(); Ok(()) }, - // we can not have enough nodes for decryption - SessionState::Failed => Ok(()), // cannot reach other states _ => unreachable!("process_initialization_response can change state to WaitingForPartialDecryption or Failed; checked that we are in WaitingForInitializationConfirm state above; qed"), } } /// When partial decryption is requested. - pub fn on_partial_decryption_requested(&self, sender: NodeId, message: RequestPartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption_requested(&self, sender: NodeId, message: &RequestPartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); // check message @@ -311,13 +318,13 @@ impl Session { // calculate shadow point let shadow_point = { let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryptionRequest follows initialization; qed"); - do_partial_decryption(self.node(), &requestor, &message.nodes, &self.access_key, &self.encrypted_data)? + do_partial_decryption(self.node(), &requestor, &message.nodes.iter().cloned().map(Into::into).collect(), &self.access_key, &self.encrypted_data)? 
}; - self.cluster.send(&sender, Message::PartialDecryption(PartialDecryption { - session: self.id.clone(), - sub_session: self.access_key.clone(), - shadow_point: shadow_point, - }))?; + self.cluster.send(&sender, Message::Decryption(DecryptionMessage::PartialDecryption(PartialDecryption { + session: self.id.clone().into(), + sub_session: self.access_key.clone().into(), + shadow_point: shadow_point.into(), + })))?; // update sate data.state = SessionState::Finished; @@ -326,9 +333,9 @@ impl Session { } /// When partial decryption is received. - pub fn on_partial_decryption(&self, sender: NodeId, message: PartialDecryption) -> Result<(), Error> { - debug_assert!(self.id == message.session); - debug_assert!(self.access_key == message.sub_session); + pub fn on_partial_decryption(&self, sender: NodeId, message: &PartialDecryption) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(self.access_key == *message.sub_session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -341,24 +348,113 @@ impl Session { if !data.confirmed_nodes.remove(&sender) { return Err(Error::InvalidStateForRequest); } - data.shadow_points.insert(sender, message.shadow_point); + data.shadow_points.insert(sender, message.shadow_point.clone().into()); // check if we have enough shadow points to decrypt the secret if data.shadow_points.len() != self.encrypted_data.threshold + 1 { return Ok(()); } + SessionImpl::do_decryption(self.access_key.clone(), &self.encrypted_data, &mut *data)?; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occured on another node. 
+ pub fn on_session_error(&self, sender: NodeId, message: &DecryptionSessionError) { + warn!("{}: decryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occured. + pub fn on_session_timeout(&self, _node: &NodeId) { + warn!("{}: decryption session timeout", self.node()); + let mut data = self.data.lock(); + // TODO: check that node is a part of decryption process + data.state = SessionState::Failed; + data.decrypted_secret = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + + fn start_waiting_for_partial_decryption(self_node_id: NodeId, session_id: SessionId, access_key: Secret, cluster: &Arc, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { + let confirmed_nodes: BTreeSet<_> = data.confirmed_nodes.clone(); + for node in data.confirmed_nodes.iter().filter(|n| n != &&self_node_id) { + cluster.send(node, Message::Decryption(DecryptionMessage::RequestPartialDecryption(RequestPartialDecryption { + session: session_id.clone().into(), + sub_session: access_key.clone().into(), + nodes: confirmed_nodes.iter().cloned().map(Into::into).collect(), + })))?; + } + + assert!(data.confirmed_nodes.remove(&self_node_id)); + + let shadow_point = { + let requestor = data.requestor.as_ref().expect("requestor public is filled during initialization; WaitingForPartialDecryption follows initialization; qed"); + do_partial_decryption(&self_node_id, &requestor, &data.confirmed_nodes, &access_key, &encrypted_data)? 
+ }; + data.shadow_points.insert(self_node_id.clone(), shadow_point); + + Ok(()) + } + + fn do_decryption(access_key: Secret, encrypted_data: &DocumentKeyShare, data: &mut SessionData) -> Result<(), Error> { // decrypt the secret using shadow points let joint_shadow_point = math::compute_joint_shadow_point(data.shadow_points.values())?; - let decrypted_secret = math::decrypt_with_joint_shadow(&self.access_key, &self.encrypted_data.encrypted_point, &joint_shadow_point)?; - data.decrypted_secret = Some(decrypted_secret); + let decrypted_secret = math::decrypt_with_joint_shadow(encrypted_data.threshold, &access_key, &encrypted_data.encrypted_point, &joint_shadow_point)?; + data.decrypted_secret = Some(Ok(decrypted_secret)); + + // switch to completed state data.state = SessionState::Finished; Ok(()) } } -fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) -> Result<(), Error> { +impl Session for SessionImpl { + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.decrypted_secret.is_some() { + self.completed.wait(&mut data); + } + + data.decrypted_secret.as_ref() + .expect("checked above or waited for completed; completed is only signaled when decrypted_secret.is_some(); qed") + .clone() + } +} + +impl DecryptionSessionId { + /// Create new decryption session Id. 
+ pub fn new(session_id: SessionId, sub_session_id: Secret) -> Self { + DecryptionSessionId { + id: session_id, + access_key: sub_session_id, + } + } +} + +impl PartialOrd for DecryptionSessionId { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + + +impl Ord for DecryptionSessionId { + fn cmp(&self, other: &Self) -> Ordering { + match self.id.cmp(&other.id) { + Ordering::Equal => self.access_key.cmp(&other.access_key), + r @ _ => r, + } + } +} + + +fn check_encrypted_data(self_node_id: &Public, encrypted_data: &DocumentKeyShare) -> Result<(), Error> { use key_server_cluster::encryption_session::{check_cluster_nodes, check_threshold}; let nodes = encrypted_data.id_numbers.keys().cloned().collect(); @@ -368,7 +464,7 @@ fn check_encrypted_data(self_node_id: &Public, encrypted_data: &EncryptedData) - Ok(()) } -fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { +fn process_initialization_response(encrypted_data: &DocumentKeyShare, data: &mut SessionData, node: &NodeId, check_result: bool) -> Result<(), Error> { if !data.requested_nodes.remove(node) { return Err(Error::InvalidMessage); } @@ -387,6 +483,7 @@ fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut Se // check if we still can receive enough confirmations to do a decryption? 
if encrypted_data.id_numbers.len() - data.rejected_nodes.len() < encrypted_data.threshold + 1 { + data.decrypted_secret = Some(Err(Error::AccessDenied)); data.state = SessionState::Failed; } }, @@ -395,7 +492,7 @@ fn process_initialization_response(encrypted_data: &EncryptedData, data: &mut Se Ok(()) } -fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &EncryptedData) -> Result { +fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants: &BTreeSet, access_key: &Secret, encrypted_data: &DocumentKeyShare) -> Result { let node_id_number = &encrypted_data.id_numbers[node]; let node_secret_share = &encrypted_data.secret_share; let other_id_numbers = participants.iter() @@ -409,43 +506,42 @@ fn do_partial_decryption(node: &NodeId, _requestor_public: &Public, participants #[cfg(test)] mod tests { use std::sync::Arc; - use std::str::FromStr; use std::collections::BTreeMap; - use super::super::super::acl_storage::DummyAclStorage; + use super::super::super::acl_storage::tests::DummyAclStorage; use ethkey::{self, Random, Generator, Public, Secret}; - use key_server_cluster::{NodeId, EncryptedData, SessionId, Error}; + use key_server_cluster::{NodeId, DocumentKeyShare, SessionId, Error}; use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::decryption_session::{Session, SessionParams, SessionState}; - use key_server_cluster::message::{self, Message}; + use key_server_cluster::decryption_session::{SessionImpl, SessionParams, SessionState}; + use key_server_cluster::message::{self, Message, DecryptionMessage}; const SECRET_PLAIN: &'static str = "d2b57ae7619e070af0af6bc8c703c0cd27814c54d5d6a999cacac0da34ede279ca0d9216e85991029e54e2f0c92ee0bd30237725fa765cbdbfc4529489864c5f"; - fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { + fn prepare_decryption_sessions() -> (Vec>, Vec>, Vec) { // prepare encrypted data + cluster configuration for scheme 
4-of-5 let session_id = SessionId::default(); let access_key = Random.generate().unwrap().secret().clone(); - let secret_shares = vec![ - Secret::from_str("834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec").unwrap(), - Secret::from_str("5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b").unwrap(), - Secret::from_str("71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9").unwrap(), - Secret::from_str("80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4").unwrap(), - Secret::from_str("c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad").unwrap(), + let secret_shares: Vec = vec![ + "834cb736f02d9c968dfaf0c37658a1d86ff140554fc8b59c9fdad5a8cf810eec".parse().unwrap(), + "5a3c1d90fafafa66bb808bcc464354a98b05e6b2c95b5f609d4511cdd1b17a0b".parse().unwrap(), + "71bf61e7848e08e3a8486c308ce521bdacfebcf9116a0151447eb301f3a2d0e9".parse().unwrap(), + "80c0e5e2bea66fa9b2e07f7ce09630a9563e8242446d5ee63221feb09c4338f4".parse().unwrap(), + "c06546b5669877ba579ca437a5602e89425c53808c708d44ccd6afcaa4610fad".parse().unwrap(), ]; let id_numbers: Vec<(NodeId, Secret)> = vec![ ("b486d3840218837b035c66196ecb15e6b067ca20101e11bd5e626288ab6806ecc70b8307012626bd512bad1559112d11d21025cef48cc7a1d2f3976da08f36c8".into(), - Secret::from_str("281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c").unwrap()), + "281b6bf43cb86d0dc7b98e1b7def4a80f3ce16d28d2308f934f116767306f06c".parse().unwrap()), ("1395568277679f7f583ab7c0992da35f26cde57149ee70e524e49bdae62db3e18eb96122501e7cbb798b784395d7bb5a499edead0706638ad056d886e56cf8fb".into(), - Secret::from_str("00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b").unwrap()), + "00125d85a05e5e63e214cb60fe63f132eec8a103aa29266b7e6e6c5b7597230b".parse().unwrap()), ("99e82b163b062d55a64085bacfd407bb55f194ba5fb7a1af9c34b84435455520f1372e0e650a4f91aed0058cb823f62146ccb5599c8d13372c300dea866b69fc".into(), - 
Secret::from_str("f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62").unwrap()), + "f43ac0fba42a5b6ed95707d2244659e89ba877b1c9b82c0d0a9dcf834e80fc62".parse().unwrap()), ("7e05df9dd077ec21ed4bc45c9fe9e0a43d65fa4be540630de615ced5e95cf5c3003035eb713317237d7667feeeb64335525158f5f7411f67aca9645169ea554c".into(), - Secret::from_str("5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f").unwrap()), + "5a324938dfb2516800487d25ab7289ba8ec38811f77c3df602e4e65e3c9acd9f".parse().unwrap()), ("321977760d1d8e15b047a309e4c7fe6f355c10bb5a06c68472b676926427f69f229024fa2692c10da167d14cdc77eb95d0fce68af0a0f704f0d3db36baa83bb2".into(), - Secret::from_str("12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8").unwrap()), + "12cf422d50002d04e52bd4906fd7f5f235f051ca36abfe37e061f8da248008d8".parse().unwrap()), ]; let common_point: Public = "6962be696e1bcbba8e64cc7fddf140f854835354b5804f3bb95ae5a2799130371b589a131bd39699ac7174ccb35fc4342dab05331202209582fc8f3a40916ab0".into(); let encrypted_point: Public = "b07031982bde9890e12eff154765f03c56c3ab646ad47431db5dd2d742a9297679c4c65b998557f8008469afd0c43d40b6c5f6c6a1c7354875da4115237ed87a".into(); - let encrypted_datas: Vec<_> = (0..5).map(|i| EncryptedData { + let encrypted_datas: Vec<_> = (0..5).map(|i| DocumentKeyShare { threshold: 3, id_numbers: id_numbers.clone().into_iter().collect(), secret_share: secret_shares[i].clone(), @@ -454,7 +550,7 @@ mod tests { }).collect(); let acl_storages: Vec<_> = (0..5).map(|_| Arc::new(DummyAclStorage::default())).collect(); let clusters: Vec<_> = (0..5).map(|i| Arc::new(DummyCluster::new(id_numbers.iter().nth(i).clone().unwrap().0))).collect(); - let sessions: Vec<_> = (0..5).map(|i| Session::new(SessionParams { + let sessions: Vec<_> = (0..5).map(|i| SessionImpl::new(SessionParams { id: session_id.clone(), access_key: access_key.clone(), self_node_id: id_numbers.iter().nth(i).clone().unwrap().0, @@ -466,11 +562,11 @@ mod tests { (clusters, acl_storages, sessions) } 
- fn do_messages_exchange(clusters: &[Arc], sessions: &[Session]) { + fn do_messages_exchange(clusters: &[Arc], sessions: &[SessionImpl]) { do_messages_exchange_until(clusters, sessions, |_, _, _| false); } - fn do_messages_exchange_until(clusters: &[Arc], sessions: &[Session], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { + fn do_messages_exchange_until(clusters: &[Arc], sessions: &[SessionImpl], mut cond: F) where F: FnMut(&NodeId, &NodeId, &Message) -> bool { while let Some((from, to, message)) = clusters.iter().filter_map(|c| c.take_message().map(|(to, msg)| (c.node(), to, msg))).next() { let session = &sessions[sessions.iter().position(|s| s.node() == &to).unwrap()]; if cond(&from, &to, &message) { @@ -478,10 +574,10 @@ mod tests { } match message { - Message::InitializeDecryptionSession(message) => session.on_initialize_session(from, message).unwrap(), - Message::ConfirmDecryptionInitialization(message) => session.on_confirm_initialization(from, message).unwrap(), - Message::RequestPartialDecryption(message) => session.on_partial_decryption_requested(from, message).unwrap(), - Message::PartialDecryption(message) => session.on_partial_decryption(from, message).unwrap(), + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(message)) => session.on_initialize_session(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(message)) => session.on_confirm_initialization(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(message)) => session.on_partial_decryption_requested(from, &message).unwrap(), + Message::Decryption(DecryptionMessage::PartialDecryption(message)) => session.on_partial_decryption(from, &message).unwrap(), _ => panic!("unexpected"), } } @@ -492,11 +588,11 @@ mod tests { let mut nodes = BTreeMap::new(); let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id, 
Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -517,11 +613,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 0, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -542,11 +638,11 @@ mod tests { let self_node_id = Random.generate().unwrap().public().clone(); nodes.insert(self_node_id.clone(), Random.generate().unwrap().secret().clone()); nodes.insert(Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()); - match Session::new(SessionParams { + match SessionImpl::new(SessionParams { id: SessionId::default(), access_key: Random.generate().unwrap().secret().clone(), self_node_id: self_node_id.clone(), - encrypted_data: EncryptedData { + encrypted_data: DocumentKeyShare { threshold: 2, id_numbers: nodes, secret_share: Random.generate().unwrap().secret().clone(), @@ -572,70 +668,70 @@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (_, _, sessions) = prepare_decryption_sessions(); assert_eq!(sessions[0].initialize(ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap()).unwrap(), ()); - 
assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[0].on_initialize_session(sessions[1].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), 
}).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), Error::InvalidStateForRequest); } #[test] fn fails_to_partial_decrypt_if_requested_by_slave() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(4).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[2].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(4).collect(), }).unwrap_err(), 
Error::InvalidMessage); } #[test] fn fails_to_partial_decrypt_if_wrong_number_of_nodes_participating() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), message::InitializeDecryptionSession { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap(), + assert_eq!(sessions[1].on_initialize_session(sessions[0].node().clone(), &message::InitializeDecryptionSession { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + requestor_signature: ethkey::sign(Random.generate().unwrap().secret(), &SessionId::default()).unwrap().into(), }).unwrap(), ()); - assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), message::RequestPartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - nodes: sessions.iter().map(|s| s.node().clone()).take(2).collect(), + assert_eq!(sessions[1].on_partial_decryption_requested(sessions[0].node().clone(), &message::RequestPartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + nodes: sessions.iter().map(|s| s.node().clone().into()).take(2).collect(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_partial_decrypt_if_not_waiting() { let (_, _, sessions) = prepare_decryption_sessions(); - assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), message::PartialDecryption { - session: SessionId::default(), - sub_session: sessions[0].access_key().clone(), - shadow_point: Random.generate().unwrap().public().clone(), + assert_eq!(sessions[0].on_partial_decryption(sessions[1].node().clone(), &message::PartialDecryption { + session: SessionId::default().into(), + sub_session: sessions[0].access_key().clone().into(), + shadow_point: 
Random.generate().unwrap().public().clone().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -647,7 +743,7 @@ mod tests { let mut pd_from = None; let mut pd_msg = None; do_messages_exchange_until(&clusters, &sessions, |from, _, msg| match msg { - &Message::PartialDecryption(ref msg) => { + &Message::Decryption(DecryptionMessage::PartialDecryption(ref msg)) => { pd_from = Some(from.clone()); pd_msg = Some(msg.clone()); true @@ -655,8 +751,8 @@ mod tests { _ => false, }); - assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), pd_msg.clone().unwrap()).unwrap(), ()); - assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); + assert_eq!(sessions[0].on_partial_decryption(pd_from.clone().unwrap(), &pd_msg.clone().unwrap()).unwrap(), ()); + assert_eq!(sessions[0].on_partial_decryption(pd_from.unwrap(), &pd_msg.unwrap()).unwrap_err(), Error::InvalidStateForRequest); } #[test] @@ -704,4 +800,9 @@ mod tests { // 3) 0 sessions have decrypted key value assert!(sessions.iter().all(|s| s.decrypted_secret().is_none())); } + + #[test] + fn decryption_session_works_over_network() { + // TODO + } } diff --git a/secret_store/src/key_server_cluster/encryption_session.rs b/secret_store/src/key_server_cluster/encryption_session.rs index 6f5705a73..beca00443 100644 --- a/secret_store/src/key_server_cluster/encryption_session.rs +++ b/secret_store/src/key_server_cluster/encryption_session.rs @@ -17,13 +17,22 @@ use std::collections::{BTreeSet, BTreeMap, VecDeque}; use std::fmt::{Debug, Formatter, Error as FmtError}; use std::sync::Arc; -use parking_lot::Mutex; +use parking_lot::{Condvar, Mutex}; use ethkey::{Public, Secret}; -use key_server_cluster::{Error, NodeId, SessionId}; +use key_server_cluster::{Error, NodeId, SessionId, KeyStorage, DocumentKeyShare}; use key_server_cluster::math; use key_server_cluster::cluster::Cluster; -use key_server_cluster::message::{Message, 
InitializeSession, ConfirmInitialization, CompleteInitialization, - KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare}; +use key_server_cluster::message::{Message, EncryptionMessage, InitializeSession, ConfirmInitialization, CompleteInitialization, + KeysDissemination, Complaint, ComplaintResponse, PublicKeyShare, SessionError, SessionCompleted}; + +/// Encryption session API. +pub trait Session: Send + Sync + 'static { + #[cfg(test)] + /// Get joint public key (if it is known). + fn joint_public_key(&self) -> Option; + /// Wait until session is completed. Returns distributely generated secret key. + fn wait(&self) -> Result; +} /// Encryption (distributed key generation) session. /// Based on "ECDKG: A Distributed Key Generation Protocol Based on Elliptic Curve Discrete Logarithm" paper: @@ -34,17 +43,34 @@ use key_server_cluster::message::{Message, InitializeSession, ConfirmInitializat /// 3) key verification (KV): all nodes are checking values, received for other nodes and complaining if keys are wrong /// 4) key check phase (KC): nodes are processing complaints, received from another nodes /// 5) key generation phase (KG): nodes are exchanging with information, enough to generate joint public key -pub struct Session { +/// 6) encryption phase: master node generates secret key, encrypts it using joint public && broadcasts encryption result +pub struct SessionImpl { /// Unique session id. id: SessionId, /// Public identifier of this node. self_node_id: NodeId, + /// Key storage. + key_storage: Arc, /// Cluster which allows this node to send messages to other nodes in the cluster. cluster: Arc, + /// SessionImpl completion condvar. + completed: Condvar, /// Mutable session data. data: Mutex, } +/// SessionImpl creation parameters +pub struct SessionParams { + /// SessionImpl identifier. + pub id: SessionId, + /// Id of node, on which this session is running. + pub self_node_id: Public, + /// Key storage. 
+ pub key_storage: Arc, + /// Cluster + pub cluster: Arc, +} + #[derive(Debug)] /// Mutable data of encryption (distributed key generation) session. struct SessionData { @@ -74,7 +100,9 @@ struct SessionData { /// === Values, filled when DKG session is completed successfully === /// Jointly generated public key, which can be used to encrypt secret. Public. - joint_public: Option, + joint_public: Option>, + /// Secret point. + secret_point: Option>, } #[derive(Debug, Clone)] @@ -95,13 +123,17 @@ struct NodeData { /// Public values, which have been received from this node. pub publics: Option>, - /// === Values, filled during KC phase === + // === Values, filled during KC phase === /// Nodes, complaining against this node. pub complaints: BTreeSet, - /// === Values, filled during KG phase === + // === Values, filled during KG phase === /// Public share, which has been received from this node. pub public_share: Option, + + // === Values, filled during encryption phase === + /// Flags marking that node has confirmed session completion (encryption data is stored). + pub completion_confirmed: bool, } #[derive(Debug, Clone, PartialEq)] @@ -139,6 +171,10 @@ pub enum SessionState { /// Node is waiting for joint public key share to be received from every other node. WaitingForPublicKeyShare, + // === Encryption phase states === + /// Node is waiting for session completion/session completion confirmation. + WaitingForEncryptionConfirmation, + // === Final states of the session === /// Joint public key generation is completed. Finished, @@ -146,13 +182,15 @@ pub enum SessionState { Failed, } -impl Session { +impl SessionImpl { /// Create new encryption session. 
- pub fn new(id: SessionId, self_node_id: Public, cluster: Arc) -> Self { - Session { - id: id, - self_node_id: self_node_id, - cluster: cluster, + pub fn new(params: SessionParams) -> Self { + SessionImpl { + id: params.id, + self_node_id: params.self_node_id, + key_storage: params.key_storage, + cluster: params.cluster, + completed: Condvar::new(), data: Mutex::new(SessionData { state: SessionState::WaitingForInitialization, master: None, @@ -162,10 +200,16 @@ impl Session { secret_coeff: None, secret_share: None, joint_public: None, + secret_point: None, }), } } + /// Get this session Id. + pub fn id(&self) -> &SessionId { + &self.id + } + /// Get this node Id. pub fn node(&self) -> &NodeId { &self.self_node_id @@ -176,11 +220,6 @@ impl Session { self.data.lock().state.clone() } - /// Get joint public key. - pub fn joint_public_key(&self) -> Option { - self.data.lock().joint_public.clone() - } - #[cfg(test)] /// Get derived point. pub fn derived_point(&self) -> Option { @@ -220,15 +259,15 @@ impl Session { // start initialization let derived_point = math::generate_random_point()?; - self.cluster.send(&next_node, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: derived_point, - })) + self.cluster.send(&next_node, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: derived_point.into(), + }))) } /// When session initialization message is received. 
- pub fn on_initialize_session(&self, sender: NodeId, mut message: InitializeSession) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_initialize_session(&self, sender: NodeId, message: &InitializeSession) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -239,13 +278,14 @@ impl Session { } // update derived point with random scalar - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // send confirmation back to master node - self.cluster.send(&sender, Message::ConfirmInitialization(ConfirmInitialization { - session: self.id.clone(), - derived_point: message.derived_point, - }))?; + self.cluster.send(&sender, Message::Encryption(EncryptionMessage::ConfirmInitialization(ConfirmInitialization { + session: self.id.clone().into(), + derived_point: derived_point.into(), + })))?; // update state data.master = Some(sender); @@ -255,8 +295,8 @@ impl Session { } /// When session initialization confirmation message is reeived. 
- pub fn on_confirm_initialization(&self, sender: NodeId, mut message: ConfirmInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_confirm_initialization(&self, sender: NodeId, message: &ConfirmInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -277,25 +317,26 @@ impl Session { // proceed message match next_receiver { Some(next_receiver) => { - return self.cluster.send(&next_receiver, Message::InitializeSession(InitializeSession { - session: self.id.clone(), - derived_point: message.derived_point, - })); + return self.cluster.send(&next_receiver, Message::Encryption(EncryptionMessage::InitializeSession(InitializeSession { + session: self.id.clone().into(), + derived_point: message.derived_point.clone().into(), + }))); }, None => { // update point once again to make sure that derived point is not generated by last node - math::update_random_point(&mut message.derived_point)?; + let mut derived_point = message.derived_point.clone().into(); + math::update_random_point(&mut derived_point)?; // remember derived point - data.derived_point = Some(message.derived_point.clone()); + data.derived_point = Some(derived_point.clone().into()); // broadcast derived point && other session paraeters to every other node - self.cluster.broadcast(Message::CompleteInitialization(CompleteInitialization { - session: self.id.clone(), - nodes: data.nodes.iter().map(|(id, data)| (id.clone(), data.id_number.clone())).collect(), + self.cluster.broadcast(Message::Encryption(EncryptionMessage::CompleteInitialization(CompleteInitialization { + session: self.id.clone().into(), + nodes: data.nodes.iter().map(|(id, data)| (id.clone().into(), data.id_number.clone().into())).collect(), threshold: data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"), - derived_point: message.derived_point, - 
}))?; + derived_point: derived_point.into(), + })))?; }, } @@ -305,12 +346,12 @@ impl Session { } /// When session initialization completion message is received. - pub fn on_complete_initialization(&self, sender: NodeId, message: CompleteInitialization) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complete_initialization(&self, sender: NodeId, message: &CompleteInitialization) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); // check message - let nodes_ids = message.nodes.keys().cloned().collect(); + let nodes_ids = message.nodes.keys().cloned().map(Into::into).collect(); check_cluster_nodes(self.node(), &nodes_ids)?; check_threshold(message.threshold, &nodes_ids)?; @@ -326,8 +367,8 @@ impl Session { // remember passed data data.threshold = Some(message.threshold); - data.derived_point = Some(message.derived_point); - data.nodes = message.nodes.into_iter().map(|(id, number)| (id, NodeData::with_id_number(number))).collect(); + data.derived_point = Some(message.derived_point.clone().into()); + data.nodes = message.nodes.iter().map(|(id, number)| (id.clone().into(), NodeData::with_id_number(number.clone().into()))).collect(); // now it is time for keys dissemination (KD) phase drop(data); @@ -335,17 +376,20 @@ impl Session { } /// When keys dissemination message is received. 
- pub fn on_keys_dissemination(&self, sender: NodeId, message: KeysDissemination) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_keys_dissemination(&self, sender: NodeId, message: &KeysDissemination) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); - debug_assert!(data.nodes.contains_key(&sender)); // check state if data.state != SessionState::WaitingForKeysDissemination { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } + debug_assert!(data.nodes.contains_key(&sender)); // check message let threshold = data.threshold.expect("threshold is filled in initialization phase; KD phase follows initialization phase; qed"); @@ -360,9 +404,9 @@ impl Session { return Err(Error::InvalidStateForRequest); } - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); - node_data.publics = Some(message.publics); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); + node_data.publics = Some(message.publics.iter().cloned().map(Into::into).collect()); } // check if we have received keys from every other node @@ -382,10 +426,10 @@ impl Session { if !is_key_verification_ok { node_data.complaints.insert(self.node().clone()); - self.cluster.broadcast(Message::Complaint(Complaint { - session: self.id.clone(), - against: node_id.clone(), - }))?; + self.cluster.broadcast(Message::Encryption(EncryptionMessage::Complaint(Complaint { + session: self.id.clone().into(), + against: node_id.clone().into(), + })))?; } } @@ -396,8 +440,8 @@ impl Session { } /// When complaint is received. 
- pub fn on_complaint(&self, sender: NodeId, message: Complaint) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint(&self, sender: NodeId, message: &Complaint) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -412,16 +456,16 @@ impl Session { } // respond to complaint - if &message.against == self.node() { + if &*message.against == self.node() { let secret1_sent = data.nodes[&sender].secret1_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); let secret2_sent = data.nodes[&sender].secret2_sent.clone().expect("secrets were sent on KD phase; KC phase follows KD phase; qed"); // someone is complaining against us => let's respond - return self.cluster.broadcast(Message::ComplaintResponse(ComplaintResponse { - session: self.id.clone(), - secret1: secret1_sent, - secret2: secret2_sent, - })); + return self.cluster.broadcast(Message::Encryption(EncryptionMessage::ComplaintResponse(ComplaintResponse { + session: self.id.clone().into(), + secret1: secret1_sent.into(), + secret2: secret2_sent.into(), + }))); } // someone is complaining against someone else => let's remember this @@ -434,15 +478,15 @@ impl Session { if is_critical_complaints_num { // too many complaints => exclude from session - Session::disqualify_node(&message.against, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&message.against, &*self.cluster, &mut *data); } Ok(()) } /// When complaint response is received - pub fn on_complaint_response(&self, sender: NodeId, message: ComplaintResponse) -> Result<(), Error> { - debug_assert!(self.id == message.session); + pub fn on_complaint_response(&self, sender: NodeId, message: &ComplaintResponse) -> Result<(), Error> { + debug_assert!(self.id == *message.session); debug_assert!(&sender != self.node()); let mut data = self.data.lock(); @@ -471,11 +515,11 @@ impl Session { }; if 
!is_key_verification_ok { - Session::disqualify_node(&sender, &*self.cluster, &mut *data); + SessionImpl::disqualify_node(&sender, &*self.cluster, &mut *data); } else { let node_data = data.nodes.get_mut(&sender).expect("cluster guarantees to deliver messages from qualified nodes only; qed"); - node_data.secret1 = Some(message.secret1); - node_data.secret2 = Some(message.secret2); + node_data.secret1 = Some(message.secret1.clone().into()); + node_data.secret2 = Some(message.secret2.clone().into()); node_data.complaints.remove(self.node()); } @@ -510,19 +554,24 @@ impl Session { self_node.public_share = Some(self_public_share.clone()); // broadcast self public key share - self.cluster.broadcast(Message::PublicKeyShare(PublicKeyShare { - session: self.id.clone(), - public_share: self_public_share, - })) + self.cluster.broadcast(Message::Encryption(EncryptionMessage::PublicKeyShare(PublicKeyShare { + session: self.id.clone().into(), + public_share: self_public_share.into(), + }))) } /// When public key share is received. 
- pub fn on_public_key_share(&self, sender: NodeId, message: PublicKeyShare) -> Result<(), Error> { + pub fn on_public_key_share(&self, sender: NodeId, message: &PublicKeyShare) -> Result<(), Error> { let mut data = self.data.lock(); // check state if data.state != SessionState::WaitingForPublicKeyShare { - return Err(Error::InvalidStateForRequest); + match data.state { + SessionState::WaitingForInitializationComplete | + SessionState::WaitingForKeysDissemination | + SessionState::KeyCheck => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } } // update node data with received public share @@ -532,7 +581,7 @@ impl Session { return Err(Error::InvalidMessage); } - node_data.public_share = Some(message.public_share); + node_data.public_share = Some(message.public_share.clone().into()); } // if there's also nodes, which has not sent us their public shares - do nothing @@ -540,16 +589,149 @@ impl Session { return Ok(()); } - // else - calculate joint public key && finish session - data.joint_public = { + // else - calculate joint public key + let joint_public = { let public_shares = data.nodes.values().map(|n| n.public_share.as_ref().expect("keys received on KD phase; KG phase follows KD phase; qed")); - Some(math::compute_joint_public(public_shares)?) + math::compute_joint_public(public_shares)? 
}; - data.state = SessionState::Finished; + + // if we are at the slave node - wait for session completion + if data.master.as_ref() != Some(self.node()) { + data.joint_public = Some(Ok(joint_public)); + data.state = SessionState::WaitingForEncryptionConfirmation; + return Ok(()); + } + + // then generate secret point + // then encrypt secret point with joint public key + let secret_point = math::generate_random_point()?; + let encrypted_secret_point = math::encrypt_secret(&secret_point, &joint_public)?; + + // then save encrypted data to the key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: encrypted_secret_point.common_point, + encrypted_point: encrypted_secret_point.encrypted_point, + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then distribute encrypted data to every other node + self.cluster.broadcast(Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + })))?; + + // then wait for confirmation from all other nodes + { + let self_node = data.nodes.get_mut(self.node()).expect("node is always qualified by himself; qed"); + self_node.completion_confirmed = true; + } + data.joint_public = Some(Ok(joint_public)); + data.secret_point = Some(Ok(secret_point)); + data.state = SessionState::WaitingForEncryptionConfirmation; Ok(()) } + /// When session completion message is received. 
+ pub fn on_session_completed(&self, sender: NodeId, message: &SessionCompleted) -> Result<(), Error> { + debug_assert!(self.id == *message.session); + debug_assert!(&sender != self.node()); + + let mut data = self.data.lock(); + debug_assert!(data.nodes.contains_key(&sender)); + + // check state + if data.state != SessionState::WaitingForEncryptionConfirmation { + match data.state { + SessionState::WaitingForPublicKeyShare => return Err(Error::TooEarlyForRequest), + _ => return Err(Error::InvalidStateForRequest), + } + } + + // if we are not masters, save result and respond with confirmation + if data.master.as_ref() != Some(self.node()) { + // check that we have received message from master + if data.master.as_ref() != Some(&sender) { + return Err(Error::InvalidMessage); + } + + // save encrypted data to key storage + let encrypted_data = DocumentKeyShare { + threshold: data.threshold.expect("threshold is filled in initialization phase; KG phase follows initialization phase; qed"), + id_numbers: data.nodes.iter().map(|(node_id, node_data)| (node_id.clone(), node_data.id_number.clone())).collect(), + secret_share: data.secret_share.as_ref().expect("secret_share is filled in KG phase; we are at the end of KG phase; qed").clone(), + common_point: message.common_point.clone().into(), + encrypted_point: message.encrypted_point.clone().into(), + }; + self.key_storage.insert(self.id.clone(), encrypted_data.clone()) + .map_err(|e| Error::KeyStorage(e.into()))?; + + // then respond with confirmation + data.state = SessionState::Finished; + return self.cluster.send(&sender, Message::Encryption(EncryptionMessage::SessionCompleted(SessionCompleted { + session: self.id.clone().into(), + common_point: encrypted_data.common_point.clone().into(), + encrypted_point: encrypted_data.encrypted_point.clone().into(), + }))); + } + + // remember that we have received confirmation from sender node + { + let sender_node = data.nodes.get_mut(&sender).expect("node is always qualified by 
himself; qed"); + if sender_node.completion_confirmed { + return Err(Error::InvalidMessage); + } + + sender_node.completion_confirmed = true; + } + + // check if we have received confirmations from all cluster nodes + if data.nodes.iter().any(|(_, node_data)| !node_data.completion_confirmed) { + return Ok(()) + } + + // we have received enough confirmations => complete session + data.state = SessionState::Finished; + self.completed.notify_all(); + + Ok(()) + } + + /// When error has occurred on another node. + pub fn on_session_error(&self, sender: NodeId, message: &SessionError) { + warn!("{}: encryption session error: {:?} from {}", self.node(), message, sender); + let mut data = self.data.lock(); + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io(message.error.clone()))); + data.secret_point = Some(Err(Error::Io(message.error.clone()))); + self.completed.notify_all(); + } + + /// When session timeout has occurred. + pub fn on_session_timeout(&self, node: &NodeId) { + warn!("{}: encryption session timeout", self.node()); + let mut data = self.data.lock(); + + match data.state { + SessionState::WaitingForInitialization | + SessionState::WaitingForInitializationConfirm(_) | + SessionState::WaitingForInitializationComplete => (), + _ => if !data.nodes.contains_key(node) { + return; + }, + } + + data.state = SessionState::Failed; + data.joint_public = Some(Err(Error::Io("session expired".into()))); + data.secret_point = Some(Err(Error::Io("session expired".into()))); + self.completed.notify_all(); + } + /// Keys dissemination (KD) phase fn disseminate_keys(&self) -> Result<(), Error> { let mut data = self.data.lock(); @@ -576,12 +758,12 @@ impl Session { node_data.secret1_sent = Some(secret1.clone()); node_data.secret2_sent = Some(secret2.clone()); - self.cluster.send(&node, Message::KeysDissemination(KeysDissemination { - session: self.id.clone(), - secret1: secret1, - secret2: secret2, - publics: publics.clone(), - }))?; +
self.cluster.send(&node, Message::Encryption(EncryptionMessage::KeysDissemination(KeysDissemination { + session: self.id.clone().into(), + secret1: secret1.into(), + secret2: secret2.into(), + publics: publics.iter().cloned().map(Into::into).collect(), + })))?; } else { node_data.secret1 = Some(secret1); node_data.secret2 = Some(secret2); @@ -599,7 +781,7 @@ impl Session { fn disqualify_node(node: &NodeId, cluster: &Cluster, data: &mut SessionData) { let threshold = data.threshold .expect("threshold is filled on initialization phase; node can only be disqualified during KC phase; KC phase follows initialization phase; qed"); - + // blacklist node cluster.blacklist(&node); // too many complaints => exclude from session @@ -612,6 +794,25 @@ impl Session { } } +impl Session for SessionImpl { + #[cfg(test)] + fn joint_public_key(&self) -> Option { + self.data.lock().joint_public.clone().and_then(|r| r.ok()) + } + + + fn wait(&self) -> Result { + let mut data = self.data.lock(); + if !data.secret_point.is_some() { + self.completed.wait(&mut data); + } + + data.secret_point.as_ref() + .expect("checked above or waited for completed; completed is only signaled when secret_point.is_some(); qed") + .clone() + } +} + impl EveryOtherNodeVisitor { pub fn new(self_id: &NodeId, nodes: I) -> Self where I: Iterator { EveryOtherNodeVisitor { @@ -648,11 +849,12 @@ impl NodeData { secret2: None, publics: None, public_share: None, + completion_confirmed: false, } } } -impl Debug for Session { +impl Debug for SessionImpl { fn fmt(&self, f: &mut Formatter) -> Result<(), FmtError> { write!(f, "Encryption session {} on {}", self.id, self.self_node_id) } @@ -682,26 +884,29 @@ pub fn check_threshold(threshold: usize, nodes: &BTreeSet) -> Result<(), #[cfg(test)] mod tests { + use std::time; use std::sync::Arc; - use std::collections::{BTreeSet, BTreeMap}; + use std::collections::{BTreeSet, BTreeMap, VecDeque}; + use tokio_core::reactor::Core; use ethkey::{Random, Generator}; - use 
key_server_cluster::{NodeId, SessionId, Error}; - use key_server_cluster::message::{self, Message}; - use key_server_cluster::cluster::tests::DummyCluster; - use key_server_cluster::encryption_session::{Session, SessionState}; + use key_server_cluster::{NodeId, SessionId, Error, DummyKeyStorage}; + use key_server_cluster::message::{self, Message, EncryptionMessage}; + use key_server_cluster::cluster::tests::{DummyCluster, make_clusters, run_clusters, loop_until, all_connections_established}; + use key_server_cluster::encryption_session::{Session, SessionImpl, SessionState, SessionParams}; use key_server_cluster::math; use key_server_cluster::math::tests::do_encryption_and_decryption; #[derive(Debug)] struct Node { pub cluster: Arc, - pub session: Session, + pub session: SessionImpl, } #[derive(Debug)] struct MessageLoop { pub session_id: SessionId, pub nodes: BTreeMap, + pub queue: VecDeque<(NodeId, NodeId, Message)>, } impl MessageLoop { @@ -712,7 +917,12 @@ mod tests { let key_pair = Random.generate().unwrap(); let node_id = key_pair.public().clone(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(session_id.clone(), node_id.clone(), cluster.clone()); + let session = SessionImpl::new(SessionParams { + id: session_id.clone(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster.clone(), + }); nodes.insert(node_id, Node { cluster: cluster, session: session }); } @@ -726,22 +936,23 @@ mod tests { MessageLoop { session_id: session_id, nodes: nodes, + queue: VecDeque::new(), } } - pub fn master(&self) -> &Session { + pub fn master(&self) -> &SessionImpl { &self.nodes.values().nth(0).unwrap().session } - pub fn first_slave(&self) -> &Session { + pub fn first_slave(&self) -> &SessionImpl { &self.nodes.values().nth(1).unwrap().session } - pub fn second_slave(&self) -> &Session { + pub fn second_slave(&self) -> &SessionImpl { &self.nodes.values().nth(2).unwrap().session } - pub 
fn third_slave(&self) -> &Session { + pub fn third_slave(&self) -> &SessionImpl { &self.nodes.values().nth(3).unwrap().session } @@ -749,18 +960,29 @@ mod tests { self.nodes.values() .filter_map(|n| n.cluster.take_message().map(|m| (n.session.node().clone(), m.0, m.1))) .nth(0) + .or_else(|| self.queue.pop_front()) } pub fn process_message(&mut self, msg: (NodeId, NodeId, Message)) -> Result<(), Error> { - match msg.2 { - Message::InitializeSession(message) => self.nodes[&msg.1].session.on_initialize_session(msg.0, message), - Message::ConfirmInitialization(message) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0, message), - Message::CompleteInitialization(message) => self.nodes[&msg.1].session.on_complete_initialization(msg.0, message), - Message::KeysDissemination(message) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0, message), - Message::Complaint(message) => self.nodes[&msg.1].session.on_complaint(msg.0, message), - Message::ComplaintResponse(message) => self.nodes[&msg.1].session.on_complaint_response(msg.0, message), - Message::PublicKeyShare(message) => self.nodes[&msg.1].session.on_public_key_share(msg.0, message), - _ => panic!("unexpected"), + match { + match msg.2 { + Message::Encryption(EncryptionMessage::InitializeSession(ref message)) => self.nodes[&msg.1].session.on_initialize_session(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::ConfirmInitialization(ref message)) => self.nodes[&msg.1].session.on_confirm_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::CompleteInitialization(ref message)) => self.nodes[&msg.1].session.on_complete_initialization(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::KeysDissemination(ref message)) => self.nodes[&msg.1].session.on_keys_dissemination(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::Complaint(ref message)) => self.nodes[&msg.1].session.on_complaint(msg.0.clone(), &message), + 
Message::Encryption(EncryptionMessage::ComplaintResponse(ref message)) => self.nodes[&msg.1].session.on_complaint_response(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::PublicKeyShare(ref message)) => self.nodes[&msg.1].session.on_public_key_share(msg.0.clone(), &message), + Message::Encryption(EncryptionMessage::SessionCompleted(ref message)) => self.nodes[&msg.1].session.on_session_completed(msg.0.clone(), &message), + _ => panic!("unexpected"), + } + } { + Ok(_) => Ok(()), + Err(Error::TooEarlyForRequest) => { + self.queue.push_back(msg); + Ok(()) + }, + Err(err) => Err(err), } } @@ -796,7 +1018,12 @@ mod tests { fn fails_to_initialize_if_not_a_part_of_cluster() { let node_id = math::generate_random_point().unwrap(); let cluster = Arc::new(DummyCluster::new(node_id.clone())); - let session = Session::new(SessionId::default(), node_id.clone(), cluster); + let session = SessionImpl::new(SessionParams { + id: SessionId::default(), + self_node_id: node_id.clone(), + key_storage: Arc::new(DummyKeyStorage::default()), + cluster: cluster, + }); let cluster_nodes: BTreeSet<_> = (0..2).map(|_| math::generate_random_point().unwrap()).collect(); assert_eq!(session.initialize(0, cluster_nodes).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -816,9 +1043,9 @@ mod tests { fn fails_to_accept_initialization_when_already_initialized() { let (sid, m, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.first_slave().on_initialize_session(m, message::InitializeSession { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.first_slave().on_initialize_session(m, &message::InitializeSession { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -826,16 +1053,16 @@ mod tests { fn slave_updates_derived_point_on_initialization() { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); 
let passed_point = match l.take_message().unwrap() { - (f, t, Message::InitializeSession(message)) => { + (f, t, Message::Encryption(EncryptionMessage::InitializeSession(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::InitializeSession(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::InitializeSession(message)))).unwrap(); point }, _ => panic!("unexpected"), }; match l.take_message().unwrap() { - (_, _, Message::ConfirmInitialization(message)) => assert!(passed_point != message.derived_point), + (_, _, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => assert!(passed_point != message.derived_point), _ => panic!("unexpected"), } } @@ -846,9 +1073,9 @@ mod tests { l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -857,9 +1084,9 @@ mod tests { let (sid, _, s, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); l.take_and_process_message().unwrap(); - assert_eq!(l.master().on_confirm_initialization(s, message::ConfirmInitialization { - session: sid, - derived_point: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_confirm_initialization(s, &message::ConfirmInitialization { + session: sid.into(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -868,15 +1095,15 @@ mod tests { let (_, _, _, mut l) = make_simple_cluster(0, 2).unwrap(); l.take_and_process_message().unwrap(); let passed_point = match 
l.take_message().unwrap() { - (f, t, Message::ConfirmInitialization(message)) => { + (f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message))) => { let point = message.derived_point.clone(); - l.process_message((f, t, Message::ConfirmInitialization(message))).unwrap(); + l.process_message((f, t, Message::Encryption(EncryptionMessage::ConfirmInitialization(message)))).unwrap(); point }, _ => panic!("unexpected"), }; - assert!(passed_point != l.master().derived_point().unwrap()); + assert!(l.master().derived_point().unwrap() != passed_point.into()); } #[test] @@ -884,11 +1111,11 @@ mod tests { let (sid, m, s, l) = make_simple_cluster(0, 2).unwrap(); let mut nodes = BTreeMap::new(); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesCount); } @@ -898,11 +1125,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(math::generate_random_point().unwrap(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidNodesConfiguration); } @@ -912,11 
+1139,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 2, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidThreshold); } @@ -926,11 +1153,11 @@ mod tests { let mut nodes = BTreeMap::new(); nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(m, message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(m, &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -945,22 +1172,22 @@ mod tests { nodes.insert(m, math::generate_random_scalar().unwrap()); nodes.insert(s, math::generate_random_scalar().unwrap()); nodes.insert(l.second_slave().node().clone(), math::generate_random_scalar().unwrap()); - assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), message::CompleteInitialization { - session: sid, - nodes: nodes, + assert_eq!(l.first_slave().on_complete_initialization(l.second_slave().node().clone(), &message::CompleteInitialization { + session: sid.into(), + nodes: nodes.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), threshold: 0, - derived_point: 
math::generate_random_point().unwrap(), + derived_point: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn fails_to_accept_keys_dissemination_if_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(0, 2).unwrap(); - assert_eq!(l.master().on_keys_dissemination(s, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.master().on_keys_dissemination(s, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -974,11 +1201,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidMessage); } @@ -992,11 +1219,11 @@ mod tests { l.take_and_process_message().unwrap(); // m -> s1: CompleteInitialization l.take_and_process_message().unwrap(); // m -> s2: CompleteInitialization 
l.take_and_process_message().unwrap(); // m -> s1: KeysDissemination - assert_eq!(l.first_slave().on_keys_dissemination(m, message::KeysDissemination { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap()], + assert_eq!(l.first_slave().on_keys_dissemination(m, &message::KeysDissemination { + session: sid.into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into()], }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1004,12 +1231,12 @@ mod tests { fn defends_if_receives_complain_on_himself() { let (sid, m, s, mut l) = make_simple_cluster(1, 3).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(s, message::Complaint { - session: sid, - against: m, + l.master().on_complaint(s, &message::Complaint { + session: sid.into(), + against: m.into(), }).unwrap(); match l.take_message().unwrap() { - (_, _, Message::ComplaintResponse(_)) => (), + (_, _, Message::Encryption(EncryptionMessage::ComplaintResponse(_))) => (), _ => panic!("unexpected"), } } @@ -1018,13 +1245,13 @@ mod tests { fn node_is_disqualified_if_enough_complaints_received() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.third_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.third_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ 
-1033,13 +1260,13 @@ mod tests { fn node_is_not_disqualified_if_enough_complaints_received_from_the_same_node() { let (sid, _, s, mut l) = make_simple_cluster(1, 4).unwrap(); l.take_and_process_all_messages().unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s.clone(), + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.clone().into(), }).unwrap(); - l.master().on_complaint(l.second_slave().node().clone(), message::Complaint { - session: sid, - against: s, + l.master().on_complaint(l.second_slave().node().clone(), &message::Complaint { + session: sid.into(), + against: s.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 4); } @@ -1058,17 +1285,17 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let s2 = l.second_slave().node().clone(); - l.master().on_keys_dissemination(s2.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: vec![math::generate_random_point().unwrap(), math::generate_random_point().unwrap()], + l.master().on_keys_dissemination(s2.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: vec![math::generate_random_point().unwrap().into(), math::generate_random_point().unwrap().into()], }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(s2, message::ComplaintResponse { - session: sid, - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), + l.master().on_complaint_response(s2, &message::ComplaintResponse { + session: sid.into(), + secret1: 
math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 2); } @@ -1087,22 +1314,22 @@ mod tests { l.take_and_process_message().unwrap(); // s1 -> m: KeysDissemination l.take_and_process_message().unwrap(); // s1 -> s2: KeysDissemination let (f, t, msg) = match l.take_message() { - Some((f, t, Message::KeysDissemination(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::KeysDissemination(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.second_slave().node()); assert_eq!(&t, l.master().node()); - l.master().on_keys_dissemination(f.clone(), message::KeysDissemination { - session: sid.clone(), - secret1: math::generate_random_scalar().unwrap(), - secret2: math::generate_random_scalar().unwrap(), - publics: msg.publics.clone(), + l.master().on_keys_dissemination(f.clone(), &message::KeysDissemination { + session: sid.clone().into(), + secret1: math::generate_random_scalar().unwrap().into(), + secret2: math::generate_random_scalar().unwrap().into(), + publics: msg.publics.clone().into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); - l.master().on_complaint_response(f, message::ComplaintResponse { - session: sid, - secret1: msg.secret1, - secret2: msg.secret2, + l.master().on_complaint_response(f, &message::ComplaintResponse { + session: sid.into(), + secret1: msg.secret1.into(), + secret2: msg.secret2.into(), }).unwrap(); assert_eq!(l.master().qualified_nodes().len(), 3); } @@ -1116,9 +1343,9 @@ mod tests { #[test] fn should_not_accept_public_key_share_when_is_not_waiting_for_it() { let (sid, _, s, l) = make_simple_cluster(1, 3).unwrap(); - assert_eq!(l.master().on_public_key_share(s, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + assert_eq!(l.master().on_public_key_share(s, &message::PublicKeyShare { + session: sid.into(), + public_share: 
math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidStateForRequest); } @@ -1129,24 +1356,21 @@ mod tests { l.master().start_key_generation_phase().unwrap(); l.first_slave().start_key_generation_phase().unwrap(); let (f, t, msg) = match l.take_message() { - Some((f, t, Message::PublicKeyShare(msg))) => (f, t, msg), + Some((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg)))) => (f, t, msg), _ => panic!("unexpected"), }; assert_eq!(&f, l.master().node()); assert_eq!(&t, l.first_slave().node()); - l.process_message((f, t, Message::PublicKeyShare(msg.clone()))).unwrap(); - assert_eq!(l.first_slave().on_public_key_share(m, message::PublicKeyShare { - session: sid, - public_share: math::generate_random_point().unwrap(), + l.process_message((f, t, Message::Encryption(EncryptionMessage::PublicKeyShare(msg.clone())))).unwrap(); + assert_eq!(l.first_slave().on_public_key_share(m, &message::PublicKeyShare { + session: sid.into(), + public_share: math::generate_random_point().unwrap().into(), }).unwrap_err(), Error::InvalidMessage); } #[test] fn complete_enc_dec_session() { - // TODO: when number of nodes, needed to decrypt message is odd, algorithm won't work - // let test_cases = [/*(0, 2), */(1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), - // (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; - let test_cases = [(3, 5)]; + let test_cases = [(0, 5), (2, 5), (3, 5)]; for &(threshold, num_nodes) in &test_cases { let mut l = MessageLoop::new(num_nodes); l.master().initialize(threshold, l.nodes.keys().cloned().collect()).unwrap(); @@ -1194,4 +1418,26 @@ mod tests { } // TODO: add test where some nodes are disqualified from session + + #[test] + fn encryption_session_works_over_network() { + //::util::log::init_log(); + + let test_cases = [(1, 3)]; + for &(threshold, num_nodes) in &test_cases { + let mut core = Core::new().unwrap(); + + // prepare cluster objects for each 
node + let clusters = make_clusters(&core, 6020, num_nodes); + run_clusters(&clusters); + + // establish connections + loop_until(&mut core, time::Duration::from_millis(300), || clusters.iter().all(all_connections_established)); + + // run session to completion + let session_id = SessionId::default(); + let session = clusters[0].client().new_encryption_session(session_id, threshold).unwrap(); + loop_until(&mut core, time::Duration::from_millis(1000), || session.joint_public_key().is_some()); + } + } } diff --git a/secret_store/src/key_server_cluster/io/deadline.rs b/secret_store/src/key_server_cluster/io/deadline.rs new file mode 100644 index 000000000..7b8c4d0ed --- /dev/null +++ b/secret_store/src/key_server_cluster/io/deadline.rs @@ -0,0 +1,85 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::time::Duration; +use futures::{Future, Select, BoxFuture, Poll, Async}; +use tokio_core::reactor::{Handle, Timeout}; + +type DeadlineBox where F: Future = BoxFuture, F::Error>; + +/// Complete a passed future or fail if it is not completed within timeout. 
+pub fn deadline(duration: Duration, handle: &Handle, future: F) -> Result, io::Error> + where F: Future + Send + 'static, T: 'static { + let timeout = try!(Timeout::new(duration, handle)).map(|_| DeadlineStatus::Timeout).boxed(); + let future = future.map(DeadlineStatus::Meet).boxed(); + let deadline = Deadline { + future: timeout.select(future), + }; + Ok(deadline) +} + +#[derive(Debug, PartialEq)] +/// Deadline future completion status. +pub enum DeadlineStatus { + /// Completed a future. + Meet(T), + /// Faled with timeout. + Timeout, +} + +/// Future, which waits for passed future completion within given period, or fails with timeout. +pub struct Deadline where F: Future { + future: Select, DeadlineBox>, +} + +impl Future for Deadline where F: Future { + type Item = DeadlineStatus; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + match self.future.poll() { + Ok(Async::Ready((result, _other))) => Ok(Async::Ready(result)), + Ok(Async::NotReady) => Ok(Async::NotReady), + Err((err, _other)) => Err(err), + } + } +} + +#[cfg(test)] +mod tests { + use std::io; + use std::time::Duration; + use futures::{Future, empty, done}; + use tokio_core::reactor::Core; + use super::{deadline, DeadlineStatus}; + + //#[test] TODO: not working + fn _deadline_timeout_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1), &core.handle(), empty::<(), io::Error>()).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Timeout); + } + + #[test] + fn deadline_result_works() { + let mut core = Core::new().unwrap(); + let deadline = deadline(Duration::from_millis(1000), &core.handle(), done(Ok(()))).unwrap(); + core.turn(Some(Duration::from_millis(3))); + assert_eq!(deadline.wait().unwrap(), DeadlineStatus::Meet(())); + } +} \ No newline at end of file diff --git a/secret_store/src/key_server_cluster/io/handshake.rs b/secret_store/src/key_server_cluster/io/handshake.rs new file 
mode 100644 index 000000000..0d71d25aa --- /dev/null +++ b/secret_store/src/key_server_cluster/io/handshake.rs @@ -0,0 +1,320 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::collections::BTreeSet; +use futures::{Future, Poll, Async}; +use ethkey::{Random, Generator, KeyPair, Secret, sign, verify_public}; +use util::H256; +use key_server_cluster::{NodeId, Error}; +use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; +use key_server_cluster::io::{write_message, write_encrypted_message, WriteMessage, ReadMessage, + read_message, compute_shared_key}; + +/// Start handshake procedure with another node from the cluster. +pub fn handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + handshake_with_plain_confirmation(a, self_confirmation_plain, self_key_pair, trusted_nodes) +} + +/// Start handshake procedure with another node from the cluster and given plain confirmation. 
+pub fn handshake_with_plain_confirmation(a: A, self_confirmation_plain: Result, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let (error, state) = match self_confirmation_plain.clone() + .and_then(|c| Handshake::::make_public_key_message(self_key_pair.public().clone(), c)) { + Ok(message) => (None, HandshakeState::SendPublicKey(write_message(a, message))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: true, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +/// Wait for handshake procedure to be started by another node from the cluster. +pub fn accept_handshake(a: A, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Handshake where A: io::Write + io::Read { + let self_confirmation_plain = Random.generate().map(|kp| *kp.secret().clone()).map_err(Into::into); + let (error, state) = match self_confirmation_plain.clone() { + Ok(_) => (None, HandshakeState::ReceivePublicKey(read_message(a))), + Err(err) => (Some((a, Err(err))), HandshakeState::Finished), + }; + + Handshake { + is_active: false, + error: error, + state: state, + self_key_pair: self_key_pair, + self_confirmation_plain: self_confirmation_plain.unwrap_or(Default::default()), + trusted_nodes: trusted_nodes, + other_node_id: None, + other_confirmation_plain: None, + shared_key: None, + } +} + +#[derive(Debug, PartialEq)] +/// Result of handshake procedure. +pub struct HandshakeResult { + /// Node id. + pub node_id: NodeId, + /// Shared key. + pub shared_key: Secret, +} + +/// Future handshake procedure. 
+pub struct Handshake { + is_active: bool, + error: Option<(A, Result)>, + state: HandshakeState, + self_key_pair: KeyPair, + self_confirmation_plain: H256, + trusted_nodes: BTreeSet, + other_node_id: Option, + other_confirmation_plain: Option, + shared_key: Option, +} + +/// Active handshake state. +enum HandshakeState { + SendPublicKey(WriteMessage), + ReceivePublicKey(ReadMessage), + SendPrivateKeySignature(WriteMessage), + ReceivePrivateKeySignature(ReadMessage), + Finished, +} + +impl Handshake where A: io::Read + io::Write { + #[cfg(test)] + pub fn set_self_confirmation_plain(&mut self, self_confirmation_plain: H256) { + self.self_confirmation_plain = self_confirmation_plain; + } + + pub fn make_public_key_message(self_node_id: NodeId, confirmation_plain: H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: self_node_id.into(), + confirmation_plain: confirmation_plain.into(), + }))) + } + + fn make_private_key_signature_message(secret: &Secret, confirmation_plain: &H256) -> Result { + Ok(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: sign(secret, confirmation_plain)?.into(), + }))) + } +} + +impl Future for Handshake where A: io::Read + io::Write { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(error_result) = self.error.take() { + return Ok(error_result.into()); + } + + let (next, result) = match self.state { + HandshakeState::SendPublicKey(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + if self.is_active { + (HandshakeState::ReceivePublicKey( + read_message(stream) + ), Async::NotReady) + } else { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_node_id is filled in ReceivePublicKey; qed") + ) { + Ok(shared_key) => Some(shared_key), + 
Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("we are in passive mode; in passive mode SendPublicKey follows ReceivePublicKey; other_confirmation_plain is filled in ReceivePublicKey; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } + }, + HandshakeState::ReceivePublicKey(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePublicKey(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + if !self.trusted_nodes.contains(&*message.node_id) { + return Ok((stream, Err(Error::InvalidNodeId)).into()); + } + + self.other_node_id = Some(message.node_id.into()); + self.other_confirmation_plain = Some(message.confirmation_plain.into()); + if self.is_active { + self.shared_key = match compute_shared_key(self.self_key_pair.secret(), + self.other_node_id.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(shared_key) => Some(shared_key), + Err(err) => return Ok((stream, Err(err)).into()), + }; + + let message = match Handshake::::make_private_key_signature_message( + self.self_key_pair.secret(), + self.other_confirmation_plain.as_ref().expect("filled couple of lines above; qed") + ) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPrivateKeySignature(write_encrypted_message(stream, + self.shared_key.as_ref().expect("filled couple of lines above; qed"), + message)), Async::NotReady) + } else { + let 
message = match Handshake::::make_public_key_message(self.self_key_pair.public().clone(), self.self_confirmation_plain.clone()) { + Ok(message) => message, + Err(err) => return Ok((stream, Err(err)).into()), + }; + (HandshakeState::SendPublicKey(write_message(stream, message)), Async::NotReady) + } + }, + HandshakeState::SendPrivateKeySignature(ref mut future) => { + let (stream, _) = try_ready!(future.poll()); + + (HandshakeState::ReceivePrivateKeySignature( + read_message(stream) + ), Async::NotReady) + }, + HandshakeState::ReceivePrivateKeySignature(ref mut future) => { + let (stream, message) = try_ready!(future.poll()); + + let message = match message { + Ok(message) => match message { + Message::Cluster(ClusterMessage::NodePrivateKeySignature(message)) => message, + _ => return Ok((stream, Err(Error::InvalidMessage)).into()), + }, + Err(err) => return Ok((stream, Err(err.into())).into()), + }; + + let other_node_public = self.other_node_id.as_ref().expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"); + if !verify_public(other_node_public, &*message.confirmation_signed, &self.self_confirmation_plain).unwrap_or(false) { + return Ok((stream, Err(Error::InvalidMessage)).into()); + } + + (HandshakeState::Finished, Async::Ready((stream, Ok(HandshakeResult { + node_id: self.other_node_id.expect("other_node_id is filled in ReceivePublicKey; ReceivePrivateKeySignature follows ReceivePublicKey; qed"), + shared_key: self.shared_key.clone().expect("shared_key is filled in Send/ReceivePublicKey; ReceivePrivateKeySignature follows Send/ReceivePublicKey; qed"), + })))) + }, + HandshakeState::Finished => panic!("poll Handshake after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} + +#[cfg(test)] +mod tests { + use std::collections::BTreeSet; + use futures::Future; + use ethcrypto::ecdh::agree; 
+ use ethkey::{Random, Generator, sign}; + use util::H256; + use key_server_cluster::io::message::tests::TestIo; + use key_server_cluster::message::{Message, ClusterMessage, NodePublicKey, NodePrivateKeySignature}; + use super::{handshake_with_plain_confirmation, accept_handshake, HandshakeResult}; + + fn prepare_test_io() -> (H256, TestIo) { + let self_key_pair = Random.generate().unwrap(); + let peer_key_pair = Random.generate().unwrap(); + let mut io = TestIo::new(self_key_pair.clone(), peer_key_pair.public().clone()); + + let self_confirmation_plain = *Random.generate().unwrap().secret().clone(); + let peer_confirmation_plain = *Random.generate().unwrap().secret().clone(); + + let self_confirmation_signed = sign(peer_key_pair.secret(), &self_confirmation_plain).unwrap(); + let peer_confirmation_signed = sign(self_key_pair.secret(), &peer_confirmation_plain).unwrap(); + + io.add_input_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: peer_key_pair.public().clone().into(), + confirmation_plain: peer_confirmation_plain.into(), + }))); + io.add_input_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: self_confirmation_signed.into(), + }))); + + io.add_output_message(Message::Cluster(ClusterMessage::NodePublicKey(NodePublicKey { + node_id: self_key_pair.public().clone().into(), + confirmation_plain: self_confirmation_plain.clone().into(), + }))); + io.add_output_message(Message::Cluster(ClusterMessage::NodePrivateKeySignature(NodePrivateKeySignature { + confirmation_signed: peer_confirmation_signed.into(), + }))); + + (self_confirmation_plain, io) + } + + #[test] + fn active_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), 
trusted_nodes.iter().nth(0).unwrap()).unwrap(); + + let handshake = handshake_with_plain_confirmation(io, Ok(self_confirmation_plain), self_key_pair, trusted_nodes); + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } + + #[test] + fn passive_handshake_works() { + let (self_confirmation_plain, io) = prepare_test_io(); + let self_key_pair = io.self_key_pair().clone(); + let trusted_nodes: BTreeSet<_> = vec![io.peer_public().clone()].into_iter().collect(); + let shared_key = agree(self_key_pair.secret(), io.peer_public()).unwrap(); + + let mut handshake = accept_handshake(io, self_key_pair, trusted_nodes); + handshake.set_self_confirmation_plain(self_confirmation_plain); + + let handshake_result = handshake.wait().unwrap(); + assert_eq!(handshake_result.1, Ok(HandshakeResult { + node_id: handshake_result.0.peer_public().clone(), + shared_key: shared_key, + })); + handshake_result.0.assert_output(); + } +} diff --git a/secret_store/src/key_server_cluster/io/message.rs b/secret_store/src/key_server_cluster/io/message.rs new file mode 100644 index 000000000..bcabebf76 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/message.rs @@ -0,0 +1,247 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io::Cursor; +use std::u16; +use std::ops::Deref; +use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; +use serde_json; +use ethcrypto::ecdh::agree; +use ethkey::{Public, Secret}; +use key_server_cluster::Error; +use key_server_cluster::message::{Message, ClusterMessage, EncryptionMessage, DecryptionMessage}; + +/// Size of serialized header. +pub const MESSAGE_HEADER_SIZE: usize = 4; + +#[derive(Debug, PartialEq)] +/// Message header. +pub struct MessageHeader { + /// Message/Header version. + pub version: u8, + /// Message kind. + pub kind: u8, + /// Message payload size (without header). + pub size: u16, +} + +#[derive(Debug, Clone, PartialEq)] +/// Serialized message. +pub struct SerializedMessage(Vec); + +impl Deref for SerializedMessage { + type Target = [u8]; + + fn deref(&self) -> &[u8] { + &self.0 + } +} + +impl Into> for SerializedMessage { + fn into(self) -> Vec { + self.0 + } +} + +/// Serialize message. 
+pub fn serialize_message(message: Message) -> Result { + let (message_kind, payload) = match message { + Message::Cluster(ClusterMessage::NodePublicKey(payload)) => (1, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::NodePrivateKeySignature(payload)) => (2, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAlive(payload)) => (3, serde_json::to_vec(&payload)), + Message::Cluster(ClusterMessage::KeepAliveResponse(payload)) => (4, serde_json::to_vec(&payload)), + + Message::Encryption(EncryptionMessage::InitializeSession(payload)) => (50, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ConfirmInitialization(payload)) => (51, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::CompleteInitialization(payload)) => (52, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::KeysDissemination(payload)) => (53, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::Complaint(payload)) => (54, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::ComplaintResponse(payload)) => (55, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::PublicKeyShare(payload)) => (56, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionError(payload)) => (57, serde_json::to_vec(&payload)), + Message::Encryption(EncryptionMessage::SessionCompleted(payload)) => (58, serde_json::to_vec(&payload)), + + Message::Decryption(DecryptionMessage::InitializeDecryptionSession(payload)) => (100, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(payload)) => (101, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::RequestPartialDecryption(payload)) => (102, serde_json::to_vec(&payload)), + Message::Decryption(DecryptionMessage::PartialDecryption(payload)) => (103, serde_json::to_vec(&payload)), + 
Message::Decryption(DecryptionMessage::DecryptionSessionError(payload)) => (104, serde_json::to_vec(&payload)), + }; + + let payload = payload.map_err(|err| Error::Serde(err.to_string()))?; + let payload_len = payload.len(); + if payload_len > u16::MAX as usize { + return Err(Error::InvalidMessage); + } + + let header = MessageHeader { + kind: message_kind, + version: 1, + size: payload_len as u16, + }; + + let mut serialized_message = serialize_header(&header)?; + serialized_message.extend(payload); + Ok(SerializedMessage(serialized_message)) +} + +/// Deserialize message. +pub fn deserialize_message(header: &MessageHeader, payload: Vec) -> Result { + Ok(match header.kind { + 1 => Message::Cluster(ClusterMessage::NodePublicKey(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 2 => Message::Cluster(ClusterMessage::NodePrivateKeySignature(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 3 => Message::Cluster(ClusterMessage::KeepAlive(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 4 => Message::Cluster(ClusterMessage::KeepAliveResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 50 => Message::Encryption(EncryptionMessage::InitializeSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 51 => Message::Encryption(EncryptionMessage::ConfirmInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 52 => Message::Encryption(EncryptionMessage::CompleteInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 53 => Message::Encryption(EncryptionMessage::KeysDissemination(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 54 => Message::Encryption(EncryptionMessage::Complaint(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 55 => 
Message::Encryption(EncryptionMessage::ComplaintResponse(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 56 => Message::Encryption(EncryptionMessage::PublicKeyShare(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 57 => Message::Encryption(EncryptionMessage::SessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 58 => Message::Encryption(EncryptionMessage::SessionCompleted(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + 100 => Message::Decryption(DecryptionMessage::InitializeDecryptionSession(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 101 => Message::Decryption(DecryptionMessage::ConfirmDecryptionInitialization(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 102 => Message::Decryption(DecryptionMessage::RequestPartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 103 => Message::Decryption(DecryptionMessage::PartialDecryption(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + 104 => Message::Decryption(DecryptionMessage::DecryptionSessionError(serde_json::from_slice(&payload).map_err(|err| Error::Serde(err.to_string()))?)), + + _ => return Err(Error::Serde(format!("unknown message type {}", header.kind))), + }) +} + +/// Encrypt serialized message. +pub fn encrypt_message(_key: &Secret, message: SerializedMessage) -> Result { + Ok(message) // TODO: implement me +} + +/// Decrypt serialized message. +pub fn decrypt_message(_key: &Secret, payload: Vec) -> Result, Error> { + Ok(payload) // TODO: implement me +} + +/// Compute shared encryption key. +pub fn compute_shared_key(self_secret: &Secret, other_public: &Public) -> Result { + Ok(agree(self_secret, other_public)?) +} + +/// Serialize message header. 
+fn serialize_header(header: &MessageHeader) -> Result, Error> { + let mut buffer = Vec::with_capacity(MESSAGE_HEADER_SIZE); + buffer.write_u8(header.version)?; + buffer.write_u8(header.kind)?; + buffer.write_u16::(header.size)?; + Ok(buffer) +} + +/// Deserialize message header. +pub fn deserialize_header(data: &[u8]) -> Result { + let mut reader = Cursor::new(data); + Ok(MessageHeader { + version: reader.read_u8()?, + kind: reader.read_u8()?, + size: reader.read_u16::()?, + }) +} + +#[cfg(test)] +pub mod tests { + use std::io; + use ethkey::{KeyPair, Public}; + use key_server_cluster::message::Message; + use super::{MESSAGE_HEADER_SIZE, MessageHeader, serialize_message, serialize_header, deserialize_header}; + + pub struct TestIo { + self_key_pair: KeyPair, + peer_public: Public, + input_buffer: io::Cursor>, + output_buffer: Vec, + expected_output_buffer: Vec, + } + + impl TestIo { + pub fn new(self_key_pair: KeyPair, peer_public: Public) -> Self { + TestIo { + self_key_pair: self_key_pair, + peer_public: peer_public, + input_buffer: io::Cursor::new(Vec::new()), + output_buffer: Vec::new(), + expected_output_buffer: Vec::new(), + } + } + + pub fn self_key_pair(&self) -> &KeyPair { + &self.self_key_pair + } + + pub fn peer_public(&self) -> &Public { + &self.peer_public + } + + pub fn add_input_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + let input_buffer = self.input_buffer.get_mut(); + for b in serialized_message { + input_buffer.push(b); + } + } + + pub fn add_output_message(&mut self, message: Message) { + let serialized_message = serialize_message(message).unwrap(); + let serialized_message: Vec<_> = serialized_message.into(); + self.expected_output_buffer.extend(serialized_message); + } + + pub fn assert_output(&self) { + assert_eq!(self.output_buffer, self.expected_output_buffer); + } + } + + impl io::Read for TestIo { + fn read(&mut 
self, buf: &mut [u8]) -> io::Result { + io::Read::read(&mut self.input_buffer, buf) + } + } + + impl io::Write for TestIo { + fn write(&mut self, buf: &[u8]) -> io::Result { + io::Write::write(&mut self.output_buffer, buf) + } + + fn flush(&mut self) -> io::Result<()> { + io::Write::flush(&mut self.output_buffer) + } + } + + #[test] + fn header_serialization_works() { + let header = MessageHeader { + kind: 1, + version: 2, + size: 3, + }; + + let serialized_header = serialize_header(&header).unwrap(); + assert_eq!(serialized_header.len(), MESSAGE_HEADER_SIZE); + + let deserialized_header = deserialize_header(&serialized_header).unwrap(); + assert_eq!(deserialized_header, header); + } +} diff --git a/secret_store/src/key_server_cluster/io/mod.rs b/secret_store/src/key_server_cluster/io/mod.rs new file mode 100644 index 000000000..57071038e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/mod.rs @@ -0,0 +1,34 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +mod deadline; +mod handshake; +mod message; +mod read_header; +mod read_payload; +mod read_message; +mod shared_tcp_stream; +mod write_message; + +pub use self::deadline::{deadline, Deadline, DeadlineStatus}; +pub use self::handshake::{handshake, accept_handshake, Handshake, HandshakeResult}; +pub use self::message::{MessageHeader, SerializedMessage, serialize_message, deserialize_message, + encrypt_message, compute_shared_key}; +pub use self::read_header::{read_header, ReadHeader}; +pub use self::read_payload::{read_payload, read_encrypted_payload, ReadPayload}; +pub use self::read_message::{read_message, read_encrypted_message, ReadMessage}; +pub use self::shared_tcp_stream::SharedTcpStream; +pub use self::write_message::{write_message, write_encrypted_message, WriteMessage}; diff --git a/secret_store/src/key_server_cluster/io/read_header.rs b/secret_store/src/key_server_cluster/io/read_header.rs new file mode 100644 index 000000000..ab7ce360e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_header.rs @@ -0,0 +1,44 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::io; +use futures::{Future, Poll, Async}; +use tokio_core::io::{ReadExact, read_exact}; +use key_server_cluster::Error; +use key_server_cluster::io::message::{MESSAGE_HEADER_SIZE, MessageHeader, deserialize_header}; + +/// Create future for read single message header from the stream. +pub fn read_header(a: A) -> ReadHeader where A: io::Read { + ReadHeader { + reader: read_exact(a, [0; MESSAGE_HEADER_SIZE]), + } +} + +/// Future for read single message header from the stream. +pub struct ReadHeader { + reader: ReadExact, +} + +impl Future for ReadHeader where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let header = deserialize_header(&data); + Ok(Async::Ready((read, header))) + } +} diff --git a/secret_store/src/key_server_cluster/io/read_message.rs b/secret_store/src/key_server_cluster/io/read_message.rs new file mode 100644 index 000000000..418e5e31d --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_message.rs @@ -0,0 +1,86 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +use std::io; +use futures::{Poll, Future, Async}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::{read_header, ReadHeader, read_payload, read_encrypted_payload, ReadPayload}; + +/// Create future for read single message from the stream. +pub fn read_message(a: A) -> ReadMessage where A: io::Read { + ReadMessage { + key: None, + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +/// Create future for read single encrypted message from the stream. +pub fn read_encrypted_message(a: A, key: Secret) -> ReadMessage where A: io::Read { + ReadMessage { + key: Some(key), + state: ReadMessageState::ReadHeader(read_header(a)), + } +} + +enum ReadMessageState { + ReadHeader(ReadHeader), + ReadPayload(ReadPayload), + Finished, +} + +/// Future for read single message from the stream. +pub struct ReadMessage { + key: Option, + state: ReadMessageState, +} + +impl Future for ReadMessage where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ReadMessageState::ReadHeader(ref mut future) => { + let (read, header) = try_ready!(future.poll()); + let header = match header { + Ok(header) => header, + Err(err) => return Ok((read, Err(err)).into()), + }; + + let future = match self.key.take() { + Some(key) => read_encrypted_payload(read, header, key), + None => read_payload(read, header), + }; + let next = ReadMessageState::ReadPayload(future); + (next, Async::NotReady) + }, + ReadMessageState::ReadPayload(ref mut future) => { + let (read, payload) = try_ready!(future.poll()); + (ReadMessageState::Finished, Async::Ready((read, payload))) + }, + ReadMessageState::Finished => panic!("poll ReadMessage after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git 
a/secret_store/src/key_server_cluster/io/read_payload.rs b/secret_store/src/key_server_cluster/io/read_payload.rs new file mode 100644 index 000000000..f6df3155e --- /dev/null +++ b/secret_store/src/key_server_cluster/io/read_payload.rs @@ -0,0 +1,64 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Poll, Future}; +use tokio_core::io::{read_exact, ReadExact}; +use ethkey::Secret; +use key_server_cluster::Error; +use key_server_cluster::message::Message; +use key_server_cluster::io::message::{MessageHeader, deserialize_message, decrypt_message}; + +/// Create future for read single message payload from the stream. +pub fn read_payload(a: A, header: MessageHeader) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: None, + } +} + +/// Create future for read single encrypted message payload from the stream. +pub fn read_encrypted_payload(a: A, header: MessageHeader, key: Secret) -> ReadPayload where A: io::Read { + ReadPayload { + reader: read_exact(a, vec![0; header.size as usize]), + header: header, + key: Some(key), + } +} + +/// Future for read single message payload from the stream. 
+pub struct ReadPayload { + reader: ReadExact>, + header: MessageHeader, + key: Option, +} + +impl Future for ReadPayload where A: io::Read { + type Item = (A, Result); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (read, data) = try_ready!(self.reader.poll()); + let payload = if let Some(key) = self.key.take() { + decrypt_message(&key, data) + .and_then(|data| deserialize_message(&self.header, data)) + } else { + deserialize_message(&self.header, data) + }; + Ok((read, payload).into()) + } +} diff --git a/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs new file mode 100644 index 000000000..82933c8a2 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/shared_tcp_stream.rs @@ -0,0 +1,60 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::sync::Arc; +use std::io::{Read, Write, Error}; +use tokio_core::net::TcpStream; + +/// Read+Write implementation for Arc. 
+pub struct SharedTcpStream { + io: Arc, +} + +impl SharedTcpStream { + pub fn new(a: Arc) -> Self { + SharedTcpStream { + io: a, + } + } +} + +impl From for SharedTcpStream { + fn from(a: TcpStream) -> Self { + SharedTcpStream::new(Arc::new(a)) + } +} + +impl Read for SharedTcpStream { + fn read(&mut self, buf: &mut [u8]) -> Result { + Read::read(&mut (&*self.io as &TcpStream), buf) + } +} + +impl Write for SharedTcpStream { + fn write(&mut self, buf: &[u8]) -> Result { + Write::write(&mut (&*self.io as &TcpStream), buf) + } + + fn flush(&mut self) -> Result<(), Error> { + Write::flush(&mut (&*self.io as &TcpStream)) + } +} + +impl Clone for SharedTcpStream { + fn clone(&self) -> Self { + SharedTcpStream::new(self.io.clone()) + } +} diff --git a/secret_store/src/key_server_cluster/io/write_message.rs b/secret_store/src/key_server_cluster/io/write_message.rs new file mode 100644 index 000000000..457673676 --- /dev/null +++ b/secret_store/src/key_server_cluster/io/write_message.rs @@ -0,0 +1,70 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use futures::{Future, Poll}; +use tokio_core::io::{WriteAll, write_all}; +use ethkey::Secret; +use key_server_cluster::message::Message; +use key_server_cluster::io::{serialize_message, encrypt_message}; + +/// Write plain message to the channel. 
+pub fn write_message(a: A, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + WriteMessage { + error: error, + future: future, + } +} + +/// Write encrypted message to the channel. +pub fn write_encrypted_message(a: A, key: &Secret, message: Message) -> WriteMessage where A: io::Write { + let (error, future) = match serialize_message(message) + .and_then(|message| encrypt_message(key, message)) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string())) { + Ok(message) => (None, write_all(a, message.into())), + Err(error) => (Some(error), write_all(a, Vec::new())), + }; + + + WriteMessage { + error: error, + future: future, + } +} + +/// Future message write. +pub struct WriteMessage { + error: Option, + future: WriteAll>, +} + +impl Future for WriteMessage where A: io::Write { + type Item = (A, Vec); + type Error = io::Error; + + fn poll(&mut self) -> Poll { + if let Some(err) = self.error.take() { + return Err(err); + } + + self.future.poll() + } +} diff --git a/secret_store/src/key_server_cluster/math.rs b/secret_store/src/key_server_cluster/math.rs index 4da17ebc7..fdda08746 100644 --- a/secret_store/src/key_server_cluster/math.rs +++ b/secret_store/src/key_server_cluster/math.rs @@ -160,7 +160,7 @@ pub fn compute_joint_secret<'a, I>(mut secret_coeffs: I) -> Result Result { +pub fn encrypt_secret(secret: &Public, joint_public: &Public) -> Result { // this is performed by KS-cluster client (or KS master) let key_pair = Random.generate()?; @@ -171,7 +171,7 @@ pub fn encrypt_secret(secret: Public, joint_public: &Public) -> Result Result(node_number: &Secret, node_secret_share: &Secret, mut other_nodes_numbers: I) -> Result where I: Iterator { - let other_node_number = 
other_nodes_numbers.next().expect("compute_node_shadow is called when at least two nodes are required to decrypt secret; qed"); + let other_node_number = match other_nodes_numbers.next() { + Some(other_node_number) => other_node_number, + None => return Ok(node_secret_share.clone()), + }; + let mut shadow = node_number.clone(); shadow.sub(other_node_number)?; shadow.inv()?; @@ -231,17 +235,24 @@ pub fn compute_joint_shadow_point_test<'a, I>(access_key: &Secret, common_point: } /// Decrypt data using joint shadow point. -pub fn decrypt_with_joint_shadow(access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { +pub fn decrypt_with_joint_shadow(threshold: usize, access_key: &Secret, encrypted_point: &Public, joint_shadow_point: &Public) -> Result { let mut inv_access_key = access_key.clone(); inv_access_key.inv()?; - - let mut decrypted_point = joint_shadow_point.clone(); - math::public_mul_secret(&mut decrypted_point, &inv_access_key)?; - math::public_add(&mut decrypted_point, encrypted_point)?; + + let mut mul = joint_shadow_point.clone(); + math::public_mul_secret(&mut mul, &inv_access_key)?; + + let mut decrypted_point = encrypted_point.clone(); + if threshold % 2 != 0 { + math::public_add(&mut decrypted_point, &mul)?; + } else { + math::public_sub(&mut decrypted_point, &mul)?; + } Ok(decrypted_point) } +#[cfg(test)] /// Decrypt data using joint secret (version for tests). 
pub fn decrypt_with_joint_secret(encrypted_point: &Public, common_point: &Public, joint_secret: &Secret) -> Result { let mut common_point_mul = common_point.clone(); @@ -262,7 +273,7 @@ pub mod tests { // === PART2: encryption using joint public key === // the next line is executed on KeyServer-client - let encrypted_secret = encrypt_secret(document_secret_plain.clone(), &joint_public).unwrap(); + let encrypted_secret = encrypt_secret(&document_secret_plain, &joint_public).unwrap(); // === PART3: decryption === @@ -285,7 +296,7 @@ pub mod tests { assert_eq!(joint_shadow_point, joint_shadow_point_test); // decrypt encrypted secret using joint shadow point - let document_secret_decrypted = decrypt_with_joint_shadow(&access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); + let document_secret_decrypted = decrypt_with_joint_shadow(t, &access_key, &encrypted_secret.encrypted_point, &joint_shadow_point).unwrap(); // decrypt encrypted secret using joint secret [just for test] let document_secret_decrypted_test = match joint_secret { @@ -298,7 +309,8 @@ pub mod tests { #[test] fn full_encryption_math_session() { - let test_cases = [(1, 3)]; + let test_cases = [(0, 2), (1, 2), (1, 3), (2, 3), (1, 4), (2, 4), (3, 4), (1, 5), (2, 5), (3, 5), (4, 5), + (1, 10), (2, 10), (3, 10), (4, 10), (5, 10), (6, 10), (7, 10), (8, 10), (9, 10)]; for &(t, n) in &test_cases { // === PART1: DKG === diff --git a/secret_store/src/key_server_cluster/message.rs b/secret_store/src/key_server_cluster/message.rs index 800dcf705..9958884a4 100644 --- a/secret_store/src/key_server_cluster/message.rs +++ b/secret_store/src/key_server_cluster/message.rs @@ -14,13 +14,42 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
+use std::fmt; use std::collections::{BTreeSet, BTreeMap}; -use ethkey::{Public, Secret, Signature}; -use key_server_cluster::{NodeId, SessionId}; +use ethkey::Secret; +use key_server_cluster::SessionId; +use super::{SerializableH256, SerializablePublic, SerializableSecret, SerializableSignature}; + +pub type MessageSessionId = SerializableH256; +pub type MessageNodeId = SerializablePublic; #[derive(Clone, Debug)] -/// All possible messages that can be sent during DKG. +/// All possible messages that can be sent during encryption/decryption sessions. pub enum Message { + /// Cluster message. + Cluster(ClusterMessage), + /// Encryption message. + Encryption(EncryptionMessage), + /// Decryption message. + Decryption(DecryptionMessage), +} + +#[derive(Clone, Debug)] +/// All possible cluster-level messages. +pub enum ClusterMessage { + /// Introduce node public key. + NodePublicKey(NodePublicKey), + /// Confirm that node owns its private key. + NodePrivateKeySignature(NodePrivateKeySignature), + /// Keep alive message. + KeepAlive(KeepAlive), + /// Keep alive message response. + KeepAliveResponse(KeepAliveResponse), +} + +#[derive(Clone, Debug)] +/// All possible messages that can be sent during encryption session. +pub enum EncryptionMessage { /// Initialize new DKG session. InitializeSession(InitializeSession), /// Confirm DKG session initialization. @@ -35,7 +64,15 @@ pub enum Message { ComplaintResponse(ComplaintResponse), /// Broadcast self public key portion. PublicKeyShare(PublicKeyShare), + /// When session error has occured. + SessionError(SessionError), + /// When session is completed. + SessionCompleted(SessionCompleted), +} +#[derive(Clone, Debug)] +/// All possible messages that can be sent during decryption session. +pub enum DecryptionMessage { /// Initialize decryption session. InitializeDecryptionSession(InitializeDecryptionSession), /// Confirm/reject decryption session initialization. 
@@ -44,125 +81,272 @@ pub enum Message { RequestPartialDecryption(RequestPartialDecryption), /// Partial decryption is completed PartialDecryption(PartialDecryption), + /// When decryption session error has occured. + DecryptionSessionError(DecryptionSessionError), } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Introduce node public key. +pub struct NodePublicKey { + /// Node identifier (aka node public key). + pub node_id: MessageNodeId, + /// Data, which must be signed by peer to prove that he owns the corresponding private key. + pub confirmation_plain: SerializableH256, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that node owns the private key of previously passed public key (aka node id). +pub struct NodePrivateKeySignature { + /// Previously passed `confirmation_plain`, signed with node private key. + pub confirmation_signed: SerializableSignature, +} + + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Ask if the node is still alive. +pub struct KeepAlive { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// Confirm that the node is still alive. +pub struct KeepAliveResponse { +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Initialize new DKG session. pub struct InitializeSession { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. Starting from originator, every node must multiply this /// point by random scalar (unknown by other nodes). At the end of initialization /// `point` will be some (k1 * k2 * ... * kn) * G = `point` where `(k1 * k2 * ... * kn)` /// is unknown for every node. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Confirm DKG session initialization. pub struct ConfirmInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Derived generation point. 
- pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Broadcast generated point to every other node. pub struct CompleteInitialization { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// All session participants along with their identification numbers. - pub nodes: BTreeMap, + pub nodes: BTreeMap, /// Decryption threshold. During decryption threshold-of-route.len() nodes must came to /// consensus to successfully decrypt message. pub threshold: usize, /// Derived generation point. - pub derived_point: Public, + pub derived_point: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Generated keys are sent to every node. pub struct KeysDissemination { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, /// Public values. - pub publics: Vec, + pub publics: Vec, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Complaint against node is broadcasted. pub struct Complaint { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Public values. - pub against: NodeId, + pub against: MessageNodeId, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to complaint. pub struct ComplaintResponse { /// Session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Secret 1. - pub secret1: Secret, + pub secret1: SerializableSecret, /// Secret 2. - pub secret2: Secret, + pub secret2: SerializableSecret, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is sharing its public key share. pub struct PublicKeyShare { /// Session Id. 
- pub session: SessionId, + pub session: MessageSessionId, /// Public key share. - pub public_share: Public, + pub public_share: SerializablePublic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session error has occured. +pub struct SessionError { + /// Session Id. + pub session: MessageSessionId, + /// Public key share. + pub error: String, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When session is completed. +pub struct SessionCompleted { + /// Session Id. + pub session: MessageSessionId, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to decrypt data, encrypted in given session. pub struct InitializeDecryptionSession { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Requestor signature. - pub requestor_signature: Signature, + pub requestor_signature: SerializableSignature, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is responding to decryption request. pub struct ConfirmDecryptionInitialization { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Is node confirmed to make a decryption?. pub is_confirmed: bool, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node is requested to do a partial decryption. pub struct RequestPartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Nodes that are agreed to do a decryption. 
- pub nodes: BTreeSet, + pub nodes: BTreeSet, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Serialize, Deserialize)] /// Node has partially decrypted the secret. pub struct PartialDecryption { /// Encryption session Id. - pub session: SessionId, + pub session: MessageSessionId, /// Decryption session Id. - pub sub_session: Secret, + pub sub_session: SerializableSecret, /// Partially decrypted secret. - pub shadow_point: Public, + pub shadow_point: SerializablePublic, +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +/// When decryption session error has occured. +pub struct DecryptionSessionError { + /// Encryption session Id. + pub session: MessageSessionId, + /// Decryption session Id. + pub sub_session: SerializableSecret, + /// Public key share. + pub error: String, +} + +impl EncryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + EncryptionMessage::InitializeSession(ref msg) => &msg.session, + EncryptionMessage::ConfirmInitialization(ref msg) => &msg.session, + EncryptionMessage::CompleteInitialization(ref msg) => &msg.session, + EncryptionMessage::KeysDissemination(ref msg) => &msg.session, + EncryptionMessage::Complaint(ref msg) => &msg.session, + EncryptionMessage::ComplaintResponse(ref msg) => &msg.session, + EncryptionMessage::PublicKeyShare(ref msg) => &msg.session, + EncryptionMessage::SessionError(ref msg) => &msg.session, + EncryptionMessage::SessionCompleted(ref msg) => &msg.session, + } + } +} + +impl DecryptionMessage { + pub fn session_id(&self) -> &SessionId { + match *self { + DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.session, + } + } + + pub fn sub_session_id(&self) -> &Secret { + match *self { + 
DecryptionMessage::InitializeDecryptionSession(ref msg) => &msg.sub_session, + DecryptionMessage::ConfirmDecryptionInitialization(ref msg) => &msg.sub_session, + DecryptionMessage::RequestPartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::PartialDecryption(ref msg) => &msg.sub_session, + DecryptionMessage::DecryptionSessionError(ref msg) => &msg.sub_session, + } + } +} + +impl fmt::Display for Message { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Message::Cluster(ref message) => write!(f, "Cluster.{}", message), + Message::Encryption(ref message) => write!(f, "Encryption.{}", message), + Message::Decryption(ref message) => write!(f, "Decryption.{}", message), + } + } +} + +impl fmt::Display for ClusterMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + ClusterMessage::NodePublicKey(_) => write!(f, "NodePublicKey"), + ClusterMessage::NodePrivateKeySignature(_) => write!(f, "NodePrivateKeySignature"), + ClusterMessage::KeepAlive(_) => write!(f, "KeepAlive"), + ClusterMessage::KeepAliveResponse(_) => write!(f, "KeepAliveResponse"), + } + } +} + +impl fmt::Display for EncryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + EncryptionMessage::InitializeSession(_) => write!(f, "InitializeSession"), + EncryptionMessage::ConfirmInitialization(_) => write!(f, "ConfirmInitialization"), + EncryptionMessage::CompleteInitialization(_) => write!(f, "CompleteInitialization"), + EncryptionMessage::KeysDissemination(_) => write!(f, "KeysDissemination"), + EncryptionMessage::Complaint(_) => write!(f, "Complaint"), + EncryptionMessage::ComplaintResponse(_) => write!(f, "ComplaintResponse"), + EncryptionMessage::PublicKeyShare(_) => write!(f, "PublicKeyShare"), + EncryptionMessage::SessionError(ref msg) => write!(f, "SessionError({})", msg.error), + EncryptionMessage::SessionCompleted(_) => write!(f, "SessionCompleted"), + } + } +} + +impl fmt::Display for 
DecryptionMessage { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + DecryptionMessage::InitializeDecryptionSession(_) => write!(f, "InitializeDecryptionSession"), + DecryptionMessage::ConfirmDecryptionInitialization(_) => write!(f, "ConfirmDecryptionInitialization"), + DecryptionMessage::RequestPartialDecryption(_) => write!(f, "RequestPartialDecryption"), + DecryptionMessage::PartialDecryption(_) => write!(f, "PartialDecryption"), + DecryptionMessage::DecryptionSessionError(_) => write!(f, "DecryptionSessionError"), + } + } } diff --git a/secret_store/src/key_server_cluster/mod.rs b/secret_store/src/key_server_cluster/mod.rs index 5d0dacd11..e889ef322 100644 --- a/secret_store/src/key_server_cluster/mod.rs +++ b/secret_store/src/key_server_cluster/mod.rs @@ -14,21 +14,38 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . -#![allow(dead_code)] // TODO: remove me - -use std::collections::BTreeMap; -use ethkey::{self, Public, Secret, Signature}; +use std::fmt; +use std::io::Error as IoError; +use ethkey; +use ethcrypto; use super::types::all::DocumentAddress; +pub use super::types::all::{NodeId, EncryptionConfiguration}; pub use super::acl_storage::AclStorage; +pub use super::key_storage::{KeyStorage, DocumentKeyShare}; +pub use super::serialization::{SerializableSignature, SerializableH256, SerializableSecret, SerializablePublic}; +pub use self::cluster::{ClusterCore, ClusterConfiguration, ClusterClient}; +pub use self::encryption_session::Session as EncryptionSession; +pub use self::decryption_session::Session as DecryptionSession; + +#[cfg(test)] +pub use super::key_storage::tests::DummyKeyStorage; +#[cfg(test)] +pub use super::acl_storage::tests::DummyAclStorage; -pub type NodeId = Public; pub type SessionId = DocumentAddress; -pub type SessionIdSignature = Signature; -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] /// Errors which can occur during 
encryption/decryption session pub enum Error { + /// Invalid node address has been passed. + InvalidNodeAddress, + /// Invalid node id has been passed. + InvalidNodeId, + /// Session with the given id already exists. + DuplicateSessionId, + /// Session with the given id is unknown. + InvalidSessionId, /// Invalid number of nodes. /// There must be at least two nodes participating in encryption. /// There must be at least one node participating in decryption. @@ -39,28 +56,26 @@ pub enum Error { /// Threshold value must be in [0; n - 1], where n is a number of nodes participating in the encryption. InvalidThreshold, /// Current state of encryption/decryption session does not allow to proceed request. + /// Reschedule this request for later processing. + TooEarlyForRequest, + /// Current state of encryption/decryption session does not allow to proceed request. /// This means that either there is some comm-failure or node is misbehaving/cheating. InvalidStateForRequest, - /// Some data in passed message was recognized as invalid. + /// Message or some data in the message was recognized as invalid. /// This means that node is misbehaving/cheating. InvalidMessage, + /// Connection to node, required for this session is not established. + NodeDisconnected, /// Cryptographic error. EthKey(String), -} - -#[derive(Debug, Clone)] -/// Data, which is stored on every node after DKG && encryption is completed. -pub struct EncryptedData { - /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). - threshold: usize, - /// Nodes ids numbers. - id_numbers: BTreeMap, - /// Node secret share. - secret_share: Secret, - /// Common (shared) encryption point. - common_point: Public, - /// Encrypted point. - encrypted_point: Public, + /// I/O error has occured. + Io(String), + /// Deserialization error has occured. + Serde(String), + /// Key storage error. + KeyStorage(String), + /// Acl storage error. 
+ AccessDenied, } impl From for Error { @@ -69,8 +84,51 @@ impl From for Error { } } +impl From for Error { + fn from(err: ethcrypto::Error) -> Self { + Error::EthKey(err.into()) + } +} + +impl From for Error { + fn from(err: IoError) -> Self { + Error::Io(err.to_string()) + } +} + +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::InvalidNodeAddress => write!(f, "invalid node address has been passed"), + Error::InvalidNodeId => write!(f, "invalid node id has been passed"), + Error::DuplicateSessionId => write!(f, "session with the same id is already registered"), + Error::InvalidSessionId => write!(f, "invalid session id has been passed"), + Error::InvalidNodesCount => write!(f, "invalid nodes count"), + Error::InvalidNodesConfiguration => write!(f, "invalid nodes configuration"), + Error::InvalidThreshold => write!(f, "invalid threshold value has been passed"), + Error::TooEarlyForRequest => write!(f, "session is not yet ready to process this request"), + Error::InvalidStateForRequest => write!(f, "session is in invalid state for processing this request"), + Error::InvalidMessage => write!(f, "invalid message is received"), + Error::NodeDisconnected => write!(f, "node required for this operation is currently disconnected"), + Error::EthKey(ref e) => write!(f, "cryptographic error {}", e), + Error::Io(ref e) => write!(f, "i/o error {}", e), + Error::Serde(ref e) => write!(f, "serde error {}", e), + Error::KeyStorage(ref e) => write!(f, "key storage error {}", e), + Error::AccessDenied => write!(f, "Access denied"), + } + } +} + +impl Into for Error { + fn into(self) -> String { + format!("{}", self) + } +} + mod cluster; mod decryption_session; mod encryption_session; +mod io; mod math; mod message; +mod net; diff --git a/secret_store/src/key_server_cluster/net/accept_connection.rs b/secret_store/src/key_server_cluster/net/accept_connection.rs new file mode 100644 index 000000000..0daa8b2da --- 
/dev/null +++ b/secret_store/src/key_server_cluster/net/accept_connection.rs @@ -0,0 +1,63 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::io; +use std::net::SocketAddr; +use std::time::Duration; +use std::collections::BTreeSet; +use futures::{Future, Poll}; +use tokio_core::reactor::Handle; +use tokio_core::net::TcpStream; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{accept_handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for accepting incoming connection. +pub fn accept_connection(address: SocketAddr, stream: TcpStream, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let accept = AcceptConnection { + handshake: accept_handshake(stream, self_key_pair, trusted_nodes), + address: address, + }; + + deadline(Duration::new(5, 0), handle, accept).expect("Failed to create timeout") +} + +/// Future for accepting incoming connection. 
+pub struct AcceptConnection { + handshake: Handshake, + address: SocketAddr, +} + +impl Future for AcceptConnection { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (stream, result) = try_ready!(self.handshake.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Err(err).into()), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + Ok(Ok(connection).into()) + } +} diff --git a/secret_store/src/key_server_cluster/net/connect.rs b/secret_store/src/key_server_cluster/net/connect.rs new file mode 100644 index 000000000..449168ab2 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connect.rs @@ -0,0 +1,90 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::collections::BTreeSet; +use std::io; +use std::time::Duration; +use std::net::SocketAddr; +use futures::{Future, Poll, Async}; +use tokio_core::reactor::Handle; +use tokio_core::net::{TcpStream, TcpStreamNew}; +use ethkey::KeyPair; +use key_server_cluster::{Error, NodeId}; +use key_server_cluster::io::{handshake, Handshake, Deadline, deadline}; +use key_server_cluster::net::Connection; + +/// Create future for connecting to other node. 
+pub fn connect(address: &SocketAddr, handle: &Handle, self_key_pair: KeyPair, trusted_nodes: BTreeSet) -> Deadline { + let connect = Connect { + state: ConnectState::TcpConnect(TcpStream::connect(address, handle)), + address: address.clone(), + self_key_pair: self_key_pair, + trusted_nodes: trusted_nodes, + }; + + deadline(Duration::new(5, 0), handle, connect).expect("Failed to create timeout") +} + +enum ConnectState { + TcpConnect(TcpStreamNew), + Handshake(Handshake), + Connected, +} + +/// Future for connecting to other node. +pub struct Connect { + state: ConnectState, + address: SocketAddr, + self_key_pair: KeyPair, + trusted_nodes: BTreeSet, +} + +impl Future for Connect { + type Item = Result; + type Error = io::Error; + + fn poll(&mut self) -> Poll { + let (next, result) = match self.state { + ConnectState::TcpConnect(ref mut future) => { + let stream = try_ready!(future.poll()); + let handshake = handshake(stream, self.self_key_pair.clone(), self.trusted_nodes.clone()); + (ConnectState::Handshake(handshake), Async::NotReady) + }, + ConnectState::Handshake(ref mut future) => { + let (stream, result) = try_ready!(future.poll()); + let result = match result { + Ok(result) => result, + Err(err) => return Ok(Async::Ready(Err(err))), + }; + let connection = Connection { + stream: stream.into(), + address: self.address, + node_id: result.node_id, + key: result.shared_key, + }; + (ConnectState::Connected, Async::Ready(Ok(connection))) + }, + ConnectState::Connected => panic!("poll Connect after it's done"), + }; + + self.state = next; + match result { + // by polling again, we register new future + Async::NotReady => self.poll(), + result => Ok(result) + } + } +} diff --git a/secret_store/src/key_server_cluster/net/connection.rs b/secret_store/src/key_server_cluster/net/connection.rs new file mode 100644 index 000000000..8125b81d3 --- /dev/null +++ b/secret_store/src/key_server_cluster/net/connection.rs @@ -0,0 +1,32 @@ +// Copyright 2015-2017 Parity 
Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::net; +use ethkey::Secret; +use key_server_cluster::NodeId; +use key_server_cluster::io::SharedTcpStream; + +/// Established connection data +pub struct Connection { + /// Peer address. + pub address: net::SocketAddr, + /// Connection stream. + pub stream: SharedTcpStream, + /// Peer node id. + pub node_id: NodeId, + /// Encryption key. + pub key: Secret, +} diff --git a/secret_store/src/key_server_cluster/net/mod.rs b/secret_store/src/key_server_cluster/net/mod.rs new file mode 100644 index 000000000..6abf83ceb --- /dev/null +++ b/secret_store/src/key_server_cluster/net/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . 
+ +mod accept_connection; +mod connect; +mod connection; + +pub use self::accept_connection::{AcceptConnection, accept_connection}; +pub use self::connect::{Connect, connect}; +pub use self::connection::Connection; diff --git a/secret_store/src/key_storage.rs b/secret_store/src/key_storage.rs index fe7777410..e3106f221 100644 --- a/secret_store/src/key_storage.rs +++ b/secret_store/src/key_storage.rs @@ -15,15 +15,34 @@ // along with Parity. If not, see . use std::path::PathBuf; +use std::collections::BTreeMap; +use serde_json; +use ethkey::{Secret, Public}; use util::Database; -use types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; +use types::all::{Error, ServiceConfiguration, DocumentAddress, NodeId}; +use serialization::{SerializablePublic, SerializableSecret}; + +#[derive(Debug, Clone, PartialEq)] +/// Encrypted key share, stored by key storage on the single key server. +pub struct DocumentKeyShare { + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: Secret, + /// Common (shared) encryption point. + pub common_point: Public, + /// Encrypted point. + pub encrypted_point: Public, +} /// Document encryption keys storage pub trait KeyStorage: Send + Sync { /// Insert document encryption key - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error>; + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error>; /// Get document encryption key - fn get(&self, document: &DocumentAddress) -> Result; + fn get(&self, document: &DocumentAddress) -> Result; } /// Persistent document encryption keys storage @@ -31,6 +50,21 @@ pub struct PersistentKeyStorage { db: Database, } +#[derive(Serialize, Deserialize)] +/// Encrypted key share, as it is stored by key storage on the single key server. 
+struct SerializableDocumentKeyShare { + /// Decryption threshold (at least threshold + 1 nodes are required to decrypt data). + pub threshold: usize, + /// Nodes ids numbers. + pub id_numbers: BTreeMap, + /// Node secret share. + pub secret_share: SerializableSecret, + /// Common (shared) encryption point. + pub common_point: SerializablePublic, + /// Encrypted point. + pub encrypted_point: SerializablePublic, +} + impl PersistentKeyStorage { /// Create new persistent document encryption keys storage pub fn new(config: &ServiceConfiguration) -> Result { @@ -45,41 +79,71 @@ impl PersistentKeyStorage { } impl KeyStorage for PersistentKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { + let key: SerializableDocumentKeyShare = key.into(); + let key = serde_json::to_vec(&key).map_err(|e| Error::Database(e.to_string()))?; let mut batch = self.db.transaction(); batch.put(None, &document, &key); self.db.write(batch).map_err(Error::Database) } - fn get(&self, document: &DocumentAddress) -> Result { + fn get(&self, document: &DocumentAddress) -> Result { self.db.get(None, document) .map_err(Error::Database)? 
.ok_or(Error::DocumentNotFound) .map(|key| key.to_vec()) + .and_then(|key| serde_json::from_slice::(&key).map_err(|e| Error::Database(e.to_string()))) + .map(Into::into) + } +} + +impl From for SerializableDocumentKeyShare { + fn from(key: DocumentKeyShare) -> Self { + SerializableDocumentKeyShare { + threshold: key.threshold, + id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + secret_share: key.secret_share.into(), + common_point: key.common_point.into(), + encrypted_point: key.encrypted_point.into(), + } + } +} + +impl From for DocumentKeyShare { + fn from(key: SerializableDocumentKeyShare) -> Self { + DocumentKeyShare { + threshold: key.threshold, + id_numbers: key.id_numbers.into_iter().map(|(k, v)| (k.into(), v.into())).collect(), + secret_share: key.secret_share.into(), + common_point: key.common_point.into(), + encrypted_point: key.encrypted_point.into(), + } } } #[cfg(test)] pub mod tests { - use std::collections::HashMap; + use std::collections::{BTreeMap, HashMap}; use parking_lot::RwLock; use devtools::RandomTempPath; - use super::super::types::all::{Error, ServiceConfiguration, DocumentAddress, DocumentKey}; - use super::{KeyStorage, PersistentKeyStorage}; + use ethkey::{Random, Generator}; + use super::super::types::all::{Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, + DocumentAddress, EncryptionConfiguration}; + use super::{KeyStorage, PersistentKeyStorage, DocumentKeyShare}; #[derive(Default)] /// In-memory document encryption keys storage pub struct DummyKeyStorage { - keys: RwLock>, + keys: RwLock>, } impl KeyStorage for DummyKeyStorage { - fn insert(&self, document: DocumentAddress, key: DocumentKey) -> Result<(), Error> { + fn insert(&self, document: DocumentAddress, key: DocumentKeyShare) -> Result<(), Error> { self.keys.write().insert(document, key); Ok(()) } - fn get(&self, document: &DocumentAddress) -> Result { + fn get(&self, document: &DocumentAddress) -> Result { 
self.keys.read().get(document).cloned().ok_or(Error::DocumentNotFound) } } @@ -88,15 +152,46 @@ pub mod tests { fn persistent_key_storage() { let path = RandomTempPath::create_dir(); let config = ServiceConfiguration { - listener_addr: "0.0.0.0".to_owned(), - listener_port: 8082, + listener_address: NodeAddress { + address: "0.0.0.0".to_owned(), + port: 8082, + }, data_path: path.as_str().to_owned(), + cluster_config: ClusterConfiguration { + threads: 1, + self_private: (**Random.generate().unwrap().secret().clone()).into(), + listener_address: NodeAddress { + address: "0.0.0.0".to_owned(), + port: 8083, + }, + nodes: BTreeMap::new(), + allow_connecting_to_higher_nodes: false, + encryption_config: EncryptionConfiguration { + key_check_timeout_ms: 10, + }, + }, }; let key1 = DocumentAddress::from(1); - let value1: DocumentKey = vec![0x77, 0x88]; + let value1 = DocumentKeyShare { + threshold: 100, + id_numbers: vec![ + (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) + ].into_iter().collect(), + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Random.generate().unwrap().public().clone(), + encrypted_point: Random.generate().unwrap().public().clone(), + }; let key2 = DocumentAddress::from(2); - let value2: DocumentKey = vec![0x11, 0x22]; + let value2 = DocumentKeyShare { + threshold: 200, + id_numbers: vec![ + (Random.generate().unwrap().public().clone(), Random.generate().unwrap().secret().clone()) + ].into_iter().collect(), + secret_share: Random.generate().unwrap().secret().clone(), + common_point: Random.generate().unwrap().public().clone(), + encrypted_point: Random.generate().unwrap().public().clone(), + }; let key3 = DocumentAddress::from(3); let key_storage = PersistentKeyStorage::new(&config).unwrap(); diff --git a/secret_store/src/lib.rs b/secret_store/src/lib.rs index 41d658963..7de957991 100644 --- a/secret_store/src/lib.rs +++ b/secret_store/src/lib.rs @@ -14,17 +14,32 @@ // You should 
have received a copy of the GNU General Public License // along with Parity. If not, see . +extern crate byteorder; #[macro_use] extern crate log; +#[macro_use] +extern crate futures; +extern crate futures_cpupool; extern crate hyper; extern crate parking_lot; +extern crate rustc_serialize; +extern crate serde; +extern crate serde_json; +#[macro_use] +extern crate serde_derive; +extern crate tokio_core; +extern crate tokio_service; +extern crate tokio_proto; extern crate url; +extern crate ethabi; +extern crate ethcore; extern crate ethcore_devtools as devtools; extern crate ethcore_util as util; extern crate ethcore_ipc as ipc; extern crate ethcrypto; extern crate ethkey; +extern crate native_contracts; mod key_server_cluster; mod types; @@ -38,16 +53,22 @@ mod acl_storage; mod http_listener; mod key_server; mod key_storage; +mod serialization; + +use std::sync::Arc; +use ethcore::client::Client; pub use types::all::{DocumentAddress, DocumentKey, DocumentEncryptedKey, RequestSignature, Public, - Error, ServiceConfiguration}; + Error, NodeAddress, ServiceConfiguration, ClusterConfiguration, EncryptionConfiguration}; pub use traits::{KeyServer}; /// Start new key server instance -pub fn start(config: ServiceConfiguration) -> Result, Error> { - let acl_storage = acl_storage::DummyAclStorage::default(); - let key_storage = key_storage::PersistentKeyStorage::new(&config)?; - let key_server = key_server::KeyServerImpl::new(acl_storage, key_storage); +pub fn start(client: Arc, config: ServiceConfiguration) -> Result, Error> { + use std::sync::Arc; + + let acl_storage = Arc::new(acl_storage::OnChainAclStorage::new(client)); + let key_storage = Arc::new(key_storage::PersistentKeyStorage::new(&config)?); + let key_server = key_server::KeyServerImpl::new(&config.cluster_config, acl_storage, key_storage)?; let listener = http_listener::KeyServerHttpListener::start(config, key_server)?; Ok(Box::new(listener)) } diff --git a/secret_store/src/serialization.rs 
b/secret_store/src/serialization.rs new file mode 100644 index 000000000..0d0e904a7 --- /dev/null +++ b/secret_store/src/serialization.rs @@ -0,0 +1,260 @@ +// Copyright 2015-2017 Parity Technologies (UK) Ltd. +// This file is part of Parity. + +// Parity is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Parity is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Parity. If not, see . + +use std::fmt; +use std::cmp::{Ord, PartialOrd, Ordering}; +use std::ops::Deref; +use rustc_serialize::hex::ToHex; +use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::de::{Visitor, Error as SerdeError}; +use ethkey::{Public, Secret, Signature}; +use util::H256; + +#[derive(Clone, Debug)] +/// Serializable Signature. 
+pub struct SerializableSignature(Signature); + +impl From for SerializableSignature where Signature: From { + fn from(s: T) -> SerializableSignature { + SerializableSignature(s.into()) + } +} + +impl Into for SerializableSignature { + fn into(self) -> Signature { + self.0 + } +} + +impl Deref for SerializableSignature { + type Target = Signature; + + fn deref(&self) -> &Signature { + &self.0 + } +} + +impl Serialize for SerializableSignature { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableSignature { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableSignature; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded Signature") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializableSignature(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable H256. 
+pub struct SerializableH256(H256); + +impl From for SerializableH256 where H256: From { + fn from(s: T) -> SerializableH256 { + SerializableH256(s.into()) + } +} + +impl Into for SerializableH256 { + fn into(self) -> H256 { + self.0 + } +} + +impl Deref for SerializableH256 { + type Target = H256; + + fn deref(&self) -> &H256 { + &self.0 + } +} + +impl Serialize for SerializableH256 { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableH256 { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableH256; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded H256") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializableH256(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable EC scalar/secret key. 
+pub struct SerializableSecret(Secret); + +impl From for SerializableSecret where Secret: From { + fn from(s: T) -> SerializableSecret { + SerializableSecret(s.into()) + } +} + +impl Into for SerializableSecret { + fn into(self) -> Secret { + self.0 + } +} + +impl Deref for SerializableSecret { + type Target = Secret; + + fn deref(&self) -> &Secret { + &self.0 + } +} + +impl Serialize for SerializableSecret { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializableSecret { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializableSecret; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded EC scalar") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializableSecret(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} + +#[derive(Clone, Debug)] +/// Serializable EC point/public key. 
+pub struct SerializablePublic(Public); + +impl From for SerializablePublic where Public: From { + fn from(p: T) -> SerializablePublic { + SerializablePublic(p.into()) + } +} + +impl Into for SerializablePublic { + fn into(self) -> Public { + self.0 + } +} + +impl Deref for SerializablePublic { + type Target = Public; + + fn deref(&self) -> &Public { + &self.0 + } +} + +impl Eq for SerializablePublic { } + +impl PartialEq for SerializablePublic { + fn eq(&self, other: &SerializablePublic) -> bool { + self.0.eq(&other.0) + } +} + +impl Ord for SerializablePublic { + fn cmp(&self, other: &SerializablePublic) -> Ordering { + self.0.cmp(&other.0) + } +} + +impl PartialOrd for SerializablePublic { + fn partial_cmp(&self, other: &SerializablePublic) -> Option { + self.0.partial_cmp(&other.0) + } +} + +impl Serialize for SerializablePublic { + fn serialize(&self, serializer: S) -> Result where S: Serializer { + serializer.serialize_str(&(*self.0).to_hex()) + } +} + +impl Deserialize for SerializablePublic { + fn deserialize(deserializer: D) -> Result where D: Deserializer { + struct HashVisitor; + + impl Visitor for HashVisitor { + type Value = SerializablePublic; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + write!(formatter, "a hex-encoded EC point") + } + + fn visit_str(self, value: &str) -> Result where E: SerdeError { + value.parse().map(|s| SerializablePublic(s)).map_err(SerdeError::custom) + } + + fn visit_string(self, value: String) -> Result where E: SerdeError { + self.visit_str(value.as_ref()) + } + } + + deserializer.deserialize(HashVisitor) + } +} diff --git a/secret_store/src/traits.rs b/secret_store/src/traits.rs index 9a68e9c4d..1a407e5c7 100644 --- a/secret_store/src/traits.rs +++ b/secret_store/src/traits.rs @@ -19,6 +19,8 @@ use types::all::{Error, RequestSignature, DocumentAddress, DocumentEncryptedKey} #[ipc(client_ident="RemoteKeyServer")] /// Secret store key server pub trait KeyServer: Send + Sync { + /// Generate 
encryption key for given document. + fn generate_document_key(&self, signature: &RequestSignature, document: &DocumentAddress, threshold: usize) -> Result; /// Request encryption key of given document for given requestor fn document_key(&self, signature: &RequestSignature, document: &DocumentAddress) -> Result; } diff --git a/secret_store/src/types/all.rs b/secret_store/src/types/all.rs index f318e6543..23e07e994 100644 --- a/secret_store/src/types/all.rs +++ b/secret_store/src/types/all.rs @@ -15,10 +15,14 @@ // along with Parity. If not, see . use std::fmt; +use std::collections::BTreeMap; use ethkey; use util; +use key_server_cluster; +/// Node id. +pub type NodeId = ethkey::Public; /// Document address type. pub type DocumentAddress = util::H256; /// Document key type. @@ -46,16 +50,53 @@ pub enum Error { Internal(String), } +#[derive(Debug)] +#[binary] +/// Secret store configuration +pub struct NodeAddress { + /// IP address. + pub address: String, + /// IP port. + pub port: u16, +} + #[derive(Debug)] #[binary] /// Secret store configuration pub struct ServiceConfiguration { - /// Interface to listen to - pub listener_addr: String, - /// Port to listen to - pub listener_port: u16, + /// HTTP listener address. + pub listener_address: NodeAddress, /// Data directory path for secret store pub data_path: String, + /// Cluster configuration. + pub cluster_config: ClusterConfiguration, +} + +#[derive(Debug)] +#[binary] +/// Key server cluster configuration +pub struct ClusterConfiguration { + /// Number of threads reserved by cluster. + pub threads: usize, + /// Private key this node holds. + pub self_private: Vec, // holds ethkey::Secret + /// This node address. + pub listener_address: NodeAddress, + /// All cluster nodes addresses. + pub nodes: BTreeMap, + /// Allow outbound connections to 'higher' nodes. + /// This is useful for tests, but slower a bit for production. + pub allow_connecting_to_higher_nodes: bool, + /// Encryption session configuration. 
+ pub encryption_config: EncryptionConfiguration, +} + +#[derive(Clone, Debug)] +#[binary] +/// Encryption parameters. +pub struct EncryptionConfiguration { + /// Key check timeout. + pub key_check_timeout_ms: u64, } impl fmt::Display for Error { @@ -70,6 +111,21 @@ impl fmt::Display for Error { } } +impl From for Error { + fn from(err: ethkey::Error) -> Self { + Error::Internal(err.into()) + } +} + +impl From for Error { + fn from(err: key_server_cluster::Error) -> Self { + match err { + key_server_cluster::Error::AccessDenied => Error::AccessDenied, + _ => Error::Internal(err.into()), + } + } +} + impl Into for Error { fn into(self) -> String { format!("{}", self) diff --git a/sync/src/api.rs b/sync/src/api.rs index 4cdc9d37a..3e3234d84 100644 --- a/sync/src/api.rs +++ b/sync/src/api.rs @@ -43,7 +43,7 @@ pub const WARP_SYNC_PROTOCOL_ID: ProtocolId = *b"par"; /// Ethereum sync protocol pub const ETH_PROTOCOL: ProtocolId = *b"eth"; /// Ethereum light protocol -pub const LIGHT_PROTOCOL: ProtocolId = *b"plp"; +pub const LIGHT_PROTOCOL: ProtocolId = *b"pip"; /// Sync configuration #[derive(Debug, Clone, Copy)] @@ -126,7 +126,7 @@ pub struct PeerInfo { /// Eth protocol info. pub eth_info: Option, /// Light protocol info. - pub les_info: Option, + pub pip_info: Option, } /// Ethereum protocol info. @@ -141,10 +141,10 @@ pub struct EthProtocolInfo { pub difficulty: Option, } -/// LES protocol info. +/// PIP protocol info. 
#[derive(Debug)] #[cfg_attr(feature = "ipc", derive(Binary))] -pub struct LesProtocolInfo { +pub struct PipProtocolInfo { /// Protocol version pub version: u32, /// SHA3 of peer best block hash @@ -153,9 +153,9 @@ pub struct LesProtocolInfo { pub difficulty: U256, } -impl From for LesProtocolInfo { +impl From for PipProtocolInfo { fn from(status: light_net::Status) -> Self { - LesProtocolInfo { + PipProtocolInfo { version: status.protocol_version, head: status.head_hash, difficulty: status.head_td, @@ -184,7 +184,7 @@ pub struct EthSync { network: NetworkService, /// Main (eth/par) protocol handler eth_handler: Arc, - /// Light (les) protocol handler + /// Light (pip) protocol handler light_proto: Option>, /// The main subprotocol name subprotocol_name: [u8; 3], @@ -264,7 +264,7 @@ impl SyncProvider for EthSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: eth_sync.peer_info(&peer_id), - les_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), + pip_info: light_proto.as_ref().and_then(|lp| lp.peer_status(&peer_id)).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) @@ -408,13 +408,13 @@ impl ChainNotify for EthSync { } } -/// LES event handler. +/// PIP event handler. /// Simply queues transactions from light client peers. struct TxRelay(Arc); impl LightHandler for TxRelay { fn on_transactions(&self, ctx: &EventContext, relay: &[::ethcore::transaction::UnverifiedTransaction]) { - trace!(target: "les", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); + trace!(target: "pip", "Relaying {} transactions from peer {}", relay.len(), ctx.peer()); self.0.queue_transactions(relay.iter().map(|tx| ::rlp::encode(tx).to_vec()).collect(), ctx.peer()) } } @@ -642,6 +642,9 @@ pub trait LightSyncProvider { /// Get peers information fn peers(&self) -> Vec; + /// Get network id. + fn network_id(&self) -> u64; + /// Get the enode if available. 
fn enode(&self) -> Option; @@ -659,13 +662,17 @@ pub struct LightSyncParams { pub network_id: u64, /// Subprotocol name. pub subprotocol_name: [u8; 3], + /// Other handlers to attach. + pub handlers: Vec>, } /// Service for light synchronization. pub struct LightSync { proto: Arc, + sync: Arc<::light_sync::SyncInfo + Sync + Send>, network: NetworkService, subprotocol_name: [u8; 3], + network_id: u64, } impl LightSync { @@ -676,7 +683,7 @@ impl LightSync { use light_sync::LightSync as SyncHandler; // initialize light protocol handler and attach sync module. - let light_proto = { + let (sync, light_proto) = { let light_params = LightParams { network_id: params.network_id, flow_params: Default::default(), // or `None`? @@ -689,18 +696,24 @@ impl LightSync { }; let mut light_proto = LightProtocol::new(params.client.clone(), light_params); - let sync_handler = try!(SyncHandler::new(params.client.clone())); - light_proto.add_handler(Arc::new(sync_handler)); + let sync_handler = Arc::new(try!(SyncHandler::new(params.client.clone()))); + light_proto.add_handler(sync_handler.clone()); - Arc::new(light_proto) + for handler in params.handlers { + light_proto.add_handler(handler); + } + + (sync_handler, Arc::new(light_proto)) }; let service = try!(NetworkService::new(params.network_config)); Ok(LightSync { proto: light_proto, + sync: sync, network: service, subprotocol_name: params.subprotocol_name, + network_id: params.network_id, }) } @@ -715,6 +728,12 @@ impl LightSync { } } +impl ::std::ops::Deref for LightSync { + type Target = ::light_sync::SyncInfo; + + fn deref(&self) -> &Self::Target { &*self.sync } +} + impl ManageNetwork for LightSync { fn accept_unreserved_peers(&self) { self.network.set_non_reserved_mode(NonReservedPeerMode::Accept); @@ -786,7 +805,7 @@ impl LightSyncProvider for LightSync { remote_address: session_info.remote_address, local_address: session_info.local_address, eth_info: None, - les_info: self.proto.peer_status(&peer_id).map(Into::into), + 
pip_info: self.proto.peer_status(&peer_id).map(Into::into), }) }).collect() }).unwrap_or_else(Vec::new) @@ -796,6 +815,10 @@ impl LightSyncProvider for LightSync { self.network.external_url() } + fn network_id(&self) -> u64 { + self.network_id + } + fn transactions_stats(&self) -> BTreeMap { Default::default() // TODO } diff --git a/sync/src/light_sync/mod.rs b/sync/src/light_sync/mod.rs index 4590103e7..2bc179a21 100644 --- a/sync/src/light_sync/mod.rs +++ b/sync/src/light_sync/mod.rs @@ -32,7 +32,7 @@ //! announced blocks. //! - On bad block/response, punish peer and reset. -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::mem; use std::sync::Arc; @@ -150,6 +150,19 @@ impl AncestorSearch { } } + fn requests_abandoned(self, req_ids: &[ReqId]) -> AncestorSearch { + match self { + AncestorSearch::Awaiting(id, start, req) => { + if req_ids.iter().find(|&x| x == &id).is_some() { + AncestorSearch::Queued(start) + } else { + AncestorSearch::Awaiting(id, start, req) + } + } + other => other, + } + } + fn dispatch_request(self, mut dispatcher: F) -> AncestorSearch where F: FnMut(HeadersRequest) -> Option { @@ -206,8 +219,10 @@ impl<'a> ResponseContext for ResponseCtx<'a> { /// Light client synchronization manager. See module docs for more details. pub struct LightSync { + start_block_number: u64, best_seen: Mutex>, // best seen block on the network. peers: RwLock>>, // peers which are relevant to synchronization. + pending_reqs: Mutex>, // requests from this handler. 
client: Arc, rng: Mutex, state: Mutex, @@ -270,7 +285,8 @@ impl Handler for LightSync { *state = match mem::replace(&mut *state, SyncState::Idle) { SyncState::Idle => SyncState::Idle, - SyncState::AncestorSearch(search) => SyncState::AncestorSearch(search), + SyncState::AncestorSearch(search) => + SyncState::AncestorSearch(search.requests_abandoned(unfulfilled)), SyncState::Rounds(round) => SyncState::Rounds(round.requests_abandoned(unfulfilled)), }; } @@ -320,6 +336,10 @@ impl Handler for LightSync { return } + if !self.pending_reqs.lock().remove(&req_id) { + return + } + let headers = match responses.get(0) { Some(&request::Response::Headers(ref response)) => &response.headers[..], Some(_) => { @@ -418,8 +438,10 @@ impl LightSync { let best_td = chain_info.pending_total_difficulty; let sync_target = match *self.best_seen.lock() { Some(ref target) if target.head_td > best_td => (target.head_num, target.head_hash), - _ => { - trace!(target: "sync", "No target to sync to."); + ref other => { + let network_score = other.as_ref().map(|target| target.head_td); + trace!(target: "sync", "No target to sync to. Network score: {:?}, Local score: {:?}", + network_score, best_td); *state = SyncState::Idle; return; } @@ -493,6 +515,7 @@ impl LightSync { for peer in &peer_ids { match ctx.request_from(*peer, request.clone()) { Ok(id) => { + self.pending_reqs.lock().insert(id.clone()); return Some(id) } Err(NetError::NoCredits) => {} @@ -523,11 +546,48 @@ impl LightSync { /// so it can act on events. pub fn new(client: Arc) -> Result { Ok(LightSync { + start_block_number: client.as_light_client().chain_info().best_block_number, best_seen: Mutex::new(None), peers: RwLock::new(HashMap::new()), + pending_reqs: Mutex::new(HashSet::new()), client: client, rng: Mutex::new(try!(OsRng::new())), state: Mutex::new(SyncState::Idle), }) } } + +/// Trait for erasing the type of a light sync object and exposing read-only methods. 
+pub trait SyncInfo { + /// Get the highest block advertised on the network. + fn highest_block(&self) -> Option; + + /// Get the block number at the time of sync start. + fn start_block(&self) -> u64; + + /// Whether major sync is underway. + fn is_major_importing(&self) -> bool; +} + +impl SyncInfo for LightSync { + fn highest_block(&self) -> Option { + self.best_seen.lock().as_ref().map(|x| x.head_num) + } + + fn start_block(&self) -> u64 { + self.start_block_number + } + + fn is_major_importing(&self) -> bool { + const EMPTY_QUEUE: usize = 3; + + if self.client.as_light_client().queue_info().unverified_queue_size > EMPTY_QUEUE { + return true; + } + + match *self.state.lock() { + SyncState::Idle => false, + _ => true, + } + } +} diff --git a/sync/src/light_sync/tests/test_net.rs b/sync/src/light_sync/tests/test_net.rs index 898f8766d..2319e8d35 100644 --- a/sync/src/light_sync/tests/test_net.rs +++ b/sync/src/light_sync/tests/test_net.rs @@ -207,7 +207,7 @@ impl TestNet { pub fn light(n_light: usize, n_full: usize) -> Self { let mut peers = Vec::with_capacity(n_light + n_full); for _ in 0..n_light { - let client = LightClient::new(Default::default(), &Spec::new_test(), IoChannel::disconnected()); + let client = LightClient::in_memory(Default::default(), &Spec::new_test(), IoChannel::disconnected()); peers.push(Arc::new(Peer::new_light(Arc::new(client)))) }